diff --git a/.Rbuildignore b/.Rbuildignore index b90fe6b21..7109ad47f 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -1,8 +1,21 @@ ^.*\.Rproj$ +^.travis.yml +^README.md +^\.Rprofile$ ^\.Rproj\.user$ -^src/tbb/build/lib_.*$ +^appveyor\.yml$ +^check$ +^doc$ +^gen$ +^libs$ ^inst/lib$ -^.travis.yml -^tests - - +^inst/libs$ +^revdep$ +^src/.*\.o$ +^src/tbb/build$ +^tags$ +^tests/testthat/pkg/RcppParallelTest/src/.*\.dll$ +^tests/testthat/pkg/RcppParallelTest/src/.*\.s?o$ +^tools/tbb$ +^\.github$ +^patches \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..1b8afb741 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +Makevars text eol=lf diff --git a/.github/.gitignore b/.github/.gitignore new file mode 100644 index 000000000..2d19fc766 --- /dev/null +++ b/.github/.gitignore @@ -0,0 +1 @@ +*.html diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml new file mode 100644 index 000000000..897585d50 --- /dev/null +++ b/.github/workflows/R-CMD-check.yaml @@ -0,0 +1,39 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +name: R-CMD-check + +jobs: + R-CMD-check: + runs-on: ${{ matrix.config.os }} + + name: ${{ matrix.config.os }} (${{ matrix.config.r }}) + + strategy: + fail-fast: false + matrix: + config: + - {os: macOS-latest, r: 'release'} + - {os: ubuntu-latest, r: 'release'} + - {os: windows-latest, r: 'release'} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + R_KEEP_PKG_SOURCE: yes + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::rcmdcheck + needs: check + + - uses: r-lib/actions/check-r-package@v2 diff --git a/.gitignore b/.gitignore index 728e389b9..240c3f21c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,20 @@ +.Rprofile .Rproj.user .Rhistory .RData .DS_Store +check +inst/doc +inst/lib +inst/libs +libs +revdep +src-i386 +src-x64 +tbb.log + +src/tbb/build +src/tbb/build-tbb + +R/tbb-autodetected.R + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 7471a4f24..000000000 --- a/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: cpp - -before_install: - - curl -OL http://raw.github.com/craigcitro/r-travis/master/scripts/travis-tool.sh - - chmod 755 ./travis-tool.sh - - ./travis-tool.sh bootstrap - - ./travis-tool.sh github_package hadley/testthat - - if [ "$RCPP" = "Rcpp" ]; then ./travis-tool.sh github_package RcppCore/Rcpp; fi - - if [ "$RCPP" = "Rcpp11" ]; then ./travis-tool.sh github_package Rcpp11/Rcpp11; ./travis-tool.sh github_package Rcpp11/attributes; fi - -script: - - R CMD INSTALL . - - cd tests - - Rscript testthat.R - -notifications: - email: - on_success: change - on_failure: change - -env: - - RCPP=Rcpp - - RCPP=Rcpp11 diff --git a/DESCRIPTION b/DESCRIPTION index c4684ac3b..d51de8a09 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,17 +1,39 @@ Package: RcppParallel Type: Package -Title: Parallel Programming Tools for Rcpp -Version: 4.3.3 -Date: 2014-10-20 -Author: JJ Allaire; Romain Francois; Intel, Inc.; Marcus Geelnard -Maintainer: JJ Allaire -Description: High level functions for doing parallel programming with Rcpp. 
- For example, the parallelFor function can be used to convert the work of - a standard serial "for" loop into a parallel one and the parallelReduce - function can be used for accumulating aggregate or other values. -Suggests: Rcpp, testthat -SystemRequirements: GNU make -License: GPL-2 -Collate: - 'options.R' - +Title: Parallel Programming Tools for 'Rcpp' +Version: 5.1.10.9000 +Authors@R: c( + person("Kevin", "Ushey", role = c("aut", "cre"), email = "kevin@rstudio.com", + comment = c(ORCID = "0000-0003-2880-7407")), + person("JJ", "Allaire", role = c("aut"), email = "jj@rstudio.com"), + person("Romain", "Francois", role = c("aut", "cph")), + person("Gregory", "Vandenbrouck", role = "aut"), + person("Marcus", "Geelnard", role = c("aut", "cph"), + comment = "TinyThread library, https://tinythreadpp.bitsnbites.eu/"), + person("Hamada S.", "Badr", email = "badr@jhu.edu", role = c("ctb"), + comment = c(ORCID = "0000-0002-9808-2344")), + person("Dirk", "Eddelbuettel", role = c("aut"), email = "edd@debian.org", + comment = c(ORCID = "0000-0001-6419-907X")), + person(family = "Intel", role = c("aut", "cph"), comment = "oneTBB library"), + person(family = "UXL Foundation", role = c("aut", "cph"), comment = "oneTBB library"), + person(family = "Microsoft", role = "cph"), + person(family = "Posit, PBC", role = "cph") + ) +Description: High level functions for parallel programming with 'Rcpp'. + For example, the 'parallelFor()' function can be used to convert the work of + a standard serial "for" loop into a parallel one and the 'parallelReduce()' + function can be used for accumulating aggregate or other values. +Depends: R (>= 3.6.0) +Suggests: + Rcpp, + RUnit, + knitr, + rmarkdown +Roxygen: list(markdown = TRUE) +SystemRequirements: CMake (>= 3.5) +License: GPL (>= 3) +URL: https://rcppcore.github.io/RcppParallel/, https://github.com/RcppCore/RcppParallel +BugReports: https://github.com/RcppCore/RcppParallel/issues +Biarch: TRUE +RoxygenNote: 7.3.2 +Encoding: UTF-8 diff --git a/NAMESPACE b/NAMESPACE index 0cc39947a..94da28d51 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,3 +1,9 @@ -export(setThreadOptions) -export(defaultNumThreads) +# Generated by roxygen2: do not edit by hand +export(CxxFlags) +export(LdFlags) +export(RcppParallel.package.skeleton) +export(RcppParallelLibs) +export(defaultNumThreads) +export(setThreadOptions) +export(tbbLibraryPath) diff --git a/NEWS b/NEWS deleted file mode 100644 index 72129cc89..000000000 --- a/NEWS +++ /dev/null @@ -1,10 +0,0 @@ -RcppParallel 4.3.1 ------------------------------------------------------------------------- - -* Update to TBB 4.3 (fixes clang compilation error in platform.h) -* Forward CXX to TBB Makefile - -RcppParallel 4.2.5 ------------------------------------------------------------------------- - -* Initial release diff --git a/NEWS.md b/NEWS.md new file mode 100644 index 000000000..80cdc71e8 --- /dev/null +++ b/NEWS.md @@ -0,0 +1,203 @@ + +## RcppParallel 6.0.0 (UNRELEASED) + +* RcppParallel no longer includes tbb headers as part of the RcppParallel/TBB.h + header, and instead only exposes its TBB-specific APIs for parallel work. + +* RcppParallel now bundles oneTBB 2022.0.0. Note that the TBB ABI has changed; + packages which depend on RcppParallel may need to be rebuilt. + +* On Windows, RcppParallel now uses the copy of TBB provided by Rtools. + If TBB is not available, RcppParallel will use only the fallback 'tinythread' + implementation. 
In practice, this implies that RcppParallel will now only
+  provide a TBB backend with R (>= 4.2.0).
+
+## RcppParallel 5.1.11
+
+* Compatibility fixes for LLVM 21.
+
+## RcppParallel 5.1.10
+
+* Fixed an issue where packages linking to RcppParallel could inadvertently
+  depend on internals of the TBB library available during compilation, even
+  if the package did not explicitly use TBB itself.
+
+## RcppParallel 5.1.9
+
+* RcppParallel no longer passes `-rpath` when building / linking on Windows.
+  This fixes build issues when building RcppParallel with the LLVM
+  linker on Windows. (@kalibera)
+
+## RcppParallel 5.1.8
+
+* RcppParallel now explicitly links to the bundled copy of TBB on macOS. (#206; @jeroen)
+
+## RcppParallel 5.1.7
+
+* Remove deprecated `std::iterator`. (#192; @Enchufa2)
+
+## RcppParallel 5.1.6
+
+* Patch for TBB to allow compilation with gcc-13.
+
+* Fixed a memory leak that could occur when using TinyThread on POSIX systems.
+  (#185; @dipertix and @kevinushey)
+
+## RcppParallel 5.1.5
+
+* Patches to ensure compatibility with the R 4.2.0 UCRT toolchain on Windows,
+  adapted from patches contributed by Tomas Kalibera.
+
+* Fixed an issue where setting `TBB_ROOT` (or `TBB_INC` / `TBB_LIB`) would
+  copy rather than symlink the associated libraries. (#161)
+
+## RcppParallel 5.1.4
+
+* Fixed an issue causing client packages of RcppParallel to fail to compile
+  on Solaris.
+
+## RcppParallel 5.1.3
+
+* Fixed an issue that prevented compilation of RcppParallel with R (< 4.0.0)
+  on Windows.
+
+* The `RCPP_PARALLEL_USE_TBBMALLOC_PROXY` environment variable can now be used
+  to control whether RcppParallel loads the `tbbmalloc_proxy` library on load.
+  See https://www.threadingbuildingblocks.org/docs/help/tbb_userguide/Automically_Replacing_malloc.html
+  for more information.
+
+## RcppParallel 5.1.2
+
+* `RcppParallel` gains the `tbbLibraryPath()` function, which can be used to
+  query the location of the TBB libraries that `RcppParallel` has been
+  configured to use. This may be useful for R packages which wish to explicitly
+  use, or link to, these libraries.
+
+## RcppParallel 5.1.1
+
+* Updated bundled version of TBB (Intel TBB 2019 Update 8).
+
+* RcppParallel can now be configured to use an external copy of TBB, via the
+  `TBB_LIB` and `TBB_INC` environment variables. These should be set to the
+  directories containing the TBB libraries and headers, respectively.
+
+* Added support for the latest versions of Intel oneAPI TBB / oneTBB.
+
+* Updated TBB functionality for the new interface.
+
+* RcppParallel now falls back to building TBB from local source code.
+
+* Backward TBB compatibility based on `__TBB_tbb_stddef_H`.
+
+* Resolved conflicts between system and local TBB headers.
+
+* Fixed URLs, used HTTPS, and minor cleanups.
+
+* Updated package DESCRIPTION and bumped version.
+
+* `setThreadOptions(...)` can again be called multiple times per session.
+  The requested number of threads will be used for invocations to `parallelFor()`
+  and `parallelReduce()` that don't explicitly request a specific number of threads.
+
+* The `parallelFor()` and `parallelReduce()` functions gain the `numThreads`
+  argument, allowing one to limit the number of threads used for a
+  particular computation.
+
+## RcppParallel 5.0.3
+
+* Fixed compilation on macOS M1 machines.
+
+## RcppParallel 5.0.2
+
+* `setThreadOptions(...)` can now only be called once per session, to avoid
+  segfaults when compiling RcppParallel / TBB with gcc 10.1.
Subsequent + calls to `setThreadOptions(...)` are ignored. + +## RcppParallel 5.0.1 + +* Fixed compilation issue on OpenSUSE Tumbleweed with -flto=auto + +* Fixed compilation when CPPFLAGS = -I/usr/local/include and a version + of libtbb is installed there + +## RcppParallel 5.0.0 + +* RcppParallel backend can now be customized with RCPP_PARALLEL_BACKEND + environment variable (supported values are 'tbb' and 'tinythread') + +* Fixed issue when compiling RcppParallel on macOS Catalina + +* Fixed issue when compiling RcppParallel with Rtools40 + +## RcppParallel 4.4.4 + +* Fixed an issue when compiling RcppParallel with clang-9 on Fedora + +## RcppParallel 4.4.3 + +* Suppress gcc-9 warnings related -Wclass-memaccess + +* Added TBB headers for serial TBB operations (#90, @mikejiang) + +* Fixed row iterator constructor (#87, @wtianyi) + +* Fixed compilation on FreeBSD + +## RcppParallel 4.4.2 + +* Suppress gcc-8 warnings related to -Wclass-memaccess + +* Use PKG_CXXFLAGS rather than PKG_CPPFLAGS + +* Remove unused dependency on the BH package + +## RcppParallel 4.4.1 + +* Ensure user-specified R configuration passed to TBB + +* Work around warnings emitted by gcc 8 + +## RcppParallel 4.4.0 + +* Respect user-defined compiler settings (e.g. from ~/.R/Makevars). + +* Remove TBB's attempts to suppress compiler diagnostics. + +* Allow setting the number of threads to use via RCPP_PARALLEL_NUM_THREADS + environment variable. + +* Update to TBB 2018 Update 1. + +* Add native registration of compiled functions. + +## RcppParallel 4.3.20 + +* Add support for Rtools 3.3 w/ GCC 4.9 + +## RcppParallel 4.3.14 + +* Add support for TBB on Solaris + +* Fix failure to compile on OS X Snow Leopard R toolchain + +* Add const and non-const operator[] for RMatrix class + +## RcppParallel 4.3.8 + +* Add tbbmalloc library + +* Correctly pass clang to TBB configure when R is using clang + +## RcppParallel 4.3.6 + +* Support for TBB on Windows + +## RcppParallel 4.3.3 + +* Update to TBB 4.3 (fixes clang compilation error in platform.h) + +* Forward CXX to TBB Makefile + +## RcppParallel 4.2.5 + +* Initial release diff --git a/R/RcppParallel-package.R b/R/RcppParallel-package.R new file mode 100644 index 000000000..79081f9fb --- /dev/null +++ b/R/RcppParallel-package.R @@ -0,0 +1,24 @@ + +#' Parallel programming tools for Rcpp +#' +#' High level functions for doing parallel programming with Rcpp. For example, +#' the `parallelFor()` function can be used to convert the work of a +#' standard serial "for" loop into a parallel one, and the `parallelReduce()` +#' function can be used for accumulating aggregate or other values. +#' +#' The high level interface enables safe and robust parallel programming +#' without direct manipulation of operating system threads. On Windows, macOS, +#' and Linux systems the underlying implementation is based on Intel TBB +#' (Threading Building Blocks). On other platforms, a less-performant fallback +#' implementation based on the TinyThread library is used. 
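+#'
+#' The active backend can also be selected at runtime via the
+#' `RCPP_PARALLEL_BACKEND` environment variable (supported values are `"tbb"`
+#' and `"tinythread"`). A minimal sketch:
+#'
+#' ```
+#' Sys.setenv(RCPP_PARALLEL_BACKEND = "tinythread")
+#' # parallel C++ code invoked after this point uses the fallback backend
+#' ```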
+#' +#' For additional documentation, see the package website at: +#' +#' +#' +#' +#' @name RcppParallel-package +#' @docType package +#' @aliases RcppParallel RcppParallel-package +#' @keywords package parallel +NULL diff --git a/R/aaa.R b/R/aaa.R new file mode 100644 index 000000000..568a2aac9 --- /dev/null +++ b/R/aaa.R @@ -0,0 +1,8 @@ + +# stubs that get overridden via configure script +TBB_ENABLED <- TRUE +TBB_LIB <- "" +TBB_INC <- "" + +TBB_NAME <- "tbb" +TBB_MALLOC_NAME <- "tbbmalloc" \ No newline at end of file diff --git a/R/flags.R b/R/flags.R new file mode 100644 index 000000000..6ae1f8280 --- /dev/null +++ b/R/flags.R @@ -0,0 +1,53 @@ + +#' Compilation flags for RcppParallel +#' +#' Output the compiler or linker flags required to build against RcppParallel. +#' +#' These functions are typically called from `Makevars` as follows: +#' +#' ``` +#' PKG_LIBS += $(shell "${R_HOME}/bin/Rscript" -e "RcppParallel::LdFlags()") +#' ``` +#' +#' On Windows, the flags ensure that the package links with the built-in TBB +#' library. On Linux and macOS, the output is empty, because TBB is loaded +#' dynamically on load by `RcppParallel`. +#' +#' \R packages using RcppParallel should also add the following to their +#' `NAMESPACE` file: +#' +#' ``` +#' importFrom(RcppParallel, RcppParallelLibs) +#' ``` +#' +#' This is necessary to ensure that \pkg{RcppParallel} (and so, TBB) is loaded +#' and available. +#' +#' @name flags +#' @rdname flags +#' @aliases RcppParallelLibs LdFlags CxxFlags +#' +#' @return Returns \code{NULL}, invisibly. These functions are called for +#' their side effects (writing the associated flags to stdout). +#' +NULL + + +#' @name flags +#' @export +CxxFlags <- function() { + cat(tbbCxxFlags()) +} + +#' @name flags +#' @export +LdFlags <- function() { + cat(tbbLdFlags()) +} + +#' @name flags +#' @export +RcppParallelLibs <- function() { + LdFlags() +} + diff --git a/R/options.R b/R/options.R index f31be7d64..9b54a37c5 100644 --- a/R/options.R +++ b/R/options.R @@ -1,42 +1,48 @@ - -dllInfo <- NULL - -.onLoad <- function(libname, pkgname) { - - # load tbb on supported platforms - sysname <- Sys.info()['sysname'] - tbbSupported <- list( - "Darwin" = "libtbb.dylib", "Linux" = "libtbb.so.2", "Windows" = "tbb.dll" - ) - if (sysname %in% names(tbbSupported)) { - dll <- system.file(paste("lib/", tbbSupported[[sysname]], sep = ""), package = "RcppParallel") - if (!file.exists(dll)) { - warning(paste("TBB library", dll, "not found.")) - } else { - dllInfo <<- dyn.load(dll, local = FALSE, now = TRUE) - } - } - - # load the package library - library.dynam("RcppParallel", pkgname, libname) - - # set default thread options - setThreadOptions() -} - -.onUnload <- function(libpath) { - - # unload the package library - library.dynam.unload("RcppParallel", libpath) - - # unload tbb if we loaded it - if (!is.null(dllInfo)) - dyn.unload(dllInfo[["path"]]) -} - -setThreadOptions <- function(numThreads = "auto", stackSize = "auto") { - +#' Thread options for RcppParallel +#' +#' Set thread options (number of threads to use for task scheduling and stack +#' size per-thread) for RcppParallel. +#' +#' RcppParallel is automatically initialized with the default number of threads +#' and thread stack size when it loads. You can call `setThreadOptions()` at +#' any time to change the defaults. 
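+#'
+#' In the current implementation (see below), `setThreadOptions()` records the
+#' requested values in the `RCPP_PARALLEL_NUM_THREADS` and
+#' `RCPP_PARALLEL_STACK_SIZE` environment variables, which the C++ runtime
+#' consults when scheduling work. A minimal sketch:
+#'
+#' ```
+#' setThreadOptions(numThreads = 2)
+#' Sys.getenv("RCPP_PARALLEL_NUM_THREADS")  # "2"
+#' setThreadOptions()                       # restore "auto"
+#' ```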
+#'
+#' The `parallelFor()` and `parallelReduce()` functions also accept
+#' `numThreads` as an argument, if you'd like to control the number of threads
+#' specifically to be made available for a particular parallel function call.
+#' Note that this value is advisory, and TBB may choose a smaller number of
+#' threads if the number of requested threads cannot be honored on the system.
+#'
+#' @aliases setThreadOptions defaultNumThreads
+#'
+#' @param numThreads
+#' Number of threads to use for task scheduling. Call `defaultNumThreads()`
+#' to determine the default value used for "auto".
+#'
+#' @param stackSize
+#' Stack size (in bytes) to use for worker threads. The
+#' default used for "auto" is 2MB on 32-bit systems and 4MB on 64-bit systems
+#' (note that this parameter has no effect on Windows).
+#'
+#' @return
+#' `defaultNumThreads()` returns the default number of threads used by
+#' RcppParallel, if another value isn't specified either via
+#' `setThreadOptions()` or explicitly in calls to `parallelFor()` and
+#' `parallelReduce()`.
+#'
+#' @examples
+#'
+#' \dontrun{
+#' library(RcppParallel)
+#' setThreadOptions(numThreads = 4)
+#' defaultNumThreads()
+#' }
+#'
+#' @export setThreadOptions
+setThreadOptions <- function(numThreads = "auto",
+                             stackSize = "auto")
+{
   # validate and resolve numThreads
   if (identical(numThreads, "auto"))
     numThreads <- -1L
@@ -47,23 +53,33 @@ setThreadOptions <- function(numThreads = "auto", stackSize = "auto") {
   # validate and resolve stackSize
   if (identical(stackSize, "auto"))
-    stackSize = 0L
+    stackSize <- 0L
   else if (!is.numeric(stackSize))
     stop("stackSize must be an integer")
   else
     stackSize <- as.integer(stackSize)
   
-  # Call setThreadOptions if using tbb
-  if (!is.null(dllInfo)) {
-    invisible(.Call("setThreadOptions", numThreads, stackSize,
-                    PACKAGE = "RcppParallel"))
-  }
-  
-  if (numThreads != -1)
+  # set RCPP_PARALLEL_NUM_THREADS
+  if (numThreads == -1L)
+    Sys.unsetenv("RCPP_PARALLEL_NUM_THREADS")
+  else
     Sys.setenv(RCPP_PARALLEL_NUM_THREADS = numThreads)
+
+  # set RCPP_PARALLEL_STACK_SIZE
+  if (stackSize == 0L)
+    Sys.unsetenv("RCPP_PARALLEL_STACK_SIZE")
+  else
+    Sys.setenv(RCPP_PARALLEL_STACK_SIZE = stackSize)
 }
 
+#' @rdname setThreadOptions
+#' @export
 defaultNumThreads <- function() {
   .Call("defaultNumThreads", PACKAGE = "RcppParallel")
 }
 
+isUsingTbb <- function() {
+  backend <- Sys.getenv("RCPP_PARALLEL_BACKEND", "tbb")
+  identical(backend, "tbb")
+}
+
diff --git a/R/platform.R b/R/platform.R
new file mode 100644
index 000000000..fe7569ff1
--- /dev/null
+++ b/R/platform.R
@@ -0,0 +1,26 @@
+
+is_windows <- function() {
+  .Platform$OS.type == "windows"
+}
+
+is_mac <- function() {
+  Sys.info()[["sysname"]] == "Darwin"
+}
+
+is_unix <- function() {
+  .Platform$OS.type == "unix"
+}
+
+is_solaris <- function() {
+  Sys.info()[["sysname"]] == "SunOS"
+}
+
+is_sparc <- function() {
+  info <- Sys.info()
+  all(
+    info[["sysname"]] == "SunOS",
+    info[["machine"]] != "i86pc"
+  )
+}
+
+
diff --git a/R/plugin.R b/R/plugin.R
new file mode 100644
index 000000000..78dc2aab2
--- /dev/null
+++ b/R/plugin.R
@@ -0,0 +1,15 @@
+
+# Inline plugin used by sourceCpp.
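+#
+# With this plugin, a standalone C++ file needs only the Rcpp::depends
+# attribute; for example (hypothetical file name):
+#
+#   // [[Rcpp::depends(RcppParallel)]]
+#   #include <RcppParallel.h>
+#
+# and can then be compiled from R with Rcpp::sourceCpp("example.cpp").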
+inlineCxxPlugin <- function() {
+
+  list(
+    env = list(
+      PKG_CXXFLAGS = tbbCxxFlags(),
+      PKG_LIBS = tbbLdFlags()
+    ),
+    includes = "#include <RcppParallel.h>",
+    LinkingTo = "RcppParallel",
+    body = identity,
+    Depends = "RcppParallel"
+  )
+}
diff --git a/R/skeleton.R b/R/skeleton.R
new file mode 100644
index 000000000..4b12b711d
--- /dev/null
+++ b/R/skeleton.R
@@ -0,0 +1,150 @@
+
+#' Create a skeleton for a new package depending on RcppParallel
+#'
+#' \code{RcppParallel.package.skeleton} automates the creation of a new source
+#' package that intends to use features of RcppParallel.
+#'
+#' It is based on the \link[utils]{package.skeleton} function, which it executes
+#' first.
+#'
+#' In addition to \link[Rcpp]{Rcpp.package.skeleton}:
+#'
+#' The \samp{DESCRIPTION} file gains an Imports line requesting that the
+#' package depend on RcppParallel and a LinkingTo line so that the package
+#' finds the RcppParallel header files.
+#'
+#' The \samp{NAMESPACE} gains a \code{useDynLib} directive as well as an
+#' \code{importFrom(RcppParallel, RcppParallelLibs)} directive to ensure that
+#' RcppParallel is loaded.
+#'
+#' The \samp{src} directory is created if it does not exist, and a
+#' \samp{Makevars} file is added setting the \samp{PKG_LIBS} variable to
+#' accommodate the necessary flags to link with the RcppParallel library.
+#'
+#' If the \code{example_code} argument is set to \code{TRUE}, an example file
+#' \samp{vector-sum.cpp} is created in the \samp{src} directory.
+#' \code{Rcpp::compileAttributes()} is then called to generate
+#' \code{src/RcppExports.cpp} and \code{R/RcppExports.R}. These files are given
+#' as an example and should eventually be removed from the generated package.
+#'
+#' @param name The name of your R package.
+#' @param example_code If \code{TRUE}, example C++ code using RcppParallel is
+#' added to the package.
+#' @param ... Optional arguments passed to \link[Rcpp]{Rcpp.package.skeleton}.
+#' @return Nothing, used for its side effects.
+#' @seealso \link[utils]{package.skeleton}
+#' @references Read the \emph{Writing R Extensions} manual for more details.
+#'
+#' Once you have created a \emph{source} package you need to install it: see
+#' the \emph{R Installation and Administration} manual, \code{\link{INSTALL}}
+#' and \code{\link{install.packages}}.
+#' @keywords programming
+#' @examples
+#'
+#' \dontrun{
+#' # simple package
+#' RcppParallel.package.skeleton("foobar")
+#' }
+#'
+#' @export RcppParallel.package.skeleton
+RcppParallel.package.skeleton <- function(name = "anRpackage",
+                                          example_code = TRUE,
+                                          ...)
+{
+  # call Rcpp.package.skeleton() -- provide 'list' explicitly
+  # and clean up after
+  env <- new.env(parent = emptyenv())
+  env$dummy <- NULL
+  Rcpp::Rcpp.package.skeleton(
+    name = name,
+    attributes = FALSE,
+    module = FALSE,
+    example_code = FALSE,
+    environment = env,
+    list = "dummy",
+    ...
+ ) + + # move to generated package directory + owd <- setwd(name) + on.exit(setwd(owd), add = TRUE) + + # remove dummy stuff + unlink("data", recursive=TRUE) + unlink("man/dummy.Rd") + unlink("Read-and-delete-me") + lns <- readLines("NAMESPACE") + writeLines(lns[!grepl("dummy", lns)], "NAMESPACE") + unlink("src/init.c") + + message("\nAdding RcppParallel settings") + + # update DESCRIPTION file + desc <- read.dcf("DESCRIPTION", all = TRUE, keep.white = TRUE) + version <- sprintf("RcppParallel (>= %s)", utils::packageVersion("RcppParallel")) + + desc$Imports <- paste0(desc$Imports, ", ", version) + message(" >> added Imports: ", desc$Imports) + + desc$LinkingTo <- paste0(desc$LinkingTo, ", RcppParallel") + message(" >> added LinkingTo: ", desc$LinkingTo) + + desc$SystemRequirements <- "GNU make" + message(" >> added SystemRequirements: GNU make") + + write.dcf(desc, file = "DESCRIPTION", keep.white = TRUE) + + # update NAMESPACE file + message(" >> added importFrom(RcppParallel,RcppParallelLibs) directive to NAMESPACE") + cat("importFrom(RcppParallel,RcppParallelLibs)", + file = "NAMESPACE", + sep = "\n", + append = TRUE) + + # write Makevars files + dir.create("src", showWarnings = FALSE) + + # src/Makevars + message(" >> added src/Makevars") + cat( + c( + '# We also need importFrom(RcppParallel,RcppParallelLibs) in NAMESPACE', + 'PKG_LIBS += $(shell ${R_HOME}/bin/Rscript -e "RcppParallel::RcppParallelLibs()")' + ), + file = "src/Makevars", + sep = "\n" + ) + + # src/Makevars.win + message(" >> added src/Makevars.win") + cat( + c( + 'PKG_CXXFLAGS += -DRCPP_PARALLEL_USE_TBB=1', + 'PKG_LIBS += $(shell "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" -e "RcppParallel::RcppParallelLibs()")' + ), + file = "src/Makevars.win", + sep = "\n" + ) + + # write an example script using RcppParallel + if (example_code) { + + message(" >> added example file src/vector-sum.cpp") + file.copy( + system.file("skeleton/vector-sum.cpp", package = "RcppParallel"), + "src/vector-sum.cpp" + ) + + message(" >> added example documentation man/vector-sum.Rd") + file.copy( + system.file("skeleton/vector-sum.Rd", package = "RcppParallel"), + "man/vector-sum.Rd" + ) + + message(" >> compiled Rcpp attributes") + Rcpp::compileAttributes() + } + + TRUE +} diff --git a/R/tbb-autodetected.R.in b/R/tbb-autodetected.R.in new file mode 100644 index 000000000..7cc750e09 --- /dev/null +++ b/R/tbb-autodetected.R.in @@ -0,0 +1,7 @@ + +TBB_ENABLED <- @TBB_ENABLED@ +TBB_LIB <- "@TBB_LIB@" +TBB_INC <- "@TBB_INC@" + +TBB_NAME <- "@TBB_NAME@" +TBB_MALLOC_NAME <- "@TBB_MALLOC_NAME@" \ No newline at end of file diff --git a/R/tbb.R b/R/tbb.R new file mode 100644 index 000000000..3d8fbb328 --- /dev/null +++ b/R/tbb.R @@ -0,0 +1,135 @@ + +#' Get the Path to a TBB Library +#' +#' Retrieve the path to a TBB library. This can be useful for \R packages +#' using RcppParallel that wish to use, or re-use, the version of TBB that +#' RcppParallel has been configured to use. +#' +#' @param name +#' The name of the TBB library to be resolved. Normally, this is one of +#' `tbb`, `tbbmalloc`, or `tbbmalloc_proxy`. When `NULL`, the library +#' path containing the TBB libraries is returned instead. 
+#' +#' @export +tbbLibraryPath <- function(name = NULL) { + + # library paths for different OSes + sysname <- Sys.info()[["sysname"]] + + # find root for TBB install + tbbRoot <- Sys.getenv("TBB_LIB", unset = tbbRoot()) + if (is.null(name)) + return(tbbRoot) + + # form library names + tbbLibNames <- list( + "Darwin" = paste0("lib", name, ".dylib"), + "Windows" = paste0("lib", name, c("12", ""), ".a"), + "SunOS" = paste0("lib", name, ".so"), + "Linux" = paste0("lib", name, c(".so.2", ".so")) + ) + + # skip systems that we know not to be compatible + isCompatible <- !is_sparc() && !is.null(tbbLibNames[[sysname]]) + if (!isCompatible) + return(NULL) + + # find the request library (if any) + libNames <- tbbLibNames[[sysname]] + for (libName in libNames) { + + tbbName <- file.path(tbbRoot, libName) + if (file.exists(tbbName)) + return(tbbName) + + arch <- if (nzchar(.Platform$r_arch)) .Platform$r_arch + suffix <- paste(c("lib", arch, libName), collapse = "/") + tbbName <- system.file(suffix, package = "RcppParallel") + if (file.exists(tbbName)) + return(tbbName) + + } + +} + +tbbCxxFlags <- function() { + + if (!TBB_ENABLED) + return("-DRCPP_PARALLEL_USE_TBB=0") + + flags <- c("-DRCPP_PARALLEL_USE_TBB=1") + + # if TBB_INC is set, apply those library paths + tbbInc <- Sys.getenv("TBB_INC", unset = TBB_INC) + if (!file.exists(tbbInc)) { + tbbInc <- system.file("include", package = "RcppParallel") + } + + # add include path + if (nzchar(tbbInc) && file.exists(tbbInc)) { + + # prefer new interface if version.h exists -- we keep this + # for compatibility with packages like StanHeaders, rstan + versionPath <- file.path(tbbInc, "tbb/version.h") + if (file.exists(versionPath)) + flags <- c(flags, "-DTBB_INTERFACE_NEW") + + # now add the include path + flags <- c(flags, paste0("-I", asBuildPath(tbbInc))) + + } + + # return flags as string + paste(flags, collapse = " ") + +} + +# Return the linker flags required for TBB on this platform +tbbLdFlags <- function() { + + # on Windows, we statically link to oneTBB + if (is_windows()) { + + libPath <- system.file("libs", package = "RcppParallel") + if (nzchar(.Platform$r_arch)) + libPath <- file.path(libPath, .Platform$r_arch) + + ldFlags <- sprintf("-L%s -lRcppParallel", asBuildPath(libPath)) + return(ldFlags) + + } + + # shortcut if TBB_LIB defined + tbbLib <- Sys.getenv("TBB_LINK_LIB", Sys.getenv("TBB_LIB", unset = TBB_LIB)) + if (nzchar(tbbLib)) { + if (R.version$os == "emscripten") { + fmt <- "-L%1$s -l%2$s" + return(sprintf(fmt, asBuildPath(tbbLib), TBB_NAME)) + } + fmt <- "-L%1$s -Wl,-rpath,%1$s -l%2$s -l%3$s" + return(sprintf(fmt, asBuildPath(tbbLib), TBB_NAME, TBB_MALLOC_NAME)) + } + + # explicitly link on macOS + # https://github.com/RcppCore/RcppParallel/issues/206 + if (is_mac()) { + fmt <- "-L%s -l%s -l%s" + return(sprintf(fmt, asBuildPath(tbbLibraryPath()), TBB_NAME, TBB_MALLOC_NAME)) + } + + # nothing required on other platforms + "" + +} + +tbbRoot <- function() { + + if (nzchar(TBB_LIB)) + return(TBB_LIB) + + rArch <- .Platform$r_arch + parts <- c("lib", if (nzchar(rArch)) rArch) + libDir <- paste(parts, collapse = "/") + system.file(libDir, package = "RcppParallel") + +} diff --git a/R/utils.R b/R/utils.R new file mode 100644 index 000000000..53ced6389 --- /dev/null +++ b/R/utils.R @@ -0,0 +1,24 @@ + +# generate paths consumable by the compilers and linkers +# in particular, on Windows and Solaris, this means the path _cannot_ be quoted !! 
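+#
+# For example (hypothetical inputs):
+#
+#   asBuildPath("C:/Program Files/R")  # Windows: 8.3 short path, e.g. "C:/PROGRA~1/R"
+#   asBuildPath("/opt/my libs")        # elsewhere: quoted, except on Solaris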
+asBuildPath <- function(path) { + + # normalize paths using forward slashes + path <- normalizePath(path, winslash = "/", mustWork = FALSE) + + # prefer short path names if the path has spaces + if (is_windows() && grepl(" ", path, fixed = TRUE)) + path <- utils::shortPathName(path) + + # if we still have spaces, and we're not Windows or Solaris, try quoting + if (grepl(" ", path, fixed = TRUE) && !is_solaris()) + path <- shQuote(path) + + # ensure we use forward slashes, even on Windows + path <- chartr("\\", "/", path) + + # return path + path + +} + diff --git a/R/zzz.R b/R/zzz.R new file mode 100644 index 000000000..54374e097 --- /dev/null +++ b/R/zzz.R @@ -0,0 +1,71 @@ + +# !diagnostics suppress=.dllInfo,.tbbDllInfo,.tbbMallocDllInfo,.tbbMallocProxyDllInfo + +# NOTE: we intentionally do _not_ load tbbmalloc_proxy by default, as its +# intended use is to replace the default allocator, something that may be +# dangerous to do by default. in addition, TBB's documentation recommends +# only loading explicitly via e.g. LD_PRELOAD +.dllInfo <- NULL +.tbbDllInfo <- NULL +.tbbMallocDllInfo <- NULL +.tbbMallocProxyDllInfo <- NULL + +loadTbbLibrary <- function(name) { + # TBB is statically linked on Windows + if (is_windows()) { + return(NULL) + } + path <- tbbLibraryPath(name) + if (is.null(path)) + return(NULL) + + if (!file.exists(path)) { + warning("TBB library ", shQuote(name), " not found.") + return(NULL) + } + + dyn.load(path, local = FALSE, now = TRUE) + +} + +.onLoad <- function(libname, pkgname) { + + # on Windows, load RcppParallel first + if (.Platform$OS.type == "windows") { + .dllInfo <<- library.dynam("RcppParallel", pkgname, libname) + } + + # load tbb, tbbmalloc + .tbbDllInfo <<- loadTbbLibrary("tbb") + .tbbMallocDllInfo <<- loadTbbLibrary("tbbmalloc") + + # load tbbmalloc_proxy, but only if requested + useTbbMallocProxy <- Sys.getenv("RCPP_PARALLEL_USE_TBBMALLOC_PROXY", unset = "FALSE") + if (useTbbMallocProxy %in% c("TRUE", "True", "true", "1")) + .tbbMallocProxyDllInfo <<- loadTbbLibrary("tbbmalloc_proxy") + + # load RcppParallel library if available + if (.Platform$OS.type != "windows") { + .dllInfo <<- library.dynam("RcppParallel", pkgname, libname, local = FALSE) + } + +} + +.onUnload <- function(libpath) { + + # unload the package library + if (!is.null(.dllInfo)) + library.dynam.unload("RcppParallel", libpath) + + # NOTE: we do not explicitly unload tbbmalloc_proxy as switching + # the allocator at runtime can cause issues + + # unload tbbmalloc if we loaded it + if (!is.null(.tbbMallocDllInfo)) + dyn.unload(.tbbMallocDllInfo[["path"]]) + + # unload tbb if we loaded it + if (!is.null(.tbbDllInfo)) + dyn.unload(.tbbDllInfo[["path"]]) + +} diff --git a/README.md b/README.md index 4b56c6ba3..620e6d21b 100644 --- a/README.md +++ b/README.md @@ -1,75 +1,63 @@ -## RcppParallel Package -High level functions for doing parallel programming with Rcpp. For example, the `parallelFor` function can be used to convert the work of a standard serial "for" loop into a parallel one and the `parallelReduce` function can be used for accumulating aggregate or other values. +## RcppParallel -The high level interface enables safe and robust parallel programming without direct manipulation of operating system threads. The underlying implementation differs by platform: on Linux and Mac systems the [Intel TBB](https://www.threadingbuildingblocks.org/) (Threading Building Blocks) are used while on Windows systems the [TinyThread](http://tinythreadpp.bitsnbites.eu/) library is used. 
+ +[![CRAN](https://www.r-pkg.org/badges/version/RcppParallel)](https://cran.r-project.org/package=RcppParallel) +[![R-CMD-check](https://github.com/RcppCore/RcppParallel/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/RcppCore/RcppParallel/actions/workflows/R-CMD-check.yaml) + -### Examples +High level functions for parallel programming with Rcpp. The `parallelFor()` function can be used to convert the work of a standard serial "for" loop into a parallel one, and the `parallelReduce()` function can be used for accumulating aggregate or other values. -Here are links to some examples that illustrate using RcppParallel. Performance benchmarks were executed on a 2.6GHz Haswell MacBook Pro with 4 cores (8 with hyperthreading). +The high level interface enables safe and robust parallel programming without direct manipulation of operating system threads. On Windows, macOS, and Linux systems, the underlying implementation is based on [Intel TBB](https://github.com/oneapi-src/oneTBB) (Threading Building Blocks). On other platforms, a less-performant fallback implementation based on the [TinyThread](https://tinythreadpp.bitsnbites.eu/) library is used. -[Parallel Matrix Transform](http://gallery.rcpp.org/articles/parallel-matrix-transform/) --- Demonstrates using `parallelFor` to transform a matrix (take the square root of each element) in parallel. In this example the parallel version performs about 2.5x faster than the serial version. +For additional documentation on using RcppParallel see the package website at http://rcppcore.github.io/RcppParallel/. -[Parallel Vector Sum](http://gallery.rcpp.org/articles/parallel-vector-sum/) --- Demonstrates using `parallelReduce` to take the sum of a vector in parallel. In this example the parallel version performs 4.5x faster than the serial version. -[Parallel Distance Matrix](http://gallery.rcpp.org/articles/parallel-distance-matrix/) --- Demonstrates using `parallelFor` to compute pairwise distances for each row in an input data matrix. In this example the parallel version performs 5.5x faster than the serial version. +### Intel TBB -[Parallel Inner Product](http://gallery.rcpp.org/articles/parallel-inner-product/) --- Demonstrates using `parallelReduce` to compute the inner product of two vectors in parallel. In this example the parallel version performs 2.5x faster than the serial version. +`RcppParallel` supports the new interface of Intel TBB, and can be configured to use an external copy of TBB (e.g., with [`oneTBB`](https://github.com/oneapi-src/oneTBB) or the system TBB library), using the `TBB_LIB` and `TBB_INC` environment variables. -Note that the benchmark times above are for the TBB back-end (Posix systems only). Performance on Windows will be about 30-50% slower as a result of less sophisticated thread scheduling. +To build the development version of `RcppParallel` with [`oneTBB`](https://github.com/oneapi-src/oneTBB): -### Usage +- Install [`oneTBB`](https://github.com/oneapi-src/oneTBB). 
-You can install the RcppParallel package from CRAN as follows: +For example, installing [`oneTBB`](https://github.com/oneapi-src/oneTBB) on Linux 64-bit (`x86_64`) to `$HOME` directory (change if needed!): -```s -install.packages("RcppParallel") -``` - -#### sourceCpp +```bash +TBB_RELEASE="https://api.github.com/repos/oneapi-src/oneTBB/releases/latest" +TBB_TAG=$(curl --silent $TBB_RELEASE | grep -Po '"tag_name": "\K.*?(?=")') +TBB_VERSION=${TBB_TAG#?} -You can use the RcppParallel library from within a standalone C++ source file as follows: +wget https://github.com/oneapi-src/oneTBB/releases/download/v$TBB_VERSION/oneapi-tbb-$TBB_VERSION-lin.tgz +tar zxvf oneapi-tbb-$TBB_VERSION-lin.tgz -C $HOME -```cpp -// [[Rcpp::depends(RcppParallel)]] -#include +export TBB="$HOME/oneapi-tbb-$TBB_VERSION" ``` +Note that you may replace `TBB_VERSION=${TBB_TAG#?}` with a custom version number if needed ( check available releases [here](https://github.com/oneapi-src/oneTBB/releases) ). -#### Packages +- Set the TBB environment variables (specifically: `TBB` for the installation prefix, `TBB_INC` for the directory that includes the header files, and `TBB_LIB` for the libraries directory). -If you want to use RcppParallel from within an R package you add the following to your DESCRIPTION file: +For example, installing [`oneTBB`](https://github.com/oneapi-src/oneTBB) on Linux 64-bit (`x86_64`) to `$HOME` directory (change if needed!): -```yaml -Imports: RcppParallel -LinkingTo: RcppParallel -``` +```bash +source $TBB/env/vars.sh intel64 -And the following to your NAMESPACE file: - -```s -import(RcppParallel) +export TBB_INC="$TBB/include" +export TBB_LIB="$TBB/lib/intel64/gcc4.8" ``` -### License - -The RcppParallel package is made available under the [GPLv2](http://www.gnu.org/licenses/old-licenses/gpl-2.0.html). +- Build the development version of `RcppParallel`: -The TinyThread library is licensed under the [zlib/libpng](http://www.opensource.org/licenses/zlib-license.php) license as described [here](https://gitorious.org/tinythread/tinythreadpp/source/master:README.txt). +```r +install.packages("remotes") +remotes::install_github("RcppCore/RcppParallel") +``` -The Intel TBB Library is licensed under the GPLv2 (as described at https://www.threadingbuildingblocks.org/Licensing): -TBB and other open-source software available from this site is licensed under GPLv2 with the (libstdc++) runtime exception. Specifically, the TBB open-source license is the same license used by the GNU libstdc++ library in gcc 4.2.1 (and earlier). For a complete description of the license, please visit the official GNU website for [GPLv2](http://www.gnu.org/licenses/old-licenses/gpl-2.0.html) and for the [runtime exception](https://www.threadingbuildingblocks.org/licensing#runtime-exception). +### License -**Runtime Exception** +The RcppParallel package is made available under the [GPLv2](http://www.gnu.org/licenses/old-licenses/gpl-2.0.html) license. -``` -As a special exception, you may use this file as part of a free software -library without restriction. Specifically, if other files instantiate -templates or use macros or inline functions from this file, or you compile -this file and link it with other files to produce an executable, this -file does not by itself cause the resulting executable to be covered by -the GNU General Public License. This exception does not however -invalidate any other reasons why the executable file might be covered by -the GNU General Public License. 
-``` +The [TinyThread library](https://tinythreadpp.bitsnbites.eu/) is licensed under the [zlib/libpng](https://opensource.org/licenses/zlib-license.php) license. +The Intel TBB Library is licensed under the Apache 2.0 license, as described at https://github.com/oneapi-src/oneTBB/blob/master/LICENSE.txt. diff --git a/RcppParallel.Rproj b/RcppParallel.Rproj index c83fc49a5..2970ea06c 100644 --- a/RcppParallel.Rproj +++ b/RcppParallel.Rproj @@ -1,4 +1,5 @@ Version: 1.0 +ProjectId: 8e3d73b0-404c-42f5-b2ef-46f759f65dd4 RestoreWorkspace: No SaveWorkspace: No @@ -12,6 +13,10 @@ Encoding: UTF-8 RnwWeave: Sweave LaTeX: pdfLaTeX +AutoAppendNewline: Yes + BuildType: Package -PackageInstallArgs: --no-multiarch --with-keep.source +PackageCleanBeforeInstall: No +PackageInstallArgs: --with-keep.source PackageCheckArgs: --as-cran +PackageRoxygenize: rd,collate,namespace diff --git a/cleanup b/cleanup new file mode 100755 index 000000000..ddbda5cf6 --- /dev/null +++ b/cleanup @@ -0,0 +1,3 @@ +#!/usr/bin/env sh +: "${R_HOME=`R RHOME`}" +"${R_HOME}/bin/Rscript" tools/config.R cleanup "$@" diff --git a/cleanup.win b/cleanup.win new file mode 100755 index 000000000..918a2f0c8 --- /dev/null +++ b/cleanup.win @@ -0,0 +1,2 @@ +#!/usr/bin/env sh +"${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" tools/config.R cleanup "$@" diff --git a/configure b/configure new file mode 100755 index 000000000..b08715386 --- /dev/null +++ b/configure @@ -0,0 +1,3 @@ +#!/usr/bin/env sh +: "${R_HOME=`R RHOME`}" +"${R_HOME}/bin/Rscript" tools/config.R configure "$@" diff --git a/configure.win b/configure.win new file mode 100755 index 000000000..a47d1205c --- /dev/null +++ b/configure.win @@ -0,0 +1,2 @@ +#!/usr/bin/env sh +"${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" tools/config.R configure "$@" diff --git a/doc/rtools_tbb_notes.md b/doc/rtools_tbb_notes.md new file mode 100644 index 000000000..01a8d00c6 --- /dev/null +++ b/doc/rtools_tbb_notes.md @@ -0,0 +1,269 @@ +# Differences MinGW / Rtools + +## cmd + +### MinGW + +cmd is an sh wrapper which invokes the 32bit version of Windows' shell: + + $ where cmd + C:\MinGW\msys\1.0\bin\cmd + c:\Windows\System32\cmd.exe + $ cat /c/MinGW/msys/1.0/bin/cmd + #!/bin/sh + # Copyright (C) 2002, Earnie Boyd + # mailto:earnie@users.sf.net + # This file is part of Minimal SYStem. + # http://www.mingw.org/msys.shtml + # File: cmd + + "$COMSPEC" "$@" + $ echo $COMSPEC + C:\windows\SysWOW64\cmd.exe + +### Rtools + +No wrapper; cmd is the default Windows shell (64bit on a 64bit OS): + + $ where cmd + C:\Windows\System32\cmd.exe + + +## paths + +### MinGW + +- 3 styles supported: `/drive/path`, `drive:/path`, `drive:\path` (properly escaped). +- The *actual* path (returned when querying) is `/drive/path`. + + + $ cd f:/tmp/testpath + $ pwd + /f/tmp/testpath + $ cd f:\\tmp\\testpath + $ pwd + /f/tmp/testpath + $ cd /f/tmp/testpath + $ pwd + /f/tmp/testpath + +### Rtools + +- 3 styles supported: `/cygdrive/drive/path`, `drive:/path`, `drive:\path` (properly escaped). +- The *actual* path (returned when querying) is `/cygdrive/drive/path`. +- `/drive/path` is **not** supported. + + $ cd f:/tmp/testpath + $ pwd + /cygdrive/f/tmp/testpath + $ cd f:\\tmp\\testpath + $ pwd + /cygdrive/f/tmp/testpath + $ cd /f/tmp/testpath + cd: can't cd to /f/tmp/testpath + $ cd /cygdrive/f/tmp/testpath + $ pwd + /cygdrive/f/tmp/testpath + + +## parameters + +### MinGW + +Shell will sometimes automatically replace parameters starting with forward slashes using the rules described [here](http://www.mingw.org/wiki/Posix_path_conversion). 
+Since forward slashes are often used to mark option parameters in Windows command line applications (/o is the Windows equivalent of -o and --option for Unix), this can cause very subtle and hard to debug issues. The fix is to double the forward slashes, or to quote (when possible) + + $ ls /d + $RECYCLE.BIN System Volume Information pagefile.sys + + $ cmd /c "echo hi" + hi + + $ cmd /C "echo hi" + Microsoft Windows [Version 6.3.9600] + (c) 2013 Microsoft Corporation. All rights reserved. + + f:\tmp\testpath>exit + + $ cmd /C "echo hi /runtime" + 'untime"' is not recognized as an internal or external command, + operable program or batch file. + + $ cmd //C "echo hi /runtime" + hi /runtime + + +### Rtools + +No parameter replacements. What you type is what you get. + + $ ls /d + ls: cannot access /d: No such file or directory + $ cmd /c "echo hi" + hi + $ cmd /C "echo hi" + hi + $ cmd /C "echo hi /runtime" + hi /runtime + + +## uname + +Only present in MinGW + +## g++ + +### MinGW + +default full paths supported + + $ g++ /f/tmp/testpath/test.cpp + $ echo $? + 0 + +### Rtools + +default full paths are not supported + + $ ls /cygdrive/f/tmp/testpath/test.cpp + /cygdrive/f/tmp/testpath/test.cpp + $ cat /cygdrive/f/tmp/testpath/test.cpp + int main() {return 0;} + $ g++ /cygdrive/f/tmp/testpath/test.cpp + g++.exe: error: /cygdrive/f/tmp/testpath/test.cpp: No such file or directory + g++.exe: fatal error: no input files + compilation terminated. + +This means that great care must be taken to ensure paths are never queried and instead always manually constructed. Examples of queried paths that end up being g++ incompatible in the context of a Makefile: + +- don't use `$(shell pwd)` +- don't use `$(CURDIR)` +- only use **relative paths** in `VPATH` + + + $ cat Makefile + .PHONY: all + all: test.o foo.o + @echo DIR=$(CURDIR) + + %.o: %.cpp + @echo $< + + $ make VPATH=f:/tmp/testpath/subdir + test.cpp + /cygdrive/f/tmp/testpath/subdir/foo.cpp + DIR=/cygdrive/f/tmp/testpath + + $ make VPATH=subdir + test.cpp + subdir/foo.cpp + DIR=/cygdrive/f/tmp/testpath + +# Bugs and tips + +## Modal dialog asking to insert disk in drive + +This bug is in MinGW only (not Rtools). This is due to hard-coded paths pointing to I: drive in g++ + + $ strings c:/MinGW/bin/g++.exe | grep i: | grep mingw + i:/p/giaw/mingw/share/locale + i:/p/giaw/mingw/share/locale + +If no I: drive exists, or the path does not exist, then g++ silently ignores it. *However*, if I: happens to point to a removable drive, then you get a modal dialog. + +Fix: go in "Disk Management" and rename the drive. + +## Full paths + +g++ is not the only tool affected by full paths in the context of Rtools. Since Rtools doesn't perform any auto-conversion and `/` is used as a option marker for many Windows command line applications, +some of them end up being confused. For example: + + $ cat test.js + WScript.Echo( "Hi" ); + $ cscript /nologo test.js + Hi + $ pwd + /cygdrive/f/tmp/testpath + $ cscript /nologo /cygdrive/f/tmp/testpath/test.js + Input Error: Unknown option "/cygdrive/f/tmp/testpath/test.js" specified. + $ cscript /nologo f:/tmp/testpath/test.js + Hi + $ + +Recommendation is to always use relative paths. 
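+
+RcppParallel's `asBuildPath()` helper (defined in `R/utils.R` in this diff) encodes this advice; a minimal R sketch of the same idea, assuming a Windows path containing spaces:
+
+    path <- normalizePath("C:/Program Files/R", winslash = "/", mustWork = FALSE)
+    if (grepl(" ", path, fixed = TRUE))
+      path <- utils::shortPathName(path)  # prefer 8.3 short names over quoting
+    chartr("\\", "/", path)               # forward slashes survive both shells
+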
+ +## Changing the shell in make + +The `.SHELLFLAGS` variable doesn't work in the context of Rtools (silently ignored), making it impossible to change default shell to cmd: the default for SHELLFLAGS is `-c`, which is suitable for `sh` and not `cmd`: + + + $ cmd /c dir /b notfound + File Not Found + $ cmd -c dir /b notfound + Microsoft Windows [Version 6.3.9600] + (c) 2013 Microsoft Corporation. All rights reserved. + + F:\tmp\testpath>exit + + $ cat Makefile + SHELL=cmd + .SHELLFLAGS=/c + TEST=$(shell dir /b notfound) + + .PHONY: all + all: + @echo hi + + $ make + Microsoft Windows [Version 6.3.9600] + (c) 2013 Microsoft Corporation. All rights reserved. + + F:\tmp\testpath>exit + + +The Rtools-only `--win32` command line option for make didn't properly work for me (but I was dealing with complex Makefiles). + +## Hangs in make + +There does not appear to be a built-in tracing or time-out mechanism in make regarding sub-processes. One effective way to figure out what's hanging is "Task Manager", "Details", then add column "Command Line". +In my case, most hangs were due to some variation of `cmd -c something` or `cmd c: something` (both variations end up leaving cmd.exe running) instead of `cmd /c something`. The first one is due to SHELLFLAGS not working, the second to auto param replacement. + +## Rule not found errors + +i.e. "no rule to make target" errors. +In some cases, the `VPATH` parser gets confused, and then **all** paths specified are *silently* ignored. This can even happen for simple (no spaces, etc.), valid (exist) paths. + + $ cat Makefile + .PHONY: all + all: test.o foo.o + @echo hi + + %.o: %.cpp + @echo $< + $ pwd + /cygdrive/f/tmp/testpath + $ make "VPATH=f:/tmp/testpath/subdir" + test.cpp + /cygdrive/f/tmp/testpath/subdir/foo.cpp + hi + $ make "VPATH=f:/tmp/testpath subdir" + test.cpp + subdir/foo.cpp + hi + $ make "VPATH=subdir f:/tmp/testpath" + test.cpp + subdir/foo.cpp + hi + $ make "VPATH=f:/tmp/testpath f:/tmp/testpath/subdir" + test.cpp + make: *** No rule to make target `foo.o', needed by `all'. Stop. + + +### Incorrect rule firing + +These can be caused by the previous issue: VPATH not working, therefore target file not found, therefore other rule firing. + +This can also be caused by MinGW and RTools using a different version of make. A quick way to test it is to manually/explicitly create the rule, and see if it is firing. + + + diff --git a/inst/.gitignore b/inst/.gitignore index c3af85790..93f665e4d 100644 --- a/inst/.gitignore +++ b/inst/.gitignore @@ -1 +1,2 @@ lib/ +libs/ diff --git a/inst/include/.gitignore b/inst/include/.gitignore new file mode 100644 index 000000000..4c130be3e --- /dev/null +++ b/inst/include/.gitignore @@ -0,0 +1,7 @@ + +# These TBB libraries are copied in at configure time. 
+/index.html +/oneapi +/serial +/tbb + diff --git a/inst/include/RcppParallel.h b/inst/include/RcppParallel.h index a31601b53..87d6eb5ca 100644 --- a/inst/include/RcppParallel.h +++ b/inst/include/RcppParallel.h @@ -5,43 +5,77 @@ // TinyThread implementation #include "RcppParallel/TinyThread.h" -// Makevars owns setting this to 1 if TBB supported +// Use TBB only where it's known to compile and work correctly +// (NOTE: Windows TBB is temporarily opt-in for packages for +// compatibility with CRAN packages not previously configured +// to link to TBB in Makevars.win) #ifndef RCPP_PARALLEL_USE_TBB - #define RCPP_PARALLEL_USE_TBB 0 +# if defined(__APPLE__) || defined(__gnu_linux__) || (defined(__sun) && defined(__SVR4) && !defined(__sparc)) +# define RCPP_PARALLEL_USE_TBB 1 +# else +# define RCPP_PARALLEL_USE_TBB 0 +# endif #endif #if RCPP_PARALLEL_USE_TBB - #include "RcppParallel/TBB.h" +# include "RcppParallel/TBB.h" #endif +#include "RcppParallel/Backend.h" #include "RcppParallel/RVector.h" #include "RcppParallel/RMatrix.h" namespace RcppParallel { -inline void parallelFor(std::size_t begin, std::size_t end, - Worker& worker, std::size_t grainSize = 1) { - +inline void parallelFor(std::size_t begin, + std::size_t end, + Worker& worker, + std::size_t grainSize = 1, + int numThreads = -1) +{ + grainSize = resolveValue("RCPP_PARALLEL_GRAIN_SIZE", grainSize, std::size_t(1)); + numThreads = resolveValue("RCPP_PARALLEL_NUM_THREADS", numThreads, -1); + #if RCPP_PARALLEL_USE_TBB - tbbParallelFor(begin, end, worker, grainSize); + if (internal::backend() == internal::BACKEND_TBB) + tbbParallelFor(begin, end, worker, grainSize, numThreads); + else + ttParallelFor(begin, end, worker, grainSize); #else ttParallelFor(begin, end, worker, grainSize); #endif - } template -inline void parallelReduce(std::size_t begin, std::size_t end, - Reducer& reducer, std::size_t grainSize = 1) { - +inline void parallelReduce(std::size_t begin, + std::size_t end, + Reducer& reducer, + std::size_t grainSize = 1, + int numThreads = -1) +{ + grainSize = resolveValue("RCPP_PARALLEL_GRAIN_SIZE", grainSize, std::size_t(1)); + numThreads = resolveValue("RCPP_PARALLEL_NUM_THREADS", numThreads, -1); + #if RCPP_PARALLEL_USE_TBB - tbbParallelReduce(begin, end, reducer, grainSize); + if (internal::backend() == internal::BACKEND_TBB) + tbbParallelReduce(begin, end, reducer, grainSize, numThreads); + else + ttParallelReduce(begin, end, reducer, grainSize); #else ttParallelReduce(begin, end, reducer, grainSize); #endif - } -} // namespace RcppParallel +} // end namespace RcppParallel + +// TRUE and FALSE macros that may come with system headers on some systems +// But conflict with R.h (R_ext/Boolean.h) +// TRUE and FALSE macros should be undef in RcppParallel.h +#ifdef TRUE + #undef TRUE +#endif +#ifdef FALSE + #undef FALSE +#endif #endif // __RCPP_PARALLEL__ diff --git a/inst/include/RcppParallel/Backend.h b/inst/include/RcppParallel/Backend.h new file mode 100644 index 000000000..13971b9e0 --- /dev/null +++ b/inst/include/RcppParallel/Backend.h @@ -0,0 +1,84 @@ + +#ifndef __RCPP_PARALLEL_BACKEND__ +#define __RCPP_PARALLEL_BACKEND__ + +#include +#include + +extern "C" { +void REprintf(const char*, ...); +} + +namespace RcppParallel { +namespace internal { + +enum backend_type { + BACKEND_TBB, + BACKEND_TINYTHREAD +}; + +#if RCPP_PARALLEL_USE_TBB + +inline backend_type defaultBackend() +{ + return BACKEND_TBB; +} + +#else + +inline backend_type defaultBackend() +{ + return BACKEND_TINYTHREAD; +} + +#endif + +inline const char* 
backendToString(backend_type backend) +{ + switch (backend) + { + case BACKEND_TBB: + return "tbb"; + case BACKEND_TINYTHREAD: + return "tinythread"; + } + + // shouldn't be reached but need to silence compiler warnings + return "tbb"; +} + +inline backend_type backend() +{ + const char* requestedBackend = std::getenv("RCPP_PARALLEL_BACKEND"); + if (requestedBackend == NULL) + { + return defaultBackend(); + } + else if (std::strcmp(requestedBackend, "tbb") == 0) + { +#if RCPP_PARALLEL_USE_TBB + return BACKEND_TBB; +#else + const char* msg = + "tbb backend is not available; using tinythread instead"; + + REprintf("%s\n", msg); + return BACKEND_TINYTHREAD; +#endif + } + else if (strcmp(requestedBackend, "tinythread") == 0) + { + return BACKEND_TINYTHREAD; + } + else + { + const char* fmt = "unknown parallel backend '%s'; using '%s' instead\n"; + REprintf(fmt, requestedBackend, backendToString(defaultBackend())); + return defaultBackend(); + } +} + +} // namespace internal +} // namespace RcppParallel + +#endif /* __RCPP_PARALLEL_BACKEND__ */ diff --git a/inst/include/RcppParallel/Common.h b/inst/include/RcppParallel/Common.h index 497f58c5a..ae2b1327d 100644 --- a/inst/include/RcppParallel/Common.h +++ b/inst/include/RcppParallel/Common.h @@ -1,33 +1,86 @@ #ifndef __RCPP_PARALLEL_COMMON__ #define __RCPP_PARALLEL_COMMON__ +#include #include +#include + +#include +#include +#include +#include namespace RcppParallel { +template +inline int resolveValue(const char* envvar, + T requestedValue, + U defaultValue) +{ + // if the requested value is non-zero and not the default, we can use it + bool useRequestedValue = + requestedValue != static_cast(defaultValue) && + requestedValue > 0; + + if (useRequestedValue) + return requestedValue; + + // otherwise, try reading the default from associated envvar + // if the environment variable is unset, use the default + const char* var = getenv(envvar); + if (var == NULL) + return defaultValue; + + // try to convert the string to a number + // if an error occurs during conversion, just use default + errno = 0; + char* end; + long value = strtol(var, &end, 10); + + // check for conversion failure + if (end == var || *end != '\0' || errno == ERANGE) + return defaultValue; + + // okay, return the parsed environment variable value + return value; +} + +// Tag type used for disambiguating splitting constructors +struct Split {}; + // Work executed within a background thread. We implement dynamic // dispatch using vtables so we can have a stable type to cast // to from the void* passed to the worker thread (required because // the tinythreads interface allows to pass only a void* to the // thread main rather than a generic type / template) - -struct Worker -{ +struct Worker +{ // construct and destruct (delete virtually) Worker() {} virtual ~Worker() {} - + // dispatch work over a range of values - virtual void operator()(std::size_t begin, std::size_t end) = 0; - + virtual void operator()(std::size_t begin, std::size_t end) = 0; + +private: // disable copying and assignment Worker(const Worker&); void operator=(const Worker&); }; -// Tag type used for disambiguating splitting constructors +// Used for controlling the stack size for threads / tasks within a scope. 
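+//
+// A usage sketch (assuming RAII semantics; the member definitions live in the
+// package sources rather than in this header):
+//
+//   {
+//     ThreadStackSizeControl control;  // applies the requested stack size
+//     // ... schedule parallel work ...
+//   }                                  // settings restored at scope exit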
+class ThreadStackSizeControl +{ +public: + ThreadStackSizeControl(); + ~ThreadStackSizeControl(); + +private: + // COPYING: not copyable + ThreadStackSizeControl(const ThreadStackSizeControl&); + ThreadStackSizeControl& operator=(const ThreadStackSizeControl&); +}; -struct Split {}; } // namespace RcppParallel diff --git a/inst/include/RcppParallel/RMatrix.h b/inst/include/RcppParallel/RMatrix.h index 0e6891eeb..576be720f 100644 --- a/inst/include/RcppParallel/RMatrix.h +++ b/inst/include/RcppParallel/RMatrix.h @@ -14,16 +14,21 @@ class RMatrix { public: template - class row_iterator - : public std::iterator { + class row_iterator { public: - inline row_iterator(Row& row, std::size_t i) + using iterator_category = std::random_access_iterator_tag; + using value_type = V; + using difference_type = std::size_t; + using pointer = value_type*; + using reference = value_type&; + + inline row_iterator(Row& row, difference_type i) : start_(row.start_), parentNrow_(row.parent_.nrow()), index_(i) { } - inline row_iterator(std::size_t start, std::size_t parentNrow, std::size_t index) + inline row_iterator(pointer start, difference_type parentNrow, difference_type index) : start_(start), parentNrow_(parentNrow), index_(index) { } @@ -41,7 +46,7 @@ class RMatrix { } inline row_iterator operator++(int) { - iterator tmp(*this); + row_iterator tmp(*this); operator++(); return tmp; } @@ -57,23 +62,23 @@ class RMatrix { return tmp ; } - row_iterator operator+(std::size_t n) const { + row_iterator operator+(difference_type n) const { return row_iterator(start_, parentNrow_ ,index_ + n ) ; } - row_iterator operator-(std::size_t n) const { + row_iterator operator-(difference_type n) const { return row_iterator(start_, parentNrow_, index_ - n ) ; } - std::size_t operator+(const row_iterator& other) const { + difference_type operator+(const row_iterator& other) const { return index_ + other.index_; } - std::size_t operator-(const row_iterator& other) const { + difference_type operator-(const row_iterator& other) const { return index_ - other.index_ ; } - row_iterator& operator+=(std::size_t n) { index_ += n ; return *this; } - row_iterator& operator-=(std::size_t n) { index_ -= n ; return *this; } + row_iterator& operator+=(difference_type n) { index_ += n ; return *this; } + row_iterator& operator-=(difference_type n) { index_ -= n ; return *this; } bool operator==(const row_iterator& other) const { return index_ == other.index_; } bool operator!=(const row_iterator& other) const { return index_ != other.index_; } @@ -83,16 +88,16 @@ class RMatrix { bool operator>=(const row_iterator& other) const { return index_ >= other.index_; } - inline V& operator*() { return start_[index_ * parentNrow_]; } + inline reference operator*() { return start_[index_ * parentNrow_]; } - inline V* operator->() { return &(start_[index_ * parentNrow_]); } + inline pointer operator->() { return &(start_[index_ * parentNrow_]); } - inline V& operator[](int i) { return start_[(index_+i) * parentNrow_]; } + inline reference operator[](int i) { return start_[(index_+i) * parentNrow_]; } private: - V* start_; - std::size_t parentNrow_; - std::size_t index_; + pointer start_; + difference_type parentNrow_; + difference_type index_; }; typedef row_iterator iterator; @@ -129,7 +134,11 @@ class RMatrix { inline size_t length() const { return parent_.ncol(); } - + + inline size_t size() const { + return parent_.ncol(); + } + inline T& operator[](std::size_t i) { return start_[i * parent_.nrow()]; } @@ -174,6 +183,7 @@ class RMatrix { inline 
@@ -174,6 +183,7 @@ class RMatrix {
      inline const_iterator end() const { return end_; }
      
      inline size_t length() const { return end_ - begin_; }
+     inline size_t size() const { return end_ - begin_; }
      
      inline T& operator[](std::size_t i) {
        return *(begin_ + i);
@@ -199,7 +209,7 @@ class RMatrix {
    {
    }
    
-   inline RMatrix(const T* data, std::size_t nrow, std::size_t ncol)
+   inline RMatrix(T* data, std::size_t nrow, std::size_t ncol)
      : data_(data), nrow_(nrow), ncol_(ncol)
    {
    }
@@ -239,6 +249,14 @@ class RMatrix {
      return Column(*const_cast<RMatrix*>(this), i);
    }
    
+   inline T& operator[](std::size_t i) {
+      return *(data_ + i);
+   }
+
+   inline const T& operator[](std::size_t i) const {
+      return *(data_ + i);
+   }
+
 private:
    T* data_;
    std::size_t nrow_;
diff --git a/inst/include/RcppParallel/RVector.h b/inst/include/RcppParallel/RVector.h
index b59e11c58..d7005146f 100644
--- a/inst/include/RcppParallel/RVector.h
+++ b/inst/include/RcppParallel/RVector.h
@@ -42,6 +42,7 @@ class RVector {
    inline const_iterator begin() const { return begin_; }
    inline const_iterator end() const { return end_; }
    
+   inline std::size_t size() const { return end_ - begin_; }
    inline std::size_t length() const { return end_ - begin_; }
    
    inline T& operator[](std::size_t i) {
diff --git a/inst/include/RcppParallel/TBB.h b/inst/include/RcppParallel/TBB.h
index 86a1ba781..5ac5722d8 100644
--- a/inst/include/RcppParallel/TBB.h
+++ b/inst/include/RcppParallel/TBB.h
@@ -3,73 +3,162 @@
 
 #include "Common.h"
 
-#include <tbb/tbb.h>
+#ifndef TBB_PREVIEW_GLOBAL_CONTROL
+# define TBB_PREVIEW_GLOBAL_CONTROL 1
+#endif
 
-namespace RcppParallel {
+// For compatibility with existing packages on CRAN.
+#include "tbb/blocked_range.h"
+#include "tbb/concurrent_unordered_set.h"
+#include "tbb/concurrent_unordered_map.h"
+#include "tbb/global_control.h"
+#include "tbb/mutex.h"
+#include "tbb/parallel_for.h"
+#include "tbb/parallel_for_each.h"
+#include "tbb/parallel_reduce.h"
+#include "tbb/parallel_sort.h"
+#include "tbb/spin_mutex.h"
 
-namespace {
+// For compatibility with older R packages.
+namespace tbb {
 
-struct TBBWorker
-{
-   explicit TBBWorker(Worker& worker) : worker_(worker) {}
+#ifndef __TBB_task_scheduler_init_H
+#define __TBB_task_scheduler_init_H
+
+class task_scheduler_init
 {
-   void operator()(const tbb::blocked_range<size_t>& r) const {
-      worker_(r.begin(), r.end());
+public:
+
+   task_scheduler_init(
+      int number_of_threads = -1,
+      std::size_t stack_size = 0)
+   {
    }
-   
-private:
-   Worker& worker_;
+
+   static int default_num_threads()
+   {
+      return 2;
+   }
+
+   static const int automatic = -1;
+   static const int deferred = -2;
+
 };
 
-template <typename Reducer>
-struct TBBReducer 
-{  
-   explicit TBBReducer(Reducer& reducer) 
-      : pSplitReducer_(NULL), reducer_(reducer) 
+#endif
+
+} // end namespace tbb
+
+
+namespace RcppParallel {
+
+// This class is primarily used to implement type erasure. The goals here were:
+//
+// 1. Hide the tbb symbols / implementation details from client R packages.
+//    That is, they should get the tools they need only via RcppParallel.
+//
+// 2. Do this in a way that preserves binary compatibility with pre-existing
+//    classes that make use of parallelReduce().
+//
+// 3. Ensure that those packages, when re-compiled without source changes,
+//    can still function as expected.
+//
+// The downside here is that all the indirection through std::function<>
+// and the requirement for RTTI is probably expensive, but I couldn't find
+// a better way forward that could also preserve binary compatibility with
+// existing pre-built packages.
+//
+// Hopefully, in a future release, we can do away with this wrapper, once
+// packages have been rebuilt and no longer implicitly depend on TBB internals.
+struct ReducerWrapper {
+
+   template <typename T>
+   ReducerWrapper(T* reducer)
+   {
+      self_ = reinterpret_cast<void*>(reducer);
+      owned_ = false;
+
+      work_ = [&](void* self, std::size_t begin, std::size_t end)
+      {
+         (*reinterpret_cast<T*>(self))(begin, end);
+      };
+
+      split_ = [&](void* object, Split split)
+      {
+         return new T(*reinterpret_cast<T*>(object), split);
+      };
+
+      join_ = [&](void* self, void* other)
+      {
+         (*reinterpret_cast<T*>(self)).join(*reinterpret_cast<T*>(other));
+      };
+
+      deleter_ = [&](void* object)
+      {
+         delete (T*) object;
+      };
+   }
+
+   ~ReducerWrapper() {
+      if (owned_)
+      {
+         deleter_(self_);
+         self_ = nullptr;
+      }
    }
-   
-   TBBReducer(TBBReducer& tbbReducer, tbb::split)
-      : pSplitReducer_(new Reducer(tbbReducer.reducer_, RcppParallel::Split())),
-        reducer_(*pSplitReducer_)
+
+   void operator()(std::size_t begin, std::size_t end) const
    {
+      work_(self_, begin, end);
    }
-   
-   virtual ~TBBReducer() { delete pSplitReducer_; }
 
-   void operator()(const tbb::blocked_range<size_t>& r) {
-      reducer_(r.begin(), r.end());
+   ReducerWrapper(const ReducerWrapper& rhs, Split split)
+   {
+      self_ = rhs.split_(rhs.self_, split);
+      owned_ = true;
+
+      work_ = rhs.work_;
+      split_ = rhs.split_;
+      join_ = rhs.join_;
+      deleter_ = rhs.deleter_;
    }
-   
-   void join(const TBBReducer& tbbReducer) { 
-      reducer_.join(tbbReducer.reducer_); 
+
+   void join(const ReducerWrapper& rhs) const
+   {
+      join_(self_, rhs.self_);
    }
-   
+
 private:
-   Reducer* pSplitReducer_;
-   Reducer& reducer_;
+   void* self_ = nullptr;
+   bool owned_ = false;
+
+   std::function<void(void*, std::size_t, std::size_t)> work_;
+   std::function<void*(void*, Split)> split_;
+   std::function<void(void*, void*)> join_;
+   std::function<void(void*)> deleter_;
 };
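ReducerWrapper assumes exactly the reducer shape that parallelReduce() has always documented: a call operator over [begin, end), a splitting constructor taking RcppParallel::Split, and a join() method. A sketch of a conforming reducer, adapted from the package's vector-sum vignette example (the names Sum and parallelVectorSum are illustrative, not part of this diff):

// [[Rcpp::depends(RcppParallel)]]
#include <Rcpp.h>
#include <RcppParallel.h>

#include <numeric>

struct Sum : public RcppParallel::Worker
{
   const RcppParallel::RVector<double> input;
   double value;

   Sum(const Rcpp::NumericVector input) : input(input), value(0) {}

   // splitting constructor: called (via the split_ callback above)
   // when the range is divided between threads
   Sum(const Sum& sum, RcppParallel::Split) : input(sum.input), value(0) {}

   // accumulate just the elements in [begin, end)
   void operator()(std::size_t begin, std::size_t end)
   {
      value += std::accumulate(input.begin() + begin,
                               input.begin() + end, 0.0);
   }

   // join my value with that of another Sum
   void join(const Sum& rhs) { value += rhs.value; }
};

// [[Rcpp::export]]
double parallelVectorSum(Rcpp::NumericVector x)
{
   Sum sum(x);
   RcppParallel::parallelReduce(0, x.length(), sum);
   return sum.value;
}

The wrapper's split_ callback copy-constructs a new Sum through exactly this splitting constructor, and join_ forwards to join(), so reducers written this way compile unchanged against the type-erased interface.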
 
-} // anonymous namespace
+void tbbParallelFor(std::size_t begin,
+                    std::size_t end,
+                    Worker& worker,
+                    std::size_t grainSize = 1,
+                    int numThreads = -1);
 
-inline void tbbParallelFor(std::size_t begin, std::size_t end, 
-                           Worker& worker, std::size_t grainSize = 1) {
-   
-   TBBWorker tbbWorker(worker);
-   
-   tbb::parallel_for(tbb::blocked_range<size_t>(begin, end, grainSize), 
-                     tbbWorker);
-}
+void tbbParallelReduceImpl(std::size_t begin,
+                           std::size_t end,
+                           ReducerWrapper& wrapper,
+                           std::size_t grainSize = 1,
+                           int numThreads = -1);
 
 template <typename Reducer>
-inline void tbbParallelReduce(std::size_t begin, std::size_t end, 
-                              Reducer& reducer, std::size_t grainSize = 1) {
-   
-   TBBReducer<Reducer> tbbReducer(reducer);
-   
-   tbb::parallel_reduce(tbb::blocked_range<size_t>(begin, end, grainSize), 
-                        tbbReducer);
+void tbbParallelReduce(std::size_t begin,
+                       std::size_t end,
+                       Reducer& reducer,
+                       std::size_t grainSize = 1,
+                       int numThreads = -1)
+{
+   ReducerWrapper wrapper(&reducer);
+   tbbParallelReduceImpl(begin, end, wrapper, grainSize, numThreads);
 }
 
 } // namespace RcppParallel
diff --git a/inst/include/RcppParallel/TinyThread.h b/inst/include/RcppParallel/TinyThread.h
index 54d55b310..dfa3dcb73 100644
--- a/inst/include/RcppParallel/TinyThread.h
+++ b/inst/include/RcppParallel/TinyThread.h
@@ -2,12 +2,12 @@
 #define __RCPP_PARALLEL_TINYTHREAD__
 
 #include <tinythread.h>
+#include <algorithm>
 
 #include "Common.h"
 
 #include <vector>
-#include <iostream>
 
 #include <cstdlib>
 
@@ -89,10 +89,15 @@ std::vector<IndexRange> splitInputRange(const IndexRange& range,
   
   // allocate ranges
   std::vector<IndexRange> ranges;
   std::size_t begin = range.begin();
+  std::size_t end = begin;
   while (begin < range.end()) {
-      std::size_t end = std::min(begin + grainSize, range.end());
-      ranges.push_back(IndexRange(begin, end));
-      begin = end;
+      if ((range.end() - (begin + grainSize)) < grainSize)
+         end = range.end();
+      else
+         end = std::min(begin
+ grainSize, range.end()); + + ranges.push_back(IndexRange(begin, end)); + begin = end; } // return ranges @@ -102,19 +107,19 @@ std::vector splitInputRange(const IndexRange& range, } // anonymous namespace // Execute the Worker over the IndexRange in parallel -inline void ttParallelFor(std::size_t begin, std::size_t end, - Worker& worker, std::size_t grainSize = 1) { - - using namespace tthread; - +inline void ttParallelFor(std::size_t begin, + std::size_t end, + Worker& worker, + std::size_t grainSize = 1) +{ // split the work IndexRange inputRange(begin, end); std::vector ranges = splitInputRange(inputRange, grainSize); // create threads - std::vector threads; + std::vector threads; for (std::size_t i = 0; i -inline void ttParallelReduce(std::size_t begin, std::size_t end, - Reducer& reducer, std::size_t grainSize = 1) { - - using namespace tthread; - +inline void ttParallelReduce(std::size_t begin, + std::size_t end, + Reducer& reducer, + std::size_t grainSize = 1) +{ // split the work IndexRange inputRange(begin, end); std::vector ranges = splitInputRange(inputRange, grainSize); // create threads (split for each thread and track the allocated workers) - std::vector threads; + std::vector threads; std::vector workers; for (std::size_t i = 0; i friend class aggregator_ext; - uintptr_t status; - aggregator_operation* my_next; -public: - enum aggregator_operation_status { agg_waiting=0, agg_finished }; - aggregator_operation() : status(agg_waiting), my_next(NULL) {} - /// Call start before handling this operation - void start() { call_itt_notify(acquired, &status); } - /// Call finish when done handling this operation - /** The operation will be released to its originating thread, and possibly deleted. */ - void finish() { itt_store_word_with_release(status, uintptr_t(agg_finished)); } - aggregator_operation* next() { return itt_hide_load_word(my_next);} - void set_next(aggregator_operation* n) { itt_hide_store_word(my_next, n); } -}; - -namespace internal { - -class basic_operation_base : public aggregator_operation { - friend class basic_handler; - virtual void apply_body() = 0; -public: - basic_operation_base() : aggregator_operation() {} - virtual ~basic_operation_base() {} -}; - -template -class basic_operation : public basic_operation_base, no_assign { - const Body& my_body; - /*override*/ void apply_body() { my_body(); } -public: - basic_operation(const Body& b) : basic_operation_base(), my_body(b) {} -}; - -class basic_handler { -public: - basic_handler() {} - void operator()(aggregator_operation* op_list) const { - while (op_list) { - // ITT note: &(op_list->status) tag is used to cover accesses to the operation data. - // The executing thread "acquires" the tag (see start()) and then performs - // the associated operation w/o triggering a race condition diagnostics. - // A thread that created the operation is waiting for its status (see execute_impl()), - // so when this thread is done with the operation, it will "release" the tag - // and update the status (see finish()) to give control back to the waiting thread. - basic_operation_base& request = static_cast(*op_list); - // IMPORTANT: need to advance op_list to op_list->next() before calling request.finish() - op_list = op_list->next(); - request.start(); - request.apply_body(); - request.finish(); - } - } -}; - -} // namespace internal - -//! Aggregator base class and expert interface -/** An aggregator for collecting operations coming from multiple sources and executing - them serially on a single thread. 
*/ -template -class aggregator_ext : tbb::internal::no_copy { -public: - aggregator_ext(const handler_type& h) : handler_busy(0), handle_operations(h) { mailbox = NULL; } - - //! EXPERT INTERFACE: Enter a user-made operation into the aggregator's mailbox. - /** Details of user-made operations must be handled by user-provided handler */ - void process(aggregator_operation *op) { execute_impl(*op); } - - protected: - /** Place operation in mailbox, then either handle mailbox or wait for the operation - to be completed by a different thread. */ - void execute_impl(aggregator_operation& op) { - aggregator_operation* res; - - // ITT note: &(op.status) tag is used to cover accesses to this operation. This - // thread has created the operation, and now releases it so that the handler - // thread may handle the associated operation w/o triggering a race condition; - // thus this tag will be acquired just before the operation is handled in the - // handle_operations functor. - call_itt_notify(releasing, &(op.status)); - // insert the operation in the queue - do { - // ITT may flag the following line as a race; it is a false positive: - // This is an atomic read; we don't provide itt_hide_load_word for atomics - op.my_next = res = mailbox; // NOT A RACE - } while (mailbox.compare_and_swap(&op, res) != res); - if (!res) { // first in the list; handle the operations - // ITT note: &mailbox tag covers access to the handler_busy flag, which this - // waiting handler thread will try to set before entering handle_operations. - call_itt_notify(acquired, &mailbox); - start_handle_operations(); - __TBB_ASSERT(op.status, NULL); - } - else { // not first; wait for op to be ready - call_itt_notify(prepare, &(op.status)); - spin_wait_while_eq(op.status, uintptr_t(aggregator_operation::agg_waiting)); - itt_load_word_with_acquire(op.status); - } - } - - - private: - //! An atomically updated list (aka mailbox) of aggregator_operations - atomic mailbox; - - //! Controls thread access to handle_operations - /** Behaves as boolean flag where 0=false, 1=true */ - uintptr_t handler_busy; - - handler_type handle_operations; - - //! Trigger the handling of operations when the handler is free - void start_handle_operations() { - aggregator_operation *pending_operations; - - // ITT note: &handler_busy tag covers access to mailbox as it is passed - // between active and waiting handlers. Below, the waiting handler waits until - // the active handler releases, and the waiting handler acquires &handler_busy as - // it becomes the active_handler. The release point is at the end of this - // function, when all operations in mailbox have been handled by the - // owner of this aggregator. - call_itt_notify(prepare, &handler_busy); - // get handler_busy: only one thread can possibly spin here at a time - spin_wait_until_eq(handler_busy, uintptr_t(0)); - call_itt_notify(acquired, &handler_busy); - // acquire fence not necessary here due to causality rule and surrounding atomics - __TBB_store_with_release(handler_busy, uintptr_t(1)); - - // ITT note: &mailbox tag covers access to the handler_busy flag itself. - // Capturing the state of the mailbox signifies that handler_busy has been - // set and a new active handler will now process that list's operations. 
- call_itt_notify(releasing, &mailbox); - // grab pending_operations - pending_operations = mailbox.fetch_and_store(NULL); - - // handle all the operations - handle_operations(pending_operations); - - // release the handler - itt_store_word_with_release(handler_busy, uintptr_t(0)); - } -}; - -//! Basic aggregator interface -class aggregator : private aggregator_ext { -public: - aggregator() : aggregator_ext(internal::basic_handler()) {} - //! BASIC INTERFACE: Enter a function for exclusive execution by the aggregator. - /** The calling thread stores the function object in a basic_operation and - places the operation in the aggregator's mailbox */ - template - void execute(const Body& b) { - internal::basic_operation op(b); - this->execute_impl(op); - } -}; - -} // namespace interface6 - -using interface6::aggregator; -using interface6::aggregator_ext; -using interface6::aggregator_operation; - -} // namespace tbb - -#endif // __TBB__aggregator_H diff --git a/inst/include/tbb/aligned_space.h b/inst/include/tbb/aligned_space.h deleted file mode 100644 index d2015972d..000000000 --- a/inst/include/tbb/aligned_space.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_aligned_space_H -#define __TBB_aligned_space_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" - -namespace tbb { - -//! Block of space aligned sufficiently to construct an array T with N elements. -/** The elements are not constructed or destroyed by this class. - @ingroup memory_allocation */ -template -class aligned_space { -private: - typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type; - element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)]; -public: - //! Pointer to beginning of array - T* begin() {return internal::punned_cast(this);} - - //! Pointer to one past last element in array. - T* end() {return begin()+N;} -}; - -} // namespace tbb - -#endif /* __TBB_aligned_space_H */ diff --git a/inst/include/tbb/atomic.h b/inst/include/tbb/atomic.h deleted file mode 100644 index 45bf31740..000000000 --- a/inst/include/tbb/atomic.h +++ /dev/null @@ -1,556 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_atomic_H -#define __TBB_atomic_H - -#include - -#if _MSC_VER -#define __TBB_LONG_LONG __int64 -#else -#define __TBB_LONG_LONG long long -#endif /* _MSC_VER */ - -#include "tbb_machine.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244 4267 4512) -#endif - -namespace tbb { - -//! Specifies memory semantics. -enum memory_semantics { - //! Sequential consistency - full_fence, - //! Acquire - acquire, - //! Release - release, - //! No ordering - relaxed -}; - -//! @cond INTERNAL -namespace internal { - -#if __TBB_ATTRIBUTE_ALIGNED_PRESENT - #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); -#elif __TBB_DECLSPEC_ALIGN_PRESENT - #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; -#else - #error Do not know syntax for forcing alignment. -#endif - -template -struct atomic_rep; // Primary template declared, but never defined. 
- -template<> -struct atomic_rep<1> { // Specialization - typedef int8_t word; -}; -template<> -struct atomic_rep<2> { // Specialization - typedef int16_t word; -}; -template<> -struct atomic_rep<4> { // Specialization -#if _MSC_VER && !_WIN64 - // Work-around that avoids spurious /Wp64 warnings - typedef intptr_t word; -#else - typedef int32_t word; -#endif -}; -#if __TBB_64BIT_ATOMICS -template<> -struct atomic_rep<8> { // Specialization - typedef int64_t word; -}; -#endif - -template -struct aligned_storage; - -//the specializations are needed to please MSVC syntax of __declspec(align()) which accept _literal_ constants only -#if __TBB_ATOMIC_CTORS - #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ - template \ - struct aligned_storage { \ - __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ - aligned_storage() = default ; \ - constexpr aligned_storage(value_type value):my_value(value){} \ - }; \ - -#else - #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ - template \ - struct aligned_storage { \ - __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ - }; \ - -#endif - -template -struct aligned_storage { - value_type my_value; -#if __TBB_ATOMIC_CTORS - aligned_storage() = default ; - constexpr aligned_storage(value_type value):my_value(value){} -#endif -}; - -ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2) -ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4) -#if __TBB_64BIT_ATOMICS -ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8) -#endif - -template -struct atomic_traits; // Primary template declared, but not defined. - -#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ - template<> struct atomic_traits { \ - typedef atomic_rep::word word; \ - inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ - return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \ - } \ - inline static word fetch_and_add( volatile void* location, word addend ) { \ - return __TBB_machine_fetchadd##S##M(location,addend); \ - } \ - inline static word fetch_and_store( volatile void* location, word value ) { \ - return __TBB_machine_fetchstore##S##M(location,value); \ - } \ - }; - -#define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ - template \ - struct atomic_traits { \ - typedef atomic_rep::word word; \ - inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ - return __TBB_machine_cmpswp##S(location,new_value,comparand); \ - } \ - inline static word fetch_and_add( volatile void* location, word addend ) { \ - return __TBB_machine_fetchadd##S(location,addend); \ - } \ - inline static word fetch_and_store( volatile void* location, word value ) { \ - return __TBB_machine_fetchstore##S(location,value); \ - } \ - }; - -template -struct atomic_load_store_traits; // Primary template declaration - -#define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \ - template<> struct atomic_load_store_traits { \ - template \ - inline static T load( const volatile T& location ) { \ - return __TBB_load_##M( location ); \ - } \ - template \ - inline static void store( volatile T& location, T value ) { \ - __TBB_store_##M( location, value ); \ - } \ - } - -#if __TBB_USE_FENCED_ATOMICS -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release) 
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed) -#if __TBB_64BIT_ATOMICS -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed) -#endif -#else /* !__TBB_USE_FENCED_ATOMICS */ -__TBB_DECL_ATOMIC_PRIMITIVES(1) -__TBB_DECL_ATOMIC_PRIMITIVES(2) -__TBB_DECL_ATOMIC_PRIMITIVES(4) -#if __TBB_64BIT_ATOMICS -__TBB_DECL_ATOMIC_PRIMITIVES(8) -#endif -#endif /* !__TBB_USE_FENCED_ATOMICS */ - -__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence); -__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire); -__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release); -__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed); - -//! Additive inverse of 1 for type T. -/** Various compilers issue various warnings if -1 is used with various integer types. - The baroque expression below avoids all the warnings (we hope). */ -#define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) - -//! Base class that provides basic functionality for atomic without fetch_and_add. -/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, - and can be copied/compared by memcpy/memcmp. */ -template -struct atomic_impl { -protected: - aligned_storage my_storage; -private: - //TODO: rechecks on recent versions of gcc if union is still the _only_ way to do a conversion without warnings - //! Union type used to convert type T to underlying integral type. - template - union converter { - typedef typename atomic_rep::word bits_type; - converter(){} - converter(value_type a_value) : value(a_value) {} - value_type value; - bits_type bits; - }; - - template - static typename converter::bits_type to_bits(value_t value){ - return converter(value).bits; - } - template - static value_t to_value(typename converter::bits_type bits){ - converter u; - u.bits = bits; - return u.value; - } - - template - union ptr_converter; //Primary template declared, but never defined. 
- - template - union ptr_converter { - ptr_converter(){} - ptr_converter(value_t* a_value) : value(a_value) {} - value_t* value; - uintptr_t bits; - }; - //TODO: check if making to_bits accepting reference (thus unifying it with to_bits_ref) - //does not hurt performance - template - static typename converter::bits_type & to_bits_ref(value_t& value){ - //TODO: this #ifdef is temporary workaround, as union conversion seems to fail - //on suncc for 64 bit types for 32 bit target - #if !__SUNPRO_CC - return *(typename converter::bits_type*)ptr_converter(&value).bits; - #else - return *(typename converter::bits_type*)(&value); - #endif - } - - -public: - typedef T value_type; - -#if __TBB_ATOMIC_CTORS - atomic_impl() = default ; - constexpr atomic_impl(value_type value):my_storage(value){} -#endif - template - value_type fetch_and_store( value_type value ) { - return to_value( - internal::atomic_traits::fetch_and_store( &my_storage.my_value, to_bits(value) ) - ); - } - - value_type fetch_and_store( value_type value ) { - return fetch_and_store(value); - } - - template - value_type compare_and_swap( value_type value, value_type comparand ) { - return to_value( - internal::atomic_traits::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) ) - ); - } - - value_type compare_and_swap( value_type value, value_type comparand ) { - return compare_and_swap(value,comparand); - } - - operator value_type() const volatile { // volatile qualifier here for backwards compatibility - return to_value( - __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) ) - ); - } - - template - value_type load () const { - return to_value( - internal::atomic_load_store_traits::load( to_bits_ref(my_storage.my_value) ) - ); - } - - value_type load () const { - return load(); - } - - template - void store ( value_type value ) { - internal::atomic_load_store_traits::store( to_bits_ref(my_storage.my_value), to_bits(value)); - } - - void store ( value_type value ) { - store( value ); - } - -protected: - value_type store_with_release( value_type rhs ) { - //TODO: unify with store - __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) ); - return rhs; - } -}; - -//! Base class that provides basic functionality for atomic with fetch_and_add. -/** I is the underlying type. - D is the difference type. - StepType should be char if I is an integral type, and T if I is a T*. */ -template -struct atomic_impl_with_arithmetic: atomic_impl { -public: - typedef I value_type; -#if __TBB_ATOMIC_CTORS - atomic_impl_with_arithmetic() = default ; - constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl(value){} -#endif - template - value_type fetch_and_add( D addend ) { - return value_type(internal::atomic_traits::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) )); - } - - value_type fetch_and_add( D addend ) { - return fetch_and_add(addend); - } - - template - value_type fetch_and_increment() { - return fetch_and_add(1); - } - - value_type fetch_and_increment() { - return fetch_and_add(1); - } - - template - value_type fetch_and_decrement() { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } - - value_type fetch_and_decrement() { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } - -public: - value_type operator+=( D value ) { - return fetch_and_add(value)+value; - } - - value_type operator-=( D value ) { - // Additive inverse of value computed using binary minus, - // instead of unary minus, for sake of avoiding compiler warnings. 
- return operator+=(D(0)-value); - } - - value_type operator++() { - return fetch_and_add(1)+1; - } - - value_type operator--() { - return fetch_and_add(__TBB_MINUS_ONE(D))-1; - } - - value_type operator++(int) { - return fetch_and_add(1); - } - - value_type operator--(int) { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } -}; - -} /* Internal */ -//! @endcond - -//! Primary template for atomic. -/** See the Reference for details. - @ingroup synchronization */ -template -struct atomic: internal::atomic_impl { -#if __TBB_ATOMIC_CTORS - atomic() = default; - constexpr atomic(T arg): internal::atomic_impl(arg) {} -#endif - T operator=( T rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) {this->store_with_release(rhs); return *this;} -}; - -#if __TBB_ATOMIC_CTORS - #define __TBB_DECL_ATOMIC(T) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - atomic() = default; \ - constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \ - \ - T operator=( T rhs ) {return store_with_release(rhs);} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; -#else - #define __TBB_DECL_ATOMIC(T) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - T operator=( T rhs ) {return store_with_release(rhs);} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; -#endif - -#if __TBB_64BIT_ATOMICS -//TODO: consider adding non-default (and atomic) copy constructor for 32bit platform -__TBB_DECL_ATOMIC(__TBB_LONG_LONG) -__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) -#else -// test_atomic will verify that sizeof(long long)==8 -#endif -__TBB_DECL_ATOMIC(long) -__TBB_DECL_ATOMIC(unsigned long) - -#if _MSC_VER && !_WIN64 -#if __TBB_ATOMIC_CTORS -/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. - It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) - with an operator=(U) that explicitly converts the U to a T. Types T and U should be - type synonyms on the platform. Type U should be the wider variant of T from the - perspective of /Wp64. */ -#define __TBB_DECL_ATOMIC_ALT(T,U) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - atomic() = default ; \ - constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \ - T operator=( U rhs ) {return store_with_release(T(rhs));} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; -#else -#define __TBB_DECL_ATOMIC_ALT(T,U) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - T operator=( U rhs ) {return store_with_release(T(rhs));} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; -#endif -__TBB_DECL_ATOMIC_ALT(unsigned,size_t) -__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t) -#else -__TBB_DECL_ATOMIC(unsigned) -__TBB_DECL_ATOMIC(int) -#endif /* _MSC_VER && !_WIN64 */ - -__TBB_DECL_ATOMIC(unsigned short) -__TBB_DECL_ATOMIC(short) -__TBB_DECL_ATOMIC(char) -__TBB_DECL_ATOMIC(signed char) -__TBB_DECL_ATOMIC(unsigned char) - -#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) -__TBB_DECL_ATOMIC(wchar_t) -#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ - -//! Specialization for atomic with arithmetic and operator->. 
-template struct atomic: internal::atomic_impl_with_arithmetic { -#if __TBB_ATOMIC_CTORS - atomic() = default ; - constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic(arg) {} -#endif - T* operator=( T* rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) { - this->store_with_release(rhs); return *this; - } - T* operator->() const { - return (*this); - } -}; - -//! Specialization for atomic, for sake of not allowing arithmetic or operator->. -template<> struct atomic: internal::atomic_impl { -#if __TBB_ATOMIC_CTORS - atomic() = default ; - constexpr atomic(void* arg): internal::atomic_impl(arg) {} -#endif - void* operator=( void* rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) { - this->store_with_release(rhs); return *this; - } -}; - -// Helpers to workaround ugly syntax of calling template member function of a -// template class with template argument dependent on template parameters. - -template -T load ( const atomic& a ) { return a.template load(); } - -template -void store ( atomic& a, T value ) { a.template store(value); } - -namespace interface6{ -//! Make an atomic for use in an initialization (list), as an alternative to zero-initialization or normal assignment. -template -atomic make_atomic(T t) { - atomic a; - store(a,t); - return a; -} -} -using interface6::make_atomic; - -namespace internal { -template -void swap(atomic & lhs, atomic & rhs){ - T tmp = load(lhs); - store(lhs,load(rhs)); - store(rhs,tmp); -} - -// only to aid in the gradual conversion of ordinary variables to proper atomics -template -inline atomic& as_atomic( T& t ) { - return (atomic&)t; -} -} // namespace tbb::internal - -} // namespace tbb - -#if _MSC_VER && !__INTEL_COMPILER - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - -#endif /* __TBB_atomic_H */ diff --git a/inst/include/tbb/blocked_range2d.h b/inst/include/tbb/blocked_range2d.h deleted file mode 100644 index 230a94ae7..000000000 --- a/inst/include/tbb/blocked_range2d.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_blocked_range2d_H -#define __TBB_blocked_range2d_H - -#include "tbb_stddef.h" -#include "blocked_range.h" - -namespace tbb { - -//! A 2-dimensional range that models the Range concept. -/** @ingroup algorithms */ -template -class blocked_range2d { -public: - //! Type for size of an iteration range - typedef blocked_range row_range_type; - typedef blocked_range col_range_type; - -private: - row_range_type my_rows; - col_range_type my_cols; - -public: - - blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, - ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : - my_rows(row_begin,row_end,row_grainsize), - my_cols(col_begin,col_end,col_grainsize) - { - } - - blocked_range2d( RowValue row_begin, RowValue row_end, - ColValue col_begin, ColValue col_end ) : - my_rows(row_begin,row_end), - my_cols(col_begin,col_end) - { - } - - //! True if range is empty - bool empty() const { - // Yes, it is a logical OR here, not AND. - return my_rows.empty() || my_cols.empty(); - } - - //! True if range is divisible into two pieces. - bool is_divisible() const { - return my_rows.is_divisible() || my_cols.is_divisible(); - } - - blocked_range2d( blocked_range2d& r, split ) : - my_rows(r.my_rows), - my_cols(r.my_cols) - { - split split_obj; - do_split(r, split_obj); - } - -#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES - //! Static field to support proportional split - static const bool is_divisible_in_proportion = true; - - blocked_range2d( blocked_range2d& r, proportional_split& proportion ) : - my_rows(r.my_rows), - my_cols(r.my_cols) - { - do_split(r, proportion); - } -#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ - - template - void do_split( blocked_range2d& r, Split& split_obj ) - { - if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { - my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); - } else { - my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); - } - } - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! The columns of the iteration space - const col_range_type& cols() const {return my_cols;} -}; - -} // namespace tbb - -#endif /* __TBB_blocked_range2d_H */ diff --git a/inst/include/tbb/cache_aligned_allocator.h b/inst/include/tbb/cache_aligned_allocator.h deleted file mode 100644 index 253ef7b2e..000000000 --- a/inst/include/tbb/cache_aligned_allocator.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_cache_aligned_allocator_H -#define __TBB_cache_aligned_allocator_H - -#include -#include "tbb_stddef.h" -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include // std::forward -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! Cache/sector line size. - /** @ingroup memory_allocation */ - size_t __TBB_EXPORTED_FUNC NFS_GetLineSize(); - - //! Allocate memory on cache/sector line boundary. - /** @ingroup memory_allocation */ - void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint ); - - //! Free memory allocated by NFS_Allocate. - /** Freeing a NULL pointer is allowed, but has no effect. - @ingroup memory_allocation */ - void __TBB_EXPORTED_FUNC NFS_Free( void* ); -} -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template -class cache_aligned_allocator { -public: - typedef typename internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef cache_aligned_allocator other; - }; - - cache_aligned_allocator() throw() {} - cache_aligned_allocator( const cache_aligned_allocator& ) throw() {} - template cache_aligned_allocator(const cache_aligned_allocator&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects, starting on a cache/sector line. - pointer allocate( size_type n, const void* hint=0 ) { - // The "hint" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt - return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast(hint) )); - } - - //! Free block of memory that starts on a cache line - void deallocate( pointer p, size_type ) { - internal::NFS_Free(p); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type); - } - - //! Copy-construct value at location pointed to by p. -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - template - void construct(U *p, Args&&... 
args) - { ::new((void *)p) U(std::forward(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} -#endif - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class cache_aligned_allocator { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef cache_aligned_allocator other; - }; -}; - -template -inline bool operator==( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return true;} - -template -inline bool operator!=( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return false;} - -} // namespace tbb - -#endif /* __TBB_cache_aligned_allocator_H */ diff --git a/inst/include/tbb/combinable.h b/inst/include/tbb/combinable.h deleted file mode 100644 index 566606d6f..000000000 --- a/inst/include/tbb/combinable.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_combinable_H -#define __TBB_combinable_H - -#include "enumerable_thread_specific.h" -#include "cache_aligned_allocator.h" - -namespace tbb { -/** \name combinable - **/ -//@{ -//! Thread-local storage with optional reduction -/** @ingroup containers */ - template - class combinable { - private: - typedef typename tbb::cache_aligned_allocator my_alloc; - - typedef typename tbb::enumerable_thread_specific my_ets_type; - my_ets_type my_ets; - - public: - - combinable() { } - - template - combinable( finit _finit) : my_ets(_finit) { } - - //! 
destructor - ~combinable() { - } - - combinable(const combinable& other) : my_ets(other.my_ets) { } - - combinable & operator=( const combinable & other) { my_ets = other.my_ets; return *this; } - - void clear() { my_ets.clear(); } - - T& local() { return my_ets.local(); } - - T& local(bool & exists) { return my_ets.local(exists); } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); } - - }; -} // namespace tbb -#endif /* __TBB_combinable_H */ diff --git a/inst/include/tbb/compat/condition_variable b/inst/include/tbb/compat/condition_variable deleted file mode 100644 index 89c2ccf55..000000000 --- a/inst/include/tbb/compat/condition_variable +++ /dev/null @@ -1,457 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_condition_variable_H -#define __TBB_condition_variable_H - -#if _WIN32||_WIN64 -#include "../machine/windows_api.h" - -namespace tbb { -namespace interface5 { -namespace internal { -struct condition_variable_using_event -{ - //! Event for blocking waiting threads. - HANDLE event; - //! Protects invariants involving n_waiters, release_count, and epoch. - CRITICAL_SECTION mutex; - //! Number of threads waiting on this condition variable - int n_waiters; - //! Number of threads remaining that should no longer wait on this condition variable. - int release_count; - //! To keep threads from waking up prematurely with earlier signals. 
- unsigned epoch; -}; -}}} // namespace tbb::interface5::internal - -#ifndef CONDITION_VARIABLE_INIT -typedef void* CONDITION_VARIABLE; -typedef CONDITION_VARIABLE* PCONDITION_VARIABLE; -#endif - -#else /* if not _WIN32||_WIN64 */ -#include // some systems need it for ETIMEDOUT -#include -#if __linux__ -#include -#else /* generic Unix */ -#include -#endif -#endif /* _WIN32||_WIN64 */ - -#include "../tbb_stddef.h" -#include "../mutex.h" -#include "../tbb_thread.h" -#include "../tbb_exception.h" -#include "../tbb_profiling.h" - -namespace tbb { - -namespace interface5 { - -// C++0x standard working draft 30.4.3 -// Lock tag types -struct defer_lock_t { }; //! do not acquire ownership of the mutex -struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking -struct adopt_lock_t { }; //! assume the calling thread has already -const defer_lock_t defer_lock = {}; -const try_to_lock_t try_to_lock = {}; -const adopt_lock_t adopt_lock = {}; - -// C++0x standard working draft 30.4.3.1 -//! lock_guard -template -class lock_guard : tbb::internal::no_copy { -public: - //! mutex type - typedef M mutex_type; - - //! Constructor - /** precondition: If mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. */ - explicit lock_guard(mutex_type& m) : pm(m) {m.lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex m. */ - lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {} - - //! Destructor - ~lock_guard() { pm.unlock(); } -private: - mutex_type& pm; -}; - -// C++0x standard working draft 30.4.3.2 -//! unique_lock -template -class unique_lock : tbb::internal::no_copy { - friend class condition_variable; -public: - typedef M mutex_type; - - // 30.4.3.2.1 construct/copy/destroy - // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use. - //! Constructor - /** postcondition: pm==0 && owns==false */ - unique_lock() : pm(NULL), owns(false) {} - - //! Constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==true */ - explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;} - - //! Defer_lock constructor - /** postcondition: pm==&m and owns==false */ - unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {} - - //! Try_to_lock constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==res where res is the value returned by - the call to m.try_lock(). */ - unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail. - postcondition: pm==&m and owns==true */ - unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {} - - //! Timed unique_lock acquisition. - /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that - it uses tbb::tick_count::interval_t to specify the time duration. */ - unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );} - - //! Destructor - ~unique_lock() { if( owns ) pm->unlock(); } - - // 30.4.3.2.2 locking - //! Lock the mutex and own it. 
- void lock() { - if( pm ) { - if( !owns ) { - pm->lock(); - owns = true; - } else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( owns, NULL ); - } - - //! Try to lock the mutex. - /** If successful, note that this lock owns it. Otherwise, set it false. */ - bool try_lock() { - if( pm ) { - if( !owns ) - owns = pm->try_lock(); - else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - return owns; - } - - //! Try to lock the mutex. - bool try_lock_for( const tick_count::interval_t &i ); - - //! Unlock the mutex - /** And note that this lock no longer owns it. */ - void unlock() { - if( owns ) { - pm->unlock(); - owns = false; - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( !owns, NULL ); - } - - // 30.4.3.2.3 modifiers - //! Swap the two unique locks - void swap(unique_lock& u) { - mutex_type* t_pm = u.pm; u.pm = pm; pm = t_pm; - bool t_owns = u.owns; u.owns = owns; owns = t_owns; - } - - //! Release control over the mutex. - mutex_type* release() { - mutex_type* o_pm = pm; - pm = NULL; - owns = false; - return o_pm; - } - - // 30.4.3.2.4 observers - //! Does this lock own the mutex? - bool owns_lock() const { return owns; } - - // TODO: Un-comment 'explicit' when the last non-C++0x compiler support is dropped - //! Does this lock own the mutex? - /*explicit*/ operator bool() const { return owns; } - - //! Return the mutex that this lock currently has. - mutex_type* mutex() const { return pm; } - -private: - mutex_type* pm; - bool owns; -}; - -template -bool unique_lock::try_lock_for( const tick_count::interval_t &i) -{ - const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */ - // the smallest wait-time is 0.1 milliseconds. - bool res = pm->try_lock(); - int duration_in_micro; - if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) { - tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3 - do { - this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds - duration_in_micro -= unique_lock_tick; - res = pm->try_lock(); - } while( !res && duration_in_micro>unique_lock_tick ); - } - return (owns=res); -} - -//! Swap the two unique locks that have the mutexes of same type -template -void swap(unique_lock& x, unique_lock& y) { x.swap( y ); } - -namespace internal { - -#if _WIN32||_WIN64 -union condvar_impl_t { - condition_variable_using_event cv_event; - CONDITION_VARIABLE cv_native; -}; -void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_destroy_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv ); -bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL ); - -#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */ -typedef pthread_cond_t condvar_impl_t; -#endif - -} // namespace internal - -//! cv_status -/** C++0x standard working draft 30.5 */ -enum cv_status { no_timeout, timeout }; - -//! condition variable -/** C++0x standard working draft 30.5.1 - @ingroup synchronization */ -class condition_variable : tbb::internal::no_copy { -public: - //! 
Constructor - condition_variable() { -#if _WIN32||_WIN64 - internal_initialize_condition_variable( my_cv ); -#else - pthread_cond_init( &my_cv, NULL ); -#endif - } - - //! Destructor - ~condition_variable() { - //precondition: There shall be no thread blocked on *this. -#if _WIN32||_WIN64 - internal_destroy_condition_variable( my_cv ); -#else - pthread_cond_destroy( &my_cv ); -#endif - } - - //! Notify one thread and wake it up - void notify_one() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_one( my_cv ); -#else - pthread_cond_signal( &my_cv ); -#endif - } - - //! Notify all threads - void notify_all() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_all( my_cv ); -#else - pthread_cond_broadcast( &my_cv ); -#endif - } - - //! Release the mutex associated with the lock and wait on this condition variable - void wait(unique_lock& lock); - - //! Wait on this condition variable while pred is false - template - void wait(unique_lock& lock, Predicate pred) { - while( !pred() ) - wait( lock ); - } - - //! Timed version of wait() - cv_status wait_for(unique_lock& lock, const tick_count::interval_t &i ); - - //! Timed version of the predicated wait - /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */ - template - bool wait_for(unique_lock& lock, const tick_count::interval_t &i, Predicate pred) - { - while( !pred() ) { - cv_status st = wait_for( lock, i ); - if( st==timeout ) - return pred(); - } - return true; - } - - // C++0x standard working draft. 30.2.3 - typedef internal::condvar_impl_t* native_handle_type; - - native_handle_type native_handle() { return (native_handle_type) &my_cv; } - -private: - internal::condvar_impl_t my_cv; -}; - - -#if _WIN32||_WIN64 -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) { - int ec = GetLastError(); - // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT - __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait() - if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) { - int ec = GetLastError(); - if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT ) - rc = timeout; - else { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} - -#else /* !(_WIN32||_WIN64) */ -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - // upon successful return, the mutex has been locked and is owned by the calling thread. 
- lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ -#if __linux__ - struct timespec req; - double sec = i.seconds(); - clock_gettime( CLOCK_REALTIME, &req ); - req.tv_sec += static_cast(sec); - req.tv_nsec += static_cast( (sec - static_cast(sec))*1e9 ); -#else /* generic Unix */ - struct timeval tv; - struct timespec req; - double sec = i.seconds(); - int status = gettimeofday(&tv, NULL); - __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); - req.tv_sec = tv.tv_sec + static_cast(sec); - req.tv_nsec = tv.tv_usec*1000 + static_cast( (sec - static_cast(sec))*1e9 ); -#endif /*(choice of OS) */ - if( req.tv_nsec>=1e9 ) { - req.tv_sec += 1; - req.tv_nsec -= static_cast(1e9); - } - __TBB_ASSERT( 0<=req.tv_nsec && req.tv_nsec<1e9, NULL ); - - int ec; - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) { - if( ec==ETIMEDOUT ) - rc = timeout; - else { - __TBB_ASSERT( lock.try_lock()==false, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} -#endif /* !(_WIN32||_WIN64) */ - -} // namespace interface5 - -__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable) - -} // namespace tbb - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -using tbb::interface5::defer_lock_t; -using tbb::interface5::try_to_lock_t; -using tbb::interface5::adopt_lock_t; -using tbb::interface5::defer_lock; -using tbb::interface5::try_to_lock; -using tbb::interface5::adopt_lock; -using tbb::interface5::lock_guard; -using tbb::interface5::unique_lock; -using tbb::interface5::swap; /* this is for void std::swap(unique_lock&,unique_lock&) */ -using tbb::interface5::condition_variable; -using tbb::interface5::cv_status; -using tbb::interface5::timeout; -using tbb::interface5::no_timeout; - -} // namespace std - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_condition_variable_H */ diff --git a/inst/include/tbb/compat/ppl.h b/inst/include/tbb/compat/ppl.h deleted file mode 100644 index 9012e0acd..000000000 --- a/inst/include/tbb/compat/ppl.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
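The POSIX `wait_for()` above converts the relative interval into an absolute `CLOCK_REALTIME` deadline for `pthread_cond_timedwait()`, normalizing the nanosecond field after the addition. The arithmetic in isolation (a sketch; `make_deadline` is an illustrative name):

```cpp
#include <ctime>

// Turn a relative wait of `sec` seconds into an absolute timespec,
// carrying nanosecond overflow into tv_sec so that tv_nsec stays in
// [0, 1e9), as the assertion in the deleted code requires.
timespec make_deadline(double sec) {
    timespec req;
    clock_gettime(CLOCK_REALTIME, &req);
    req.tv_sec  += static_cast<time_t>(sec);
    req.tv_nsec += static_cast<long>((sec - static_cast<long>(sec)) * 1e9);
    if (req.tv_nsec >= 1000000000L) {
        req.tv_sec  += 1;
        req.tv_nsec -= 1000000000L;
    }
    return req;
}
```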
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_compat_ppl_H -#define __TBB_compat_ppl_H - -#include "../task_group.h" -#include "../parallel_invoke.h" -#include "../parallel_for_each.h" -#include "../parallel_for.h" -#include "../tbb_exception.h" -#include "../critical_section.h" -#include "../reader_writer_lock.h" -#include "../combinable.h" - -namespace Concurrency { - -#if __TBB_TASK_GROUP_CONTEXT - using tbb::task_handle; - using tbb::task_group_status; - using tbb::task_group; - using tbb::structured_task_group; - using tbb::invalid_multiple_scheduling; - using tbb::missing_wait; - using tbb::make_task; - - using tbb::not_complete; - using tbb::complete; - using tbb::canceled; - - using tbb::is_current_task_group_canceling; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - using tbb::parallel_invoke; - using tbb::strict_ppl::parallel_for; - using tbb::parallel_for_each; - using tbb::critical_section; - using tbb::reader_writer_lock; - using tbb::combinable; - - using tbb::improper_lock; - -} // namespace Concurrency - -#endif /* __TBB_compat_ppl_H */ diff --git a/inst/include/tbb/compat/thread b/inst/include/tbb/compat/thread deleted file mode 100644 index 64197bfc0..000000000 --- a/inst/include/tbb/compat/thread +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_thread_H -#define __TBB_thread_H - -#include "../tbb_thread.h" - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -typedef tbb::tbb_thread thread; - -namespace this_thread { - using tbb::this_tbb_thread::get_id; - using tbb::this_tbb_thread::yield; - - inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { - tbb::internal::thread_sleep_v3( rel_time ); - } - -} - -} - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_thread_H */ diff --git a/inst/include/tbb/compat/tuple b/inst/include/tbb/compat/tuple deleted file mode 100644 index 00b7809ca..000000000 --- a/inst/include/tbb/compat/tuple +++ /dev/null @@ -1,488 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
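The removed `compat/ppl.h` simply re-exported TBB facilities under a `Concurrency` namespace, and `compat/thread` aliased `std::thread` to `tbb_thread`. Code that relied on these shims can call the TBB names directly; a small sketch with `tbb::task_group`, an API available in both classic TBB and oneTBB:

```cpp
#include <tbb/task_group.h>
#include <cstdio>

int main() {
    tbb::task_group tg;
    tg.run([] { std::printf("task A\n"); });  // spawn two independent tasks
    tg.run([] { std::printf("task B\n"); });
    tg.wait();                                // block until both complete
    return 0;
}
```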
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tuple_H -#define __TBB_tuple_H - -#include -#include "../tbb_stddef.h" - -// build preprocessor variables for varying number of arguments -// Need the leading comma so the empty __TBB_T_PACK will not cause a syntax error. -#if __TBB_VARIADIC_MAX <= 5 -#define __TBB_T_PACK -#define __TBB_U_PACK -#define __TBB_TYPENAME_T_PACK -#define __TBB_TYPENAME_U_PACK -#define __TBB_NULL_TYPE_PACK -#define __TBB_REF_T_PARAM_PACK -#define __TBB_CONST_REF_T_PARAM_PACK -#define __TBB_T_PARAM_LIST_PACK -#define __TBB_CONST_NULL_REF_PACK -// -#elif __TBB_VARIADIC_MAX == 6 -#define __TBB_T_PACK ,__T5 -#define __TBB_U_PACK ,__U5 -#define __TBB_TYPENAME_T_PACK , typename __T5 -#define __TBB_TYPENAME_U_PACK , typename __U5 -#define __TBB_NULL_TYPE_PACK , null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5 -#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5 -#define __TBB_T_PARAM_LIST_PACK ,t5 -#define __TBB_CONST_NULL_REF_PACK , const null_type& -// -#elif __TBB_VARIADIC_MAX == 7 -#define __TBB_T_PACK ,__T5, __T6 -#define __TBB_U_PACK ,__U5, __U6 -#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6 -#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6 -#define __TBB_NULL_TYPE_PACK , null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6 -#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5, const __T6& t6 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type& -// -#elif __TBB_VARIADIC_MAX == 8 -#define __TBB_T_PACK ,__T5, __T6, __T7 -#define __TBB_U_PACK ,__U5, __U6, __U7 -#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6, typename __T7 -#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6, typename __U7 -#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7 -#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type& -// -#elif __TBB_VARIADIC_MAX == 9 -#define __TBB_T_PACK ,__T5, __T6, __T7, __T8 -#define __TBB_U_PACK ,__U5, __U6, __U7, __U8 -#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8 -#define __TBB_TYPENAME_U_PACK , 
typename __U5, typename __U6, typename __U7, typename __U8 -#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8 -#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type& -// -#elif __TBB_VARIADIC_MAX >= 10 -#define __TBB_T_PACK ,__T5, __T6, __T7, __T8, __T9 -#define __TBB_U_PACK ,__U5, __U6, __U7, __U8, __U9 -#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8, typename __T9 -#define __TBB_TYPENAME_U_PACK , typename __U5, typename __U6, typename __U7, typename __U8, typename __U9 -#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8, __T9& t9 -#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8, const __T9& t9 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 ,t9 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type&, const null_type& -#endif - - - -namespace tbb { -namespace interface5 { - -namespace internal { -struct null_type { }; -} -using internal::null_type; - -// tuple forward declaration -template = 6 -, typename __T5=null_type -#if __TBB_VARIADIC_MAX >= 7 -, typename __T6=null_type -#if __TBB_VARIADIC_MAX >= 8 -, typename __T7=null_type -#if __TBB_VARIADIC_MAX >= 9 -, typename __T8=null_type -#if __TBB_VARIADIC_MAX >= 10 -, typename __T9=null_type -#endif -#endif -#endif -#endif -#endif -> -class tuple; - -namespace internal { - -// const null_type temp -inline const null_type cnull() { return null_type(); } - -// cons forward declaration -template struct cons; - -// type of a component of the cons -template -struct component { - typedef typename __T::tail_type next; - typedef typename component<__N-1,next>::type type; -}; - -template -struct component<0,__T> { - typedef typename __T::head_type type; -}; - -template<> -struct component<0,null_type> { - typedef null_type type; -}; - -// const version of component - -template -struct component<__N, const __T> -{ - typedef typename __T::tail_type next; - typedef const typename component<__N-1,next>::type type; -}; - -template -struct component<0, const __T> -{ - typedef const typename __T::head_type type; -}; - - -// helper class for getting components of cons -template< int __N> -struct get_helper { -template -inline static typename component<__N, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) { - return get_helper<__N-1>::get(ti.tail); -} -template -inline static typename component<__N, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) { - return get_helper<__N-1>::get(ti.tail); -} -}; - -template<> -struct get_helper<0> { -template -inline static typename component<0, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) { - return ti.head; -} -template -inline static typename component<0, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) { - return ti.head; -} -}; - -// traits adaptor -template -struct tuple_traits { - typedef cons <__T0, typename tuple_traits<__T1, __T2, __T3, __T4 __TBB_T_PACK , null_type>::U > U; -}; - -template -struct tuple_traits<__T0, null_type, null_type, null_type, null_type __TBB_NULL_TYPE_PACK > { - typedef cons<__T0, null_type> U; -}; - 
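The removed `compat/tuple` builds tuples as recursive cons lists: each `cons` holds a `head` plus a `tail`, `component<N, T>` peels one tail per recursion level, and `get_helper` follows the same recursion at runtime. A compact modern restatement of the technique (illustrative names, not the TBB implementation):

```cpp
#include <iostream>

struct null_type {};

template <typename Head, typename Tail = null_type>
struct cons {
    typedef Head head_type;
    typedef Tail tail_type;
    Head head;
    Tail tail;
};

// element<N, C>: recurse into the tail N times, then take the head type.
template <int N, typename C>
struct element { typedef typename element<N - 1, typename C::tail_type>::type type; };
template <typename C>
struct element<0, C> { typedef typename C::head_type type; };

// getter<N> mirrors the type recursion at runtime.
template <int N>
struct getter {
    template <typename C>
    static typename element<N, C>::type& get(C& c) { return getter<N - 1>::get(c.tail); }
};
template <>
struct getter<0> {
    template <typename C>
    static typename element<0, C>::type& get(C& c) { return c.head; }
};

int main() {
    cons<int, cons<double> > t = { 1, { 2.5, {} } };
    std::cout << getter<0>::get(t) << ' ' << getter<1>::get(t) << '\n';  // 1 2.5
}
```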
-template<> -struct tuple_traits { - typedef null_type U; -}; - - -// core cons defs -template -struct cons{ - - typedef __HT head_type; - typedef __TT tail_type; - - head_type head; - tail_type tail; - - static const int length = 1 + tail_type::length; - - // default constructors - explicit cons() : head(), tail() { } - - // non-default constructors - cons(head_type& h, const tail_type& t) : head(h), tail(t) { } - - template - cons(const __T0& t0, const __T1& t1, const __T2& t2, const __T3& t3, const __T4& t4 __TBB_CONST_REF_T_PARAM_PACK) : - head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK, cnull()) { } - - template - cons(__T0& t0, __T1& t1, __T2& t2, __T3& t3, __T4& t4 __TBB_REF_T_PARAM_PACK) : - head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK , cnull()) { } - - template - cons(const cons<__HT1,__TT1>& other) : head(other.head), tail(other.tail) { } - - cons& operator=(const cons& other) { head = other.head; tail = other.tail; return *this; } - - friend bool operator==(const cons& me, const cons& other) { - return me.head == other.head && me.tail == other.tail; - } - friend bool operator<(const cons& me, const cons& other) { - return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail); - } - friend bool operator>(const cons& me, const cons& other) { return other=(const cons& me, const cons& other) { return !(meother); } - - template - friend bool operator==(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { - return me.head == other.head && me.tail == other.tail; - } - - template - friend bool operator<(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { - return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail); - } - - template - friend bool operator>(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return other - friend bool operator!=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me==other); } - - template - friend bool operator>=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me - friend bool operator<=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me>other); } - - -}; // cons - - -template -struct cons<__HT,null_type> { - - typedef __HT head_type; - typedef null_type tail_type; - - head_type head; - - static const int length = 1; - - // default constructor - cons() : head() { /*std::cout << "default constructor 1\n";*/ } - - cons(const null_type&, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head() { /*std::cout << "default constructor 2\n";*/ } - - // non-default constructor - template - cons(__T1& t1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t1) { /*std::cout << "non-default a1, t1== " << t1 << "\n";*/} - - cons(head_type& h, const null_type& = null_type() ) : head(h) { } - cons(const head_type& t0, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t0) { } - - // converting constructor - template - cons(__HT1 h1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(h1) { } - - // copy constructor - template - cons( const cons<__HT1, null_type>& other) : head(other.head) { } - - // assignment operator - cons& operator=(const cons& other) { head = other.head; return *this; } - - friend bool operator==(const cons& me, const cons& other) { return me.head == other.head; } - friend bool operator<(const 
cons& me, const cons& other) { return me.head < other.head; } - friend bool operator>(const cons& me, const cons& other) { return otherother); } - friend bool operator>=(const cons& me, const cons& other) {return !(me - friend bool operator==(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { - return me.head == other.head; - } - - template - friend bool operator<(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { - return me.head < other.head; - } - - template - friend bool operator>(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return other - friend bool operator!=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me==other); } - - template - friend bool operator<=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me>other); } - - template - friend bool operator>=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me -struct cons { typedef null_type tail_type; static const int length = 0; }; - -// wrapper for default constructor -template -inline const __T wrap_dcons(__T*) { return __T(); } - -} // namespace internal - -// tuple definition -template -class tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U { - // friends - template friend class tuple_size; - template friend struct tuple_element; - - // stl components - typedef tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > value_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef size_t size_type; - - typedef typename internal::tuple_traits<__T0,__T1,__T2,__T3, __T4 __TBB_T_PACK >::U my_cons; - -public: - tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL) - ,const __T1& t1=internal::wrap_dcons((__T1*)NULL) - ,const __T2& t2=internal::wrap_dcons((__T2*)NULL) - ,const __T3& t3=internal::wrap_dcons((__T3*)NULL) - ,const __T4& t4=internal::wrap_dcons((__T4*)NULL) -#if __TBB_VARIADIC_MAX >= 6 - ,const __T5& t5=internal::wrap_dcons((__T5*)NULL) -#if __TBB_VARIADIC_MAX >= 7 - ,const __T6& t6=internal::wrap_dcons((__T6*)NULL) -#if __TBB_VARIADIC_MAX >= 8 - ,const __T7& t7=internal::wrap_dcons((__T7*)NULL) -#if __TBB_VARIADIC_MAX >= 9 - ,const __T8& t8=internal::wrap_dcons((__T8*)NULL) -#if __TBB_VARIADIC_MAX >= 10 - ,const __T9& t9=internal::wrap_dcons((__T9*)NULL) -#endif -#endif -#endif -#endif -#endif - ) : - my_cons(t0,t1,t2,t3,t4 __TBB_T_PARAM_LIST_PACK) { } - - template - struct internal_tuple_element { - typedef typename internal::component<__N,my_cons>::type type; - }; - - template - typename internal_tuple_element<__N>::type& get() { return internal::get_helper<__N>::get(*this); } - - template - typename internal_tuple_element<__N>::type const& get() const { return internal::get_helper<__N>::get(*this); } - - template - tuple& operator=(const internal::cons<__U1,__U2>& other) { - my_cons::operator=(other); - return *this; - } - - template - tuple& operator=(const std::pair<__U1,__U2>& other) { - // __TBB_ASSERT(tuple_size::value == 2, "Invalid size for pair to tuple assignment"); - this->head = other.first; - this->tail.head = other.second; - return *this; - } - - friend bool operator==(const tuple& me, const tuple& other) {return static_cast(me)==(other);} - friend bool operator<(const tuple& me, const tuple& other) {return static_cast(me)<(other);} - friend bool operator>(const tuple& me, const tuple& other) {return 
static_cast(me)>(other);} - friend bool operator!=(const tuple& me, const tuple& other) {return static_cast(me)!=(other);} - friend bool operator>=(const tuple& me, const tuple& other) {return static_cast(me)>=(other);} - friend bool operator<=(const tuple& me, const tuple& other) {return static_cast(me)<=(other);} - -}; // tuple - -// empty tuple -template<> -class tuple : public null_type { -}; - -// helper classes - -template < typename __T> -class tuple_size { -public: - static const size_t value = 1 + tuple_size::value; -}; - -template <> -class tuple_size > { -public: - static const size_t value = 0; -}; - -template <> -class tuple_size { -public: - static const size_t value = 0; -}; - -template -struct tuple_element { - typedef typename internal::component<__N, typename __T::my_cons>::type type; -}; - -template -inline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type& - get(tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); } - -template -inline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type const& - get(const tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); } - -} // interface5 -} // tbb - -#if !__TBB_CPP11_TUPLE_PRESENT -namespace tbb { - namespace flow { - using tbb::interface5::tuple; - using tbb::interface5::tuple_size; - using tbb::interface5::tuple_element; - using tbb::interface5::get; - } -} -#endif - -#undef __TBB_T_PACK -#undef __TBB_U_PACK -#undef __TBB_TYPENAME_T_PACK -#undef __TBB_TYPENAME_U_PACK -#undef __TBB_NULL_TYPE_PACK -#undef __TBB_REF_T_PARAM_PACK -#undef __TBB_CONST_REF_T_PARAM_PACK -#undef __TBB_T_PARAM_LIST_PACK -#undef __TBB_CONST_NULL_REF_PACK - -#endif /* __TBB_tuple_H */ diff --git a/inst/include/tbb/concurrent_hash_map.h b/inst/include/tbb/concurrent_hash_map.h deleted file mode 100644 index 27b710f5a..000000000 --- a/inst/include/tbb/concurrent_hash_map.h +++ /dev/null @@ -1,1417 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
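The `tuple_size`, `tuple_element`, and free `get<N>()` helpers deleted above mirror what the standard library now provides directly, which is why the compat header could be dropped. The equivalent with `std::tuple` on any C++11 compiler:

```cpp
#include <tuple>
#include <string>
#include <iostream>

int main() {
    std::tuple<int, std::string> t(42, "tbb");
    static_assert(std::tuple_size<decltype(t)>::value == 2, "two elements");
    std::tuple_element<1, decltype(t)>::type s = std::get<1>(t);  // std::string
    std::cout << std::get<0>(t) << ' ' << s << '\n';
    return 0;
}
```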
-*/ - -#ifndef __TBB_concurrent_hash_map_H -#define __TBB_concurrent_hash_map_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "cache_aligned_allocator.h" -#include "tbb_allocator.h" -#include "spin_rw_mutex.h" -#include "atomic.h" -#include "tbb_exception.h" -#include "tbb_profiling.h" -#include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher -#if __TBB_INITIALIZER_LISTS_PRESENT -#include -#endif -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS -#include -#endif -#if __TBB_STATISTICS -#include -#endif - -namespace tbb { - -//! hash_compare that is default argument for concurrent_hash_map -template -struct tbb_hash_compare { - static size_t hash( const Key& a ) { return tbb_hasher(a); } - static bool equal( const Key& a, const Key& b ) { return a == b; } -}; - -namespace interface5 { - - template, typename A = tbb_allocator > > - class concurrent_hash_map; - - //! @cond INTERNAL - namespace internal { - using namespace tbb::internal; - - - //! Type of a hash code. - typedef size_t hashcode_t; - //! Node base type - struct hash_map_node_base : tbb::internal::no_copy { - //! Mutex type - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - //! Next node in chain - hash_map_node_base *next; - mutex_t mutex; - }; - //! Incompleteness flag value - static hash_map_node_base *const rehash_req = reinterpret_cast(size_t(3)); - //! Rehashed empty bucket flag - static hash_map_node_base *const empty_rehashed = reinterpret_cast(size_t(0)); - //! base class of concurrent_hash_map - class hash_map_base { - public: - //! Size type - typedef size_t size_type; - //! Type of a hash code. - typedef size_t hashcode_t; - //! Segment index type - typedef size_t segment_index_t; - //! Node base type - typedef hash_map_node_base node_base; - //! Bucket type - struct bucket : tbb::internal::no_copy { - //! Mutex type for buckets - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - mutex_t mutex; - node_base *node_list; - }; - //! Count of segments in the first block - static size_type const embedded_block = 1; - //! Count of segments in the first block - static size_type const embedded_buckets = 1< my_mask; - //! Segment pointers table. Also prevents false sharing between my_mask and my_size - segments_table_t my_table; - //! Size of container in stored items - atomic my_size; // It must be in separate cache line from my_mask due to performance effects - //! Zero segment - bucket my_embedded_segment[embedded_buckets]; -#if __TBB_STATISTICS - atomic my_info_resizes; // concurrent ones - mutable atomic my_info_restarts; // race collisions - atomic my_info_rehashes; // invocations of rehash_bucket -#endif - //! 
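`concurrent_hash_map` handles keys through a "HashCompare" policy like `tbb_hash_compare` above, which must supply `hash()` and `equal()`. A sketch of a custom policy (the name `StringHashCompare` and the mixing constant are illustrative):

```cpp
#include <string>
#include <tbb/concurrent_hash_map.h>

struct StringHashCompare {
    static size_t hash(const std::string& s) {
        size_t h = 0;
        for (size_t i = 0; i < s.size(); ++i)
            h = h * 31 + static_cast<unsigned char>(s[i]);  // simple multiplicative mix
        return h;
    }
    static bool equal(const std::string& a, const std::string& b) {
        return a == b;
    }
};

typedef tbb::concurrent_hash_map<std::string, int, StringHashCompare> Counts;
```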
Constructor - hash_map_base() { - std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512 - + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8 - + embedded_buckets*sizeof(bucket) ); // n*8 or n*16 - for( size_type i = 0; i < embedded_block; i++ ) // fill the table - my_table[i] = my_embedded_segment + segment_base(i); - my_mask = embedded_buckets - 1; - __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); -#if __TBB_STATISTICS - my_info_resizes = 0; // concurrent ones - my_info_restarts = 0; // race collisions - my_info_rehashes = 0; // invocations of rehash_bucket -#endif - } - - //! @return segment index of given index in the array - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - //! @return the first array index of given segment - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)<(ptr) > uintptr_t(63); - } - - //! Initialize buckets - static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { - if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) ); - else for(size_type i = 0; i < sz; i++, ptr++) { - *reinterpret_cast(&ptr->mutex) = 0; - ptr->node_list = rehash_req; - } - } - - //! Add node @arg n to bucket @arg b - static void add_to_bucket( bucket *b, node_base *n ) { - __TBB_ASSERT(b->node_list != rehash_req, NULL); - n->next = b->node_list; - b->node_list = n; // its under lock and flag is set - } - - //! Exception safety helper - struct enable_segment_failsafe : tbb::internal::no_copy { - segment_ptr_t *my_segment_ptr; - enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {} - ~enable_segment_failsafe() { - if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress - } - }; - - //! Enable segment - void enable_segment( segment_index_t k, bool is_initial = false ) { - __TBB_ASSERT( k, "Zero segment must be embedded" ); - enable_segment_failsafe watchdog( my_table, k ); - cache_aligned_allocator alloc; - size_type sz; - __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); - if( k >= first_block ) { - sz = segment_size( k ); - segment_ptr_t ptr = alloc.allocate( sz ); - init_buckets( ptr, sz, is_initial ); - itt_hide_store_word( my_table[k], ptr ); - sz <<= 1;// double it to get entire capacity of the container - } else { // the first block - __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); - sz = segment_size( first_block ); - segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets ); - init_buckets( ptr, sz - embedded_buckets, is_initial ); - ptr -= segment_base(embedded_block); - for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets - itt_hide_store_word( my_table[i], ptr + segment_base(i) ); - } - itt_store_word_with_release( my_mask, sz-1 ); - watchdog.my_segment_ptr = 0; - } - - //! Get bucket by (masked) hashcode - bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? 
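The bucket table above grows by whole segments: bucket `i` lives in segment `Log2(i|1)`, and segment `k` starts at index `(1<<k) & ~1`, so buckets 0 and 1 share the embedded segment and each later segment doubles the capacity. A quick check of that arithmetic (a sketch reconstructed from the code above; `log2u` stands in for `__TBB_Log2`):

```cpp
#include <cassert>
#include <cstddef>

unsigned log2u(std::size_t x) {           // position of the highest set bit
    unsigned r = 0;
    while (x >>= 1) ++r;
    return r;
}

std::size_t segment_index_of(std::size_t i) { return log2u(i | 1); }
std::size_t segment_base(std::size_t k)     { return (std::size_t(1) << k) & ~std::size_t(1); }

int main() {
    assert(segment_index_of(0) == 0 && segment_index_of(1) == 0);  // embedded segment
    assert(segment_index_of(2) == 1 && segment_index_of(3) == 1);
    assert(segment_index_of(4) == 2 && segment_index_of(7) == 2);
    assert(segment_base(1) == 2 && segment_base(2) == 4 && segment_base(3) == 8);
    std::size_t h = 13;                    // bucket 13 sits at offset 5 of segment 3
    assert(h - segment_base(segment_index_of(h)) == 5);
    return 0;
}
```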
- segment_index_t s = segment_index_of( h ); - h -= segment_base(s); - segment_ptr_t seg = my_table[s]; - __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); - return &seg[h]; - } - - // internal serial rehashing helper - void mark_rehashed_levels( hashcode_t h ) throw () { - segment_index_t s = segment_index_of( h ); - while( segment_ptr_t seg = my_table[++s] ) - if( seg[h].node_list == rehash_req ) { - seg[h].node_list = empty_rehashed; - mark_rehashed_levels( h + ((hashcode_t)1<node_list) != rehash_req ) - { -#if __TBB_STATISTICS - my_info_restarts++; // race collisions -#endif - return true; - } - } - return false; - } - - //! Insert a node and check for load factor. @return segment index to enable. - segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) { - size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted - add_to_bucket( b, n ); - // check load factor - if( sz >= mask ) { // TODO: add custom load_factor - segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of - __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated"); - static const segment_ptr_t is_allocating = (segment_ptr_t)2; - if( !itt_hide_load_word(my_table[new_seg]) - && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL ) - return new_seg; // The value must be processed - } - return 0; - } - - //! Prepare enough segments for number of buckets - void reserve(size_type buckets) { - if( !buckets-- ) return; - bool is_initial = !my_size; - for( size_type m = my_mask; buckets > m; m = my_mask ) - enable_segment( segment_index_of( m+1 ), is_initial ); - } - //! Swap hash_map_bases - void internal_swap(hash_map_base &table) { - using std::swap; - swap(this->my_mask, table.my_mask); - swap(this->my_size, table.my_size); - for(size_type i = 0; i < embedded_buckets; i++) - swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list); - for(size_type i = embedded_block; i < pointers_per_table; i++) - swap(this->my_table[i], table.my_table[i]); - } - }; - - template - class hash_map_range; - - //! Meets requirements of a forward iterator for STL */ - /** Value is either the T or const T type of the container. 
- @ingroup containers */ - template - class hash_map_iterator - : public std::iterator - { - typedef Container map_type; - typedef typename Container::node node; - typedef hash_map_base::node_base node_base; - typedef hash_map_base::bucket bucket; - - template - friend bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend ptrdiff_t operator-( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend class hash_map_iterator; - - template - friend class hash_map_range; - - void advance_to_next_bucket() { // TODO?: refactor to iterator_base class - size_t k = my_index+1; - while( my_bucket && k <= my_map->my_mask ) { - // Following test uses 2's-complement wizardry - if( k& (k-2) ) // not the beginning of a segment - ++my_bucket; - else my_bucket = my_map->get_bucket( k ); - my_node = static_cast( my_bucket->node_list ); - if( hash_map_base::is_valid(my_node) ) { - my_index = k; return; - } - ++k; - } - my_bucket = 0; my_node = 0; my_index = k; // the end - } -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class interface5::concurrent_hash_map; -#else - public: // workaround -#endif - //! concurrent_hash_map over which we are iterating. - const Container *my_map; - - //! Index in hash table for current item - size_t my_index; - - //! Pointer to bucket - const bucket *my_bucket; - - //! Pointer to node that has current item - node *my_node; - - hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ); - - public: - //! Construct undefined iterator - hash_map_iterator() {} - hash_map_iterator( const hash_map_iterator &other ) : - my_map(other.my_map), - my_index(other.my_index), - my_bucket(other.my_bucket), - my_node(other.my_node) - {} - Value& operator*() const { - __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); - return my_node->item; - } - Value* operator->() const {return &operator*();} - hash_map_iterator& operator++(); - - //! Post increment - hash_map_iterator operator++(int) { - hash_map_iterator old(*this); - operator++(); - return old; - } - }; - - template - hash_map_iterator::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) : - my_map(&map), - my_index(index), - my_bucket(b), - my_node( static_cast(n) ) - { - if( b && !hash_map_base::is_valid(n) ) - advance_to_next_bucket(); - } - - template - hash_map_iterator& hash_map_iterator::operator++() { - my_node = static_cast( my_node->next ); - if( !my_node ) advance_to_next_bucket(); - return *this; - } - - template - bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ) { - return i.my_node == j.my_node && i.my_map == j.my_map; - } - - template - bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ) { - return i.my_node != j.my_node || i.my_map != j.my_map; - } - - //! Range class used with concurrent_hash_map - /** @ingroup containers */ - template - class hash_map_range { - typedef typename Iterator::map_type map_type; - Iterator my_begin; - Iterator my_end; - mutable Iterator my_midpoint; - size_t my_grainsize; - //! Set my_midpoint to point approximately half way between my_begin and my_end. - void set_midpoint() const; - template friend class hash_map_range; - public: - //! 
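`hash_map_iterator` above gives the container STL-style traversal; as the documentation later in this header notes, these methods are not thread-safe, so iteration belongs in serial sections. A usage sketch:

```cpp
#include <tbb/concurrent_hash_map.h>
#include <cstdio>

typedef tbb::concurrent_hash_map<int, int> Map;

// Caller must guarantee no concurrent insert/erase while iterating.
void dump(const Map& m) {
    for (Map::const_iterator it = m.begin(); it != m.end(); ++it)
        std::printf("%d -> %d\n", it->first, it->second);
}
```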
Type for size of a range - typedef std::size_t size_type; - typedef typename Iterator::value_type value_type; - typedef typename Iterator::reference reference; - typedef typename Iterator::difference_type difference_type; - typedef Iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin==my_end;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint!=my_end; - } - //! Split range. - hash_map_range( hash_map_range& r, split ) : - my_end(r.my_end), - my_grainsize(r.my_grainsize) - { - r.my_end = my_begin = r.my_midpoint; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! type conversion - template - hash_map_range( hash_map_range& r) : - my_begin(r.my_begin), - my_end(r.my_end), - my_midpoint(r.my_midpoint), - my_grainsize(r.my_grainsize) - {} - //! Init range with container and grainsize specified - hash_map_range( const map_type &map, size_type grainsize_ = 1 ) : - my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ), - my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ), - my_grainsize( grainsize_ ) - { - __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); - set_midpoint(); - } - const Iterator& begin() const {return my_begin;} - const Iterator& end() const {return my_end;} - //! The grain size for this range. - size_type grainsize() const {return my_grainsize;} - }; - - template - void hash_map_range::set_midpoint() const { - // Split by groups of nodes - size_t m = my_end.my_index-my_begin.my_index; - if( m > my_grainsize ) { - m = my_begin.my_index + m/2u; - hash_map_base::bucket *b = my_begin.my_map->get_bucket(m); - my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list); - } else { - my_midpoint = my_end; - } - __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, - "my_begin is after my_midpoint" ); - __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, - "my_midpoint is after my_end" ); - __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end, - "[my_begin, my_midpoint) range should not be empty" ); - } - - } // internal -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress "conditional expression is constant" warning. - #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - -//! Unordered map from Key to T. -/** concurrent_hash_map is associative container with concurrent access. - -@par Compatibility - The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). - -@par Exception Safety - - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors. - - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment). - - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results. - -@par Changes since TBB 2.1 - - Replaced internal algorithm and data structure. Patent is pending. 
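`hash_map_range` above models the TBB Range concept: `empty()`, `is_divisible()`, and a splitting constructor that bisects at `my_midpoint`. That is what lets a `concurrent_hash_map` feed `tbb::parallel_for` directly. A sketch of that use (classic-TBB API; not something current RcppParallel exposes, since these headers are being removed):

```cpp
#include <atomic>
#include <cstddef>
#include <tbb/concurrent_hash_map.h>
#include <tbb/parallel_for.h>

typedef tbb::concurrent_hash_map<int, int> Map;

std::size_t count_even_values(const Map& m) {
    std::atomic<std::size_t> n(0);
    tbb::parallel_for(m.range(), [&](const Map::const_range_type& r) {
        for (Map::const_iterator it = r.begin(); it != r.end(); ++it)
            if (it->second % 2 == 0) ++n;   // each worker walks its own subrange
    });
    return n.load();
}
```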
- - Added buckets number argument for constructor - -@par Changes since TBB 2.0 - - Fixed exception-safety - - Added template argument for allocator - - Added allocator argument in constructors - - Added constructor from a range of iterators - - Added several new overloaded insert() methods - - Added get_allocator() - - Added swap() - - Added count() - - Added overloaded erase(accessor &) and erase(const_accessor&) - - Added equal_range() [const] - - Added [const_]pointer, [const_]reference, and allocator_type types - - Added global functions: operator==(), operator!=(), and swap() - - @ingroup containers */ -template -class concurrent_hash_map : protected internal::hash_map_base { - template - friend class internal::hash_map_iterator; - - template - friend class internal::hash_map_range; - -public: - typedef Key key_type; - typedef T mapped_type; - typedef std::pair value_type; - typedef hash_map_base::size_type size_type; - typedef ptrdiff_t difference_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef internal::hash_map_iterator iterator; - typedef internal::hash_map_iterator const_iterator; - typedef internal::hash_map_range range_type; - typedef internal::hash_map_range const_range_type; - typedef Allocator allocator_type; - -protected: - friend class const_accessor; - struct node; - typedef typename Allocator::template rebind::other node_allocator_type; - node_allocator_type my_allocator; - HashCompare my_hash_compare; - - struct node : public node_base { - value_type item; - node( const Key &key ) : item(key, T()) {} - node( const Key &key, const T &t ) : item(key, t) {} -#if __TBB_CPP11_RVALUE_REF_PRESENT - node( value_type&& i ) : item(std::move(i)){} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - node( const value_type& i ) : item(i) {} - - // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17 - void *operator new( size_t /*size*/, node_allocator_type &a ) { - void *ptr = a.allocate(1); - if(!ptr) - tbb::internal::throw_exception(tbb::internal::eid_bad_alloc); - return ptr; - } - // match placement-new form above to be called if exception thrown in constructor - void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast(ptr),1); } - }; - - void delete_node( node_base *n ) { - my_allocator.destroy( static_cast(n) ); - my_allocator.deallocate( static_cast(n), 1); - } - - static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){ - return new( allocator ) node(key, *t); - } - - static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){ - return new( allocator ) node(key); - } - - static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){ - __TBB_ASSERT(false,"this dummy function should not be called"); - return NULL; - } - - node *search_bucket( const key_type &key, bucket *b ) const { - node *n = static_cast( b->node_list ); - while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) ) - n = static_cast( n->next ); - __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket"); - return n; - } - - //! bucket accessor is to find, rehash, acquire a lock, and access a bucket - class bucket_accessor : public bucket::scoped_t { - bucket *my_b; - public: - bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); } - //! 
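The `node` type above pairs a placement `operator new` taking an allocator with a matching `operator delete`, so that when the element constructor throws, the runtime hands the raw memory back to the allocator automatically (the standard clause the original comment cites). The idiom in isolation (illustrative types, not the TBB code):

```cpp
#include <memory>

struct Payload { int v; };   // stand-in for the stored value_type

struct Node {
    Payload item;
    explicit Node(const Payload& p) : item(p) {}

    typedef std::allocator<Node> Alloc;
    // Placement form: draw memory from the allocator...
    void* operator new(std::size_t, Alloc& a) { return a.allocate(1); }
    // ...and the matching delete runs only if the constructor throws
    // inside `new (a) Node(...)`, returning the memory to `a`.
    void operator delete(void* p, Alloc& a) { a.deallocate(static_cast<Node*>(p), 1); }
};

int main() {
    Node::Alloc a;
    Node* n = new (a) Node(Payload{7});  // allocate via `a`, then construct
    n->~Node();                          // normal teardown is explicit,
    a.deallocate(n, 1);                  // as delete_node() does above
    return 0;
}
```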
find a bucket by masked hashcode, optionally rehash, and acquire the lock - inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { - my_b = base->get_bucket( h ); - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req - && try_acquire( my_b->mutex, /*write=*/true ) ) - { - if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing - } - else bucket::scoped_t::acquire( my_b->mutex, writer ); - __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL); - } - //! check whether bucket is locked for write - bool is_writer() { return bucket::scoped_t::is_writer; } - //! get bucket pointer - bucket *operator() () { return my_b; } - }; - - // TODO refactor to hash_base - void rehash_bucket( bucket *b_new, const hashcode_t h ) { - __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)"); - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed - hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit -#if __TBB_STATISTICS - my_info_rehashes++; // invocations of rehash_bucket -#endif - - bucket_accessor b_old( this, h & mask ); - - mask = (mask<<1) | 1; // get full mask for new bucket - __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); - restart: - for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first ); -#if TBB_USE_ASSERT - hashcode_t bmask = h & (mask>>1); - bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket - __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" ); -#endif - if( (c & mask) == h ) { - if( !b_old.is_writer() ) - if( !b_old.upgrade_to_writer() ) { - goto restart; // node ptr can be invalid due to concurrent erase - } - *p = n->next; // exclude from b_old - add_to_bucket( b_new, n ); - } else p = &n->next; // iterate to next item - } - } - - struct call_clear_on_leave { - concurrent_hash_map* my_ch_map; - call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {} - void dismiss() {my_ch_map = 0;} - ~call_clear_on_leave(){ - if (my_ch_map){ - my_ch_map->clear(); - } - } - }; -public: - - class accessor; - //! Combines data access, locking, and garbage collection. - class const_accessor : private node::scoped_t /*which derived from no_copy*/ { - friend class concurrent_hash_map; - friend class accessor; - public: - //! Type of value - typedef const typename concurrent_hash_map::value_type value_type; - - //! True if result is empty. - bool empty() const { return !my_node; } - - //! Set to null - void release() { - if( my_node ) { - node::scoped_t::release(); - my_node = 0; - } - } - - //! Return reference to associated value in hash table. - const_reference operator*() const { - __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); - return my_node->item; - } - - //! Return pointer to associated value in hash table. - const_pointer operator->() const { - return &operator*(); - } - - //! Create empty result - const_accessor() : my_node(NULL) {} - - //! Destroy result after releasing the underlying reference. 
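`bucket_accessor` above takes the bucket's reader-writer lock, upgrading from reader to writer only when a rehash is required, and `rehash_bucket()` restarts its scan whenever an upgrade had to drop the lock. The upgrade primitive it relies on, shown in isolation with `tbb::spin_rw_mutex` (classic-TBB API; the function here is a made-up example):

```cpp
#include <tbb/spin_rw_mutex.h>

tbb::spin_rw_mutex rw;
int shared_value = 0;

void clamp_to_zero() {
    tbb::spin_rw_mutex::scoped_lock lock(rw, /*write=*/false);  // reader
    if (shared_value < 0) {
        // upgrade_to_writer() returns false if the lock had to be
        // released and reacquired; the protected state must then be
        // re-checked, just as rehash_bucket() restarts after an upgrade.
        if (!lock.upgrade_to_writer() && shared_value >= 0)
            return;
        shared_value = 0;
    }
}
```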
- ~const_accessor() { - my_node = NULL; // scoped lock's release() is called in its destructor - } - protected: - bool is_writer() { return node::scoped_t::is_writer; } - node *my_node; - hashcode_t my_hash; - }; - - //! Allows write access to elements and combines data access, locking, and garbage collection. - class accessor: public const_accessor { - public: - //! Type of value - typedef typename concurrent_hash_map::value_type value_type; - - //! Return reference to associated value in hash table. - reference operator*() const { - __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); - return this->my_node->item; - } - - //! Return pointer to associated value in hash table. - pointer operator->() const { - return &operator*(); - } - }; - - //! Construct empty table. - concurrent_hash_map( const allocator_type &a = allocator_type() ) - : internal::hash_map_base(), my_allocator(a) - {} - - //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. - concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() ) - : my_allocator(a) - { - reserve( n ); - } - - //! Copy constructor - concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a = allocator_type() ) - : internal::hash_map_base(), my_allocator(a) - { - internal_copy(table); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move constructor - concurrent_hash_map( concurrent_hash_map &&table ) - : internal::hash_map_base(), my_allocator(std::move(table.get_allocator())) - { - swap(table); - } - - //! Move constructor - concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a ) - : internal::hash_map_base(), my_allocator(a) - { - if (a == table.get_allocator()){ - this->swap(table); - }else{ - call_clear_on_leave scope_guard(this); - internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end())); - scope_guard.dismiss(); - } - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - //! Construction with copying iteration range and given allocator instance - template - concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() ) - : my_allocator(a) - { - reserve( std::distance(first, last) ); // TODO: load_factor? - internal_copy(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. - concurrent_hash_map( std::initializer_list il, const allocator_type &a = allocator_type() ) - : my_allocator(a) - { - reserve(il.size()); - internal_copy(il.begin(), il.end()); - } - -#endif //__TBB_INITIALIZER_LISTS_PRESENT - - //! Assignment - concurrent_hash_map& operator=( const concurrent_hash_map &table ) { - if( this!=&table ) { - clear(); - internal_copy(table); - } - return *this; - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
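`const_accessor` and `accessor` above combine element lookup with a per-element read or write lock whose scope is the accessor's lifetime. Typical usage (classic-TBB API, retained in oneTBB):

```cpp
#include <string>
#include <tbb/concurrent_hash_map.h>

typedef tbb::concurrent_hash_map<std::string, int> Counts;

void increment(Counts& counts, const std::string& key) {
    Counts::accessor a;          // write access
    counts.insert(a, key);       // inserts a value-initialized int if absent
    ++a->second;                 // safe: the element lock is held
}                                // lock released when `a` leaves scope

bool lookup(const Counts& counts, const std::string& key, int* out) {
    Counts::const_accessor a;    // read access; readers may share the lock
    if (!counts.find(a, key))
        return false;
    *out = a->second;
    return true;
}
```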
Move Assignment - concurrent_hash_map& operator=( concurrent_hash_map &&table ) { - if(this != &table){ - typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; - if(pocma_t::value || this->my_allocator == table.my_allocator) { - concurrent_hash_map trash (std::move(*this)); - //TODO: swapping allocators here may be a problem, replace with single direction moving iff pocma is set - this->swap(table); - } else { - //do per element move - concurrent_hash_map moved_copy(std::move(table), this->my_allocator); - this->swap(moved_copy); - } - } - return *this; - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Assignment - concurrent_hash_map& operator=( std::initializer_list il ) { - clear(); - reserve(il.size()); - internal_copy(il.begin(), il.end()); - return *this; - } -#endif //__TBB_INITIALIZER_LISTS_PRESENT - - - //! Rehashes and optionally resizes the whole table. - /** Useful to optimize performance before or after concurrent operations. - Also enables using of find() and count() concurrent methods in serial context. */ - void rehash(size_type n = 0); - - //! Clear table - void clear(); - - //! Clear table and destroy it. - ~concurrent_hash_map() { clear(); } - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - range_type range( size_type grainsize=1 ) { - return range_type( *this, grainsize ); - } - const_range_type range( size_type grainsize=1 ) const { - return const_range_type( *this, grainsize ); - } - - //------------------------------------------------------------------------ - // STL support - not thread-safe methods - //------------------------------------------------------------------------ - iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); } - iterator end() { return iterator( *this, 0, 0, 0 ); } - const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); } - const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); } - std::pair equal_range( const Key& key ) { return internal_equal_range( key, end() ); } - std::pair equal_range( const Key& key ) const { return internal_equal_range( key, end() ); } - - //! Number of items in table. - size_type size() const { return my_size; } - - //! True if size()==0. - bool empty() const { return my_size == 0; } - - //! Upper bound on size. - size_type max_size() const {return (~size_type(0))/sizeof(node);} - - //! Returns the current number of buckets - size_type bucket_count() const { return my_mask+1; } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! swap two instances. Iterators are invalidated - void swap( concurrent_hash_map &table ); - - //------------------------------------------------------------------------ - // concurrent map operations - //------------------------------------------------------------------------ - - //! Return count of items (0 or 1) - size_type count( const Key &key ) const { - return const_cast(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node ); - } - - //! Find item and acquire a read lock on the item. - /** Return true if item is found, false otherwise. 
*/ - bool find( const_accessor &result, const Key &key ) const { - result.release(); - return const_cast(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node ); - } - - //! Find item and acquire a write lock on the item. - /** Return true if item is found, false otherwise. */ - bool find( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node ); - } - - //! Insert item (if not already present) and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct ); - } - - //! Insert item (if not already present) and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct ); - } - - //! Insert item by copying if there is no such key present already and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct ); - } - - //! Insert item by copying if there is no such key present already and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct ); - } - - //! Insert item by copying if there is no such key present already - /** Returns true if item is inserted. */ - bool insert( const value_type &value ) { - return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct ); - } - - //! Insert range [first, last) - template - void insert( I first, I last ) { - for ( ; first != last; ++first ) - insert( *first ); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Insert initializer list - void insert( std::initializer_list il ) { - insert( il.begin(), il.end() ); - } -#endif //__TBB_INITIALIZER_LISTS_PRESENT - - //! Erase item. - /** Return true if item was erased by particularly this call. */ - bool erase( const Key& key ); - - //! Erase item by const_accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( const_accessor& item_accessor ) { - return exclude( item_accessor ); - } - - //! Erase item by accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( accessor& item_accessor ) { - return exclude( item_accessor ); - } - -protected: - //! Insert or find item and optionally acquire a lock on the item. - bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ) ) ; - - //! delete item by accessor - bool exclude( const_accessor &item_accessor ); - - //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) - template - std::pair internal_equal_range( const Key& key, I end ) const; - - //! Copy "source" to *this, where *this must start out empty. 
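Besides the accessor forms, the `insert()`/`erase()` family above also offers fire-and-forget overloads and range insertion. A small sketch:

```cpp
#include <utility>
#include <vector>
#include <tbb/concurrent_hash_map.h>

typedef tbb::concurrent_hash_map<int, int> Map;

int main() {
    Map m;
    std::vector<std::pair<int, int> > v;
    for (int i = 0; i < 8; ++i)
        v.push_back(std::make_pair(i, i * i));

    m.insert(v.begin(), v.end());                   // range insert
    bool fresh = m.insert(std::make_pair(3, -1));   // false: key 3 exists,
                                                    // stored value stays 9
    bool gone = m.erase(3);                         // true: erased by this call
    return (!fresh && gone) ? 0 : 1;
}
```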
- void internal_copy( const concurrent_hash_map& source ); - - template - void internal_copy( I first, I last ); - - //! Fast find when no concurrent erasure is used. For internal use inside TBB only! - /** Return pointer to item with given key, or NULL if no such item exists. - Must not be called concurrently with erasure operations. */ - const_pointer internal_fast_find( const Key& key ) const { - hashcode_t h = my_hash_compare.hash( key ); - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - node *n; - restart: - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); - bucket *b = get_bucket( h & m ); - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req ) - { - bucket::scoped_t lock; - if( lock.try_acquire( b->mutex, /*write=*/true ) ) { - if( b->node_list == internal::rehash_req) - const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing - } - else lock.acquire( b->mutex, /*write=*/false ); - __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL); - } - n = search_bucket( key, b ); - if( n ) - return &n->item; - else if( check_mask_race( h, m ) ) - goto restart; - return 0; - } -}; - -template -bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*) ) { - __TBB_ASSERT( !result || !result->my_node, NULL ); - bool return_value; - hashcode_t const h = my_hash_compare.hash( key ); - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - segment_index_t grow_segment = 0; - node *n, *tmp_n = 0; - restart: - {//lock scope - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); - return_value = false; - // get bucket - bucket_accessor b( this, h & m ); - - // find a node - n = search_bucket( key, b() ); - if( op_insert ) { - // [opt] insert a key - if( !n ) { - if( !tmp_n ) { - tmp_n = allocate_node(my_allocator, key, t); - } - if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion - // Rerun search_list, in case another thread inserted the item during the upgrade. - n = search_bucket( key, b() ); - if( is_valid(n) ) { // unfortunately, it did - b.downgrade_to_reader(); - goto exists; - } - } - if( check_mask_race(h, m) ) - goto restart; // b.release() is done in ~b(). - // insert and set flag to grow the container - grow_segment = insert_new_node( b(), n = tmp_n, m ); - tmp_n = 0; - return_value = true; - } - } else { // find or count - if( !n ) { - if( check_mask_race( h, m ) ) - goto restart; // b.release() is done in ~b(). TODO: replace by continue - return false; - } - return_value = true; - } - exists: - if( !result ) goto check_growth; - // TODO: the following seems as generic/regular operation - // acquire the item - if( !result->try_acquire( n->mutex, write ) ) { - for( tbb::internal::atomic_backoff backoff(true);; ) { - if( result->try_acquire( n->mutex, write ) ) break; - if( !backoff.bounded_pause() ) { - // the wait takes really long, restart the operation - b.release(); - __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" 
); - __TBB_Yield(); - m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - goto restart; - } - } - } - }//lock scope - result->my_node = n; - result->my_hash = h; -check_growth: - // [opt] grow the container - if( grow_segment ) { -#if __TBB_STATISTICS - my_info_resizes++; // concurrent ones -#endif - enable_segment( grow_segment ); - } - if( tmp_n ) // if op_insert only - delete_node( tmp_n ); - return return_value; -} - -template -template -std::pair concurrent_hash_map::internal_equal_range( const Key& key, I end_ ) const { - hashcode_t h = my_hash_compare.hash( key ); - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); - h &= m; - bucket *b = get_bucket( h ); - while( b->node_list == internal::rehash_req ) { - m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b = get_bucket( h &= m ); - } - node *n = search_bucket( key, b ); - if( !n ) - return std::make_pair(end_, end_); - iterator lower(*this, h, b, n), upper(lower); - return std::make_pair(lower, ++upper); -} - -template -bool concurrent_hash_map::exclude( const_accessor &item_accessor ) { - __TBB_ASSERT( item_accessor.my_node, NULL ); - node_base *const n = item_accessor.my_node; - hashcode_t const h = item_accessor.my_hash; - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - do { - // get bucket - bucket_accessor b( this, h & m, /*writer=*/true ); - node_base **p = &b()->node_list; - while( *p && *p != n ) - p = &(*p)->next; - if( !*p ) { // someone else was first - if( check_mask_race( h, m ) ) - continue; - item_accessor.release(); - return false; - } - __TBB_ASSERT( *p == n, NULL ); - *p = n->next; // remove from container - my_size--; - break; - } while(true); - if( !item_accessor.is_writer() ) // need to get exclusive lock - item_accessor.upgrade_to_writer(); // return value means nothing here - item_accessor.release(); - delete_node( n ); // Only one thread can delete it - return true; -} - -template -bool concurrent_hash_map::erase( const Key &key ) { - node_base *n; - hashcode_t const h = my_hash_compare.hash( key ); - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); -restart: - {//lock scope - // get bucket - bucket_accessor b( this, h & m ); - search: - node_base **p = &b()->node_list; - n = *p; - while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) { - p = &n->next; - n = *p; - } - if( !n ) { // not found, but mask could be changed - if( check_mask_race( h, m ) ) - goto restart; - return false; - } - else if( !b.is_writer() && !b.upgrade_to_writer() ) { - if( check_mask_race( h, m ) ) // contended upgrade, check mask - goto restart; - goto search; - } - *p = n->next; - my_size--; - } - { - typename node::scoped_t item_locker( n->mutex, /*write=*/true ); - } - // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! 
- delete_node( n ); // Only one thread can delete it due to write lock on the bucket - return true; -} - -template -void concurrent_hash_map::swap(concurrent_hash_map &table) { - //TODO: respect C++11 allocator_traits::propogate_on_constainer_swap - using std::swap; - swap(this->my_allocator, table.my_allocator); - swap(this->my_hash_compare, table.my_hash_compare); - internal_swap(table); -} - -template -void concurrent_hash_map::rehash(size_type sz) { - reserve( sz ); // TODO: add reduction of number of buckets as well - hashcode_t mask = my_mask; - hashcode_t b = (mask+1)>>1; // size or first index of the last segment - __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2 - bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing - for(; b <= mask; b++, bp++ ) { - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one - hashcode_t h = b; bucket *b_old = bp; - do { - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b_old = get_bucket( h &= m ); - } while( b_old->node_list == internal::rehash_req ); - // now h - is index of the root rehashed bucket b_old - mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments - for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first ); - if( (c & mask) != h ) { // should be rehashed - *p = q->next; // exclude from b_old - bucket *b_new = get_bucket( c & mask ); - __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" ); - add_to_bucket( b_new, q ); - } else p = &q->next; // iterate to next item - } - } - } -#if TBB_USE_PERFORMANCE_WARNINGS - int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS - for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" ); -#if TBB_USE_PERFORMANCE_WARNINGS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n->next ) overpopulated_buckets++; -#endif -#if TBB_USE_ASSERT - for( ; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask; - __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); - } -#endif - } -#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS -#if TBB_USE_PERFORMANCE_WARNINGS - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? 
- if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -} - -template -void concurrent_hash_map::clear() { - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS - int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif - bucket *bp = 0; - // check consistency - for( segment_index_t b = 0; b <= m; b++ ) { - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" ); -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n == internal::rehash_req ) buckets--; - else if( n->next ) overpopulated_buckets++; -#endif -#if __TBB_EXTRA_DEBUG - for(; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ); - h &= m; - __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); - } -#endif - } -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS -#if __TBB_STATISTICS - printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d" - " concurrent: resizes=%u rehashes=%u restarts=%u\n", - current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets, - unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) ); - my_info_resizes = 0; // concurrent ones - my_info_restarts = 0; // race collisions - my_info_rehashes = 0; // invocations of rehash_bucket -#endif - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? - if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS - my_size = 0; - segment_index_t s = segment_index_of( m ); - __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); - cache_aligned_allocator alloc; - do { - __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); - segment_ptr_t buckets_ptr = my_table[s]; - size_type sz = segment_size( s ? 
s : 1 ); - for( segment_index_t i = 0; i < sz; i++ ) - for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) { - buckets_ptr[i].node_list = n->next; - delete_node( n ); - } - if( s >= first_block) // the first segment or the next - alloc.deallocate( buckets_ptr, sz ); - else if( s == embedded_block && embedded_block != first_block ) - alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets ); - if( s >= embedded_block ) my_table[s] = 0; - } while(s-- > 0); - my_mask = embedded_buckets - 1; -} - -template -void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { - reserve( source.my_size ); // TODO: load_factor? - hashcode_t mask = source.my_mask; - if( my_mask == mask ) { // optimized version - bucket *dst = 0, *src = 0; - bool rehash_required = false; - for( hashcode_t k = 0; k <= mask; k++ ) { - if( k & (k-2) ) ++dst,src++; // not the beginning of a segment - else { dst = get_bucket( k ); src = source.get_bucket( k ); } - __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = static_cast( src->node_list ); - if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets - rehash_required = true; - dst->node_list = internal::rehash_req; - } else for(; n; n = static_cast( n->next ) ) { - add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) ); - ++my_size; // TODO: replace by non-atomic op - } - } - if( rehash_required ) rehash(); - } else internal_copy( source.begin(), source.end() ); -} - -template -template -void concurrent_hash_map::internal_copy(I first, I last) { - hashcode_t m = my_mask; - for(; first != last; ++first) { - hashcode_t h = my_hash_compare.hash( (*first).first ); - bucket *b = get_bucket( h & m ); - __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = new( my_allocator ) node(*first); - add_to_bucket( b, n ); - ++my_size; // TODO: replace by non-atomic op - } -} - -} // namespace interface5 - -using interface5::concurrent_hash_map; - - -template -inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { - if(a.size() != b.size()) return false; - typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); - typename concurrent_hash_map::const_iterator j, j_end(b.end()); - for(; i != i_end; ++i) { - j = b.equal_range(i->first).first; - if( j == j_end || !(i->second == j->second) ) return false; - } - return true; -} - -template -inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) -{ return !(a == b); } - -template -inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) -{ a.swap( b ); } - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back - -} // namespace tbb - -#endif /* __TBB_concurrent_hash_map_H */ diff --git a/inst/include/tbb/concurrent_lru_cache.h b/inst/include/tbb/concurrent_lru_cache.h deleted file mode 100644 index dbf0f1f82..000000000 --- a/inst/include/tbb/concurrent_lru_cache.h +++ /dev/null @@ -1,235 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
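
A brief usage sketch of the accessor API deleted above (illustrative only; the word_count table and its keys are hypothetical, not part of these sources). The accessor is itself the lock: a const_accessor holds a read lock on its element and an accessor holds a write lock, for as long as the accessor object is alive, which is why find() and insert() return their element through an accessor out-parameter:

    #include <iostream>
    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> word_count_t;

    void tally(word_count_t& word_count, const std::string& word) {
        word_count_t::accessor a;        // write lock held while 'a' is alive
        word_count.insert(a, word);      // returns true if 'word' was absent
        a->second += 1;                  // element is locked, so this is race-free
    }                                    // lock released when 'a' is destroyed

    int main() {
        word_count_t word_count;
        tally(word_count, "apple");
        tally(word_count, "apple");

        word_count_t::const_accessor ca; // read lock only
        if (word_count.find(ca, "apple"))
            std::cout << ca->second << "\n";  // prints 2
    }
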
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_lru_cache_H -#define __TBB_concurrent_lru_cache_H - -#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE - #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h -#endif - -#include -#include - -#include "tbb_stddef.h" -#include "atomic.h" -#include "internal/_aggregator_impl.h" - -namespace tbb{ -namespace interface6 { - - -template -class concurrent_lru_cache : internal::no_assign{ -private: - typedef concurrent_lru_cache self_type; - typedef value_functor_type value_function_type; - typedef std::size_t ref_counter_type; - struct map_value_type; - typedef std::map map_storage_type; - typedef std::list lru_list_type; - struct map_value_type { - value_type my_value; - ref_counter_type my_ref_counter; - typename lru_list_type::iterator my_lru_list_iterator; - bool my_is_ready; - - map_value_type (value_type const& a_value, ref_counter_type a_ref_counter, typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready) - : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator), my_is_ready(a_is_ready) - {} - }; - - class handle_object; - - struct aggregator_operation; - typedef aggregator_operation aggregated_operation_type; - typedef tbb::internal::aggregating_functor aggregator_function_type; - friend class tbb::internal::aggregating_functor; - typedef tbb::internal::aggregator aggregator_type; - -private: - value_function_type my_value_function; - std::size_t const my_number_of_lru_history_items; - map_storage_type my_map_storage; - lru_list_type my_lru_list; - aggregator_type my_aggregator; - -public: - typedef handle_object handle; - -public: - concurrent_lru_cache(value_function_type f, std::size_t number_of_lru_history_items) - : my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items) - { - my_aggregator.initialize_handler(aggregator_function_type(this)); - } - - handle_object operator[](key_type k){ - retrieve_aggregator_operation op(k); - my_aggregator.execute(&op); - if (op.is_new_value_needed()){ - op.result().second.my_value = my_value_function(k); - __TBB_store_with_release(op.result().second.my_is_ready, true); - }else{ - tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready,false); - } - return handle_object(*this,op.result()); - } -private: - void signal_end_of_usage(typename map_storage_type::reference value_ref){ - signal_end_of_usage_aggregator_operation op(value_ref); - my_aggregator.execute(&op); - } - -private: - struct handle_move_t:no_assign{ - 
concurrent_lru_cache & my_cache_ref; - typename map_storage_type::reference my_map_record_ref; - handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {}; - }; - class handle_object { - concurrent_lru_cache * my_cache_pointer; - typename map_storage_type::reference my_map_record_ref; - public: - handle_object(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_pointer(&cache_ref), my_map_record_ref(value_ref) {} - handle_object(handle_move_t m):my_cache_pointer(&m.my_cache_ref), my_map_record_ref(m.my_map_record_ref){} - operator handle_move_t(){ return move(*this);} - value_type& value(){ - __TBB_ASSERT(my_cache_pointer,"get value from moved from object?"); - return my_map_record_ref.second.my_value; - } - ~handle_object(){ - if (my_cache_pointer){ - my_cache_pointer->signal_end_of_usage(my_map_record_ref); - } - } - private: - friend handle_move_t move(handle_object& h){ - return handle_object::move(h); - } - static handle_move_t move(handle_object& h){ - __TBB_ASSERT(h.my_cache_pointer,"move from the same object twice ?"); - concurrent_lru_cache * cache_pointer = NULL; - std::swap(cache_pointer,h.my_cache_pointer); - return handle_move_t(*cache_pointer,h.my_map_record_ref); - } - private: - void operator=(handle_object&); -#if __SUNPRO_CC - // Presumably due to a compiler error, private copy constructor - // breaks expressions like handle h = cache[key]; - public: -#endif - handle_object(handle_object &); - }; -private: - //TODO: looks like aggregator_operation is a perfect match for statically typed variant type - struct aggregator_operation : tbb::internal::aggregated_operation{ - enum e_op_type {op_retive, op_signal_end_of_usage}; - //TODO: try to use pointer to function apply_visitor here - //TODO: try virtual functions and measure the difference - e_op_type my_operation_type; - aggregator_operation(e_op_type operation_type): my_operation_type(operation_type) {} - void cast_and_handle(self_type& container ){ - if (my_operation_type==op_retive){ - static_cast(this)->handle(container); - }else{ - static_cast(this)->handle(container); - } - } - }; - struct retrieve_aggregator_operation : aggregator_operation, private internal::no_assign { - key_type my_key; - typename map_storage_type::pointer my_result_map_record_pointer; - bool my_is_new_value_needed; - retrieve_aggregator_operation(key_type key):aggregator_operation(aggregator_operation::op_retive),my_key(key),my_is_new_value_needed(false){} - void handle(self_type& container ){ - my_result_map_record_pointer = & container.retrieve_serial(my_key,my_is_new_value_needed); - } - typename map_storage_type::reference result(){ return * my_result_map_record_pointer; } - bool is_new_value_needed(){return my_is_new_value_needed;} - }; - struct signal_end_of_usage_aggregator_operation : aggregator_operation, private internal::no_assign { - typename map_storage_type::reference my_map_record_ref; - signal_end_of_usage_aggregator_operation(typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){} - void handle(self_type& container ){ - container.signal_end_of_usage_serial(my_map_record_ref); - } - }; - -private: - void handle_operations(aggregator_operation* op_list){ - while(op_list){ - op_list->cast_and_handle(*this); - aggregator_operation* tmp = op_list; - op_list=op_list->next; - 
tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1)); - } - } - -private: - typename map_storage_type::reference retrieve_serial(key_type k, bool& is_new_value_needed){ - typename map_storage_type::iterator it = my_map_storage.find(k); - if (it == my_map_storage.end()){ - it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),false))); - is_new_value_needed = true; - }else { - typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator; - if (list_it!=my_lru_list.end()) { - __TBB_ASSERT(!it->second.my_ref_counter,"item to be evicted should not have a live references"); - //item is going to be used. Therefore it is not a subject for eviction - //so - remove it from LRU history. - my_lru_list.erase(list_it); - it->second.my_lru_list_iterator= my_lru_list.end(); - } - } - ++(it->second.my_ref_counter); - return *it; - } - - void signal_end_of_usage_serial(typename map_storage_type::reference map_record_ref){ - typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first); - __TBB_ASSERT(it!=my_map_storage.end(),"cache should not return past-end iterators to outer world"); - __TBB_ASSERT(&(*it) == &map_record_ref,"dangling reference has been returned to outside world? data race ?"); - __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it), - "object in use should not be in list of unused objects "); - if (! --(it->second.my_ref_counter)){ - //it was the last reference so put it to the LRU history - if (my_lru_list.size()>=my_number_of_lru_history_items){ - //evict items in order to get a space - size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items; - for (size_t i=0; isecond.my_ref_counter,"item to be evicted should not have a live references"); - my_lru_list.pop_back(); - my_map_storage.erase(it_to_evict); - } - } - my_lru_list.push_front(it); - it->second.my_lru_list_iterator = my_lru_list.begin(); - } - } -}; -} // namespace interface6 - -using interface6::concurrent_lru_cache; - -} // namespace tbb -#endif //__TBB_concurrent_lru_cache_H diff --git a/inst/include/tbb/concurrent_priority_queue.h b/inst/include/tbb/concurrent_priority_queue.h deleted file mode 100644 index 245034fb2..000000000 --- a/inst/include/tbb/concurrent_priority_queue.h +++ /dev/null @@ -1,457 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
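
To make the handle and aggregator machinery above concrete, a hypothetical usage sketch (the Widget type and load_widget function are illustrative, not from the deleted header). The cache computes a missing value exactly once via its value function, and a handle pins the value against eviction for as long as the handle lives:

    #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
    #include "tbb/concurrent_lru_cache.h"

    struct Widget { int id; };

    static Widget load_widget(int id) {   // expensive miss path, run once per key
        Widget w = { id };
        return w;
    }

    typedef tbb::concurrent_lru_cache<int, Widget> widget_cache_t;

    int main() {
        widget_cache_t cache(&load_widget, 128);  // retain up to 128 unused values

        widget_cache_t::handle h = cache[42];     // miss: calls load_widget(42)
        int id = h.value().id;                    // value pinned while 'h' is alive
        (void) id;
    }                                             // 'h' destroyed; value becomes evictable
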
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_priority_queue_H -#define __TBB_concurrent_priority_queue_H - -#include "atomic.h" -#include "cache_aligned_allocator.h" -#include "tbb_exception.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" -#include "internal/_aggregator_impl.h" -#include -#include -#include - -#if __TBB_INITIALIZER_LISTS_PRESENT - #include -#endif - -namespace tbb { -namespace interface5 { - -using namespace tbb::internal; - -//! Concurrent priority queue -template , typename A=cache_aligned_allocator > -class concurrent_priority_queue { - public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - typedef size_t size_type; - - //! Difference type for iterator - typedef ptrdiff_t difference_type; - - //! Allocator type - typedef A allocator_type; - - //! Constructs a new concurrent_priority_queue with default capacity - explicit concurrent_priority_queue(const allocator_type& a = allocator_type()) : mark(0), my_size(0), data(a) - { - my_aggregator.initialize_handler(my_functor_t(this)); - } - - //! Constructs a new concurrent_priority_queue with init_sz capacity - explicit concurrent_priority_queue(size_type init_capacity, const allocator_type& a = allocator_type()) : - mark(0), my_size(0), data(a) - { - data.reserve(init_capacity); - my_aggregator.initialize_handler(my_functor_t(this)); - } - - //! [begin,end) constructor - template - concurrent_priority_queue(InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - mark(0), data(begin, end, a) - { - my_aggregator.initialize_handler(my_functor_t(this)); - heapify(); - my_size = data.size(); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from std::initializer_list - concurrent_priority_queue(std::initializer_list init_list, const allocator_type &a = allocator_type()) : - mark(0),data(init_list.begin(), init_list.end(), a) - { - my_aggregator.initialize_handler(my_functor_t(this)); - heapify(); - my_size = data.size(); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - //! Copy constructor - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - explicit concurrent_priority_queue(const concurrent_priority_queue& src) : mark(src.mark), - my_size(src.my_size), data(src.data.begin(), src.data.end(), src.data.get_allocator()) - { - my_aggregator.initialize_handler(my_functor_t(this)); - heapify(); - } - - //! Copy constructor with specific allocator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue(const concurrent_priority_queue& src, const allocator_type& a) : mark(src.mark), - my_size(src.my_size), data(src.data.begin(), src.data.end(), a) - { - my_aggregator.initialize_handler(my_functor_t(this)); - heapify(); - } - - //! Assignment operator - /** This operation is unsafe if there are pending concurrent operations on the src queue. 
*/ - concurrent_priority_queue& operator=(const concurrent_priority_queue& src) { - if (this != &src) { - vector_t(src.data.begin(), src.data.end(), src.data.get_allocator()).swap(data); - mark = src.mark; - my_size = src.my_size; - } - return *this; - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move constructor - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue(concurrent_priority_queue&& src) : mark(src.mark), - my_size(src.my_size), data(std::move(src.data)) - { - my_aggregator.initialize_handler(my_functor_t(this)); - } - - //! Move constructor with specific allocator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue(concurrent_priority_queue&& src, const allocator_type& a) : mark(src.mark), - my_size(src.my_size), -#if __TBB_ALLOCATOR_TRAITS_PRESENT - data(std::move(src.data), a) -#else - // Some early version of C++11 STL vector does not have a constructor of vector(vector&& , allocator). - // It seems that the reason is absence of support of allocator_traits (stateful allocators). - data(a) -#endif //__TBB_ALLOCATOR_TRAITS_PRESENT - { - my_aggregator.initialize_handler(my_functor_t(this)); -#if !__TBB_ALLOCATOR_TRAITS_PRESENT - if (a != src.data.get_allocator()){ - data.reserve(src.data.size()); - data.assign(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end())); - }else{ - data = std::move(src.data); - } -#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT - } - - //! Move assignment operator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue& operator=( concurrent_priority_queue&& src) { - if (this != &src) { - mark = src.mark; - my_size = src.my_size; -#if !__TBB_ALLOCATOR_TRAITS_PRESENT - if (data.get_allocator() != src.data.get_allocator()){ - vector_t(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end()), data.get_allocator()).swap(data); - }else -#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT - { - data = std::move(src.data); - } - } - return *this; - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - //! Assign the queue from [begin,end) range, not thread-safe - template - void assign(InputIterator begin, InputIterator end) { - vector_t(begin, end, data.get_allocator()).swap(data); - mark = 0; - my_size = data.size(); - heapify(); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Assign the queue from std::initializer_list, not thread-safe - void assign(std::initializer_list il) { this->assign(il.begin(), il.end()); } - - //! Assign from std::initializer_list, not thread-safe - concurrent_priority_queue& operator=(std::initializer_list il) { - this->assign(il.begin(), il.end()); - return *this; - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - //! Returns true if empty, false otherwise - /** Returned value may not reflect results of pending operations. - This operation reads shared data and will trigger a race condition. */ - bool empty() const { return size()==0; } - - //! Returns the current number of elements contained in the queue - /** Returned value may not reflect results of pending operations. - This operation reads shared data and will trigger a race condition. */ - size_type size() const { return __TBB_load_with_acquire(my_size); } - - //! Pushes elem onto the queue, increasing capacity of queue if necessary - /** This operation can be safely used concurrently with other push, try_pop or emplace operations. 
*/ - void push(const_reference elem) { - cpq_operation op_data(elem, PUSH_OP); - my_aggregator.execute(&op_data); - if (op_data.status == FAILED) // exception thrown - throw_exception(eid_bad_alloc); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Pushes elem onto the queue, increasing capacity of queue if necessary - /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - void push(value_type &&elem) { - cpq_operation op_data(elem, PUSH_RVALUE_OP); - my_aggregator.execute(&op_data); - if (op_data.status == FAILED) // exception thrown - throw_exception(eid_bad_alloc); - } - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - //! Constructs a new element using args as the arguments for its construction and pushes it onto the queue */ - /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - template - void emplace(Args&&... args) { - push(value_type(std::forward(args)...)); - } -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Gets a reference to and removes highest priority element - /** If a highest priority element was found, sets elem and returns true, - otherwise returns false. - This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - bool try_pop(reference elem) { - cpq_operation op_data(POP_OP); - op_data.elem = &elem; - my_aggregator.execute(&op_data); - return op_data.status==SUCCEEDED; - } - - //! Clear the queue; not thread-safe - /** This operation is unsafe if there are pending concurrent operations on the queue. - Resets size, effectively emptying queue; does not free space. - May not clear elements added in pending operations. */ - void clear() { - data.clear(); - mark = 0; - my_size = 0; - } - - //! Swap this queue with another; not thread-safe - /** This operation is unsafe if there are pending concurrent operations on the queue. */ - void swap(concurrent_priority_queue& q) { - using std::swap; - data.swap(q.data); - swap(mark, q.mark); - swap(my_size, q.my_size); - } - - //! Return allocator object - allocator_type get_allocator() const { return data.get_allocator(); } - - private: - enum operation_type {INVALID_OP, PUSH_OP, POP_OP, PUSH_RVALUE_OP}; - enum operation_status { WAIT=0, SUCCEEDED, FAILED }; - - class cpq_operation : public aggregated_operation { - public: - operation_type type; - union { - value_type *elem; - size_type sz; - }; - cpq_operation(const_reference e, operation_type t) : - type(t), elem(const_cast(&e)) {} - cpq_operation(operation_type t) : type(t) {} - }; - - class my_functor_t { - concurrent_priority_queue *cpq; - public: - my_functor_t() {} - my_functor_t(concurrent_priority_queue *cpq_) : cpq(cpq_) {} - void operator()(cpq_operation* op_list) { - cpq->handle_operations(op_list); - } - }; - - typedef tbb::internal::aggregator< my_functor_t, cpq_operation > aggregator_t; - aggregator_t my_aggregator; - //! Padding added to avoid false sharing - char padding1[NFS_MaxLineSize - sizeof(aggregator_t)]; - //! The point at which unsorted elements begin - size_type mark; - __TBB_atomic size_type my_size; - Compare compare; - //! Padding added to avoid false sharing - char padding2[NFS_MaxLineSize - (2*sizeof(size_type)) - sizeof(Compare)]; - //! 
Storage for the heap of elements in queue, plus unheapified elements - /** data has the following structure: - - binary unheapified - heap elements - ____|_______|____ - | | | - v v v - [_|...|_|_|...|_| |...| ] - 0 ^ ^ ^ - | | |__capacity - | |__my_size - |__mark - - Thus, data stores the binary heap starting at position 0 through - mark-1 (it may be empty). Then there are 0 or more elements - that have not yet been inserted into the heap, in positions - mark through my_size-1. */ - typedef std::vector vector_t; - vector_t data; - - void handle_operations(cpq_operation *op_list) { - cpq_operation *tmp, *pop_list=NULL; - - __TBB_ASSERT(mark == data.size(), NULL); - - // First pass processes all constant (amortized; reallocation may happen) time pushes and pops. - while (op_list) { - // ITT note: &(op_list->status) tag is used to cover accesses to op_list - // node. This thread is going to handle the operation, and so will acquire it - // and perform the associated operation w/o triggering a race condition; the - // thread that created the operation is waiting on the status field, so when - // this thread is done with the operation, it will perform a - // store_with_release to give control back to the waiting thread in - // aggregator::insert_operation. - call_itt_notify(acquired, &(op_list->status)); - __TBB_ASSERT(op_list->type != INVALID_OP, NULL); - tmp = op_list; - op_list = itt_hide_load_word(op_list->next); - if (tmp->type == POP_OP) { - if (mark < data.size() && - compare(data[0], data[data.size()-1])) { - // there are newly pushed elems and the last one - // is higher than top - *(tmp->elem) = move(data[data.size()-1]); - __TBB_store_with_release(my_size, my_size-1); - itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); - data.pop_back(); - __TBB_ASSERT(mark<=data.size(), NULL); - } - else { // no convenient item to pop; postpone - itt_hide_store_word(tmp->next, pop_list); - pop_list = tmp; - } - } else { // PUSH_OP or PUSH_RVALUE_OP - __TBB_ASSERT(tmp->type == PUSH_OP || tmp->type == PUSH_RVALUE_OP, "Unknown operation" ); - __TBB_TRY{ - if (tmp->type == PUSH_OP) { - data.push_back(*(tmp->elem)); - } else { - data.push_back(move(*(tmp->elem))); - } - __TBB_store_with_release(my_size, my_size + 1); - itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); - } __TBB_CATCH(...) 
{
-                    itt_store_word_with_release(tmp->status, uintptr_t(FAILED));
-                }
-            }
-        }
-
-        // second pass processes pop operations
-        while (pop_list) {
-            tmp = pop_list;
-            pop_list = itt_hide_load_word(pop_list->next);
-            __TBB_ASSERT(tmp->type == POP_OP, NULL);
-            if (data.empty()) {
-                itt_store_word_with_release(tmp->status, uintptr_t(FAILED));
-            }
-            else {
-                __TBB_ASSERT(mark<=data.size(), NULL);
-                if (mark < data.size() &&
-                    compare(data[0], data[data.size()-1])) {
-                    // there are newly pushed elems and the last one is
-                    // higher than top
-                    *(tmp->elem) = move(data[data.size()-1]);
-                    __TBB_store_with_release(my_size, my_size-1);
-                    itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED));
-                    data.pop_back();
-                }
-                else { // extract top and push last element down heap
-                    *(tmp->elem) = move(data[0]);
-                    __TBB_store_with_release(my_size, my_size-1);
-                    itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED));
-                    reheap();
-                }
-            }
-        }
-
-        // heapify any leftover pushed elements before doing the next
-        // batch of operations
-        if (mark<data.size()) heapify();
-    }
-
-    //! Merge unsorted elements into heap
-    void heapify() {
-        if (!mark && data.size()>0) mark = 1;
-        for (; mark<data.size(); ++mark) { // for each unheapified element under size
-            size_type cur_pos = mark;
-            value_type to_place = move(data[mark]);
-            do { // push to_place up the heap
-                size_type parent = (cur_pos-1)>>1;
-                if (!compare(data[parent], to_place)) break;
-                data[cur_pos] = move(data[parent]);
-                cur_pos = parent;
-            } while( cur_pos );
-            data[cur_pos] = move(to_place);
-        }
-    }
-
-    //! Re-heapify after an extraction
-    /** Re-heapify by pushing last element down the heap from the root. */
-    void reheap() {
-        size_type cur_pos=0, child=1;
-
-        while (child < mark) {
-            size_type target = child;
-            if (child+1 < mark && compare(data[child], data[child+1]))
-                ++target;
-            // target now has the higher priority child
-            if (compare(data[target], data[data.size()-1])) break;
-            data[cur_pos] = move(data[target]);
-            cur_pos = target;
-            child = (cur_pos<<1)+1;
-        }
-        if (cur_pos != data.size()-1)
-            data[cur_pos] = move(data[data.size()-1]);
-        data.pop_back();
-        if (mark > data.size()) mark = data.size();
-    }
-};
-
-} // namespace interface5
-
-using interface5::concurrent_priority_queue;
-
-} // namespace tbb
-
-#endif /* __TBB_concurrent_priority_queue_H */
diff --git a/inst/include/tbb/concurrent_queue.h b/inst/include/tbb/concurrent_queue.h
deleted file mode 100644
index 2cead237c..000000000
--- a/inst/include/tbb/concurrent_queue.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.
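
A short illustrative sketch of the priority-queue interface deleted above (sequential here so the result is deterministic; push(), try_pop() and emplace() may also be called concurrently from multiple threads):

    #include <cassert>
    #include "tbb/concurrent_priority_queue.h"

    int main() {
        tbb::concurrent_priority_queue<int> q;  // max-heap with the default std::less<int>
        q.push(3);
        q.push(10);
        q.push(7);

        int top = 0;
        bool ok = q.try_pop(top);   // non-blocking; false only when the queue is empty
        assert(ok && top == 10);    // highest-priority element comes out first
        return 0;
    }
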
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_H -#define __TBB_concurrent_queue_H - -#include "internal/_concurrent_queue_impl.h" - -namespace tbb { - -namespace strict_ppl { - -//! A high-performance thread-safe non-blocking concurrent queue. -/** Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_queue: public internal::concurrent_queue_base_v3 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - //! Allocates a block of size n (bytes) - /*override*/ virtual void *allocate_block( size_t n ) { - void *b = reinterpret_cast(my_allocator.allocate( n )); - if( !b ) - internal::throw_exception(internal::eid_bad_alloc); - return b; - } - - //! Deallocates block created by allocate_block. - /*override*/ virtual void deallocate_block( void *b, size_t n ) { - my_allocator.deallocate( reinterpret_cast(b), n ); - } - - static void copy_construct_item(T* location, const void* src){ - new (location) T(*static_cast(src)); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - static void move_construct_item(T* location, const void* src) { - new (location) T( std::move(*static_cast(const_cast(src))) ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - typedef size_t size_type; - - //! Difference type for iterator - typedef ptrdiff_t difference_type; - - //! Allocator type - typedef A allocator_type; - - //! Construct empty queue - explicit concurrent_queue(const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - } - - //! [begin,end) constructor - template - concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - for( ; begin != end; ++begin ) - this->push(*begin); - } - - //! Copy constructor - concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) : - internal::concurrent_queue_base_v3(), my_allocator( a ) - { - this->assign( src, copy_construct_item ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move constructors - concurrent_queue( concurrent_queue&& src ) : - internal::concurrent_queue_base_v3(), my_allocator( std::move(src.my_allocator) ) - { - this->internal_swap( src ); - } - - concurrent_queue( concurrent_queue&& src, const allocator_type& a ) : - internal::concurrent_queue_base_v3(), my_allocator( a ) - { - // checking that memory allocated by one instance of allocator can be deallocated - // with another - if( my_allocator == src.my_allocator) { - this->internal_swap( src ); - } else { - // allocators are different => performing per-element move - this->assign( src, move_construct_item ); - src.clear(); - } - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Destroy queue - ~concurrent_queue(); - - //! Enqueue an item at tail of queue. 
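
A minimal sketch of the unbounded queue whose push() is declared just below (illustrative only; the values are arbitrary). push() never blocks because the queue grows as needed, and try_pop() never waits:

    #include <iostream>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> q;   // unbounded, non-blocking
        for (int i = 0; i < 4; ++i)
            q.push(i);                  // safe from many threads at once

        int v = 0;
        while (q.try_pop(v))            // returns false once the queue is empty
            std::cout << v << "\n";     // 0 1 2 3: FIFO order
    }
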
- void push( const T& source ) { - this->internal_push( &source, copy_construct_item ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - void push( T&& source ) { - this->internal_push( &source, move_construct_item ); - } - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - void emplace( Arguments&&... args ) { - push( T(std::forward( args )...) ); - } -#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& result ) { - return this->internal_try_pop( &result ); - } - - //! Return the number of items in the queue; thread unsafe - size_type unsafe_size() const {return this->internal_size();} - - //! Equivalent to size()==0. - bool empty() const {return this->internal_empty();} - - //! Clear the queue. not thread-safe. - void clear() ; - - //! Return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} -} ; - -template -concurrent_queue::~concurrent_queue() { - clear(); - this->internal_finish_clear(); -} - -template -void concurrent_queue::clear() { - while( !empty() ) { - T value; - this->internal_try_pop(&value); - } -} - -} // namespace strict_ppl - -//! A high-performance thread-safe blocking concurrent bounded queue. -/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics. - Note that method names agree with the PPL-style concurrent queue. - Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_bounded_queue: public internal::concurrent_queue_base_v8 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - typedef typename concurrent_queue_base_v3::padded_page padded_page; - typedef typename concurrent_queue_base_v3::copy_specifics copy_specifics; - - //! 
Class used to ensure exception-safety of method "pop" - class destroyer: internal::no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - T& get_ref( page& p, size_t index ) { - __TBB_ASSERT( index(static_cast(&p))->last)[index]; - } - - /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - /*override*/ virtual void move_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T( std::move(*static_cast(const_cast(src))) ); - } -#else - /*override*/ virtual void move_item( page&, size_t, const void* ) { - __TBB_ASSERT( false, "Unreachable code" ); - } -#endif - - /*override*/ virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( get_ref( const_cast(src), sindex ) ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - /*override*/ virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( std::move(get_ref( const_cast(src), sindex )) ); - } -#else - /*override*/ virtual void move_page_item( page&, size_t, const page&, size_t ) { - __TBB_ASSERT( false, "Unreachable code" ); - } -#endif - - /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = tbb::internal::move( from ); - } - - /*override*/ virtual page *allocate_page() { - size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); - page *p = reinterpret_cast(my_allocator.allocate( n )); - if( !p ) - internal::throw_exception(internal::eid_bad_alloc); - return p; - } - - /*override*/ virtual void deallocate_page( page *p ) { - size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); - my_allocator.deallocate( reinterpret_cast(p), n ); - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Allocator type - typedef A allocator_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - /** Note that the size_type is a signed integral type. - This is because the size can be negative if there are pending pops without corresponding pushes. */ - typedef std::ptrdiff_t size_type; - - //! Difference type for iterator - typedef std::ptrdiff_t difference_type; - - //! Construct empty queue - explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : - concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - } - - //! Copy constructor - concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type()) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - assign( src ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
Move constructors - concurrent_bounded_queue( concurrent_bounded_queue&& src ) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( std::move(src.my_allocator) ) - { - internal_swap( src ); - } - - concurrent_bounded_queue( concurrent_bounded_queue&& src, const allocator_type& a ) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - // checking that memory allocated by one instance of allocator can be deallocated - // with another - if( my_allocator == src.my_allocator) { - this->internal_swap( src ); - } else { - // allocators are different => performing per-element move - this->move_content( src ); - src.clear(); - } - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! [begin,end) constructor - template - concurrent_bounded_queue( InputIterator begin, InputIterator end, - const allocator_type& a = allocator_type()) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - for( ; begin != end; ++begin ) - internal_push_if_not_full(&*begin); - } - - //! Destroy queue - ~concurrent_bounded_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move an item at tail of queue. - void push( T&& source ) { - internal_push_move( &source ); - } - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - void emplace( Arguments&&... args ) { - push( T(std::forward( args )...) ); - } -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Dequeue item from head of queue. - /** Block until an item becomes available, and then dequeue it. */ - void pop( T& destination ) { - internal_pop( &destination ); - } - -#if TBB_USE_EXCEPTIONS - //! Abort all pending queue operations - void abort() { - internal_abort(); - } -#endif - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool try_push( const T& source ) { - return internal_push_if_not_full( &source ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool try_push( T&& source ) { - return internal_push_move_if_not_full( &source ); - } -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - bool try_emplace( Arguments&&... args ) { - return try_push( T(std::forward( args )...) ); - } -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& destination ) { - return internal_pop_if_present( &destination ); - } - - //! Return number of pushes minus number of pops. - /** Note that the result can be negative if there are pops waiting for the - corresponding pushes. The result can also exceed capacity() if there - are push operations in flight. */ - size_type size() const {return internal_size();} - - //! Equivalent to size()<=0. - bool empty() const {return internal_empty();} - - //! Maximum number of allowed elements - size_type capacity() const { - return my_capacity; - } - - //! Set the capacity - /** Setting the capacity to 0 causes subsequent try_push operations to always fail, - and subsequent push operations to block forever. 
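
An illustrative sketch of those bounded, blocking semantics (the capacity value and items are hypothetical):

    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_bounded_queue<int> q;
        q.set_capacity(2);            // beyond this, push() blocks and try_push() fails

        q.push(1);
        q.push(2);
        bool pushed = q.try_push(3);  // non-blocking: returns false, queue is full
        (void) pushed;

        int v = 0;
        q.pop(v);                     // blocks until an item is available; v == 1
        return 0;
    }
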
*/ - void set_capacity( size_type new_capacity ) { - internal_set_capacity( new_capacity, sizeof(T) ); - } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! clear the queue. not thread-safe. - void clear() ; - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} - -}; - -template -concurrent_bounded_queue::~concurrent_bounded_queue() { - clear(); - internal_finish_clear(); -} - -template -void concurrent_bounded_queue::clear() { - while( !empty() ) { - T value; - internal_pop_if_present(&value); - } -} - -using strict_ppl::concurrent_queue; - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_H */ diff --git a/inst/include/tbb/concurrent_unordered_map.h b/inst/include/tbb/concurrent_unordered_map.h deleted file mode 100644 index b2f54174a..000000000 --- a/inst/include/tbb/concurrent_unordered_map.h +++ /dev/null @@ -1,326 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. 
*/ - -#ifndef __TBB_concurrent_unordered_map_H -#define __TBB_concurrent_unordered_map_H - -#include "internal/_concurrent_unordered_impl.h" - -namespace tbb -{ - -namespace interface5 { - -// Template class for hash map traits -template -class concurrent_unordered_map_traits -{ -protected: - typedef std::pair value_type; - typedef Key key_type; - typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; - enum { allow_multimapping = Allow_multimapping }; - - concurrent_unordered_map_traits() : my_hash_compare() {} - concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {} - - class value_compare : public std::binary_function - { - friend class concurrent_unordered_map_traits; - - public: - bool operator()(const value_type& left, const value_type& right) const - { - return (my_hash_compare(left.first, right.first)); - } - - value_compare(const hash_compare& comparator) : my_hash_compare(comparator) {} - - protected: - hash_compare my_hash_compare; // the comparator predicate for keys - }; - - template - static const Key& get_key(const std::pair& value) { - return (value.first); - } - - hash_compare my_hash_compare; // the comparator predicate for keys -}; - -template , typename Key_equality = std::equal_to, - typename Allocator = tbb::tbb_allocator > > -class concurrent_unordered_map : - public internal::concurrent_unordered_base< concurrent_unordered_map_traits, Allocator, false> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef concurrent_unordered_map_traits traits_type; - typedef internal::concurrent_unordered_base< traits_type > base_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::end; - using base_type::find; - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef T mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_map(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - } - - concurrent_unordered_map(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - insert(first, last); - } - -#if 
__TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_map(const concurrent_unordered_map& table) - : base_type(table) - { - } - - concurrent_unordered_map& operator=(const concurrent_unordered_map& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_map(concurrent_unordered_map&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_map& operator=(concurrent_unordered_map&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_map(concurrent_unordered_map&& table, const Allocator& a) : base_type(std::move(table), a) - { - } -#endif - // Observers - mapped_type& operator[](const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - where = insert(std::pair(key, mapped_type())).first; - } - - return ((*where).second); - } - - mapped_type& at(const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } - - const mapped_type& at(const key_type& key) const - { - const_iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } -}; - -template < typename Key, typename T, typename Hasher = tbb::tbb_hash, typename Key_equality = std::equal_to, - typename Allocator = tbb::tbb_allocator > > -class concurrent_unordered_multimap : - public internal::concurrent_unordered_base< concurrent_unordered_map_traits< Key, T, - internal::hash_compare, Allocator, true> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef concurrent_unordered_map_traits traits_type; - typedef internal::concurrent_unordered_base base_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef T mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator 
const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_multimap(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - } - - concurrent_unordered_multimap(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets,key_compare(_Hasher,_Key_equality), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_multimap(const concurrent_unordered_multimap& table) - : base_type(table) - { - } - - concurrent_unordered_multimap& operator=(const concurrent_unordered_multimap& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_multimap(concurrent_unordered_multimap&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_multimap& operator=(concurrent_unordered_multimap&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_multimap(concurrent_unordered_multimap&& table, const Allocator& a) : base_type(std::move(table), a) - { - } -#endif -}; -} // namespace interface5 - -using interface5::concurrent_unordered_map; -using interface5::concurrent_unordered_multimap; - -} // namespace tbb - -#endif// __TBB_concurrent_unordered_map_H diff --git a/inst/include/tbb/concurrent_unordered_set.h b/inst/include/tbb/concurrent_unordered_set.h deleted file mode 100644 index 846351869..000000000 --- a/inst/include/tbb/concurrent_unordered_set.h +++ /dev/null @@ -1,269 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
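Note: the concurrent_unordered_map.h removal above drops the bundled tbb::concurrent_unordered_map and tbb::concurrent_unordered_multimap. For reference, a minimal usage sketch of that API follows; it assumes a TBB 4.x installation still supplies the header, and the variable names (words, counts) are purely illustrative.

#include "tbb/concurrent_unordered_map.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include <string>
#include <vector>
#include <cstdio>

int main() {
    std::vector<std::string> words(1000, "tbb");
    tbb::concurrent_unordered_map<std::string, int> counts;

    // insert() is safe to call from many threads at once;
    // concurrent erasure is not supported by this container.
    tbb::parallel_for(
        tbb::blocked_range<size_t>(0, words.size()),
        [&](const tbb::blocked_range<size_t>& r) {
            for (size_t i = r.begin(); i != r.end(); ++i)
                counts.insert(std::make_pair(words[i], 0));
        });

    // operator[] inserts a default-constructed value for an absent key;
    // at() throws instead of inserting.
    counts["tbb"] = 42;
    std::printf("%d\n", counts.at("tbb"));
    return 0;
}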
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. */ - -#ifndef __TBB_concurrent_unordered_set_H -#define __TBB_concurrent_unordered_set_H - -#include "internal/_concurrent_unordered_impl.h" - -namespace tbb -{ - -namespace interface5 { - -// Template class for hash set traits -template -class concurrent_unordered_set_traits -{ -protected: - typedef Key value_type; - typedef Key key_type; - typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; - enum { allow_multimapping = Allow_multimapping }; - - concurrent_unordered_set_traits() : my_hash_compare() {} - concurrent_unordered_set_traits(const hash_compare& hc) : my_hash_compare(hc) {} - - typedef hash_compare value_compare; - - static const Key& get_key(const value_type& value) { - return value; - } - - hash_compare my_hash_compare; // the comparator predicate for keys -}; - -template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > -class concurrent_unordered_set : public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, false> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef internal::concurrent_unordered_base< concurrent_unordered_set_traits > base_type; - typedef concurrent_unordered_set_traits, Allocator, false> traits_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef Key mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_set(size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - } - - concurrent_unordered_set(const Allocator& a) : 
base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_set(const concurrent_unordered_set& table) - : base_type(table) - { - } - - concurrent_unordered_set& operator=(const concurrent_unordered_set& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_set(concurrent_unordered_set&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_set& operator=(concurrent_unordered_set&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_set(concurrent_unordered_set&& table, const Allocator& a) - : base_type(std::move(table), a) - { - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -}; - -template , typename Key_equality = std::equal_to, - typename Allocator = tbb::tbb_allocator > -class concurrent_unordered_multiset : - public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, true> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef concurrent_unordered_set_traits traits_type; - typedef internal::concurrent_unordered_base< traits_type > base_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef Key mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_multiset(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : 
base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - } - - concurrent_unordered_multiset(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_multiset(const concurrent_unordered_multiset& table) - : base_type(table) - { - } - - concurrent_unordered_multiset& operator=(const concurrent_unordered_multiset& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_multiset(concurrent_unordered_multiset&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_multiset& operator=(concurrent_unordered_multiset&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_multiset(concurrent_unordered_multiset&& table, const Allocator& a) - : base_type(std::move(table), a) - { - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT -}; -} // namespace interface5 - -using interface5::concurrent_unordered_set; -using interface5::concurrent_unordered_multiset; - -} // namespace tbb - -#endif// __TBB_concurrent_unordered_set_H diff --git a/inst/include/tbb/concurrent_vector.h b/inst/include/tbb/concurrent_vector.h deleted file mode 100644 index 3b6ad3203..000000000 --- a/inst/include/tbb/concurrent_vector.h +++ /dev/null @@ -1,1334 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
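Note: the concurrent_unordered_set.h removal above likewise drops tbb::concurrent_unordered_set and tbb::concurrent_unordered_multiset. A minimal sketch of the old API, again assuming a TBB 4.x header is available:

#include "tbb/concurrent_unordered_set.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include <cstdio>

int main() {
    tbb::concurrent_unordered_set<int> seen;

    // Concurrent insertion and traversal are supported; erasure is not.
    tbb::parallel_for(
        tbb::blocked_range<int>(0, 10000),
        [&](const tbb::blocked_range<int>& r) {
            for (int i = r.begin(); i != r.end(); ++i)
                seen.insert(i % 257);   // many duplicates; the set keeps one of each
        });

    std::printf("unique keys: %zu\n", seen.size());
    return 0;
}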
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_vector_H -#define __TBB_concurrent_vector_H - -#include "tbb_stddef.h" -#include "tbb_exception.h" -#include "atomic.h" -#include "cache_aligned_allocator.h" -#include "blocked_range.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" -#include -#include // for memset() - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if _MSC_VER==1500 && !__INTEL_COMPILER - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include /* std::numeric_limits */ -#if _MSC_VER==1500 && !__INTEL_COMPILER - #pragma warning( pop ) -#endif - -#if __TBB_INITIALIZER_LISTS_PRESENT - #include -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) -#if defined(_Wp64) - #pragma warning (disable: 4267) -#endif - #pragma warning (disable: 4127) //warning C4127: conditional expression is constant -#endif - -namespace tbb { - -template > -class concurrent_vector; - -template -class vector_iterator; - -//! @cond INTERNAL -namespace internal { - - //! Bad allocation marker - static void *const vector_allocation_error_flag = reinterpret_cast(size_t(63)); - - //! Exception helper function - template - void handle_unconstructed_elements(T* array, size_t n_of_elements){ - std::memset(array, 0, n_of_elements * sizeof(T)); - } - - //! Base class of concurrent vector implementation. - /** @ingroup containers */ - class concurrent_vector_base_v3 { - protected: - - // Basic types declarations - typedef size_t segment_index_t; - typedef size_t size_type; - - // Using enumerations due to Mac linking problems of static const variables - enum { - // Size constants - default_initial_segments = 1, // 2 initial items - //! Number of slots for segment pointers inside the class - pointers_per_short_table = 3, // to fit into 8 words of entire structure - pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit - }; - - struct segment_not_used {}; - struct segment_allocated {}; - struct segment_allocation_failed {}; - - class segment_t; - class segment_value_t { - void* array; - private: - //TODO: More elegant way to grant access to selected functions _only_? - friend class segment_t; - explicit segment_value_t(void* an_array):array(an_array) {} - public: - friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;} - friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;} - friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;} - template - friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! 
(lhs == arg);} - - template - T* pointer() const { return static_cast(const_cast(array)); } - }; - - // Segment pointer. - class segment_t { - atomic array; - public: - segment_t(){ store(segment_not_used());} - //Copy ctor and assignment operator are defined to ease using of stl algorithms. - //These algorithms usually not a synchronization point, so, semantic is - //intentionally relaxed here. - segment_t(segment_t const& rhs ){ array.store(rhs.array.load());} - - void swap(segment_t & rhs ){ - tbb::internal::swap(array, rhs.array); - } - - segment_t& operator=(segment_t const& rhs ){ - array.store(rhs.array.load()); - return *this; - } - - template - segment_value_t load() const { return segment_value_t(array.load());} - - template - void store(segment_not_used) { - array.store(0); - } - - template - void store(segment_allocation_failed) { - __TBB_ASSERT(load() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical"); - array.store(internal::vector_allocation_error_flag); - } - - template - void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) { - __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(), - "other overloads of store should be used for marking segment as not_used or allocation_failed" ); - array.store(allocated_segment_pointer); - } - -#if TBB_USE_ASSERT - ~segment_t() { - __TBB_ASSERT(load() != segment_allocated(), "should have been freed by clear" ); - } -#endif /* TBB_USE_ASSERT */ - }; - friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true); - - // Data fields - - //! allocator function pointer - void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t); - - //! count of segments in the first block - atomic my_first_block; - - //! Requested size of vector - atomic my_early_size; - - //! Pointer to the segments table - atomic my_segment; - - //! embedded storage of segment pointers - segment_t my_storage[pointers_per_short_table]; - - // Methods - - concurrent_vector_base_v3() { - //Here the semantic is intentionally relaxed. - //The reason this is next: - //Object that is in middle of construction (i.e. its constructor is not yet finished) - //cannot be used concurrently until the construction is finished. - //Thus to flag other threads that construction is finished, some synchronization with - //acquire-release semantic should be done by the (external) code that uses the vector. - //So, no need to do the synchronization inside the vector. - - my_early_size.store(0); - my_first_block.store(0); // here is not default_initial_segments - my_segment.store(my_storage); - } - - __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); - - //these helpers methods use the fact that segments are allocated so - //that every segment size is a (increasing) power of 2. - //with one exception 0 segment has size of 2 as well segment 1; - //e.g. size of segment with index of 3 is 2^3=8; - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)< - friend class vector_iterator; - - }; - - inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) { - lhs.swap(rhs); - } - - typedef concurrent_vector_base_v3 concurrent_vector_base; - - //! 
Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/ - /** Value is either the T or const T type of the container. - @ingroup containers */ - template - class vector_iterator - { - //! concurrent_vector over which we are iterating. - Container* my_vector; - - //! Index into the vector - size_t my_index; - - //! Caches my_vector->internal_subscript(my_index) - /** NULL if cached value is not available */ - mutable Value* my_item; - - template - friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ); - - template - friend bool operator==( const vector_iterator& i, const vector_iterator& j ); - - template - friend bool operator<( const vector_iterator& i, const vector_iterator& j ); - - template - friend ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ); - - template - friend class internal::vector_iterator; - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class tbb::concurrent_vector; -#else -public: // workaround for MSVC -#endif - - vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) : - my_vector(const_cast(&vector)), - my_index(index), - my_item(static_cast(ptr)) - {} - - public: - //! Default constructor - vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {} - - vector_iterator( const vector_iterator& other ) : - my_vector(other.my_vector), - my_index(other.my_index), - my_item(other.my_item) - {} - - vector_iterator operator+( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index+offset ); - } - vector_iterator &operator+=( ptrdiff_t offset ) { - my_index+=offset; - my_item = NULL; - return *this; - } - vector_iterator operator-( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index-offset ); - } - vector_iterator &operator-=( ptrdiff_t offset ) { - my_index-=offset; - my_item = NULL; - return *this; - } - Value& operator*() const { - Value* item = my_item; - if( !item ) { - item = my_item = &my_vector->internal_subscript(my_index); - } - __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" ); - return *item; - } - Value& operator[]( ptrdiff_t k ) const { - return my_vector->internal_subscript(my_index+k); - } - Value* operator->() const {return &operator*();} - - //! Pre increment - vector_iterator& operator++() { - size_t element_index = ++my_index; - if( my_item ) { - //TODO: consider using of knowledge about "first_block optimization" here as well? - if( concurrent_vector_base::is_first_element_in_segment(element_index)) { - //if the iterator crosses a segment boundary, the pointer become invalid - //as possibly next segment is in another memory location - my_item= NULL; - } else { - ++my_item; - } - } - return *this; - } - - //! Pre decrement - vector_iterator& operator--() { - __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); - size_t element_index = my_index--; - if( my_item ) { - if(concurrent_vector_base::is_first_element_in_segment(element_index)) { - //if the iterator crosses a segment boundary, the pointer become invalid - //as possibly next segment is in another memory location - my_item= NULL; - } else { - --my_item; - } - } - return *this; - } - - //! Post increment - vector_iterator operator++(int) { - vector_iterator result = *this; - operator++(); - return result; - } - - //! 
Post decrement - vector_iterator operator--(int) { - vector_iterator result = *this; - operator--(); - return result; - } - - // STL support - - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) { - return vector_iterator( *v.my_vector, v.my_index+offset ); - } - - template - bool operator==( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index==j.my_index && i.my_vector == j.my_vector; - } - - template - bool operator!=( const vector_iterator& i, const vector_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index - bool operator>( const vector_iterator& i, const vector_iterator& j ) { - return j - bool operator>=( const vector_iterator& i, const vector_iterator& j ) { - return !(i - bool operator<=( const vector_iterator& i, const vector_iterator& j ) { - return !(j - ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ) { - return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index); - } - - template - class allocator_base { - public: - typedef typename A::template - rebind::other allocator_type; - allocator_type my_allocator; - - allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {} - - }; - -} // namespace internal -//! @endcond - -//! Concurrent vector container -/** concurrent_vector is a container having the following main properties: - - It provides random indexed access to its elements. The index of the first element is 0. - - It ensures safe concurrent growing its size (different threads can safely append new elements). - - Adding new elements does not invalidate existing iterators and does not change indices of existing items. - -@par Compatibility - The class meets all Container Requirements and Reversible Container Requirements from - C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet - Sequence Requirements due to absence of insert() and erase() methods. - -@par Exception Safety - Methods working with memory allocation and/or new elements construction can throw an - exception if allocator fails to allocate memory or element's default constructor throws one. - Concurrent vector's element of type T must conform to the following requirements: - - Throwing an exception is forbidden for destructor of T. - - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized. - . - Otherwise, the program's behavior is undefined. -@par - If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation. - Invalid state means: - - There are no guarantees that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens. - - An invalid vector instance cannot be repaired; it is unable to grow anymore. - - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful. - - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown. - . 
- If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails. - -@par Fragmentation - Unlike an STL vector, a concurrent_vector does not move existing elements if it needs - to allocate more memory. The container is divided into a series of contiguous arrays of - elements. The first reservation, growth, or assignment operation determines the size of - the first array. Using small number of elements as initial size incurs fragmentation that - may increase element access time. Internal layout can be optimized by method compact() that - merges several smaller arrays into one solid. - -@par Changes since TBB 2.1 - - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated. - - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector - - Added resize() methods (not thread-safe) - - Added cbegin/cend/crbegin/crend methods - - Changed return type of methods grow* and push_back to iterator - -@par Changes since TBB 2.0 - - Implemented exception-safety guarantees - - Added template argument for allocator - - Added allocator argument in constructors - - Faster index calculation - - First growth call specifies a number of segments to be merged in the first allocation. - - Fixed memory blow up for swarm of vector's instances of small size - - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items. - - Added STL-like constructors. - - Added operators ==, < and derivatives - - Added at() method, approved for using after an exception was thrown inside the vector - - Added get_allocator() method. - - Added assign() methods - - Added compact() method to defragment first segments - - Added swap() method - - range() defaults on grainsize = 1 supporting auto grainsize algorithms. 
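The documentation block above describes the key property of the removed container: growth never moves existing elements, so concurrent push_back is safe and existing indices and iterators stay valid. A minimal sketch of that pattern (assuming the TBB 4.x header; element counts are illustrative):

#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include <cstdio>

int main() {
    tbb::concurrent_vector<int> v;

    // Many threads may push_back concurrently; elements never move once
    // constructed, so indices and iterators of existing items remain valid.
    tbb::parallel_for(
        tbb::blocked_range<int>(0, 100000),
        [&](const tbb::blocked_range<int>& r) {
            for (int i = r.begin(); i != r.end(); ++i)
                v.push_back(i);
        });

    // The segmented storage can be defragmented once growth is finished
    // (not thread-safe).
    v.shrink_to_fit();

    std::printf("size = %zu\n", v.size());
    return 0;
}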
- - @ingroup containers */ -template -class concurrent_vector: protected internal::allocator_base, - private internal::concurrent_vector_base { -private: - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} - template - generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - template - friend class internal::vector_iterator; - -public: - //------------------------------------------------------------------------ - // STL compatible types - //------------------------------------------------------------------------ - typedef internal::concurrent_vector_base_v3::size_type size_type; - typedef typename internal::allocator_base::allocator_type allocator_type; - - typedef T value_type; - typedef ptrdiff_t difference_type; - typedef T& reference; - typedef const T& const_reference; - typedef T *pointer; - typedef const T *const_pointer; - - typedef internal::vector_iterator iterator; - typedef internal::vector_iterator const_iterator; - -#if !defined(_MSC_VER) || _CPPLIB_VER>=300 - // Assume ISO standard definition of std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#else - // Use non-standard std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */ - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - typedef generic_range_type range_type; - typedef generic_range_type const_range_type; - - //------------------------------------------------------------------------ - // STL compatible constructors & destructors - //------------------------------------------------------------------------ - - //! Construct empty vector. - explicit concurrent_vector(const allocator_type &a = allocator_type()) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - } - - //Constructors are not required to have synchronization - //(for more details see comment in the concurrent_vector_base constructor). -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_vector(std::initializer_list init_list, const allocator_type &a = allocator_type()) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_assign_iterators(init_list.begin(), init_list.end()); - } __TBB_CATCH(...) { - segment_t *table = my_segment.load();; - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load()); - __TBB_RETHROW(); - } - - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - //! Copying constructor - concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_copy(vector, sizeof(T), ©_array); - } __TBB_CATCH(...) 
{ - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load()); - __TBB_RETHROW(); - } - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move constructor - //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor::value) - concurrent_vector( concurrent_vector&& source) - : internal::allocator_base(std::move(source)), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - concurrent_vector_base_v3::internal_swap(source); - } - - concurrent_vector( concurrent_vector&& source, const allocator_type& a) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - //C++ standard requires instances of an allocator being compared for equality, - //which means that memory allocated by one instance is possible to deallocate with the other one. - if (a == source.my_allocator) { - concurrent_vector_base_v3::internal_swap(source); - } else { - __TBB_TRY { - internal_copy(source, sizeof(T), &move_array); - } __TBB_CATCH(...) { - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load()); - __TBB_RETHROW(); - } - } - } - -#endif - - //! Copying constructor for vector with different allocator type - template - concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_copy(vector.internal_vector_base(), sizeof(T), ©_array); - } __TBB_CATCH(...) { - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); - __TBB_RETHROW(); - } - } - - //! Construction with initial size specified by argument n - explicit concurrent_vector(size_type n) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); - } __TBB_CATCH(...) { - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); - __TBB_RETHROW(); - } - } - - //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance - concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type()) - : internal::allocator_base(a) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } __TBB_CATCH(...) { - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); - __TBB_RETHROW(); - } - } - - //! Construction with copying iteration range and given allocator instance - template - concurrent_vector(I first, I last, const allocator_type &a = allocator_type()) - : internal::allocator_base(a) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_assign_range(first, last, static_cast::is_integer> *>(0) ); - } __TBB_CATCH(...) { - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); - __TBB_RETHROW(); - } - } - - //! 
Assignment - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( this != &vector ) - internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array); - return *this; - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //TODO: add __TBB_NOEXCEPT() - //! Move assignment - concurrent_vector& operator=( concurrent_vector&& other ) { - __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited "); - typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; - if(pocma_t::value || this->my_allocator == other.my_allocator) { - concurrent_vector trash (std::move(*this)); - internal_swap(other); - if (pocma_t::value) { - this->my_allocator = std::move(other.my_allocator); - } - } else { - internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array); - } - return *this; - } -#endif - //TODO: add an template assignment operator? (i.e. with different element type) - - //! Assignment for vector with different allocator type - template - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( static_cast( this ) != static_cast( &vector ) ) - internal_assign(vector.internal_vector_base(), - sizeof(T), &destroy_array, &assign_array, ©_array); - return *this; - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Assignment for initializer_list - concurrent_vector& operator=( std::initializer_list init_list ) { - internal_clear(&destroy_array); - internal_assign_iterators(init_list.begin(), init_list.end()); - return *this; - } -#endif //#if __TBB_INITIALIZER_LISTS_PRESENT - - //------------------------------------------------------------------------ - // Concurrent operations - //------------------------------------------------------------------------ - //! Grow by "delta" elements. - /** Returns iterator pointing to the first new element. */ - iterator grow_by( size_type delta ) { - return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load()); - } - - //! Grow by "delta" elements using copying constructor. - /** Returns iterator pointing to the first new element. */ - iterator grow_by( size_type delta, const_reference t ) { - return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast(&t) ) : my_early_size.load()); - } - - /** Returns iterator pointing to the first new element. */ - template - iterator grow_by( I first, I last ) { - typename std::iterator_traits::difference_type delta = std::distance(first, last); - __TBB_ASSERT( delta >= 0, NULL); - - return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), ©_range, static_cast(&first)) : my_early_size.load()); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - /** Returns iterator pointing to the first new element. */ - iterator grow_by( std::initializer_list init_list ) { - return grow_by( init_list.begin(), init_list.end() ); - } -#endif //#if __TBB_INITIALIZER_LISTS_PRESENT - - //! Append minimal sequence of elements such that size()>=n. - /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. - May return while other elements are being constructed by other threads. - Returns iterator that points to beginning of appended sequence. - If no elements were appended, returns iterator pointing to nth element. 
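The grow_by()/grow_to_at_least() methods documented here reserve a block of new slots atomically and return an iterator to the first one, so each caller can fill its own slots without further locking. A short sketch under the same TBB 4.x assumption:

#include "tbb/concurrent_vector.h"
#include <cstdio>

int main() {
    tbb::concurrent_vector<int> v;

    // grow_by(n) appends n default-constructed elements and returns an
    // iterator to the first new element.
    tbb::concurrent_vector<int>::iterator it = v.grow_by(4);
    for (int i = 0; i < 4; ++i, ++it)
        *it = i;

    // grow_to_at_least(n) pads with default-constructed elements until
    // size() >= n; it never shrinks the vector.
    v.grow_to_at_least(10);

    std::printf("size = %zu\n", v.size());
    return 0;
}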
*/ - iterator grow_to_at_least( size_type n ) { - size_type m=0; - if( n ) { - m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); - if( m>n ) m=n; - } - return iterator(*this, m); - }; - - /** Analogous to grow_to_at_least( size_type n ) with exception that the new - elements are initialized by copying of t instead of default construction. */ - iterator grow_to_at_least( size_type n, const_reference t ) { - size_type m=0; - if( n ) { - m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t); - if( m>n ) m=n; - } - return iterator(*this, m); - }; - - //! Push item - /** Returns iterator pointing to the new element. */ - iterator push_back( const_reference item ) - { - size_type k; - T* ptr = static_cast(internal_push_back(sizeof(T),k)); - element_construction_guard g(ptr); - new(ptr) T(item); - g.dismiss(); - return iterator(*this, k, ptr); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Push item, move-aware - /** Returns iterator pointing to the new element. */ - iterator push_back( T&& item ) - { - size_type k; - T* ptr = static_cast(internal_push_back(sizeof(T),k)); - element_construction_guard g(ptr); - new(ptr) T(std::move(item)); - g.dismiss(); - return iterator(*this, k, ptr); - } -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - //! Push item, create item "in place" with provided arguments - /** Returns iterator pointing to the new element. */ - template - iterator emplace_back( Args&&... args) - { - size_type k; - T* ptr = static_cast(internal_push_back(sizeof(T),k)); - element_construction_guard g(ptr); - new(ptr) T( std::forward(args)...); - g.dismiss(); - return iterator(*this, k, ptr); - } -#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - //! Get reference to element at given index. - /** This method is thread-safe for concurrent reads, and also while growing the vector, - as long as the calling thread has checked that index < size(). */ - reference operator[]( size_type index ) { - return internal_subscript(index); - } - - //! Get const reference to element at given index. - const_reference operator[]( size_type index ) const { - return internal_subscript(index); - } - - //! Get reference to element at given index. Throws exceptions on errors. - reference at( size_type index ) { - return internal_subscript_with_exceptions(index); - } - - //! Get const reference to element at given index. Throws exceptions on errors. - const_reference at( size_type index ) const { - return internal_subscript_with_exceptions(index); - } - - //! Get range for iterating with parallel algorithms - range_type range( size_t grainsize = 1 ) { - return range_type( begin(), end(), grainsize ); - } - - //! Get const range for iterating with parallel algorithms - const_range_type range( size_t grainsize = 1 ) const { - return const_range_type( begin(), end(), grainsize ); - } - - //------------------------------------------------------------------------ - // Capacity - //------------------------------------------------------------------------ - //! Return size of vector. It may include elements under construction - size_type size() const { - size_type sz = my_early_size, cp = internal_capacity(); - return cp < sz ? cp : sz; - } - - //! Return false if vector is not empty or has elements under construction at least. - bool empty() const {return !my_early_size;} - - //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value. 
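The range() accessors shown above are what let a concurrent_vector feed TBB's parallel algorithms directly. A minimal sketch of that use (TBB 4.x assumed; the scale factor is illustrative):

#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"
#include <cstdio>

int main() {
    tbb::concurrent_vector<double> v(1000, 1.0);

    // range() yields a splittable range over the current elements, so the
    // vector plugs directly into parallel_for.
    double scale = 2.0;
    tbb::parallel_for(v.range(),
        [scale](tbb::concurrent_vector<double>::range_type& r) {
            for (tbb::concurrent_vector<double>::range_type::iterator i = r.begin();
                 i != r.end(); ++i)
                *i *= scale;
        });

    std::printf("v[0] = %f\n", v[0]);
    return 0;
}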
- size_type capacity() const {return internal_capacity();} - - //! Allocate enough space to grow to size n without having to allocate more memory later. - /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. - The capacity afterwards may be bigger than the requested reservation. */ - void reserve( size_type n ) { - if( n ) - internal_reserve(n, sizeof(T), max_size()); - } - - //! Resize the vector. Not thread-safe. - void resize( size_type n ) { - internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); - } - - //! Resize the vector, copy t for new elements. Not thread-safe. - void resize( size_type n, const_reference t ) { - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } - - //! Optimize memory usage and fragmentation. - void shrink_to_fit(); - - //! Upper bound on argument to reserve. - size_type max_size() const {return (~size_type(0))/sizeof(T);} - - //------------------------------------------------------------------------ - // STL support - //------------------------------------------------------------------------ - - //! start iterator - iterator begin() {return iterator(*this,0);} - //! end iterator - iterator end() {return iterator(*this,size());} - //! start const iterator - const_iterator begin() const {return const_iterator(*this,0);} - //! end const iterator - const_iterator end() const {return const_iterator(*this,size());} - //! start const iterator - const_iterator cbegin() const {return const_iterator(*this,0);} - //! end const iterator - const_iterator cend() const {return const_iterator(*this,size());} - //! reverse start iterator - reverse_iterator rbegin() {return reverse_iterator(end());} - //! reverse end iterator - reverse_iterator rend() {return reverse_iterator(begin());} - //! reverse start const iterator - const_reverse_iterator rbegin() const {return const_reverse_iterator(end());} - //! reverse end const iterator - const_reverse_iterator rend() const {return const_reverse_iterator(begin());} - //! reverse start const iterator - const_reverse_iterator crbegin() const {return const_reverse_iterator(end());} - //! reverse end const iterator - const_reverse_iterator crend() const {return const_reverse_iterator(begin());} - //! the first item - reference front() { - __TBB_ASSERT( size()>0, NULL); - return (my_segment[0].template load().template pointer())[0]; - } - //! the first item const - const_reference front() const { - __TBB_ASSERT( size()>0, NULL); - return static_cast(my_segment[0].array)[0]; - } - //! the last item - reference back() { - __TBB_ASSERT( size()>0, NULL); - return internal_subscript( size()-1 ); - } - //! the last item const - const_reference back() const { - __TBB_ASSERT( size()>0, NULL); - return internal_subscript( size()-1 ); - } - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! assign n items by copying t item - void assign(size_type n, const_reference t) { - clear(); - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } - - //! assign range [first, last) - template - void assign(I first, I last) { - clear(); internal_assign_range( first, last, static_cast::is_integer> *>(0) ); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! 
assigns an initializer list - void assign(std::initializer_list init_list) { - clear(); internal_assign_iterators( init_list.begin(), init_list.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - //! swap two instances - void swap(concurrent_vector &vector) { - using std::swap; - if( this != &vector ) { - concurrent_vector_base_v3::internal_swap(static_cast(vector)); - swap(this->my_allocator, vector.my_allocator); - } - } - - //! Clear container while keeping memory allocated. - /** To free up the memory, use in conjunction with method compact(). Not thread safe **/ - void clear() { - internal_clear(&destroy_array); - } - - //! Clear and destroy vector. - ~concurrent_vector() { - segment_t *table = my_segment.load(); - internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); - // base class destructor call should be then - } - - const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; } -private: - //! Allocate k items - static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) { - return static_cast&>(vb).my_allocator.allocate(k); - } - //! Free k segments from table - void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block); - - //! Get reference to element at given index. - T& internal_subscript( size_type index ) const; - - //! Get reference to element at given index with errors checks - T& internal_subscript_with_exceptions( size_type index ) const; - - //! assign n items by copying t - void internal_assign_n(size_type n, const_pointer p) { - internal_resize( n, sizeof(T), max_size(), static_cast(p), &destroy_array, p? &initialize_array_by : &initialize_array ); - } - - //! helper class - template class is_integer_tag; - - //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9 - template - void internal_assign_range(I first, I last, is_integer_tag *) { - internal_assign_n(static_cast(first), &static_cast(last)); - } - //! inline proxy assign by iterators - template - void internal_assign_range(I first, I last, is_integer_tag *) { - internal_assign_iterators(first, last); - } - //! assign by iterators - template - void internal_assign_iterators(I first, I last); - - //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n ); - - //! Copy-construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n ); - - //! Copy-construct n instances of T by copying single element pointed to by src, starting at "dst". - static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n ); - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move-construct n instances of T, starting at "dst" by copying according element of src array. - static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n ); - //! Move-assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array. - static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n ); -#endif - //! Copy-construct n instances of T, starting at "dst" by iterator range of [p_type_erased_iterator, p_type_erased_iterator+n). 
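Unlike the growth methods, the STL-compatibility operations above (assign, swap, clear) are serial: no other thread may touch either vector while they run. A short sketch, with illustrative contents:

#include "tbb/concurrent_vector.h"
#include <cstdio>

int main() {
    tbb::concurrent_vector<int> a(4, 7), b;

    b.assign(2, 9);   // b = {9, 9}; not thread-safe
    a.swap(b);        // a = {9, 9}, b = {7, 7, 7, 7}
    b.clear();        // destroys elements but keeps the allocated segments

    std::printf("a.size() = %zu, b.size() = %zu\n", a.size(), b.size());
    return 0;
}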
- template - static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n ); - - //! Assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array. - static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n ); - - //! Destroy n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n ); - - //! Exception-aware helper class for filling a segment by exception-danger operators of user class - class internal_loop_guide : internal::no_copy { - public: - const pointer array; - const size_type n; - size_type i; - - static const T* as_const_pointer(const void *ptr) { return static_cast(ptr); } - static T* as_pointer(const void *src) { return static_cast(const_cast(src)); } - - internal_loop_guide(size_type ntrials, void *ptr) - : array(as_pointer(ptr)), n(ntrials), i(0) {} - void init() { for(; i < n; ++i) new( &array[i] ) T(); } - void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); } - void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); } - void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; } -#if __TBB_CPP11_RVALUE_REF_PRESENT - void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); } - void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); } -#endif - //TODO: rename to construct_range - template void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); } - ~internal_loop_guide() { - if(i < n) {// if an exception was raised, fill the rest of items with zeros - internal::handle_unconstructed_elements(array+i, n-i); - } - } - }; - - class element_construction_guard : internal::no_copy{ - pointer element; - public: - element_construction_guard(pointer an_element) : element (an_element){} - void dismiss(){ element = NULL; } - ~element_construction_guard(){ - if (element){ - internal::handle_unconstructed_elements(element, 1); - } - } - }; -}; - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) -#pragma warning (push) -#pragma warning (disable: 4701) // potentially uninitialized local variable "old" -#endif -template -void concurrent_vector::shrink_to_fit() { - internal_segments_table old; - __TBB_TRY { - if( internal_compact( sizeof(T), &old, &destroy_array, ©_array ) ) - internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments - } __TBB_CATCH(...) { - if( old.first_block ) // free segment allocated for compacting. 
Only for support of exceptions in ctor of user T[ype] - internal_free_segments( old.table, 1, old.first_block ); - __TBB_RETHROW(); - } -} -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) -#pragma warning (pop) -#endif // warning 4701 is back - -template -void concurrent_vector::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) { - // Free the arrays - while( k > first_block ) { - --k; - segment_value_t segment_value = table[k].load(); - table[k].store(segment_not_used()); - if( segment_value == segment_allocated() ) // check for correct segment pointer - this->my_allocator.deallocate( (segment_value.pointer()), segment_size(k) ); - } - segment_value_t segment_value = table[0].load(); - if( segment_value == segment_allocated() ) { - __TBB_ASSERT( first_block > 0, NULL ); - while(k > 0) table[--k].store(segment_not_used()); - this->my_allocator.deallocate( (segment_value.pointer()), segment_size(first_block) ); - } -} - -template -T& concurrent_vector::internal_subscript( size_type index ) const { - //TODO: unify both versions of internal_subscript - __TBB_ASSERT( index < my_early_size, "index out of bounds" ); - size_type j = index; - segment_index_t k = segment_base_index_of( j ); - __TBB_ASSERT( my_segment.load() != my_storage || k < pointers_per_short_table, "index is being allocated" ); - //no need in load with acquire (load) since thread works in own space or gets - //the information about added elements via some form of external synchronization - //TODO: why not make a load of my_segment relaxed as well ? - //TODO: add an assertion that my_segment[k] is properly aligned to please ITT - segment_value_t segment_value = my_segment[k].template load(); - __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" ); - __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" ); - return (( segment_value.pointer()))[j]; -} - -template -T& concurrent_vector::internal_subscript_with_exceptions( size_type index ) const { - if( index >= my_early_size ) - internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range - size_type j = index; - segment_index_t k = segment_base_index_of( j ); - //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table - if( my_segment.load() == my_storage && k >= pointers_per_short_table ) - internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error - // no need in load with acquire (load) since thread works in own space or gets - //the information about added elements via some form of external synchronization - //TODO: why not make a load of my_segment relaxed as well ? 
-    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
-    segment_value_t segment_value = my_segment[k].template load<relaxed>();
-    if( segment_value != segment_allocated() ) // check for correct segment pointer
-        internal::throw_exception(internal::eid_index_range_error); // throw std::range_error
-    return (segment_value.pointer<T>())[j];
-}
-
-template<typename T, class A> template<class I>
-void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
-    __TBB_ASSERT(my_early_size == 0, NULL);
-    size_type n = std::distance(first, last);
-    if( !n ) return;
-    internal_reserve(n, sizeof(T), max_size());
-    my_early_size = n;
-    segment_index_t k = 0;
-    //TODO: unify segment iteration code with concurrent_base_v3::helper
-    size_type sz = segment_size( my_first_block );
-    while( sz < n ) {
-        internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
-        loop.iterate(first);
-        n -= sz;
-        if( !k ) k = my_first_block;
-        else { ++k; sz <<= 1; }
-    }
-    internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
-    loop.iterate(first);
-}
-
-template<typename T, class A>
-void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
-    internal_loop_guide loop(n, begin); loop.init();
-}
-
-template<typename T, class A>
-void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
-    internal_loop_guide loop(n, begin); loop.init(src);
-}
-
-template<typename T, class A>
-void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
-    internal_loop_guide loop(n, dst); loop.copy(src);
-}
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-template<typename T, class A>
-void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
-    internal_loop_guide loop(n, dst); loop.move_construct(src);
-}
-
-template<typename T, class A>
-void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
-    internal_loop_guide loop(n, dst); loop.move_assign(src);
-}
-#endif
-
-template<typename T, class A>
-template<typename I>
-void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
-    I & iterator ((*const_cast<I*>(static_cast<const I*>(p_type_erased_iterator))));
-    internal_loop_guide loop(n, dst); loop.iterate(iterator);
-}
-
-template<typename T, class A>
-void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
-    internal_loop_guide loop(n, dst); loop.assign(src);
-}
-
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-    // Workaround for overzealous compiler warning
-    #pragma warning (push)
-    #pragma warning (disable: 4189)
-#endif
-template<typename T, class A>
-void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
-    T* array = static_cast<T*>(begin);
-    for( size_type j=n; j>0; --j )
-        array[j-1].~T(); // destructors are supposed to not throw any exceptions
-}
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-    #pragma warning (pop)
-#endif // warning 4189 is back
-
-// concurrent_vector's template functions
-template<typename T, class A1, class A2>
-inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
-    //TODO: call size() only once per vector (in operator==)
-    // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
-    if(a.size() != b.size()) return false;
-    typename concurrent_vector<T, A1>::const_iterator i(a.begin());
-    typename concurrent_vector<T, A2>::const_iterator j(b.begin());
-    for(; i != a.end(); ++i, ++j)
-        if( !(*i == *j) ) return false;
-    return true;
-}
-
-template<typename T, class A1, class A2>
-inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
-{    return !(a == b); }
-
-template<typename T, class A1, class A2>
-inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
-{    return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
-
-template<typename T, class A1, class A2>
-inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
-{    return b < a; }
-
-template<typename T, class A1, class A2>
-inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
-{    return !(b < a); }
-
-template<typename T, class A1, class A2>
-inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
-{    return !(a < b); }
-
-template<typename T, class A>
-inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
-{    a.swap( b ); }
-
-} // namespace tbb
-
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-    #pragma warning (pop)
-#endif // warning 4267,4127 are back
-
-#endif /* __TBB_concurrent_vector_H */
diff --git a/inst/include/tbb/critical_section.h b/inst/include/tbb/critical_section.h
deleted file mode 100644
index b12cdcd8d..000000000
--- a/inst/include/tbb/critical_section.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef _TBB_CRITICAL_SECTION_H_
-#define _TBB_CRITICAL_SECTION_H_
-
-#if _WIN32||_WIN64
-#include "machine/windows_api.h"
-#else
-#include <pthread.h>
-#include <errno.h>
-#endif // _WIN32||WIN64
-
-#include "tbb_stddef.h"
-#include "tbb_thread.h"
-#include "tbb_exception.h"
-
-#include "tbb_profiling.h"
-
-namespace tbb {
-
-    namespace internal {
-class critical_section_v4 : internal::no_copy {
-#if _WIN32||_WIN64
-    CRITICAL_SECTION my_impl;
-#else
-    pthread_mutex_t my_impl;
-#endif
-    tbb_thread::id my_tid;
-public:
-
-    void __TBB_EXPORTED_METHOD internal_construct();
-
-    critical_section_v4() {
-#if _WIN32||_WIN64
-        InitializeCriticalSectionEx( &my_impl, 4000, 0 );
-#else
-        pthread_mutex_init(&my_impl, NULL);
-#endif
-        internal_construct();
-    }
-
-    ~critical_section_v4() {
-        __TBB_ASSERT(my_tid == tbb_thread::id(), "Destroying a still-held critical section");
-#if _WIN32||_WIN64
-        DeleteCriticalSection(&my_impl);
-#else
-        pthread_mutex_destroy(&my_impl);
-#endif
-    }
-
-    class scoped_lock : internal::no_copy {
-    private:
-        critical_section_v4 &my_crit;
-    public:
-        scoped_lock( critical_section_v4& lock_me) :my_crit(lock_me) {
-            my_crit.lock();
-        }
-
-        ~scoped_lock() {
-            my_crit.unlock();
-        }
-    };
-
-    void lock() {
-        tbb_thread::id local_tid = this_tbb_thread::get_id();
-        if(local_tid == my_tid) throw_exception( eid_improper_lock );
-#if _WIN32||_WIN64
-        EnterCriticalSection( &my_impl );
-#else
-        int rval = pthread_mutex_lock(&my_impl);
-        __TBB_ASSERT_EX(!rval, "critical_section::lock: pthread_mutex_lock failed");
-#endif
-        __TBB_ASSERT(my_tid == tbb_thread::id(), NULL);
-        my_tid = local_tid;
-    }
-
-    bool try_lock() {
-        bool gotlock;
-        tbb_thread::id local_tid = this_tbb_thread::get_id();
-        if(local_tid == my_tid) return false;
-#if _WIN32||_WIN64
-        gotlock = TryEnterCriticalSection( &my_impl ) != 0;
-#else
-        int rval = pthread_mutex_trylock(&my_impl);
-        // valid returns are 0 (locked) and [EBUSY]
-        __TBB_ASSERT(rval == 0 || rval == EBUSY, "critical_section::trylock: pthread_mutex_trylock failed");
-        gotlock = rval == 0;
-#endif
-        if(gotlock) {
-            my_tid = local_tid;
-        }
-        return gotlock;
-    }
-
-    void unlock() {
-        __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, "thread unlocking critical_section is not thread that locked it");
-        my_tid = tbb_thread::id();
-#if _WIN32||_WIN64
-        LeaveCriticalSection( &my_impl );
-#else
-        int rval = pthread_mutex_unlock(&my_impl);
-        __TBB_ASSERT_EX(!rval, "critical_section::unlock: pthread_mutex_unlock failed");
-#endif
-    }
-
-    static const bool is_rw_mutex = false;
-    static const bool is_recursive_mutex = false;
-    static const bool is_fair_mutex = true;
-}; // critical_section_v4
-} // namespace internal
-typedef internal::critical_section_v4 critical_section;
-
-__TBB_DEFINE_PROFILING_SET_NAME(critical_section)
-} // namespace tbb
-#endif // _TBB_CRITICAL_SECTION_H_
diff --git a/inst/include/tbb/enumerable_thread_specific.h b/inst/include/tbb/enumerable_thread_specific.h
deleted file mode 100644
index d838d1182..000000000
--- a/inst/include/tbb/enumerable_thread_specific.h
+++ /dev/null
@@ -1,1002 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_enumerable_thread_specific_H -#define __TBB_enumerable_thread_specific_H - -#include "concurrent_vector.h" -#include "tbb_thread.h" -#include "tbb_allocator.h" -#include "tbb_profiling.h" -#include "cache_aligned_allocator.h" -#include "aligned_space.h" -#include // for memcpy - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif - -namespace tbb { - -//! enum for selecting between single key and key-per-instance versions -enum ets_key_usage_type { ets_key_per_instance, ets_no_key }; - -namespace interface6 { - - //! @cond - namespace internal { - - using namespace tbb::internal; - - template - class ets_base: tbb::internal::no_copy { - protected: -#if _WIN32||_WIN64 - typedef DWORD key_type; -#else - typedef pthread_t key_type; -#endif -#if __TBB_PROTECTED_NESTED_CLASS_BROKEN - public: -#endif - struct slot; - - struct array { - array* next; - size_t lg_size; - slot& at( size_t k ) { - return ((slot*)(void*)(this+1))[k]; - } - size_t size() const {return (size_t)1<>(8*sizeof(size_t)-lg_size); - } - }; - struct slot { - key_type key; - void* ptr; - bool empty() const {return !key;} - bool match( key_type k ) const {return key==k;} - bool claim( key_type k ) { - __TBB_ASSERT(sizeof(tbb::atomic)==sizeof(key_type), NULL); - return tbb::internal::punned_cast*>(&key)->compare_and_swap(k,0)==0; - } - }; -#if __TBB_PROTECTED_NESTED_CLASS_BROKEN - protected: -#endif - - static key_type key_of_current_thread() { - tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id(); - key_type k; - memcpy( &k, &id, sizeof(k) ); - return k; - } - - //! Root of linked list of arrays of decreasing size. - /** NULL if and only if my_count==0. - Each array in the list is half the size of its predecessor. */ - atomic my_root; - atomic my_count; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes - array* allocate( size_t lg_size ) { - size_t n = 1<(create_array( sizeof(array)+n*sizeof(slot) )); - a->lg_size = lg_size; - std::memset( a+1, 0, n*sizeof(slot) ); - return a; - } - void free(array* a) { - size_t n = 1<<(a->lg_size); - free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) ); - } - static size_t hash( key_type k ) { - // Multiplicative hashing. Client should use *upper* bits. 
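// [Note on the constant above: this is Fibonacci (multiplicative) hashing.
//  0x9E3779B9 is 2^32/phi, and 0x9E3779B97F4A7C15 is the 64-bit analogue,
//  which mixes the high bits well; that is why array::start() earlier in
//  this file indexes with the upper bits,
//      h >> (8*sizeof(size_t) - lg_size)
//  rather than masking off the low bits.]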
- // casts required for Mac gcc4.* compiler - return uintptr_t(k)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value; - } - - ets_base() {my_root=NULL; my_count=0;} - virtual ~ets_base(); // g++ complains if this is not virtual... - void* table_lookup( bool& exists ); - void table_clear(); - // table_find is used in copying ETS, so is not used in concurrent context. So - // we don't need itt annotations for it. - slot& table_find( key_type k ) { - size_t h = hash(k); - array* r = my_root; - size_t mask = r->mask(); - for(size_t i = r->start(h);;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() || s.match(k) ) - return s; - } - } - void table_reserve_for_copy( const ets_base& other ) { - __TBB_ASSERT(!my_root,NULL); - __TBB_ASSERT(!my_count,NULL); - if( other.my_root ) { - array* a = allocate(other.my_root->lg_size); - a->next = NULL; - my_root = a; - my_count = other.my_count; - } - } - }; - - template - ets_base::~ets_base() { - __TBB_ASSERT(!my_root, NULL); - } - - template - void ets_base::table_clear() { - while( array* r = my_root ) { - my_root = r->next; - free(r); - } - my_count = 0; - } - - template - void* ets_base::table_lookup( bool& exists ) { - const key_type k = key_of_current_thread(); - - __TBB_ASSERT(k!=0,NULL); - void* found; - size_t h = hash(k); - for( array* r=my_root; r; r=r->next ) { - call_itt_notify(acquired,r); - size_t mask=r->mask(); - for(size_t i = r->start(h); ;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() ) break; - if( s.match(k) ) { - if( r==my_root ) { - // Success at top level - exists = true; - return s.ptr; - } else { - // Success at some other level. Need to insert at top level. - exists = true; - found = s.ptr; - goto insert; - } - } - } - } - // Key does not yet exist. The density of slots in the table does not exceed 0.5, - // for if this will occur a new table is allocated with double the current table - // size, which is swapped in as the new root table. So an empty slot is guaranteed. - exists = false; - found = create_local(); - { - size_t c = ++my_count; - array* r = my_root; - call_itt_notify(acquired,r); - if( !r || c>r->size()/2 ) { - size_t s = r ? r->lg_size : 2; - while( c>size_t(1)<<(s-1) ) ++s; - array* a = allocate(s); - for(;;) { - a->next = r; - call_itt_notify(releasing,a); - array* new_r = my_root.compare_and_swap(a,r); - if( new_r==r ) break; - call_itt_notify(acquired, new_r); - if( new_r->lg_size>=s ) { - // Another thread inserted an equal or bigger array, so our array is superfluous. - free(a); - break; - } - r = new_r; - } - } - } - insert: - // Whether a slot has been found in an older table, or if it has been inserted at this level, - // it has already been accounted for in the total. Guaranteed to be room for it, and it is - // not present, so search for empty slot and use it. - array* ir = my_root; - call_itt_notify(acquired, ir); - size_t mask = ir->mask(); - for(size_t i = ir->start(h);;i=(i+1)&mask) { - slot& s = ir->at(i); - if( s.empty() ) { - if( s.claim(k) ) { - s.ptr = found; - return found; - } - } - } - } - - //! 
Specialization that exploits native TLS - template <> - class ets_base: protected ets_base { - typedef ets_base super; -#if _WIN32||_WIN64 -#if __TBB_WIN8UI_SUPPORT - typedef DWORD tls_key_t; - void create_key() { my_key = FlsAlloc(NULL); } - void destroy_key() { FlsFree(my_key); } - void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); } - void* get_tls() { return (void *)FlsGetValue(my_key); } -#else - typedef DWORD tls_key_t; - void create_key() { my_key = TlsAlloc(); } - void destroy_key() { TlsFree(my_key); } - void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); } - void* get_tls() { return (void *)TlsGetValue(my_key); } -#endif -#else - typedef pthread_key_t tls_key_t; - void create_key() { pthread_key_create(&my_key, NULL); } - void destroy_key() { pthread_key_delete(my_key); } - void set_tls( void * value ) const { pthread_setspecific(my_key, value); } - void* get_tls() const { return pthread_getspecific(my_key); } -#endif - tls_key_t my_key; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes - public: - ets_base() {create_key();} - ~ets_base() {destroy_key();} - void* table_lookup( bool& exists ) { - void* found = get_tls(); - if( found ) { - exists=true; - } else { - found = super::table_lookup(exists); - set_tls(found); - } - return found; - } - void table_clear() { - destroy_key(); - create_key(); - super::table_clear(); - } - }; - - //! Random access iterator for traversing the thread local copies. - template< typename Container, typename Value > - class enumerable_thread_specific_iterator -#if defined(_WIN64) && defined(_MSC_VER) - // Ensure that Microsoft's internal template function _Val_type works correctly. - : public std::iterator -#endif /* defined(_WIN64) && defined(_MSC_VER) */ - { - //! current position in the concurrent_vector - - Container *my_container; - typename Container::size_type my_index; - mutable Value *my_value; - - template - friend enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ); - - template - friend bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, const enumerable_thread_specific_iterator& j ); - - template - friend class enumerable_thread_specific_iterator; - - public: - - enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : - my_container(&const_cast(container)), my_index(index), my_value(NULL) {} - - //! 
Default constructor - enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {} - - template - enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator& other ) : - my_container( other.my_container ), my_index( other.my_index), my_value( const_cast(other.my_value) ) {} - - enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const { - return enumerable_thread_specific_iterator(*my_container, my_index + offset); - } - - enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) { - my_index += offset; - my_value = NULL; - return *this; - } - - enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const { - return enumerable_thread_specific_iterator( *my_container, my_index-offset ); - } - - enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) { - my_index -= offset; - my_value = NULL; - return *this; - } - - Value& operator*() const { - Value* value = my_value; - if( !value ) { - value = my_value = reinterpret_cast(&(*my_container)[my_index].value); - } - __TBB_ASSERT( value==reinterpret_cast(&(*my_container)[my_index].value), "corrupt cache" ); - return *value; - } - - Value& operator[]( ptrdiff_t k ) const { - return (*my_container)[my_index + k].value; - } - - Value* operator->() const {return &operator*();} - - enumerable_thread_specific_iterator& operator++() { - ++my_index; - my_value = NULL; - return *this; - } - - enumerable_thread_specific_iterator& operator--() { - --my_index; - my_value = NULL; - return *this; - } - - //! Post increment - enumerable_thread_specific_iterator operator++(int) { - enumerable_thread_specific_iterator result = *this; - ++my_index; - my_value = NULL; - return result; - } - - //! Post decrement - enumerable_thread_specific_iterator operator--(int) { - enumerable_thread_specific_iterator result = *this; - --my_index; - my_value = NULL; - return result; - } - - // STL support - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ) { - return enumerable_thread_specific_iterator( v.my_container, v.my_index + offset ); - } - - template - bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index==j.my_index && i.my_container == j.my_container; - } - - template - bool operator!=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index - bool operator>( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return j - bool operator>=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(i - bool operator<=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(j - ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index-j.my_index; - } - - template - class segmented_iterator -#if defined(_WIN64) && defined(_MSC_VER) - : public std::iterator -#endif - { - template - friend bool operator==(const 
segmented_iterator& i, const segmented_iterator& j); - - template - friend bool operator!=(const segmented_iterator& i, const segmented_iterator& j); - - template - friend class segmented_iterator; - - public: - - segmented_iterator() {my_segcont = NULL;} - - segmented_iterator( const SegmentedContainer& _segmented_container ) : - my_segcont(const_cast(&_segmented_container)), - outer_iter(my_segcont->end()) { } - - ~segmented_iterator() {} - - typedef typename SegmentedContainer::iterator outer_iterator; - typedef typename SegmentedContainer::value_type InnerContainer; - typedef typename InnerContainer::iterator inner_iterator; - - // STL support - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef typename SegmentedContainer::size_type size_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::input_iterator_tag iterator_category; - - // Copy Constructor - template - segmented_iterator(const segmented_iterator& other) : - my_segcont(other.my_segcont), - outer_iter(other.outer_iter), - // can we assign a default-constructed iterator to inner if we're at the end? - inner_iter(other.inner_iter) - {} - - // assignment - template - segmented_iterator& operator=( const segmented_iterator& other) { - if(this != &other) { - my_segcont = other.my_segcont; - outer_iter = other.outer_iter; - if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter; - } - return *this; - } - - // allow assignment of outer iterator to segmented iterator. Once it is - // assigned, move forward until a non-empty inner container is found or - // the end of the outer container is reached. - segmented_iterator& operator=(const outer_iterator& new_outer_iter) { - __TBB_ASSERT(my_segcont != NULL, NULL); - // check that this iterator points to something inside the segmented container - for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) { - if( !outer_iter->empty() ) { - inner_iter = outer_iter->begin(); - break; - } - } - return *this; - } - - // pre-increment - segmented_iterator& operator++() { - advance_me(); - return *this; - } - - // post-increment - segmented_iterator operator++(int) { - segmented_iterator tmp = *this; - operator++(); - return tmp; - } - - bool operator==(const outer_iterator& other_outer) const { - __TBB_ASSERT(my_segcont != NULL, NULL); - return (outer_iter == other_outer && - (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin())); - } - - bool operator!=(const outer_iterator& other_outer) const { - return !operator==(other_outer); - - } - - // (i)* RHS - reference operator*() const { - __TBB_ASSERT(my_segcont != NULL, NULL); - __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container"); - __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen - return *inner_iter; - } - - // i-> - pointer operator->() const { return &operator*();} - - private: - SegmentedContainer* my_segcont; - outer_iterator outer_iter; - inner_iterator inner_iter; - - void advance_me() { - __TBB_ASSERT(my_segcont != NULL, NULL); - __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers - __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty. 
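// [advance_me() below steps the inner iterator and hops over empty inner
//  containers; it is what lets flatten2d(), defined at the end of this
//  header, present all thread-local containers as one sequence. A hedged
//  usage sketch, assuming the local type is std::vector<int>; 'consume'
//  is a placeholder:
//
//      typedef tbb::enumerable_thread_specific< std::vector<int> > ets_t;
//      ets_t ets;
//      // ... each worker thread calls ets.local().push_back(...) ...
//      tbb::flattened2d<ets_t> flat = tbb::flatten2d(ets);
//      for (tbb::flattened2d<ets_t>::iterator it = flat.begin(); it != flat.end(); ++it)
//          consume(*it);]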
- ++inner_iter; - while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) { - inner_iter = outer_iter->begin(); - } - } - }; // segmented_iterator - - template - bool operator==( const segmented_iterator& i, - const segmented_iterator& j ) { - if(i.my_segcont != j.my_segcont) return false; - if(i.my_segcont == NULL) return true; - if(i.outer_iter != j.outer_iter) return false; - if(i.outer_iter == i.my_segcont->end()) return true; - return i.inner_iter == j.inner_iter; - } - - // != - template - bool operator!=( const segmented_iterator& i, - const segmented_iterator& j ) { - return !(i==j); - } - - template - struct destruct_only: tbb::internal::no_copy { - tbb::aligned_space value; - ~destruct_only() {value.begin()[0].~T();} - }; - - template - struct construct_by_default: tbb::internal::no_assign { - void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization. - construct_by_default( int ) {} - }; - - template - struct construct_by_exemplar: tbb::internal::no_assign { - const T exemplar; - void construct(void*where) {new(where) T(exemplar);} - construct_by_exemplar( const T& t ) : exemplar(t) {} - }; - - template - struct construct_by_finit: tbb::internal::no_assign { - Finit f; - void construct(void* where) {new(where) T(f());} - construct_by_finit( const Finit& f_ ) : f(f_) {} - }; - - // storage for initialization function pointer - template - class callback_base { - public: - // Clone *this - virtual callback_base* clone() = 0; - // Destruct and free *this - virtual void destroy() = 0; - // Need virtual destructor to satisfy GCC compiler warning - virtual ~callback_base() { } - // Construct T at where - virtual void construct(void* where) = 0; - }; - - template - class callback_leaf: public callback_base, Constructor { - template callback_leaf( const X& x ) : Constructor(x) {} - - typedef typename tbb::tbb_allocator my_allocator_type; - - /*override*/ callback_base* clone() { - void* where = my_allocator_type().allocate(1); - return new(where) callback_leaf(*this); - } - - /*override*/ void destroy() { - my_allocator_type().destroy(this); - my_allocator_type().deallocate(this,1); - } - - /*override*/ void construct(void* where) { - Constructor::construct(where); - } - public: - template - static callback_base* make( const X& x ) { - void* where = my_allocator_type().allocate(1); - return new(where) callback_leaf(x); - } - }; - - //! Template for adding padding in order to avoid false sharing - /** ModularSize should be sizeof(U) modulo the cache line size. - All maintenance of the space will be done explicitly on push_back, - and all thread local copies must be destroyed before the concurrent - vector is deleted. - */ - template - struct ets_element { - ets_element() { /* avoid cl warning C4345 about default initialization of POD types */ } - char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)]; - void unconstruct() { - tbb::internal::punned_cast(&value)->~U(); - } - }; - - } // namespace internal - //! @endcond - - //! The enumerable_thread_specific container - /** enumerable_thread_specific has the following properties: - - thread-local copies are lazily created, with default, exemplar or function initialization. - - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant. - - the contained objects need not have operator=() defined if combine is not used. 
- - enumerable_thread_specific containers may be copy-constructed or assigned. - - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed. - - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods - - @par Segmented iterator - When the thread-local objects are containers with input_iterators defined, a segmented iterator may - be used to iterate over all the elements of all thread-local copies. - - @par combine and combine_each - - Both methods are defined for enumerable_thread_specific. - - combine() requires the the type T have operator=() defined. - - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.) - - Both are evaluated in serial context (the methods are assumed to be non-benign.) - - @ingroup containers */ - template , - ets_key_usage_type ETS_key_type=ets_no_key > - class enumerable_thread_specific: internal::ets_base { - - template friend class enumerable_thread_specific; - - typedef internal::ets_element padded_element; - - //! A generic range, used to create range objects from the iterators - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} - template - generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type; - typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type; - - internal::callback_base *my_construct_callback; - - internal_collection_type my_locals; - - /*override*/ void* create_local() { - void* lref = &*my_locals.grow_by(1); - my_construct_callback->construct(lref); - return lref; - } - - void unconstruct_locals() { - for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) { - cvi->unconstruct(); - } - } - - typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type; - - // _size is in bytes - /*override*/ void* create_array(size_t _size) { - size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); - return array_allocator_type().allocate(nelements); - } - - /*override*/ void free_array( void* _ptr, size_t _size) { - size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); - array_allocator_type().deallocate( reinterpret_cast(_ptr),nelements); - } - - public: - - //! 
Basic types - typedef Allocator allocator_type; - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef T* pointer; - typedef const T* const_pointer; - typedef typename internal_collection_type::size_type size_type; - typedef typename internal_collection_type::difference_type difference_type; - - // Iterator types - typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator; - typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator; - - // Parallel range types - typedef generic_range_type< iterator > range_type; - typedef generic_range_type< const_iterator > const_range_type; - - //! Default constructor. Each local instance of T is default constructed. - enumerable_thread_specific() : - my_construct_callback( internal::callback_leaf >::make(/*dummy argument*/0) ) - {} - - //! Constructor with initializer functor. Each local instance of T is constructed by T(finit()). - template - enumerable_thread_specific( Finit finit ) : - my_construct_callback( internal::callback_leaf >::make( finit ) ) - {} - - //! Constructor with exemplar. Each local instance of T is copied-constructed from the exemplar. - enumerable_thread_specific(const T& exemplar) : - my_construct_callback( internal::callback_leaf >::make( exemplar ) ) - {} - - //! Destructor - ~enumerable_thread_specific() { - my_construct_callback->destroy(); - this->clear(); // deallocation before the derived class is finished destructing - // So free(array *) is still accessible - } - - //! returns reference to local, discarding exists - reference local() { - bool exists; - return local(exists); - } - - //! Returns reference to calling thread's local copy, creating one if necessary - reference local(bool& exists) { - void* ptr = this->table_lookup(exists); - return *(T*)ptr; - } - - //! Get the number of local copies - size_type size() const { return my_locals.size(); } - - //! true if there have been no local copies created - bool empty() const { return my_locals.empty(); } - - //! begin iterator - iterator begin() { return iterator( my_locals, 0 ); } - //! end iterator - iterator end() { return iterator(my_locals, my_locals.size() ); } - - //! begin const iterator - const_iterator begin() const { return const_iterator(my_locals, 0); } - - //! end const iterator - const_iterator end() const { return const_iterator(my_locals, my_locals.size()); } - - //! Get range for parallel algorithms - range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } - - //! Get const range for parallel algorithms - const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); } - - //! 
Destroys local copies - void clear() { - unconstruct_locals(); - my_locals.clear(); - this->table_clear(); - // callback is not destroyed - // exemplar is not destroyed - } - - private: - - template - void internal_copy( const enumerable_thread_specific& other); - - public: - - template - enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base () - { - internal_copy(other); - } - - enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base () - { - internal_copy(other); - } - - private: - - template - enumerable_thread_specific & - internal_assign(const enumerable_thread_specific& other) { - if(static_cast( this ) != static_cast( &other )) { - this->clear(); - my_construct_callback->destroy(); - my_construct_callback = 0; - internal_copy( other ); - } - return *this; - } - - public: - - // assignment - enumerable_thread_specific& operator=(const enumerable_thread_specific& other) { - return internal_assign(other); - } - - template - enumerable_thread_specific& operator=(const enumerable_thread_specific& other) - { - return internal_assign(other); - } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { - if(begin() == end()) { - internal::destruct_only location; - my_construct_callback->construct(location.value.begin()); - return *location.value.begin(); - } - const_iterator ci = begin(); - T my_result = *ci; - while(++ci != end()) - my_result = f_combine( my_result, *ci ); - return my_result; - } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { - for(const_iterator ci = begin(); ci != end(); ++ci) { - f_combine( *ci ); - } - } - - }; // enumerable_thread_specific - - template - template - void enumerable_thread_specific::internal_copy( const enumerable_thread_specific& other) { - // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception. - my_construct_callback = other.my_construct_callback->clone(); - - typedef internal::ets_base base; - __TBB_ASSERT(my_locals.size()==0,NULL); - this->table_reserve_for_copy( other ); - for( base::array* r=other.my_root; r; r=r->next ) { - for( size_t i=0; isize(); ++i ) { - base::slot& s1 = r->at(i); - if( !s1.empty() ) { - base::slot& s2 = this->table_find(s1.key); - if( s2.empty() ) { - void* lref = &*my_locals.grow_by(1); - s2.ptr = new(lref) T(*(U*)s1.ptr); - s2.key = s1.key; - } else { - // Skip the duplicate - } - } - } - } - } - - template< typename Container > - class flattened2d { - - // This intermediate typedef is to address issues with VC7.1 compilers - typedef typename Container::value_type conval_type; - - public: - - //! 
Basic types - typedef typename conval_type::size_type size_type; - typedef typename conval_type::difference_type difference_type; - typedef typename conval_type::allocator_type allocator_type; - typedef typename conval_type::value_type value_type; - typedef typename conval_type::reference reference; - typedef typename conval_type::const_reference const_reference; - typedef typename conval_type::pointer pointer; - typedef typename conval_type::const_pointer const_pointer; - - typedef typename internal::segmented_iterator iterator; - typedef typename internal::segmented_iterator const_iterator; - - flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : - my_container(const_cast(&c)), my_begin(b), my_end(e) { } - - flattened2d( const Container &c ) : - my_container(const_cast(&c)), my_begin(c.begin()), my_end(c.end()) { } - - iterator begin() { return iterator(*my_container) = my_begin; } - iterator end() { return iterator(*my_container) = my_end; } - const_iterator begin() const { return const_iterator(*my_container) = my_begin; } - const_iterator end() const { return const_iterator(*my_container) = my_end; } - - size_type size() const { - size_type tot_size = 0; - for(typename Container::const_iterator i = my_begin; i != my_end; ++i) { - tot_size += i->size(); - } - return tot_size; - } - - private: - - Container *my_container; - typename Container::const_iterator my_begin; - typename Container::const_iterator my_end; - - }; - - template - flattened2d flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) { - return flattened2d(c, b, e); - } - - template - flattened2d flatten2d(const Container &c) { - return flattened2d(c); - } - -} // interface6 - -namespace internal { -using interface6::internal::segmented_iterator; -} - -using interface6::enumerable_thread_specific; -using interface6::flattened2d; -using interface6::flatten2d; - -} // namespace tbb - -#endif diff --git a/inst/include/tbb/flow_graph.h b/inst/include/tbb/flow_graph.h deleted file mode 100644 index 46087e706..000000000 --- a/inst/include/tbb/flow_graph.h +++ /dev/null @@ -1,3237 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_flow_graph_H -#define __TBB_flow_graph_H - -#include "tbb_stddef.h" -#include "atomic.h" -#include "spin_mutex.h" -#include "null_mutex.h" -#include "spin_rw_mutex.h" -#include "null_rw_mutex.h" -#include "task.h" -#include "cache_aligned_allocator.h" -#include "tbb_exception.h" -#include "internal/_aggregator_impl.h" -#include "tbb_profiling.h" - -#if TBB_DEPRECATED_FLOW_ENQUEUE -#define FLOW_SPAWN(a) tbb::task::enqueue((a)) -#else -#define FLOW_SPAWN(a) tbb::task::spawn((a)) -#endif - -// use the VC10 or gcc version of tuple if it is available. -#if __TBB_CPP11_TUPLE_PRESENT - #include -namespace tbb { - namespace flow { - using std::tuple; - using std::tuple_size; - using std::tuple_element; - using std::get; - } -} -#else - #include "compat/tuple" -#endif - -#include -#include - -/** @file - \brief The graph related classes and functions - - There are some applications that best express dependencies as messages - passed between nodes in a graph. These messages may contain data or - simply act as signals that a predecessors has completed. The graph - class and its associated node classes can be used to express such - applications. -*/ - -namespace tbb { -namespace flow { - -//! An enumeration the provides the two most common concurrency levels: unlimited and serial -enum concurrency { unlimited = 0, serial = 1 }; - -namespace interface7 { - -namespace internal { - template class successor_cache; - template class broadcast_cache; - template class round_robin_cache; -} - -//! An empty class used for messages that mean "I'm done" -class continue_msg {}; - -template< typename T > class sender; -template< typename T > class receiver; -class continue_receiver; - -//! Pure virtual template class that defines a sender of messages of type T -template< typename T > -class sender { -public: - //! The output type of this sender - typedef T output_type; - - //! The successor type for this node - typedef receiver successor_type; - - virtual ~sender() {} - - //! Add a new successor to this node - virtual bool register_successor( successor_type &r ) = 0; - - //! Removes a successor from this node - virtual bool remove_successor( successor_type &r ) = 0; - - //! Request an item from the sender - virtual bool try_get( T & ) { return false; } - - //! Reserves an item in the sender - virtual bool try_reserve( T & ) { return false; } - - //! Releases the reserved item - virtual bool try_release( ) { return false; } - - //! Consumes the reserved item - virtual bool try_consume( ) { return false; } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - //! interface to record edges for traversal & deletion - virtual void internal_add_built_successor( successor_type & ) = 0; - virtual void internal_delete_built_successor( successor_type & ) = 0; - virtual void copy_successors( std::vector &) = 0; - virtual size_t successor_count() = 0; -#endif -}; - -template< typename T > class limiter_node; // needed for resetting decrementer -template< typename R, typename B > class run_and_put_task; - -static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -// flags to modify the behavior of the graph reset(). Can be combined. -enum reset_flags { - rf_reset_protocol = 0, - rf_reset_bodies = 1<<0, // delete the current node body, reset to a copy of the initial node body. - rf_extract = 1<<1 // delete edges (extract() for single node, reset() for graph.) 
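    // [The flags above combine with bitwise OR. A hedged sketch of a
    //  "reset everything" call when TBB_PREVIEW_FLOW_GRAPH_FEATURES is
    //  defined; 'g' is a tbb::flow::graph, see graph::reset() later in
    //  this header:
    //
    //      g.reset( static_cast<tbb::flow::reset_flags>(rf_reset_bodies | rf_extract) );]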
-}; - -#define __TBB_PFG_RESET_ARG(exp) exp -#define __TBB_COMMA , -#else -#define __TBB_PFG_RESET_ARG(exp) /* nothing */ -#define __TBB_COMMA /* nothing */ -#endif - -// enqueue left task if necessary. Returns the non-enqueued task if there is one. -static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right) { - // if no RHS task, don't change left. - if(right == NULL) return left; - // right != NULL - if(left == NULL) return right; - if(left == SUCCESSFULLY_ENQUEUED) return right; - // left contains a task - if(right != SUCCESSFULLY_ENQUEUED) { - // both are valid tasks - FLOW_SPAWN(*left); - return right; - } - return left; -} - -//! Pure virtual template class that defines a receiver of messages of type T -template< typename T > -class receiver { -public: - //! The input type of this receiver - typedef T input_type; - - //! The predecessor type for this node - typedef sender predecessor_type; - - //! Destructor - virtual ~receiver() {} - - //! Put an item to the receiver - bool try_put( const T& t ) { - task *res = try_put_task(t); - if(!res) return false; - if (res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res); - return true; - } - - //! put item to successor; return task to run the successor if possible. -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - virtual task *try_put_task(const T& t) = 0; -public: - - //! Add a predecessor to the node - virtual bool register_predecessor( predecessor_type & ) { return false; } - - //! Remove a predecessor from the node - virtual bool remove_predecessor( predecessor_type & ) { return false; } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void internal_add_built_predecessor( predecessor_type & ) = 0; - virtual void internal_delete_built_predecessor( predecessor_type & ) = 0; - virtual void copy_predecessors( std::vector & ) = 0; - virtual size_t predecessor_count() = 0; -#endif - -protected: - //! put receiver back in initial state - template friend class limiter_node; - virtual void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f = rf_reset_protocol ) ) = 0; - - template - friend class internal::successor_cache; - virtual bool is_continue_receiver() { return false; } -}; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -//* holder of edges both for caches and for those nodes which do not have predecessor caches. -// C == receiver< ... > or sender< ... >, depending. -template -class edge_container { - -public: - typedef std::vector edge_vector; - - void add_edge( C &s) { - built_edges.push_back( &s ); - } - - void delete_edge( C &s) { - for ( typename edge_vector::iterator i = built_edges.begin(); i != built_edges.end(); ++i ) { - if ( *i == &s ) { - (void)built_edges.erase(i); - return; // only remove one predecessor per request - } - } - } - - void copy_edges( edge_vector &v) { - v = built_edges; - } - - size_t edge_count() { - return (size_t)(built_edges.size()); - } - - void clear() { - built_edges.clear(); - } - - template< typename S > void sender_extract( S &s ); - template< typename R > void receiver_extract( R &r ); - -private: - edge_vector built_edges; -}; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - -//! Base class for receivers of completion messages -/** These receivers automatically reset, but cannot be explicitly waited on */ -class continue_receiver : public receiver< continue_msg > { -public: - - //! The input type - typedef continue_msg input_type; - - //! 
The predecessor type for this node - typedef sender< continue_msg > predecessor_type; - - //! Constructor - continue_receiver( int number_of_predecessors = 0 ) { - my_predecessor_count = my_initial_predecessor_count = number_of_predecessors; - my_current_count = 0; - } - - //! Copy constructor - continue_receiver( const continue_receiver& src ) : receiver() { - my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count; - my_current_count = 0; - } - - //! Destructor - virtual ~continue_receiver() { } - - //! Increments the trigger threshold - /* override */ bool register_predecessor( predecessor_type & ) { - spin_mutex::scoped_lock l(my_mutex); - ++my_predecessor_count; - return true; - } - - //! Decrements the trigger threshold - /** Does not check to see if the removal of the predecessor now makes the current count - exceed the new threshold. So removing a predecessor while the graph is active can cause - unexpected results. */ - /* override */ bool remove_predecessor( predecessor_type & ) { - spin_mutex::scoped_lock l(my_mutex); - --my_predecessor_count; - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - - /*override*/ void internal_add_built_predecessor( predecessor_type &s) { - spin_mutex::scoped_lock l(my_mutex); - my_built_predecessors.add_edge( s ); - } - - /*override*/ void internal_delete_built_predecessor( predecessor_type &s) { - spin_mutex::scoped_lock l(my_mutex); - my_built_predecessors.delete_edge(s); - } - - /*override*/ void copy_predecessors( predecessor_vector_type &v) { - spin_mutex::scoped_lock l(my_mutex); - my_built_predecessors.copy_edges(v); - } - - /*override*/ size_t predecessor_count() { - spin_mutex::scoped_lock l(my_mutex); - return my_built_predecessors.edge_count(); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - // execute body is supposed to be too small to create a task for. - /* override */ task *try_put_task( const input_type & ) { - { - spin_mutex::scoped_lock l(my_mutex); - if ( ++my_current_count < my_predecessor_count ) - return SUCCESSFULLY_ENQUEUED; - else - my_current_count = 0; - } - task * res = execute(); - if(!res) return SUCCESSFULLY_ENQUEUED; - return res; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - spin_mutex my_mutex; - int my_predecessor_count; - int my_current_count; - int my_initial_predecessor_count; - // the friend declaration in the base class did not eliminate the "protected class" - // error in gcc 4.1.2 - template friend class limiter_node; - /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f) ) - { - my_current_count = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if(f & rf_extract) { - my_built_predecessors.receiver_extract(*this); - my_predecessor_count = my_initial_predecessor_count; - } -#endif - } - - //! Does whatever should happen when the threshold is reached - /** This should be very fast or else spawn a task. This is - called while the sender is blocked in the try_put(). 
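        A hedged sketch of a minimal subclass; 'do_work' is a placeholder and
        this is not code from the original header (continue_node drives its
        body through this same mechanism):

            class my_trigger : public tbb::flow::continue_receiver {
                /*override*/ tbb::task *execute() {
                    do_work();      // fires once per my_predecessor_count try_put()s
                    return NULL;    // no follow-on task for the scheduler
                }
            };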
*/ - virtual task * execute() = 0; - template - friend class internal::successor_cache; - /*override*/ bool is_continue_receiver() { return true; } -}; -} // interface7 -} // flow -} // tbb - -#include "internal/_flow_graph_trace_impl.h" - -namespace tbb { -namespace flow { -namespace interface7 { - -#include "internal/_flow_graph_types_impl.h" -#include "internal/_flow_graph_impl.h" -using namespace internal::graph_policy_namespace; - -class graph; -class graph_node; - -template -class graph_iterator { - friend class graph; - friend class graph_node; -public: - typedef size_t size_type; - typedef GraphNodeType value_type; - typedef GraphNodeType* pointer; - typedef GraphNodeType& reference; - typedef const GraphNodeType& const_reference; - typedef std::forward_iterator_tag iterator_category; - - //! Default constructor - graph_iterator() : my_graph(NULL), current_node(NULL) {} - - //! Copy constructor - graph_iterator(const graph_iterator& other) : - my_graph(other.my_graph), current_node(other.current_node) - {} - - //! Assignment - graph_iterator& operator=(const graph_iterator& other) { - if (this != &other) { - my_graph = other.my_graph; - current_node = other.current_node; - } - return *this; - } - - //! Dereference - reference operator*() const; - - //! Dereference - pointer operator->() const; - - //! Equality - bool operator==(const graph_iterator& other) const { - return ((my_graph == other.my_graph) && (current_node == other.current_node)); - } - - //! Inequality - bool operator!=(const graph_iterator& other) const { return !(operator==(other)); } - - //! Pre-increment - graph_iterator& operator++() { - internal_forward(); - return *this; - } - - //! Post-increment - graph_iterator operator++(int) { - graph_iterator result = *this; - operator++(); - return result; - } - -private: - // the graph over which we are iterating - GraphContainerType *my_graph; - // pointer into my_graph's my_nodes list - pointer current_node; - - //! Private initializing constructor for begin() and end() iterators - graph_iterator(GraphContainerType *g, bool begin); - void internal_forward(); -}; - -//! The graph class -/** This class serves as a handle to the graph */ -class graph : tbb::internal::no_copy { - friend class graph_node; - - template< typename Body > - class run_task : public task { - public: - run_task( Body& body ) : my_body(body) {} - task *execute() { - my_body(); - return NULL; - } - private: - Body my_body; - }; - - template< typename Receiver, typename Body > - class run_and_put_task : public task { - public: - run_and_put_task( Receiver &r, Body& body ) : my_receiver(r), my_body(body) {} - task *execute() { - task *res = my_receiver.try_put_task( my_body() ); - if(res == SUCCESSFULLY_ENQUEUED) res = NULL; - return res; - } - private: - Receiver &my_receiver; - Body my_body; - }; - -public: - //! Constructs a graph with isolated task_group_context - explicit graph() : my_nodes(NULL), my_nodes_last(NULL) - { - own_context = true; - cancelled = false; - caught_exception = false; - my_context = new task_group_context(); - my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task ); - my_root_task->set_ref_count(1); - tbb::internal::fgt_graph( this ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_is_active = true; -#endif - } - - //! 
Constructs a graph with use_this_context as context - explicit graph(task_group_context& use_this_context) : - my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL) - { - own_context = false; - my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task ); - my_root_task->set_ref_count(1); - tbb::internal::fgt_graph( this ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_is_active = true; -#endif - } - - //! Destroys the graph. - /** Calls wait_for_all, then destroys the root task and context. */ - ~graph() { - wait_for_all(); - my_root_task->set_ref_count(0); - task::destroy( *my_root_task ); - if (own_context) delete my_context; - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_graph_desc( this, name ); - } -#endif - - //! Used to register that an external entity may still interact with the graph. - /** The graph will not return from wait_for_all until a matching number of decrement_wait_count calls - is made. */ - void increment_wait_count() { - if (my_root_task) - my_root_task->increment_ref_count(); - } - - //! Deregisters an external entity that may have interacted with the graph. - /** The graph will not return from wait_for_all until all the number of decrement_wait_count calls - matches the number of increment_wait_count calls. */ - void decrement_wait_count() { - if (my_root_task) - my_root_task->decrement_ref_count(); - } - - //! Spawns a task that runs a body and puts its output to a specific receiver - /** The task is spawned as a child of the graph. This is useful for running tasks - that need to block a wait_for_all() on the graph. For example a one-off source. */ - template< typename Receiver, typename Body > - void run( Receiver &r, Body body ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *my_root_task ) ) - run_and_put_task< Receiver, Body >( r, body )) ); - } - - //! Spawns a task that runs a function object - /** The task is spawned as a child of the graph. This is useful for running tasks - that need to block a wait_for_all() on the graph. For example a one-off source. */ - template< typename Body > - void run( Body body ) { - FLOW_SPAWN( * new ( task::allocate_additional_child_of( *my_root_task ) ) run_task< Body >( body ) ); - } - - //! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls. - /** The waiting thread will go off and steal work while it is block in the wait_for_all. */ - void wait_for_all() { - cancelled = false; - caught_exception = false; - if (my_root_task) { -#if TBB_USE_EXCEPTIONS - try { -#endif - my_root_task->wait_for_all(); - cancelled = my_context->is_group_execution_cancelled(); -#if TBB_USE_EXCEPTIONS - } - catch(...) { - my_root_task->set_ref_count(1); - my_context->reset(); - caught_exception = true; - cancelled = true; - throw; - } -#endif - my_context->reset(); // consistent with behavior in catch() - my_root_task->set_ref_count(1); - } - } - - //! Returns the root task of the graph - task * root_task() { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if (!my_is_active) - return NULL; - else -#endif - return my_root_task; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void set_active(bool a = true) { - my_is_active = a; - } - - bool is_active() { - return my_is_active; - } -#endif - - // ITERATORS - template - friend class graph_iterator; - - // Graph iterator typedefs - typedef graph_iterator iterator; - typedef graph_iterator const_iterator; - - // Graph iterator constructors - //! 
start iterator - iterator begin() { return iterator(this, true); } - //! end iterator - iterator end() { return iterator(this, false); } - //! start const iterator - const_iterator begin() const { return const_iterator(this, true); } - //! end const iterator - const_iterator end() const { return const_iterator(this, false); } - //! start const iterator - const_iterator cbegin() const { return const_iterator(this, true); } - //! end const iterator - const_iterator cend() const { return const_iterator(this, false); } - - //! return status of graph execution - bool is_cancelled() { return cancelled; } - bool exception_thrown() { return caught_exception; } - - // thread-unsafe state reset. - void reset(__TBB_PFG_RESET_ARG(reset_flags f = rf_reset_protocol)); - -private: - task *my_root_task; - task_group_context *my_context; - bool own_context; - bool cancelled; - bool caught_exception; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - bool my_is_active; -#endif - - - graph_node *my_nodes, *my_nodes_last; - - spin_mutex nodelist_mutex; - void register_node(graph_node *n); - void remove_node(graph_node *n); - -}; // class graph - -template -graph_iterator::graph_iterator(C *g, bool begin) : my_graph(g), current_node(NULL) -{ - if (begin) current_node = my_graph->my_nodes; - //else it is an end iterator by default -} - -template -typename graph_iterator::reference graph_iterator::operator*() const { - __TBB_ASSERT(current_node, "graph_iterator at end"); - return *operator->(); -} - -template -typename graph_iterator::pointer graph_iterator::operator->() const { - return current_node; -} - - -template -void graph_iterator::internal_forward() { - if (current_node) current_node = current_node->next; -} - -//! The base of all graph nodes. -class graph_node : tbb::internal::no_assign { - friend class graph; - template - friend class graph_iterator; -protected: - graph& my_graph; - graph_node *next, *prev; -public: - graph_node(graph& g) : my_graph(g) { - my_graph.register_node(this); - } - virtual ~graph_node() { - my_graph.remove_node(this); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - virtual void set_name( const char *name ) = 0; -#endif - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void extract( reset_flags f=rf_extract ) { - bool a = my_graph.is_active(); - my_graph.set_active(false); - reset((reset_flags)(f|rf_extract)); - my_graph.set_active(a); - } -#endif - -protected: - virtual void reset(__TBB_PFG_RESET_ARG(reset_flags f=rf_reset_protocol)) = 0; -}; - -inline void graph::register_node(graph_node *n) { - n->next = NULL; - { - spin_mutex::scoped_lock lock(nodelist_mutex); - n->prev = my_nodes_last; - if (my_nodes_last) my_nodes_last->next = n; - my_nodes_last = n; - if (!my_nodes) my_nodes = n; - } -} - -inline void graph::remove_node(graph_node *n) { - { - spin_mutex::scoped_lock lock(nodelist_mutex); - __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes"); - if (n->prev) n->prev->next = n->next; - if (n->next) n->next->prev = n->prev; - if (my_nodes_last == n) my_nodes_last = n->prev; - if (my_nodes == n) my_nodes = n->next; - } - n->prev = n->next = NULL; -} - -inline void graph::reset( __TBB_PFG_RESET_ARG( reset_flags f )) { - // reset context - task *saved_my_root_task = my_root_task; - my_root_task = NULL; - if(my_context) my_context->reset(); - cancelled = false; - caught_exception = false; - // reset all the nodes comprising the graph - for(iterator ii = begin(); ii != end(); ++ii) { - graph_node *my_p = &(*ii); - my_p->reset(__TBB_PFG_RESET_ARG(f)); - } - 
my_root_task = saved_my_root_task; -} - - -#include "internal/_flow_graph_node_impl.h" - -//! An executable node that acts as a source, i.e. it has no predecessors -template < typename Output > -class source_node : public graph_node, public sender< Output > { -protected: - using graph_node::my_graph; -public: - //! The type of the output message, which is complete - typedef Output output_type; - - //! The type of successors of this node - typedef receiver< Output > successor_type; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; -#endif - - //! Constructor for a node with a successor - template< typename Body > - source_node( graph &g, Body body, bool is_active = true ) - : graph_node(g), my_active(is_active), init_my_active(is_active), - my_body( new internal::source_body_leaf< output_type, Body>(body) ), - my_reserved(false), my_has_cached_item(false) - { - my_successors.set_owner(this); - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, - static_cast *>(this), this->my_body ); - } - - //! Copy constructor - source_node( const source_node& src ) : - graph_node(src.my_graph), sender(), - my_active(src.init_my_active), - init_my_active(src.init_my_active), my_body( src.my_body->clone() ), - my_reserved(false), my_has_cached_item(false) - { - my_successors.set_owner(this); - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, - static_cast *>(this), this->my_body ); - } - - //! The destructor - ~source_node() { delete my_body; } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - //! Add a new successor to this node - /* override */ bool register_successor( successor_type &r ) { - spin_mutex::scoped_lock lock(my_mutex); - my_successors.register_successor(r); - if ( my_active ) - spawn_put(); - return true; - } - - //! Removes a successor from this node - /* override */ bool remove_successor( successor_type &r ) { - spin_mutex::scoped_lock lock(my_mutex); - my_successors.remove_successor(r); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor( successor_type &r) { - spin_mutex::scoped_lock lock(my_mutex); - my_successors.internal_add_built_successor(r); - } - - /*override*/void internal_delete_built_successor( successor_type &r) { - spin_mutex::scoped_lock lock(my_mutex); - my_successors.internal_delete_built_successor(r); - } - - /*override*/size_t successor_count() { - spin_mutex::scoped_lock lock(my_mutex); - return my_successors.successor_count(); - } - - /*override*/void copy_successors(successor_vector_type &v) { - spin_mutex::scoped_lock l(my_mutex); - my_successors.copy_successors(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - //! Request an item from the node - /*override */ bool try_get( output_type &v ) { - spin_mutex::scoped_lock lock(my_mutex); - if ( my_reserved ) - return false; - - if ( my_has_cached_item ) { - v = my_cached_item; - my_has_cached_item = false; - return true; - } - // we've been asked to provide an item, but we have none. enqueue a task to - // provide one. - spawn_put(); - return false; - } - - //! Reserves an item. - /* override */ bool try_reserve( output_type &v ) { - spin_mutex::scoped_lock lock(my_mutex); - if ( my_reserved ) { - return false; - } - - if ( my_has_cached_item ) { - v = my_cached_item; - my_reserved = true; - return true; - } else { - return false; - } - } - - //! 
Release a reserved item. - /** true = item has been released and so remains in sender, dest must request or reserve future items */ - /* override */ bool try_release( ) { - spin_mutex::scoped_lock lock(my_mutex); - __TBB_ASSERT( my_reserved && my_has_cached_item, "releasing non-existent reservation" ); - my_reserved = false; - if(!my_successors.empty()) - spawn_put(); - return true; - } - - //! Consumes a reserved item - /* override */ bool try_consume( ) { - spin_mutex::scoped_lock lock(my_mutex); - __TBB_ASSERT( my_reserved && my_has_cached_item, "consuming non-existent reservation" ); - my_reserved = false; - my_has_cached_item = false; - if ( !my_successors.empty() ) { - spawn_put(); - } - return true; - } - - //! Activates a node that was created in the inactive state - void activate() { - spin_mutex::scoped_lock lock(my_mutex); - my_active = true; - if ( !my_successors.empty() ) - spawn_put(); - } - - template - Body copy_function_object() { - internal::source_body &body_ref = *this->my_body; - return dynamic_cast< internal::source_body_leaf & >(body_ref).get_body(); - } - -protected: - - //! resets the source_node to its initial state - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_active = init_my_active; - my_reserved =false; - if(my_has_cached_item) { - my_has_cached_item = false; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - if(f & rf_reset_bodies) my_body->reset_body(); -#endif - } - -private: - spin_mutex my_mutex; - bool my_active; - bool init_my_active; - internal::source_body *my_body; - internal::broadcast_cache< output_type > my_successors; - bool my_reserved; - bool my_has_cached_item; - output_type my_cached_item; - - // used by apply_body, can invoke body of node. - bool try_reserve_apply_body(output_type &v) { - spin_mutex::scoped_lock lock(my_mutex); - if ( my_reserved ) { - return false; - } - if ( !my_has_cached_item ) { - tbb::internal::fgt_begin_body( my_body ); - bool r = (*my_body)(my_cached_item); - tbb::internal::fgt_end_body( my_body ); - if (r) { - my_has_cached_item = true; - } - } - if ( my_has_cached_item ) { - v = my_cached_item; - my_reserved = true; - return true; - } else { - return false; - } - } - - //! Spawns a task that applies the body - /* override */ void spawn_put( ) { - task* tp = this->my_graph.root_task(); - if(tp) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) ) - internal:: source_task_bypass < source_node< output_type > >( *this ) ) ); - } - } - - friend class internal::source_task_bypass< source_node< output_type > >; - //! Applies the body. Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it. - /* override */ task * apply_body_bypass( ) { - output_type v; - if ( !try_reserve_apply_body(v) ) - return NULL; - - task *last_task = my_successors.try_put_task(v); - if ( last_task ) - try_consume(); - else - try_release(); - return last_task; - } -}; // source_node - -//! 
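// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  Typical
// use of the legacy source_node API removed above; assumes C++11 and the
// classic "tbb/flow_graph.h" header from this bundled TBB.
#include "tbb/flow_graph.h"
#include <cstdio>

static void source_node_sketch() {
    tbb::flow::graph g;
    int i = 0;
    // The body fills its output argument and returns false when exhausted.
    tbb::flow::source_node<int> src(g,
        [&i](int &out) -> bool { return i < 10 ? (out = i++, true) : false; },
        /*is_active=*/false);
    tbb::flow::function_node<int> sink(g, tbb::flow::serial,
        [](int v) { std::printf("%d\n", v); });
    tbb::flow::make_edge(src, sink);
    src.activate();      // start emitting (constructed inactive above)
    g.wait_for_all();    // the calling thread helps until the graph drains
}
// ---------------------------------------------------------------------------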
-//! Implements a function node that supports Input -> Output
-template < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
-class function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
-protected:
-    using graph_node::my_graph;
-public:
-    typedef Input input_type;
-    typedef Output output_type;
-    typedef sender< input_type > predecessor_type;
-    typedef receiver< output_type > successor_type;
-    typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
-    typedef internal::function_output<output_type> fOutput_type;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-    typedef std::vector<predecessor_type *> predecessor_vector_type;
-    typedef std::vector<successor_type *> successor_vector_type;
-#endif
-
-    //! Constructor
-    template< typename Body >
-    function_node( graph &g, size_t concurrency, Body body ) :
-        graph_node(g), internal::function_input<input_type,output_type,Allocator>(g, concurrency, body) {
-        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
-                static_cast<sender<output_type> *>(this), this->my_body );
-    }
-
-    //! Copy constructor
-    function_node( const function_node& src ) :
-        graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ),
-        fOutput_type() {
-        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, static_cast<receiver<input_type> *>(this),
-                static_cast<sender<output_type> *>(this), this->my_body );
-    }
-
-#if TBB_PREVIEW_FLOW_GRAPH_TRACE
-    /* override */ void set_name( const char *name ) {
-        tbb::internal::fgt_node_desc( this, name );
-    }
-#endif
-
-protected:
-    template< typename R, typename B > friend class run_and_put_task;
-    template<typename X, typename Y> friend class internal::broadcast_cache;
-    template<typename X, typename Y> friend class internal::round_robin_cache;
-    using fInput_type::try_put_task;
-
-    // override of graph_node's reset.
-    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
-        fInput_type::reset_function_input(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-        successors().reset(f);
-        __TBB_ASSERT(!(f & rf_extract) || successors().empty(), "function_node successors not empty");
-        __TBB_ASSERT(this->my_predecessors.empty(), "function_node predecessors not empty");
-#endif
-    }
-
-    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
-};
-
-//! Implements a function node that supports Input -> Output
-template < typename Input, typename Output, typename Allocator >
-class function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
-protected:
-    using graph_node::my_graph;
-public:
-    typedef Input input_type;
-    typedef Output output_type;
-    typedef sender< input_type > predecessor_type;
-    typedef receiver< output_type > successor_type;
-    typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
-    typedef internal::function_input_queue<input_type, Allocator> queue_type;
-    typedef internal::function_output<output_type> fOutput_type;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-    typedef std::vector<predecessor_type *> predecessor_vector_type;
-    typedef std::vector<successor_type *> successor_vector_type;
-#endif
-
-    //! Constructor
-    template< typename Body >
-    function_node( graph &g, size_t concurrency, Body body ) :
-        graph_node(g), fInput_type( g, concurrency, body, new queue_type() ) {
-        tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
-                static_cast<sender<output_type> *>(this), this->my_body );
-    }
-
-    //!
Copy constructor - function_node( const function_node& src ) : - graph_node(src.graph_node::my_graph), fInput_type( src, new queue_type() ), fOutput_type() { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast *>(this), - static_cast *>(this), this->my_body ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - using fInput_type::try_put_task; - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - fInput_type::reset_function_input(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - successors().reset(f); - __TBB_ASSERT(!(f & rf_extract) || successors().empty(), "function_node successors not empty"); - __TBB_ASSERT(!(f & rf_extract) || this->my_predecessors.empty(), "function_node predecessors not empty"); -#endif - - } - - /* override */ internal::broadcast_cache &successors () { return fOutput_type::my_successors; } -}; - -//! implements a function node that supports Input -> (set of outputs) -// Output is a tuple of output types. -template < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator > -class multifunction_node : - public graph_node, - public internal::multifunction_input - < - Input, - typename internal::wrap_tuple_elements< - tbb::flow::tuple_size::value, // #elements in tuple - internal::multifunction_output, // wrap this around each element - Output // the tuple providing the types - >::type, - Allocator - > { -protected: - using graph_node::my_graph; -private: - static const int N = tbb::flow::tuple_size::value; -public: - typedef Input input_type; - typedef typename internal::wrap_tuple_elements::type output_ports_type; -private: - typedef typename internal::multifunction_input base_type; - typedef typename internal::function_input_queue queue_type; -public: - template - multifunction_node( graph &g, size_t concurrency, Body body ) : - graph_node(g), base_type(g,concurrency, body) { - tbb::internal::fgt_multioutput_node_with_body( tbb::internal::FLOW_MULTIFUNCTION_NODE, - &this->graph_node::my_graph, static_cast *>(this), - this->output_ports(), this->my_body ); - } - - multifunction_node( const multifunction_node &other) : - graph_node(other.graph_node::my_graph), base_type(other) { - tbb::internal::fgt_multioutput_node_with_body( tbb::internal::FLOW_MULTIFUNCTION_NODE, - &this->graph_node::my_graph, static_cast *>(this), - this->output_ports(), this->my_body ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_multioutput_node_desc( this, name ); - } -#endif - - // all the guts are in multifunction_input... 
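// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  The two
// function_node variants above differ only in buffering policy; assumed usage:
#include "tbb/flow_graph.h"

static void function_node_sketch() {
    tbb::flow::graph g;
    // Default (queueing): inputs arriving while the body is busy are buffered.
    tbb::flow::function_node<int, int> doubler(g, tbb::flow::serial,
        [](int x) { return 2 * x; });
    // Rejecting: a busy serial node refuses puts; buffering upstream retries.
    tbb::flow::function_node<int, int, tbb::flow::rejecting> gate(g, tbb::flow::serial,
        [](int x) { return x + 1; });
    tbb::flow::make_edge(doubler, gate);
    doubler.try_put(21);
    g.wait_for_all();
}
// ---------------------------------------------------------------------------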
-protected: - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { base_type::reset(__TBB_PFG_RESET_ARG(f)); } -}; // multifunction_node - -template < typename Input, typename Output, typename Allocator > -class multifunction_node : public graph_node, public internal::multifunction_input::value, internal::multifunction_output, Output>::type, Allocator> { -protected: - using graph_node::my_graph; - static const int N = tbb::flow::tuple_size::value; -public: - typedef Input input_type; - typedef typename internal::wrap_tuple_elements::type output_ports_type; -private: - typedef typename internal::multifunction_input base_type; - typedef typename internal::function_input_queue queue_type; -public: - template - multifunction_node( graph &g, size_t concurrency, Body body) : - graph_node(g), base_type(g,concurrency, body, new queue_type()) { - tbb::internal::fgt_multioutput_node_with_body( tbb::internal::FLOW_MULTIFUNCTION_NODE, - &this->graph_node::my_graph, static_cast *>(this), - this->output_ports(), this->my_body ); - } - - multifunction_node( const multifunction_node &other) : - graph_node(other.graph_node::my_graph), base_type(other, new queue_type()) { - tbb::internal::fgt_multioutput_node_with_body( tbb::internal::FLOW_MULTIFUNCTION_NODE, - &this->graph_node::my_graph, static_cast *>(this), - this->output_ports(), this->my_body ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_multioutput_node_desc( this, name ); - } -#endif - - // all the guts are in multifunction_input... -protected: - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { base_type::reset(__TBB_PFG_RESET_ARG(f)); } -}; // multifunction_node - -//! split_node: accepts a tuple as input, forwards each element of the tuple to its -// successors. The node has unlimited concurrency, so though it is marked as -// "rejecting" it does not reject inputs. -template > -class split_node : public multifunction_node { - static const int N = tbb::flow::tuple_size::value; - typedef multifunction_node base_type; -public: - typedef typename base_type::output_ports_type output_ports_type; -private: - struct splitting_body { - void operator()(const TupleType& t, output_ports_type &p) { - internal::emit_element::emit_this(t, p); - } - }; -public: - typedef TupleType input_type; - typedef Allocator allocator_type; - split_node(graph &g) : base_type(g, unlimited, splitting_body()) { - tbb::internal::fgt_multioutput_node( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph, - static_cast *>(this), this->output_ports() ); - } - - split_node( const split_node & other) : base_type(other) { - tbb::internal::fgt_multioutput_node( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph, - static_cast *>(this), this->output_ports() ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_multioutput_node_desc( this, name ); - } -#endif - -}; - -//! Implements an executable node that supports continue_msg -> Output -template -class continue_node : public graph_node, public internal::continue_input, public internal::function_output { -protected: - using graph_node::my_graph; -public: - typedef continue_msg input_type; - typedef Output output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - typedef internal::continue_input fInput_type; - typedef internal::function_output fOutput_type; - - //! 
Constructor for executable node with continue_msg -> Output - template - continue_node( graph &g, Body body ) : - graph_node(g), internal::continue_input( g, body ) { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, - static_cast *>(this), - static_cast *>(this), this->my_body ); - } - - - //! Constructor for executable node with continue_msg -> Output - template - continue_node( graph &g, int number_of_predecessors, Body body ) : - graph_node(g), internal::continue_input( g, number_of_predecessors, body ) { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, - static_cast *>(this), - static_cast *>(this), this->my_body ); - } - - //! Copy constructor - continue_node( const continue_node& src ) : - graph_node(src.graph_node::my_graph), internal::continue_input(src), - internal::function_output() { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, - static_cast *>(this), - static_cast *>(this), this->my_body ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - using fInput_type::try_put_task; - - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { - fInput_type::reset_receiver(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - successors().reset(f); - __TBB_ASSERT(!(f & rf_extract) || successors().empty(), "continue_node not reset"); -#endif - } - - /* override */ internal::broadcast_cache &successors () { return fOutput_type::my_successors; } -}; // continue_node - -template< typename T > -class overwrite_node : public graph_node, public receiver, public sender { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - typedef std::vector successor_vector_type; -#endif - - overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - // Copy constructor; doesn't take anything from src; default won't work - overwrite_node( const overwrite_node& src ) : - graph_node(src.my_graph), receiver(), sender(), my_buffer_is_valid(false) - { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - ~overwrite_node() {} - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - /* override */ bool register_successor( successor_type &s ) { - spin_mutex::scoped_lock l( my_mutex ); - task* tp = this->my_graph.root_task(); // just to test if we are resetting - if (my_buffer_is_valid && tp) { - // We have a valid value that must be forwarded immediately. 
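// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  The
// continue_node above is the classic building block for dependency graphs:
// it fires once it has received one continue_msg per registered predecessor.
#include "tbb/flow_graph.h"

static void continue_node_sketch() {
    using namespace tbb::flow;
    graph g;
    continue_node<continue_msg> a(g, [](const continue_msg &) { /* step A */ });
    continue_node<continue_msg> b(g, [](const continue_msg &) { /* step B */ });
    continue_node<continue_msg> join(g, [](const continue_msg &) { /* after A and B */ });
    make_edge(a, join);
    make_edge(b, join);
    a.try_put(continue_msg());
    b.try_put(continue_msg());
    g.wait_for_all();
}
// ---------------------------------------------------------------------------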
- if ( s.try_put( my_buffer ) || !s.register_predecessor( *this ) ) { - // We add the successor: it accepted our put or it rejected it but won't let us become a predecessor - my_successors.register_successor( s ); - } else { - // We don't add the successor: it rejected our put and we became its predecessor instead - return false; - } - } else { - // No valid value yet, just add as successor - my_successors.register_successor( s ); - } - return true; - } - - /* override */ bool remove_successor( successor_type &s ) { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.remove_successor(s); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor( successor_type &s) { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.internal_add_built_successor(s); - } - - /*override*/void internal_delete_built_successor( successor_type &s) { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.internal_delete_built_successor(s); - } - - /*override*/size_t successor_count() { - spin_mutex::scoped_lock l( my_mutex ); - return my_successors.successor_count(); - } - - /*override*/ void copy_successors(successor_vector_type &v) { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.copy_successors(v); - } - - /*override*/ void internal_add_built_predecessor( predecessor_type &p) { - spin_mutex::scoped_lock l( my_mutex ); - my_built_predecessors.add_edge(p); - } - - /*override*/ void internal_delete_built_predecessor( predecessor_type &p) { - spin_mutex::scoped_lock l( my_mutex ); - my_built_predecessors.delete_edge(p); - } - - /*override*/size_t predecessor_count() { - spin_mutex::scoped_lock l( my_mutex ); - return my_built_predecessors.edge_count(); - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - spin_mutex::scoped_lock l( my_mutex ); - my_built_predecessors.copy_edges(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - /* override */ bool try_get( input_type &v ) { - spin_mutex::scoped_lock l( my_mutex ); - if ( my_buffer_is_valid ) { - v = my_buffer; - return true; - } - return false; - } - - bool is_valid() { - spin_mutex::scoped_lock l( my_mutex ); - return my_buffer_is_valid; - } - - void clear() { - spin_mutex::scoped_lock l( my_mutex ); - my_buffer_is_valid = false; - } - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /* override */ task * try_put_task( const input_type &v ) { - spin_mutex::scoped_lock l( my_mutex ); - my_buffer = v; - my_buffer_is_valid = true; - task * rtask = my_successors.try_put_task(v); - if(!rtask) rtask = SUCCESSFULLY_ENQUEUED; - return rtask; - } - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_buffer_is_valid = false; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - if (f&rf_extract) { - my_built_predecessors.receiver_extract(*this); - } -#endif - } - - spin_mutex my_mutex; - internal::broadcast_cache< input_type, null_rw_mutex > my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_predecessors; -#endif - input_type my_buffer; - bool my_buffer_is_valid; - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {} -}; // overwrite_node - -template< typename T > -class write_once_node : public overwrite_node { -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > 
successor_type; - - //! Constructor - write_once_node(graph& g) : overwrite_node(g) { - tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Copy constructor: call base class copy constructor - write_once_node( const write_once_node& src ) : overwrite_node(src) { - tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /* override */ task *try_put_task( const T &v ) { - spin_mutex::scoped_lock l( this->my_mutex ); - if ( this->my_buffer_is_valid ) { - return NULL; - } else { - this->my_buffer = v; - this->my_buffer_is_valid = true; - task *res = this->my_successors.try_put_task(v); - if(!res) res = SUCCESSFULLY_ENQUEUED; - return res; - } - } -}; - -//! Forwards messages of type T to all successors -template -class broadcast_node : public graph_node, public receiver, public sender { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - typedef std::vector successor_vector_type; -#endif -private: - internal::broadcast_cache my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; - spin_mutex pred_mutex; -#endif -public: - - broadcast_node(graph& g) : graph_node(g) { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - // Copy constructor - broadcast_node( const broadcast_node& src ) : - graph_node(src.my_graph), receiver(), sender() - { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - //! Adds a successor - virtual bool register_successor( receiver &r ) { - my_successors.register_successor( r ); - return true; - } - - //! 
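// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  The two
// single-item buffers above differ in overwrite semantics; assumed usage:
#include "tbb/flow_graph.h"

static void single_value_buffer_sketch() {
    tbb::flow::graph g;
    tbb::flow::overwrite_node<int> latest(g);  // keeps the most recent value
    tbb::flow::write_once_node<int> first(g);  // keeps only the first value
    latest.try_put(1); latest.try_put(2);      // buffer now holds 2
    first.try_put(1);  first.try_put(2);       // second put is ignored
    int v = 0;
    bool ok1 = latest.try_get(v);              // ok1 == true, v == 2
    bool ok2 = first.try_get(v);               // ok2 == true, v == 1
    (void)ok1; (void)ok2;
    g.wait_for_all();
}
// ---------------------------------------------------------------------------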
Removes s as a successor - virtual bool remove_successor( receiver &r ) { - my_successors.remove_successor( r ); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void internal_add_built_successor(successor_type &r) { - my_successors.internal_add_built_successor(r); - } - - /*override*/ void internal_delete_built_successor(successor_type &r) { - my_successors.internal_delete_built_successor(r); - } - - /*override*/ size_t successor_count() { - return my_successors.successor_count(); - } - - /*override*/ void copy_successors(successor_vector_type &v) { - my_successors.copy_successors(v); - } - - /*override*/ void internal_add_built_predecessor( predecessor_type &p) { - my_built_predecessors.add_edge(p); - } - - /*override*/ void internal_delete_built_predecessor( predecessor_type &p) { - my_built_predecessors.delete_edge(p); - } - - /*override*/ size_t predecessor_count() { - return my_built_predecessors.edge_count(); - } - - /*override*/ void copy_predecessors(predecessor_vector_type &v) { - my_built_predecessors.copy_edges(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - //! build a task to run the successor if possible. Default is old behavior. - /*override*/ task *try_put_task(const T& t) { - task *new_task = my_successors.try_put_task(t); - if(!new_task) new_task = SUCCESSFULLY_ENQUEUED; - return new_task; - } - - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - if (f&rf_extract) { - my_built_predecessors.receiver_extract(*this); - } - __TBB_ASSERT(!(f & rf_extract) || my_successors.empty(), "Error resetting broadcast_node"); -#endif - } - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {} -}; // broadcast_node - -//! 
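// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  A
// broadcast_node forwards each message to every successor and buffers nothing:
#include "tbb/flow_graph.h"
#include <cstdio>

static void broadcast_node_sketch() {
    tbb::flow::graph g;
    tbb::flow::broadcast_node<int> fanout(g);
    tbb::flow::function_node<int> left(g, tbb::flow::unlimited,
        [](int v) { std::printf("left %d\n", v); });
    tbb::flow::function_node<int> right(g, tbb::flow::unlimited,
        [](int v) { std::printf("right %d\n", v); });
    tbb::flow::make_edge(fanout, left);
    tbb::flow::make_edge(fanout, right);
    fanout.try_put(42);   // both successors receive 42
    g.wait_for_all();
}
// ---------------------------------------------------------------------------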
Forwards messages in arbitrary order -template > -class buffer_node : public graph_node, public internal::reservable_item_buffer, public receiver, public sender { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - typedef buffer_node my_class; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - typedef std::vector successor_vector_type; -#endif -protected: - typedef size_t size_type; - internal::round_robin_cache< T, null_rw_mutex > my_successors; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - - friend class internal::forward_task_bypass< buffer_node< T, A > >; - - enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_succ, del_blt_succ, - add_blt_pred, del_blt_pred, - blt_succ_cnt, blt_pred_cnt, - blt_succ_cpy, blt_pred_cpy // create vector copies of preds and succs -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - - // implements the aggregator_operation concept - class buffer_operation : public internal::aggregated_operation< buffer_operation > { - public: - char type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - task * ltask; - union { - input_type *elem; - successor_type *r; - predecessor_type *p; - size_t cnt_val; - successor_vector_type *svec; - predecessor_vector_type *pvec; - }; -#else - T *elem; - task * ltask; - successor_type *r; -#endif - buffer_operation(const T& e, op_type t) : type(char(t)) - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , ltask(NULL), elem(const_cast(&e)) -#else - , elem(const_cast(&e)) , ltask(NULL) -#endif - {} - buffer_operation(op_type t) : type(char(t)), ltask(NULL) {} - }; - - bool forwarder_busy; - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - internal::aggregator< my_handler, buffer_operation> my_aggregator; - - virtual void handle_operations(buffer_operation *op_list) { - buffer_operation *tmp = NULL; - bool try_forwarding=false; - while (op_list) { - tmp = op_list; - op_list = op_list->next; - switch (tmp->type) { - case reg_succ: internal_reg_succ(tmp); try_forwarding = true; break; - case rem_succ: internal_rem_succ(tmp); break; - case req_item: internal_pop(tmp); break; - case res_item: internal_reserve(tmp); break; - case rel_res: internal_release(tmp); try_forwarding = true; break; - case con_res: internal_consume(tmp); try_forwarding = true; break; - case put_item: internal_push(tmp); try_forwarding = (tmp->status == SUCCEEDED); break; - case try_fwd_task: internal_forward_task(tmp); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // edge recording - case add_blt_succ: internal_add_built_succ(tmp); break; - case del_blt_succ: internal_del_built_succ(tmp); break; - case add_blt_pred: internal_add_built_pred(tmp); break; - case del_blt_pred: internal_del_built_pred(tmp); break; - case blt_succ_cnt: internal_succ_cnt(tmp); break; - case blt_pred_cnt: internal_pred_cnt(tmp); break; - case blt_succ_cpy: internal_copy_succs(tmp); break; - case blt_pred_cpy: internal_copy_preds(tmp); break; -#endif - } - } - if (try_forwarding && !forwarder_busy) { - task* tp = this->my_graph.root_task(); - if(tp) { - forwarder_busy = true; - task *new_task = new(task::allocate_additional_child_of(*tp)) internal:: - forward_task_bypass - < buffer_node >(*this); - // tmp should point to the last item handled 
by the aggregator. This is the operation - // the handling thread enqueued. So modifying that record will be okay. - tbb::task *z = tmp->ltask; - tmp->ltask = combine_tasks(z, new_task); // in case the op generated a task - } - } - } - - inline task *grab_forwarding_task( buffer_operation &op_data) { - return op_data.ltask; - } - - inline bool enqueue_forwarding_task(buffer_operation &op_data) { - task *ft = grab_forwarding_task(op_data); - if(ft) { - FLOW_SPAWN(*ft); - return true; - } - return false; - } - - //! This is executed by an enqueued task, the "forwarder" - virtual task *forward_task() { - buffer_operation op_data(try_fwd_task); - task *last_task = NULL; - do { - op_data.status = WAIT; - op_data.ltask = NULL; - my_aggregator.execute(&op_data); - tbb::task *xtask = op_data.ltask; - last_task = combine_tasks(last_task, xtask); - } while (op_data.status == SUCCEEDED); - return last_task; - } - - //! Register successor - virtual void internal_reg_succ(buffer_operation *op) { - my_successors.register_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - //! Remove successor - virtual void internal_rem_succ(buffer_operation *op) { - my_successors.remove_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void internal_add_built_succ(buffer_operation *op) { - my_successors.internal_add_built_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_del_built_succ(buffer_operation *op) { - my_successors.internal_delete_built_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_add_built_pred(buffer_operation *op) { - my_built_predecessors.add_edge(*(op->p)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_del_built_pred(buffer_operation *op) { - my_built_predecessors.delete_edge(*(op->p)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_succ_cnt(buffer_operation *op) { - op->cnt_val = my_successors.successor_count(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_pred_cnt(buffer_operation *op) { - op->cnt_val = my_built_predecessors.edge_count(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_copy_succs(buffer_operation *op) { - my_successors.copy_successors(*(op->svec)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_copy_preds(buffer_operation *op) { - my_built_predecessors.copy_edges(*(op->pvec)); - __TBB_store_with_release(op->status, SUCCEEDED); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - //! 
Tries to forward valid items to successors - virtual void internal_forward_task(buffer_operation *op) { - if (this->my_reserved || !this->my_item_valid(this->my_tail-1)) { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - return; - } - T i_copy; - task * last_task = NULL; - size_type counter = my_successors.size(); - // Try forwarding, giving each successor a chance - while (counter>0 && !this->buffer_empty() && this->my_item_valid(this->my_tail-1)) { - this->copy_back(i_copy); - task *new_task = my_successors.try_put_task(i_copy); - if(new_task) { - last_task = combine_tasks(last_task, new_task); - this->destroy_back(); - } - --counter; - } - op->ltask = last_task; // return task - if (last_task && !counter) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - __TBB_store_with_release(op->status, FAILED); - forwarder_busy = false; - } - } - - virtual void internal_push(buffer_operation *op) { - this->push_back(*(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_pop(buffer_operation *op) { - if(this->pop_back(*(op->elem))) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - __TBB_store_with_release(op->status, FAILED); - } - } - - virtual void internal_reserve(buffer_operation *op) { - if(this->reserve_front(*(op->elem))) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - __TBB_store_with_release(op->status, FAILED); - } - } - - virtual void internal_consume(buffer_operation *op) { - this->consume_front(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_release(buffer_operation *op) { - this->release_front(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - -public: - //! Constructor - buffer_node( graph &g ) : graph_node(g), internal::reservable_item_buffer(), - forwarder_busy(false) { - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - //! Copy constructor - buffer_node( const buffer_node& src ) : graph_node(src.my_graph), - internal::reservable_item_buffer(), receiver(), sender() { - forwarder_busy = false; - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - virtual ~buffer_node() {} - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - // - // message sender implementation - // - - //! Adds a new successor. - /** Adds successor r to the list of successors; may forward tasks. 
-    */
-    /* override */ bool register_successor( successor_type &r ) {
-        buffer_operation op_data(reg_succ);
-        op_data.r = &r;
-        my_aggregator.execute(&op_data);
-        (void)enqueue_forwarding_task(op_data);
-        return true;
-    }
-
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-    /*override*/ void internal_add_built_successor( successor_type &r) {
-        buffer_operation op_data(add_blt_succ);
-        op_data.r = &r;
-        my_aggregator.execute(&op_data);
-    }
-
-    /*override*/ void internal_delete_built_successor( successor_type &r) {
-        buffer_operation op_data(del_blt_succ);
-        op_data.r = &r;
-        my_aggregator.execute(&op_data);
-    }
-
-    /*override*/ void internal_add_built_predecessor( predecessor_type &p) {
-        buffer_operation op_data(add_blt_pred);
-        op_data.p = &p;
-        my_aggregator.execute(&op_data);
-    }
-
-    /*override*/ void internal_delete_built_predecessor( predecessor_type &p) {
-        buffer_operation op_data(del_blt_pred);
-        op_data.p = &p;
-        my_aggregator.execute(&op_data);
-    }
-
-    /*override*/ size_t predecessor_count() {
-        buffer_operation op_data(blt_pred_cnt);
-        my_aggregator.execute(&op_data);
-        return op_data.cnt_val;
-    }
-
-    /*override*/ size_t successor_count() {
-        buffer_operation op_data(blt_succ_cnt);
-        my_aggregator.execute(&op_data);
-        return op_data.cnt_val;
-    }
-
-    /*override*/ void copy_predecessors( predecessor_vector_type &v ) {
-        buffer_operation op_data(blt_pred_cpy);
-        op_data.pvec = &v;
-        my_aggregator.execute(&op_data);
-    }
-
-    /*override*/ void copy_successors( successor_vector_type &v ) {
-        buffer_operation op_data(blt_succ_cpy);
-        op_data.svec = &v;
-        my_aggregator.execute(&op_data);
-    }
-#endif
-
-    //! Removes a successor.
-    /** Removes successor r from the list of successors.
-        It also calls r.remove_predecessor(*this) to remove this node as a predecessor. */
-    /* override */ bool remove_successor( successor_type &r ) {
-        r.remove_predecessor(*this);
-        buffer_operation op_data(rem_succ);
-        op_data.r = &r;
-        my_aggregator.execute(&op_data);
-        // even though this operation does not cause a forward, if we are the handler, and
-        // a forward is scheduled, we may be the first to reach this point after the aggregator,
-        // and so should check for the task.
-        (void)enqueue_forwarding_task(op_data);
-        return true;
-    }
-
-    //! Request an item from the buffer_node
-    /** true = v contains the returned item
-        false = no item has been returned */
-    /* override */ bool try_get( T &v ) {
-        buffer_operation op_data(req_item);
-        op_data.elem = &v;
-        my_aggregator.execute(&op_data);
-        (void)enqueue_forwarding_task(op_data);
-        return (op_data.status==SUCCEEDED);
-    }
-
-    //! Reserves an item.
-    /** false = no item can be reserved
-        true = an item is reserved */
-    /* override */ bool try_reserve( T &v ) {
-        buffer_operation op_data(res_item);
-        op_data.elem = &v;
-        my_aggregator.execute(&op_data);
-        (void)enqueue_forwarding_task(op_data);
-        return (op_data.status==SUCCEEDED);
-    }
-
-    //! Release a reserved item.
-    /** true = item has been released and so remains in sender */
-    /* override */ bool try_release() {
-        buffer_operation op_data(rel_res);
-        my_aggregator.execute(&op_data);
-        (void)enqueue_forwarding_task(op_data);
-        return true;
-    }
-
-    //! Consumes a reserved item.
-    /** true = item is removed from sender and reservation removed */
-    /* override */ bool try_consume() {
-        buffer_operation op_data(con_res);
-        my_aggregator.execute(&op_data);
-        (void)enqueue_forwarding_task(op_data);
-        return true;
-    }
-
-protected:
-
-    template< typename R, typename B > friend class run_and_put_task;
-    template<typename X, typename Y> friend class internal::broadcast_cache;
-    template<typename X, typename Y> friend class internal::round_robin_cache;
-    //! receive an item, return a task *if possible
-    /* override */ task *try_put_task(const T &t) {
-        buffer_operation op_data(t, put_item);
-        my_aggregator.execute(&op_data);
-        task *ft = grab_forwarding_task(op_data);
-        // sequencer_nodes can return failure (if an item has been previously inserted)
-        // We have to spawn the returned task if our own operation fails.
-        if(ft && op_data.status == FAILED) {
-            // we haven't succeeded queueing the item, but for some reason the
-            // call returned a task (if another request resulted in a successful
-            // forward this could happen.)  Queue the task and reset the pointer.
-            FLOW_SPAWN(*ft); ft = NULL;
-        }
-        else if(!ft && op_data.status == SUCCEEDED) {
-            ft = SUCCESSFULLY_ENQUEUED;
-        }
-        return ft;
-    }
-
-    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
-        internal::reservable_item_buffer<T, A>::reset();
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-        my_successors.reset(f);
-        if (f&rf_extract) {
-            my_built_predecessors.receiver_extract(*this);
-        }
-#endif
-        forwarder_busy = false;
-    }
-
-    /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { }
-
-};  // buffer_node
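// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  buffer_node
// is the unordered buffer; items come back in no particular order:
#include "tbb/flow_graph.h"

static void buffer_node_sketch() {
    tbb::flow::graph g;
    tbb::flow::buffer_node<int> buf(g);
    buf.try_put(1); buf.try_put(2); buf.try_put(3);
    int v = 0;
    while (buf.try_get(v)) { /* 1, 2, 3 in some order */ }
    g.wait_for_all();
}
// ---------------------------------------------------------------------------
-//!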
Forwards messages in FIFO order -template > -class queue_node : public buffer_node { -protected: - typedef buffer_node base_type; - typedef typename base_type::size_type size_type; - typedef typename base_type::buffer_operation queue_operation; - - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - - /* override */ void internal_forward_task(queue_operation *op) { - if (this->my_reserved || !this->my_item_valid(this->my_head)) { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - return; - } - T i_copy; - task *last_task = NULL; - size_type counter = this->my_successors.size(); - // Keep trying to send items while there is at least one accepting successor - while (counter>0 && this->my_item_valid(this->my_head)) { - this->copy_front(i_copy); - task *new_task = this->my_successors.try_put_task(i_copy); - if(new_task) { - this->destroy_front(); - last_task = combine_tasks(last_task, new_task); - } - --counter; - } - op->ltask = last_task; - if (last_task && !counter) - __TBB_store_with_release(op->status, SUCCEEDED); - else { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - } - } - - /* override */ void internal_pop(queue_operation *op) { - if ( this->my_reserved || !this->my_item_valid(this->my_head)){ - __TBB_store_with_release(op->status, FAILED); - } - else { - this->pop_front(*(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - } - /* override */ void internal_reserve(queue_operation *op) { - if (this->my_reserved || !this->my_item_valid(this->my_head)) { - __TBB_store_with_release(op->status, FAILED); - } - else { - this->reserve_front(*(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - } - /* override */ void internal_consume(queue_operation *op) { - this->consume_front(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - - //! Constructor - queue_node( graph &g ) : base_type(g) { - tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Copy constructor - queue_node( const queue_node& src) : base_type(src) { - tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - base_type::reset(__TBB_PFG_RESET_ARG(f)); - } -}; // queue_node - -//! Forwards messages in sequence order -template< typename T, typename A=cache_aligned_allocator > -class sequencer_node : public queue_node { - internal::function_body< T, size_t > *my_sequencer; - // my_sequencer should be a benign function and must be callable - // from a parallel context. Does this mean it needn't be reset? -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - - //! Constructor - template< typename Sequencer > - sequencer_node( graph &g, const Sequencer& s ) : queue_node(g), - my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) { - tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! 
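// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  queue_node
// is the FIFO counterpart of buffer_node:
#include "tbb/flow_graph.h"

static void queue_node_sketch() {
    tbb::flow::graph g;
    tbb::flow::queue_node<int> q(g);
    q.try_put(1); q.try_put(2); q.try_put(3);
    int v = 0;
    while (q.try_get(v)) { /* yields 1, then 2, then 3 */ }
    g.wait_for_all();
}
// ---------------------------------------------------------------------------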
Copy constructor - sequencer_node( const sequencer_node& src ) : queue_node(src), - my_sequencer( src.my_sequencer->clone() ) { - tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Destructor - ~sequencer_node() { delete my_sequencer; } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - typedef typename buffer_node::size_type size_type; - typedef typename buffer_node::buffer_operation sequencer_operation; - - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - -private: - /* override */ void internal_push(sequencer_operation *op) { - size_type tag = (*my_sequencer)(*(op->elem)); -#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES - if(tag < this->my_head) { - // have already emitted a message with this tag - __TBB_store_with_release(op->status, FAILED); - return; - } -#endif - // cannot modify this->my_tail now; the buffer would be inconsistent. - size_t new_tail = (tag+1 > this->my_tail) ? tag+1 : this->my_tail; - - if(this->size(new_tail) > this->capacity()) { - this->grow_my_array(this->size(new_tail)); - } - this->my_tail = new_tail; - if(this->place_item(tag,*(op->elem))) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - // already have a message with this tag - __TBB_store_with_release(op->status, FAILED); - } - } -}; // sequencer_node - -//! Forwards messages in priority order -template< typename T, typename Compare = std::less, typename A=cache_aligned_allocator > -class priority_queue_node : public buffer_node { -public: - typedef T input_type; - typedef T output_type; - typedef buffer_node base_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - - //! Constructor - priority_queue_node( graph &g ) : buffer_node(g), mark(0) { - tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! 
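// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  A
// sequencer_node releases items in sequence order, keyed by a user functor;
// the Msg type here is hypothetical:
#include "tbb/flow_graph.h"
#include <cstddef>

struct Msg { std::size_t seq; int payload; };

static void sequencer_node_sketch() {
    tbb::flow::graph g;
    tbb::flow::sequencer_node<Msg> order(g,
        [](const Msg &m) -> std::size_t { return m.seq; });
    Msg b = {1, 20};
    Msg a = {0, 10};
    order.try_put(b);           // arrives early, held until seq 0 has passed
    order.try_put(a);           // releases seq 0, then seq 1
    Msg out = {0, 0};
    while (order.try_get(out)) { /* seq 0 first, then seq 1 */ }
    g.wait_for_all();
}
// ---------------------------------------------------------------------------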
-    //! Copy constructor
-    priority_queue_node( const priority_queue_node &src ) : buffer_node<T, A>(src), mark(0) {
-        tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph),
-                                 static_cast<receiver<input_type> *>(this),
-                                 static_cast<sender<output_type> *>(this) );
-    }
-
-#if TBB_PREVIEW_FLOW_GRAPH_TRACE
-    /* override */ void set_name( const char *name ) {
-        tbb::internal::fgt_node_desc( this, name );
-    }
-#endif
-
-
-protected:
-
-    /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
-        mark = 0;
-        base_type::reset(__TBB_PFG_RESET_ARG(f));
-    }
-
-    typedef typename buffer_node<T, A>::size_type size_type;
-    typedef typename buffer_node<T, A>::item_type item_type;
-    typedef typename buffer_node<T, A>::buffer_operation prio_operation;
-
-    enum op_stat {WAIT=0, SUCCEEDED, FAILED};
-
-    /* override */ void handle_operations(prio_operation *op_list) {
-        prio_operation *tmp = op_list /*, *pop_list*/ ;
-        bool try_forwarding=false;
-        while (op_list) {
-            tmp = op_list;
-            op_list = op_list->next;
-            switch (tmp->type) {
-            case buffer_node<T, A>::reg_succ: this->internal_reg_succ(tmp); try_forwarding = true; break;
-            case buffer_node<T, A>::rem_succ: this->internal_rem_succ(tmp); break;
-            case buffer_node<T, A>::put_item: internal_push(tmp); try_forwarding = true; break;
-            case buffer_node<T, A>::try_fwd_task: internal_forward_task(tmp); break;
-            case buffer_node<T, A>::rel_res: internal_release(tmp); try_forwarding = true; break;
-            case buffer_node<T, A>::con_res: internal_consume(tmp); try_forwarding = true; break;
-            case buffer_node<T, A>::req_item: internal_pop(tmp); break;
-            case buffer_node<T, A>::res_item: internal_reserve(tmp); break;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-            case buffer_node<T, A>::add_blt_succ: this->internal_add_built_succ(tmp); break;
-            case buffer_node<T, A>::del_blt_succ: this->internal_del_built_succ(tmp); break;
-            case buffer_node<T, A>::add_blt_pred: this->internal_add_built_pred(tmp); break;
-            case buffer_node<T, A>::del_blt_pred: this->internal_del_built_pred(tmp); break;
-            case buffer_node<T, A>::blt_succ_cnt: this->internal_succ_cnt(tmp); break;
-            case buffer_node<T, A>::blt_pred_cnt: this->internal_pred_cnt(tmp); break;
-            case buffer_node<T, A>::blt_succ_cpy: this->internal_copy_succs(tmp); break;
-            case buffer_node<T, A>::blt_pred_cpy: this->internal_copy_preds(tmp); break;
-#endif
-            }
-        }
-        // process pops!  for now, no special pop processing
-        if (mark<this->my_tail) heapify();
-        if (try_forwarding && !this->forwarder_busy) {
-            task* tp = this->my_graph.root_task();
-            if(tp) {
-                this->forwarder_busy = true;
-                task *new_task = new(task::allocate_additional_child_of(*tp)) internal::
-                        forward_task_bypass
-                        < buffer_node<T, A> >(*this);
-                // tmp should point to the last item handled by the aggregator.  This is the operation
-                // the handling thread enqueued.  So modifying that record will be okay.
-                tbb::task *tmp1 = tmp->ltask;
-                tmp->ltask = combine_tasks(tmp1, new_task);
-            }
-        }
-    }
-
-    //! Tries to forward valid items to successors
-    /* override */ void internal_forward_task(prio_operation *op) {
-        T i_copy;
-        task * last_task = NULL; // flagged when a successor accepts
-        size_type counter = this->my_successors.size();
-
-        if (this->my_reserved || this->my_tail == 0) {
-            __TBB_store_with_release(op->status, FAILED);
-            this->forwarder_busy = false;
-            return;
-        }
-        // Keep trying to send while there exists an accepting successor
-        while (counter>0 && this->my_tail > 0) {
-            i_copy = this->get_my_item(0);
-            task * new_task = this->my_successors.try_put_task(i_copy);
-            if ( new_task ) {
-                last_task = combine_tasks(last_task, new_task);
-                this->destroy_item(0);  // we've forwarded this item
-                if (mark == this->my_tail) --mark;
-                if(--(this->my_tail)) {  // didn't consume last item on heap
-                    this->move_item(0,this->my_tail);
-                }
-                if (this->my_tail > 1) // don't reheap for heap of size 1
-                    reheap();
-            }
-            --counter;
-        }
-        op->ltask = last_task;
-        if (last_task && !counter)
-            __TBB_store_with_release(op->status, SUCCEEDED);
-        else {
-            __TBB_store_with_release(op->status, FAILED);
-            this->forwarder_busy = false;
-        }
-    }
-
-    /* override */ void internal_push(prio_operation *op) {
-        if ( this->my_tail >= this->my_array_size )
-            this->grow_my_array( this->my_tail + 1 );
-        (void) this->place_item(this->my_tail, *(op->elem));
-        ++(this->my_tail);
-        __TBB_store_with_release(op->status, SUCCEEDED);
-    }
-
-    /* override */ void internal_pop(prio_operation *op) {
-        // if empty or already reserved, don't pop
-        if ( this->my_reserved == true || this->my_tail == 0 ) {
-            __TBB_store_with_release(op->status, FAILED);
-            return;
-        }
-        if (mark<this->my_tail &&  // item pushed, no re-heap
-            compare(this->get_my_item(0),
-                    this->get_my_item(this->my_tail-1))) {
-            // there are newly pushed elems; last one higher than top
-            // copy the data
-            this->fetch_item(this->my_tail-1, *(op->elem));
-            __TBB_store_with_release(op->status, SUCCEEDED);
-            --(this->my_tail);
-            return;
-        }
-        // extract and push the last element down heap
-        *(op->elem) = this->get_my_item(0); // copy the data, item 0 still valid
-        __TBB_store_with_release(op->status, SUCCEEDED);
-        if (mark == this->my_tail) --mark;
-        __TBB_ASSERT(this->my_item_valid(this->my_tail - 1), NULL);
-        if(--(this->my_tail)) {
-            // there were two or more items in heap.  Move the
-            // last item to the top of the heap
-            this->set_my_item(0,this->get_my_item(this->my_tail));
-        }
-        this->destroy_item(this->my_tail);
-        if (this->my_tail > 1) // don't reheap for heap of size 1
-            reheap();
-    }
-
-    /* override */ void internal_reserve(prio_operation *op) {
-        if (this->my_reserved == true || this->my_tail == 0) {
-            __TBB_store_with_release(op->status, FAILED);
-            return;
-        }
-        this->my_reserved = true;
-        *(op->elem) = reserved_item = this->get_my_item(0);
-        if (mark == this->my_tail) --mark;
-        --(this->my_tail);
-        __TBB_store_with_release(op->status, SUCCEEDED);
-        this->set_my_item(0, this->get_my_item(this->my_tail));
-        this->destroy_item(this->my_tail);
-        if (this->my_tail > 1)
-            reheap();
-    }
-
-    /* override */ void internal_consume(prio_operation *op) {
-        this->my_reserved = false;
-        __TBB_store_with_release(op->status, SUCCEEDED);
-    }
-    /* override */ void internal_release(prio_operation *op) {
-        if (this->my_tail >= this->my_array_size)
-            this->grow_my_array( this->my_tail + 1 );
-        this->set_my_item(this->my_tail, reserved_item);
-        ++(this->my_tail);
-        this->my_reserved = false;
-        __TBB_store_with_release(op->status, SUCCEEDED);
-        heapify();
-    }
-private:
-    Compare compare;
-    size_type mark;
-    input_type reserved_item;
-
-    // turn array into heap
-    void heapify() {
-        if (!mark) mark = 1;
-        for (; mark<this->my_tail; ++mark) { // for each unheaped element
-            size_type cur_pos = mark;
-            input_type to_place;
-            this->fetch_item(mark,to_place);
-            do { // push to_place up the heap
-                size_type parent = (cur_pos-1)>>1;
-                if (!compare(this->get_my_item(parent), to_place))
-                    break;
-                this->move_item(cur_pos, parent);
-                cur_pos = parent;
-            } while( cur_pos );
-            (void) this->place_item(cur_pos, to_place);
-        }
-    }
-
-    // otherwise heapified array with new root element; rearrange to heap
-    void reheap() {
-        size_type cur_pos=0, child=1;
-        while (child < mark) {
-            size_type target = child;
-            if (child+1<mark &&
-                compare(this->get_my_item(child),
-                        this->get_my_item(child+1)))
-                ++target;
-            // target now has the higher priority child
-            if (compare(this->get_my_item(target),
-                        this->get_my_item(cur_pos)))
-                break;
-            // swap
-            this->swap_items(cur_pos, target);
-            cur_pos = target;
-            child = (cur_pos<<1)+1;
-        }
-    }
-}; // priority_queue_node
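// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of this diff.  A
// priority_queue_node releases the highest-priority item first, as defined
// by the Compare parameter (std::less yields largest-first):
#include "tbb/flow_graph.h"
#include <functional>

static void priority_queue_node_sketch() {
    tbb::flow::graph g;
    tbb::flow::priority_queue_node<int, std::less<int> > pq(g);
    pq.try_put(3); pq.try_put(10); pq.try_put(5);
    int v = 0;
    while (pq.try_get(v)) { /* yields 10, then 5, then 3 */ }
    g.wait_for_all();
}
// ---------------------------------------------------------------------------
-//! Forwards messages only if the threshold has not been reached
-/** This node forwards items until its threshold is reached.
-    It contains no buffering.  If the downstream node rejects, the
-    message is dropped.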
*/ -template< typename T > -class limiter_node : public graph_node, public receiver< T >, public sender< T > { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; - typedef std::vector predecessor_vector_type; -#endif - -private: - size_t my_threshold; - size_t my_count; //number of successful puts - size_t my_tries; //number of active put attempts - internal::reservable_predecessor_cache< T, spin_mutex > my_predecessors; - spin_mutex my_mutex; - internal::broadcast_cache< T > my_successors; - int init_decrement_predecessors; - - friend class internal::forward_task_bypass< limiter_node >; - - // Let decrementer call decrement_counter() - friend class internal::decrementer< limiter_node >; - - bool check_conditions() { // always called under lock - return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() ); - } - - // only returns a valid task pointer or NULL, never SUCCESSFULLY_ENQUEUED - task *forward_task() { - input_type v; - task *rval = NULL; - bool reserved = false; - { - spin_mutex::scoped_lock lock(my_mutex); - if ( check_conditions() ) - ++my_tries; - else - return NULL; - } - - //SUCCESS - // if we can reserve and can put, we consume the reservation - // we increment the count and decrement the tries - if ( (my_predecessors.try_reserve(v)) == true ){ - reserved=true; - if ( (rval = my_successors.try_put_task(v)) != NULL ){ - { - spin_mutex::scoped_lock lock(my_mutex); - ++my_count; - --my_tries; - my_predecessors.try_consume(); - if ( check_conditions() ) { - task* tp = this->my_graph.root_task(); - if ( tp ) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass< limiter_node >( *this ); - FLOW_SPAWN (*rtask); - } - } - } - return rval; - } - } - //FAILURE - //if we can't reserve, we decrement the tries - //if we can reserve but can't put, we decrement the tries and release the reservation - { - spin_mutex::scoped_lock lock(my_mutex); - --my_tries; - if (reserved) my_predecessors.try_release(); - if ( check_conditions() ) { - task* tp = this->my_graph.root_task(); - if ( tp ) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass< limiter_node >( *this ); - __TBB_ASSERT(!rval, "Have two tasks to handle"); - return rtask; - } - } - return rval; - } - } - - void forward() { - __TBB_ASSERT(false, "Should never be called"); - return; - } - - task * decrement_counter() { - { - spin_mutex::scoped_lock lock(my_mutex); - if(my_count) --my_count; - } - return forward_task(); - } - -public: - //! The internal receiver< continue_msg > that decrements the count - internal::decrementer< limiter_node > decrement; - - //! Constructor - limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) : - graph_node(g), my_threshold(threshold), my_count(0), my_tries(0), - init_decrement_predecessors(num_decrement_predecessors), - decrement(num_decrement_predecessors) - { - my_predecessors.set_owner(this); - my_successors.set_owner(this); - decrement.set_owner(this); - tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(&decrement), - static_cast *>(this) ); - } - - //! 
Copy constructor - limiter_node( const limiter_node& src ) : - graph_node(src.my_graph), receiver(), sender(), - my_threshold(src.my_threshold), my_count(0), my_tries(0), - init_decrement_predecessors(src.init_decrement_predecessors), - decrement(src.init_decrement_predecessors) - { - my_predecessors.set_owner(this); - my_successors.set_owner(this); - decrement.set_owner(this); - tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(&decrement), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - //! Replace the current successor with this new successor - /* override */ bool register_successor( receiver &r ) { - spin_mutex::scoped_lock lock(my_mutex); - bool was_empty = my_successors.empty(); - my_successors.register_successor(r); - //spawn a forward task if this is the only successor - if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) { - task* tp = this->my_graph.root_task(); - if ( tp ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass < limiter_node >( *this ) ) ); - } - } - return true; - } - - //! Removes a successor from this node - /** r.remove_predecessor(*this) is also called. */ - /* override */ bool remove_successor( receiver &r ) { - r.remove_predecessor(*this); - my_successors.remove_successor(r); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor(receiver &src) { - my_successors.internal_add_built_successor(src); - } - - /*override*/void internal_delete_built_successor(receiver &src) { - my_successors.internal_delete_built_successor(src); - } - - /*override*/size_t successor_count() { return my_successors.successor_count(); } - - /*override*/ void copy_successors(successor_vector_type &v) { - my_successors.copy_successors(v); - } - - /*override*/void internal_add_built_predecessor(sender &src) { - my_predecessors.internal_add_built_predecessor(src); - } - - /*override*/void internal_delete_built_predecessor(sender &src) { - my_predecessors.internal_delete_built_predecessor(src); - } - - /*override*/size_t predecessor_count() { return my_predecessors.predecessor_count(); } - - /*override*/ void copy_predecessors(predecessor_vector_type &v) { - my_predecessors.copy_predecessors(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - //! Adds src to the list of cached predecessors. - /* override */ bool register_predecessor( predecessor_type &src ) { - spin_mutex::scoped_lock lock(my_mutex); - my_predecessors.add( src ); - task* tp = this->my_graph.root_task(); - if ( my_count + my_tries < my_threshold && !my_successors.empty() && tp ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass < limiter_node >( *this ) ) ); - } - return true; - } - - //! Removes src from the list of cached predecessors. - /* override */ bool remove_predecessor( predecessor_type &src ) { - my_predecessors.remove( src ); - return true; - } - -protected: - - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - //! 
Puts an item to this receiver - /* override */ task *try_put_task( const T &t ) { - { - spin_mutex::scoped_lock lock(my_mutex); - if ( my_count + my_tries >= my_threshold ) - return NULL; - else - ++my_tries; - } - - task * rtask = my_successors.try_put_task(t); - - if ( !rtask ) { // try_put_task failed. - spin_mutex::scoped_lock lock(my_mutex); - --my_tries; - task* tp = this->my_graph.root_task(); - if ( check_conditions() && tp ) { - rtask = new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass< limiter_node >( *this ); - } - } - else { - spin_mutex::scoped_lock lock(my_mutex); - ++my_count; - --my_tries; - } - return rtask; - } - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_count = 0; - my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); - decrement.reset_receiver(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); -#endif - } - - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); } -}; // limiter_node - -#include "internal/_flow_graph_join_impl.h" - -using internal::reserving_port; -using internal::queueing_port; -using internal::tag_matching_port; -using internal::input_port; -using internal::tag_value; -using internal::NO_TAG; - -template class join_node; - -template -class join_node: public internal::unfolded_join_node::value, reserving_port, OutputTuple, reserving> { -private: - static const int N = tbb::flow::tuple_size::value; - typedef typename internal::unfolded_join_node unfolded_type; -public: - typedef OutputTuple output_type; - typedef typename unfolded_type::input_ports_type input_ports_type; - join_node(graph &g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -}; - -template -class join_node: public internal::unfolded_join_node::value, queueing_port, OutputTuple, queueing> { -private: - static const int N = tbb::flow::tuple_size::value; - typedef typename internal::unfolded_join_node unfolded_type; -public: - typedef OutputTuple output_type; - typedef typename unfolded_type::input_ports_type input_ports_type; - join_node(graph &g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -}; - -// template for tag_matching join_node -template -class join_node : public internal::unfolded_join_node::value, - tag_matching_port, OutputTuple, tag_matching> { -private: - static const int N = tbb::flow::tuple_size::value; - typedef typename internal::unfolded_join_node unfolded_type; -public: - typedef OutputTuple 
output_type; - typedef typename unfolded_type::input_ports_type input_ports_type; - - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) : - unfolded_type(g, b0, b1, b2, b3, b4) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#if __TBB_VARIADIC_MAX >= 6 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) : - unfolded_type(g, b0, b1, b2, b3, b4, b5) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 7 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) : - unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 8 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, - __TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 9 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, - __TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 10 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, - __TBB_B7 b7, __TBB_B8 b8, __TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const 
char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -}; - -// indexer node -#include "internal/_flow_graph_indexer_impl.h" - -struct indexer_null_type {}; - -template class indexer_node; - -//indexer node specializations -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 1; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 2; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 3; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 4; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< 
output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 5; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -#if __TBB_VARIADIC_MAX >= 6 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 6; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 6 - -#if __TBB_VARIADIC_MAX >= 7 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 7; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 7 - -#if __TBB_VARIADIC_MAX >= 8 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 8; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - 
tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 8 - -#if __TBB_VARIADIC_MAX >= 9 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 9; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 9 - -#if __TBB_VARIADIC_MAX >= 10 -template -class indexer_node/*default*/ : public internal::unfolded_indexer_node > { -private: - static const int N = 10; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 10 - -//! Makes an edge between a single predecessor and a single successor -template< typename T > -inline void make_edge( sender &p, receiver &s ) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - s.internal_add_built_predecessor(p); - p.internal_add_built_successor(s); -#endif - p.register_successor( s ); - tbb::internal::fgt_make_edge( &p, &s ); -} - -//! Makes an edge between a single predecessor and a single successor -template< typename T > -inline void remove_edge( sender &p, receiver &s ) { - p.remove_successor( s ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // TODO: should we try to remove p from the predecessor list of s, in case the edge is reversed? - p.internal_delete_built_successor(s); - s.internal_delete_built_predecessor(p); -#endif - tbb::internal::fgt_remove_edge( &p, &s ); -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -template -template< typename S > -void edge_container::sender_extract( S &s ) { - edge_vector e = built_edges; - for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) { - remove_edge(s, **i); - } -} - -template -template< typename R > -void edge_container::receiver_extract( R &r ) { - edge_vector e = built_edges; - for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) { - remove_edge(**i, r); - } -} -#endif - -//! 
Returns a copy of the body from a function or continue node
-template< typename Body, typename Node >
-Body copy_body( Node &n ) {
-    return n.template copy_function_object<Body>();
-}
-
-} // interface7
-
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-    using interface7::reset_flags;
-    using interface7::rf_reset_protocol;
-    using interface7::rf_reset_bodies;
-    using interface7::rf_extract;
-#endif
-
-    using interface7::graph;
-    using interface7::graph_node;
-    using interface7::continue_msg;
-    using interface7::sender;
-    using interface7::receiver;
-    using interface7::continue_receiver;
-
-    using interface7::source_node;
-    using interface7::function_node;
-    using interface7::multifunction_node;
-    using interface7::split_node;
-    using interface7::internal::output_port;
-    using interface7::indexer_node;
-    using interface7::internal::tagged_msg;
-    using interface7::internal::cast_to;
-    using interface7::internal::is_a;
-    using interface7::continue_node;
-    using interface7::overwrite_node;
-    using interface7::write_once_node;
-    using interface7::broadcast_node;
-    using interface7::buffer_node;
-    using interface7::queue_node;
-    using interface7::sequencer_node;
-    using interface7::priority_queue_node;
-    using interface7::limiter_node;
-    using namespace interface7::internal::graph_policy_namespace;
-    using interface7::join_node;
-    using interface7::input_port;
-    using interface7::copy_body;
-    using interface7::make_edge;
-    using interface7::remove_edge;
-    using interface7::internal::NO_TAG;
-    using interface7::internal::tag_value;
-
-} // flow
-} // tbb
-
-#undef __TBB_PFG_RESET_ARG
-#undef __TBB_COMMA
-
-#endif // __TBB_flow_graph_H
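The node-and-edge vocabulary exported at the end of this deleted header (graph, function_node, limiter_node, join_node, make_edge, ...) survives largely unchanged in the tbb::flow namespace of current TBB releases. A minimal usage sketch, assuming a oneTBB installation is on the include and link paths (compile with -ltbb); the node names are from the header above, not code in this diff:

```cpp
#include <cstdio>
#include <tbb/flow_graph.h>

int main() {
    tbb::flow::graph g;

    // Forward at most 3 messages; further puts are rejected (no buffering),
    // matching the limiter_node semantics documented above.
    tbb::flow::limiter_node<int> limiter(g, /*threshold=*/3);

    tbb::flow::function_node<int, int> doubler(
        g, tbb::flow::unlimited, [](int v) { return v * 2; });

    tbb::flow::function_node<int, tbb::flow::continue_msg> sink(
        g, tbb::flow::serial, [](int v) {
            std::printf("got %d\n", v);
            return tbb::flow::continue_msg();
        });

    tbb::flow::make_edge(limiter, doubler);
    tbb::flow::make_edge(doubler, sink);

    for (int i = 0; i < 10; ++i)
        limiter.try_put(i);   // only the first three pass the limiter

    g.wait_for_all();
}
```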

diff --git a/inst/include/tbb/index.html b/inst/include/tbb/index.html
deleted file mode 100644
index 6ceb5da61..000000000
--- a/inst/include/tbb/index.html
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-
-Overview
-
-Include files for Intel® Threading Building Blocks classes and functions.
-
-Click here to see all files in the directory.
-
-Directories
-
-compat
-    Include files for source level compatibility with other frameworks.
-internal
-    Include files with implementation details; not for direct use.
-machine
-    Include files for low-level architecture specific functionality; not for direct use.
-
-Up to parent directory
-
-Copyright © 2005-2014 Intel Corporation. All Rights Reserved.
-
-Intel is a registered trademark or trademark of Intel Corporation
-or its subsidiaries in the United States and other countries.
-
-* Other names and brands may be claimed as the property of others. - - diff --git a/inst/include/tbb/internal/_aggregator_impl.h b/inst/include/tbb/internal/_aggregator_impl.h deleted file mode 100644 index 854cb4eef..000000000 --- a/inst/include/tbb/internal/_aggregator_impl.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__aggregator_impl_H -#define __TBB__aggregator_impl_H - -#include "../atomic.h" -#if !__TBBMALLOC_BUILD -#include "../tbb_profiling.h" -#endif - -namespace tbb { -namespace interface6 { -namespace internal { - -using namespace tbb::internal; - -//! aggregated_operation base class -template -class aggregated_operation { - public: - uintptr_t status; - Derived *next; - aggregated_operation() : status(0), next(NULL) {} -}; - -//! Aggregator base class -/** An aggregator for collecting operations coming from multiple sources and executing - them serially on a single thread. operation_type must be derived from - aggregated_operation. The parameter handler_type is a functor that will be passed the - list of operations and is expected to handle each operation appropriately, setting the - status of each operation to non-zero.*/ -template < typename operation_type > -class aggregator_generic { -public: - aggregator_generic() : handler_busy(false) { pending_operations = NULL; } - - //! Place operation in list - /** Place operation in list and either handle list or wait for operation to - complete. - long_life_time specifies life time of an operation inserting in an aggregator. - "Long" (long_life_time == true) life time operation can be accessed - even after executing it. - "Short" (long_life_time == false) life time operations can be destroyed - during executing so any access to it after executing is invalid.*/ - template < typename handler_type > - void execute(operation_type *op, handler_type &handle_operations, bool long_life_time = true) { - operation_type *res; - // op->status should be read before inserting the operation in the - // aggregator queue since it can become invalid after executing a - // handler (if the operation has 'short' life time.) - const uintptr_t status = op->status; - - // ITT note: &(op->status) tag is used to cover accesses to this op node. 
This - // thread has created the operation, and now releases it so that the handler - // thread may handle the associated operation w/o triggering a race condition; - // thus this tag will be acquired just before the operation is handled in the - // handle_operations functor. - call_itt_notify(releasing, &(op->status)); - // insert the operation in the queue. - do { - // ITT may flag the following line as a race; it is a false positive: - // This is an atomic read; we don't provide itt_hide_load_word for atomics - op->next = res = pending_operations; // NOT A RACE - } while (pending_operations.compare_and_swap(op, res) != res); - if (!res) { // first in the list; handle the operations. - // ITT note: &pending_operations tag covers access to the handler_busy flag, - // which this waiting handler thread will try to set before entering - // handle_operations. - call_itt_notify(acquired, &pending_operations); - start_handle_operations(handle_operations); - // The operation with 'short' life time can already be destroyed. - if (long_life_time) - __TBB_ASSERT(op->status, NULL); - } - // not first; wait for op to be ready. - else if (!status) { // operation is blocking here. - __TBB_ASSERT(long_life_time, "The blocking operation cannot have 'short' life time. Since it can already be destroyed."); - call_itt_notify(prepare, &(op->status)); - spin_wait_while_eq(op->status, uintptr_t(0)); - itt_load_word_with_acquire(op->status); - } - } - - private: - //! An atomically updated list (aka mailbox) of pending operations - atomic pending_operations; - //! Controls thread access to handle_operations - uintptr_t handler_busy; - - //! Trigger the handling of operations when the handler is free - template < typename handler_type > - void start_handle_operations( handler_type &handle_operations ) { - operation_type *op_list; - - // ITT note: &handler_busy tag covers access to pending_operations as it is passed - // between active and waiting handlers. Below, the waiting handler waits until - // the active handler releases, and the waiting handler acquires &handler_busy as - // it becomes the active_handler. The release point is at the end of this - // function, when all operations in pending_operations have been handled by the - // owner of this aggregator. - call_itt_notify(prepare, &handler_busy); - // get the handler_busy: - // only one thread can possibly spin here at a time - spin_wait_until_eq(handler_busy, uintptr_t(0)); - call_itt_notify(acquired, &handler_busy); - // acquire fence not necessary here due to causality rule and surrounding atomics - __TBB_store_with_release(handler_busy, uintptr_t(1)); - - // ITT note: &pending_operations tag covers access to the handler_busy flag - // itself. Capturing the state of the pending_operations signifies that - // handler_busy has been set and a new active handler will now process that list's - // operations. 
- call_itt_notify(releasing, &pending_operations); - // grab pending_operations - op_list = pending_operations.fetch_and_store(NULL); - - // handle all the operations - handle_operations(op_list); - - // release the handler - itt_store_word_with_release(handler_busy, uintptr_t(0)); - } -}; - -template < typename handler_type, typename operation_type > -class aggregator : public aggregator_generic { - handler_type handle_operations; -public: - aggregator() {} - explicit aggregator(handler_type h) : handle_operations(h) {} - - void initialize_handler(handler_type h) { handle_operations = h; } - - void execute(operation_type *op) { - aggregator_generic::execute(op, handle_operations); - } -}; - -// the most-compatible friend declaration (vs, gcc, icc) is -// template friend class aggregating_functor; -template -class aggregating_functor { - aggregating_class *fi; -public: - aggregating_functor() {} - aggregating_functor(aggregating_class *fi_) : fi(fi_) {} - void operator()(operation_list* op_list) { fi->handle_operations(op_list); } -}; - -} // namespace internal -} // namespace interface6 - -namespace internal { - using interface6::internal::aggregated_operation; - using interface6::internal::aggregator_generic; - using interface6::internal::aggregator; - using interface6::internal::aggregating_functor; -} // namespace internal - -} // namespace tbb - -#endif // __TBB__aggregator_impl_H diff --git a/inst/include/tbb/internal/_concurrent_queue_impl.h b/inst/include/tbb/internal/_concurrent_queue_impl.h deleted file mode 100644 index 41248baff..000000000 --- a/inst/include/tbb/internal/_concurrent_queue_impl.h +++ /dev/null @@ -1,1082 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__concurrent_queue_impl_H -#define __TBB__concurrent_queue_impl_H - -#ifndef __TBB_concurrent_queue_H -#error Do not #include this internal file directly; use public TBB headers instead. 
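The aggregator removed above implements a combining pattern: each thread CAS-pushes its operation onto a shared list, and the thread that finds the list empty becomes the handler, draining the whole batch serially while the other threads spin on their operation's status word. A self-contained sketch of that protocol using std::atomic — illustrative names, simplified memory ordering, no ITT annotations, and not TBB's API:

```cpp
#include <atomic>
#include <thread>

struct op_node {
    std::atomic<unsigned> status{0};  // set non-zero once a handler ran it
    op_node* next = nullptr;
    int payload = 0;                  // example per-operation data
};

class aggregator_sketch {
public:
    template <typename Handler>
    void execute(op_node* op, Handler handle_one) {
        // CAS-push onto the pending list (cf. the compare_and_swap loop above).
        op_node* head = pending_.load(std::memory_order_relaxed);
        do {
            op->next = head;
        } while (!pending_.compare_exchange_weak(head, op,
                                                 std::memory_order_acq_rel,
                                                 std::memory_order_relaxed));
        if (head == nullptr) {
            // First in: become the handler (serialized by the busy flag,
            // cf. handler_busy above).
            while (busy_.exchange(true, std::memory_order_acquire))
                std::this_thread::yield();
            op_node* batch = pending_.exchange(nullptr, std::memory_order_acq_rel);
            for (op_node* p = batch; p != nullptr; ) {
                op_node* next = p->next;  // read before status is published
                handle_one(p);
                p->status.store(1, std::memory_order_release);
                p = next;
            }
            busy_.store(false, std::memory_order_release);
        } else {
            // Not first: wait until some handler processes our operation.
            while (op->status.load(std::memory_order_acquire) == 0)
                std::this_thread::yield();
        }
    }

private:
    std::atomic<op_node*> pending_{nullptr};
    std::atomic<bool> busy_{false};
};
```

Threads call execute(&my_op, handler) and return once my_op.status is non-zero; because every batch is drained by one handler at a time, whatever shared structure the handler mutates needs no further locking — the property the concurrent containers below rely on.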
-#endif - -#include "../tbb_stddef.h" -#include "../tbb_machine.h" -#include "../atomic.h" -#include "../spin_mutex.h" -#include "../cache_aligned_allocator.h" -#include "../tbb_exception.h" -#include "../tbb_profiling.h" -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - -// forward declaration -namespace strict_ppl { -template class concurrent_queue; -} - -template class concurrent_bounded_queue; - -#endif - -//! For internal use only. -namespace strict_ppl { - -//! @cond INTERNAL -namespace internal { - -using namespace tbb::internal; - -typedef size_t ticket; - -template class micro_queue ; -template class micro_queue_pop_finalizer ; -template class concurrent_queue_base_v3; -template struct concurrent_queue_rep; - -//! parts of concurrent_queue_rep that do not have references to micro_queue -/** - * For internal use only. - */ -struct concurrent_queue_rep_base : no_copy { - template friend class micro_queue; - template friend class concurrent_queue_base_v3; - -protected: - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - // must be power of 2 - static const size_t n_queue = 8; - - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - atomic head_counter; - char pad1[NFS_MaxLineSize-sizeof(atomic)]; - atomic tail_counter; - char pad2[NFS_MaxLineSize-sizeof(atomic)]; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - - //! number of invalid entries in the queue - atomic n_invalid_entries; - - char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic)]; -} ; - -inline bool is_valid_page(const concurrent_queue_rep_base::page* p) { - return uintptr_t(p)>1; -} - -//! Abstract class to define interface for page allocation/deallocation -/** - * For internal use only. - */ -class concurrent_queue_page_allocator -{ - template friend class micro_queue ; - template friend class micro_queue_pop_finalizer ; -protected: - virtual ~concurrent_queue_page_allocator() {} -private: - virtual concurrent_queue_rep_base::page* allocate_page() = 0; - virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0; -} ; - -#if _MSC_VER && !defined(__INTEL_COMPILER) -// unary minus operator applied to unsigned type, result still unsigned -#pragma warning( push ) -#pragma warning( disable: 4146 ) -#endif - -//! A queue using simple locking. -/** For efficiency, this class has no constructor. - The caller is expected to zero-initialize it. */ -template -class micro_queue : no_copy { -public: - typedef void (*item_constructor_t)(T* location, const void* src); -private: - typedef concurrent_queue_rep_base::page page; - - //! 
Class used to ensure exception-safety of method "pop" - class destroyer: no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - void copy_item( page& dst, size_t dindex, const void* src, item_constructor_t construct_item ) { - construct_item( &get_ref(dst, dindex), src ); - } - - void copy_item( page& dst, size_t dindex, const page& src, size_t sindex, - item_constructor_t construct_item ) - { - T& src_item = get_ref( const_cast(src), sindex ); - construct_item( &get_ref(dst, dindex), static_cast(&src_item) ); - } - - void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = tbb::internal::move( from ); - } - - void spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const ; - -public: - friend class micro_queue_pop_finalizer; - - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - - static T& get_ref( page& p, size_t index ) { - return (&static_cast(static_cast(&p))->last)[index]; - } - - atomic head_page; - atomic head_counter; - - atomic tail_page; - atomic tail_counter; - - spin_mutex page_mutex; - - void push( const void* item, ticket k, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) ; - - bool pop( void* dst, ticket k, concurrent_queue_base_v3& base ) ; - - micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) ; - - page* make_copy( concurrent_queue_base_v3& base, const page* src_page, size_t begin_in_page, - size_t end_in_page, ticket& g_index, item_constructor_t construct_item ) ; - - void invalidate_page_and_rethrow( ticket k ) ; -}; - -template -void micro_queue::spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const { - for( atomic_backoff b(true);;b.pause() ) { - ticket c = counter; - if( c==k ) return; - else if( c&1 ) { - ++rb.n_invalid_entries; - throw_exception( eid_bad_last_alloc ); - } - } -} - -template -void micro_queue::push( const void* item, ticket k, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) -{ - k &= -concurrent_queue_rep_base::n_queue; - page* p = NULL; - size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page); - if( !index ) { - __TBB_TRY { - concurrent_queue_page_allocator& pa = base; - p = pa.allocate_page(); - } __TBB_CATCH (...) { - ++base.my_rep->n_invalid_entries; - invalidate_page_and_rethrow( k ); - } - p->mask = 0; - p->next = NULL; - } - - if( tail_counter != k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep ); - call_itt_notify(acquired, &tail_counter); - - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - page* q = tail_page; - if( is_valid_page(q) ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - - __TBB_TRY { - copy_item( *p, index, item, construct_item ); - // If no exception was thrown, mark item as present. 
- itt_hide_store_word(p->mask, p->mask | uintptr_t(1)<n_invalid_entries; - call_itt_notify(releasing, &tail_counter); - tail_counter += concurrent_queue_rep_base::n_queue; - __TBB_RETHROW(); - } -} - -template -bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base_v3& base ) { - k &= -concurrent_queue_rep_base::n_queue; - if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); - call_itt_notify(acquired, &head_counter); - if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); - call_itt_notify(acquired, &tail_counter); - page& p = *head_page; - __TBB_ASSERT( &p, NULL ); - size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); - bool success = false; - { - micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); - if( p.mask & uintptr_t(1)<n_invalid_entries; - } - } - return success; -} - -template -micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) -{ - head_counter = src.head_counter; - tail_counter = src.tail_counter; - - const page* srcp = src.head_page; - if( is_valid_page(srcp) ) { - ticket g_index = head_counter; - __TBB_TRY { - size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue; - size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); - size_t end_in_first_page = (index+n_itemsitems_per_page)?(index+n_items):base.my_rep->items_per_page; - - head_page = make_copy( base, srcp, index, end_in_first_page, g_index, construct_item ); - page* cur_page = head_page; - - if( srcp != src.tail_page ) { - for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { - cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index, construct_item ); - cur_page = cur_page->next; - } - - __TBB_ASSERT( srcp==src.tail_page, NULL ); - size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); - if( last_index==0 ) last_index = base.my_rep->items_per_page; - - cur_page->next = make_copy( base, srcp, 0, last_index, g_index, construct_item ); - cur_page = cur_page->next; - } - tail_page = cur_page; - } __TBB_CATCH (...) { - invalidate_page_and_rethrow( g_index ); - } - } else { - head_page = tail_page = NULL; - } - return *this; -} - -template -void micro_queue::invalidate_page_and_rethrow( ticket k ) { - // Append an invalid page at address 1 so that no more pushes are allowed. 
- page* invalid_page = (page*)uintptr_t(1); - { - spin_mutex::scoped_lock lock( page_mutex ); - itt_store_word_with_release(tail_counter, k+concurrent_queue_rep_base::n_queue+1); - page* q = tail_page; - if( is_valid_page(q) ) - q->next = invalid_page; - else - head_page = invalid_page; - tail_page = invalid_page; - } - __TBB_RETHROW(); -} - -template -concurrent_queue_rep_base::page* micro_queue::make_copy( concurrent_queue_base_v3& base, - const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, - ticket& g_index, item_constructor_t construct_item ) -{ - concurrent_queue_page_allocator& pa = base; - page* new_page = pa.allocate_page(); - new_page->next = NULL; - new_page->mask = src_page->mask; - for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) - if( new_page->mask & uintptr_t(1)< -class micro_queue_pop_finalizer: no_copy { - typedef concurrent_queue_rep_base::page page; - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - concurrent_queue_page_allocator& allocator; -public: - micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base_v3& b, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p), allocator(b) - {} - ~micro_queue_pop_finalizer() ; -}; - -template -micro_queue_pop_finalizer::~micro_queue_pop_finalizer() { - page* p = my_page; - if( is_valid_page(p) ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !is_valid_page(q) ) { - my_queue.tail_page = NULL; - } - } - itt_store_word_with_release(my_queue.head_counter, my_ticket); - if( is_valid_page(p) ) { - allocator.deallocate_page( p ); - } -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif // warning 4146 is back - -template class concurrent_queue_iterator_rep ; -template class concurrent_queue_iterator_base_v3; - -//! representation of concurrent_queue_base -/** - * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue's - */ -template -struct concurrent_queue_rep : public concurrent_queue_rep_base { - micro_queue array[n_queue]; - - //! Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } -}; - -//! base class of concurrent_queue -/** - * The class implements the interface defined by concurrent_queue_page_allocator - * and has a pointer to an instance of concurrent_queue_rep. - */ -template -class concurrent_queue_base_v3: public concurrent_queue_page_allocator { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend struct concurrent_queue_rep; - friend class micro_queue; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; - -protected: - typedef typename concurrent_queue_rep::page page; - -private: - typedef typename micro_queue::padded_page padded_page; - typedef typename micro_queue::item_constructor_t item_constructor_t; - - /* override */ virtual page *allocate_page() { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - return reinterpret_cast(allocate_block ( n )); - } - - /* override */ virtual void deallocate_page( concurrent_queue_rep_base::page *p ) { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - deallocate_block( reinterpret_cast(p), n ); - } - - //! 
custom allocator - virtual void *allocate_block( size_t n ) = 0; - - //! custom de-allocator - virtual void deallocate_block( void *p, size_t n ) = 0; - -protected: - concurrent_queue_base_v3(); - - /* override */ virtual ~concurrent_queue_base_v3() { -#if TBB_USE_ASSERT - size_t nq = my_rep->n_queue; - for( size_t i=0; iarray[i].tail_page==NULL, "pages were not freed properly" ); -#endif /* TBB_USE_ASSERT */ - cache_aligned_allocator >().deallocate(my_rep,1); - } - - //! Enqueue item at tail of queue - void internal_push( const void* src, item_constructor_t construct_item ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter++; - r.choose(k).push( src, k, *this, construct_item ); - } - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool internal_try_pop( void* dst ) ; - - //! Get size of queue; result may be invalid if queue is modified concurrently - size_t internal_size() const ; - - //! check if the queue is empty; thread safe - bool internal_empty() const ; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void internal_finish_clear() ; - - //! Obsolete - void internal_throw_exception() const { - throw_exception( eid_bad_alloc ); - } - - //! copy or move internal representation - void assign( const concurrent_queue_base_v3& src, item_constructor_t construct_item ) ; - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! swap internal representation - void internal_swap( concurrent_queue_base_v3& src ) { - std::swap( my_rep, src.my_rep ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -}; - -template -concurrent_queue_base_v3::concurrent_queue_base_v3() { - const size_t item_size = sizeof(T); - my_rep = cache_aligned_allocator >().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - my_rep->item_size = item_size; - my_rep->items_per_page = item_size<= 8 ? 32 : - item_size<= 16 ? 16 : - item_size<= 32 ? 8 : - item_size<= 64 ? 4 : - item_size<=128 ? 2 : - 1; -} - -template -bool concurrent_queue_base_v3::internal_try_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - do { - k = r.head_counter; - for(;;) { - if( (ptrdiff_t)(r.tail_counter-k)<=0 ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. - ticket tk=k; -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (push) - #pragma warning (disable: 4267) -#endif - k = r.head_counter.compare_and_swap( tk+1, tk ); -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (pop) -#endif - if( k==tk ) - break; - // Another thread snatched the item, retry. - } - } while( !r.choose( k ).pop( dst, k, *this ) ); - return true; -} - -template -size_t concurrent_queue_base_v3::internal_size() const { - concurrent_queue_rep& r = *my_rep; - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - ticket hc = r.head_counter; - size_t nie = r.n_invalid_entries; - ticket tc = r.tail_counter; - __TBB_ASSERT( hc!=tc || !nie, NULL ); - ptrdiff_t sz = tc-hc-nie; - return sz<0 ? 
0 : size_t(sz); -} - -template -bool concurrent_queue_base_v3::internal_empty() const { - concurrent_queue_rep& r = *my_rep; - ticket tc = r.tail_counter; - ticket hc = r.head_counter; - // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. - return tc==r.tail_counter && tc==hc+r.n_invalid_entries ; -} - -template -void concurrent_queue_base_v3::internal_finish_clear() { - concurrent_queue_rep& r = *my_rep; - size_t nq = r.n_queue; - for( size_t i=0; i -void concurrent_queue_base_v3::assign( const concurrent_queue_base_v3& src, - item_constructor_t construct_item ) -{ - concurrent_queue_rep& r = *my_rep; - r.items_per_page = src.my_rep->items_per_page; - - // copy concurrent_queue_rep data - r.head_counter = src.my_rep->head_counter; - r.tail_counter = src.my_rep->tail_counter; - r.n_invalid_entries = src.my_rep->n_invalid_entries; - - // copy or move micro_queues - for( size_t i = 0; i < r.n_queue; ++i ) - r.array[i].assign( src.my_rep->array[i], *this, construct_item); - - __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter, - "the source concurrent queue should not be concurrently modified." ); -} - -template class concurrent_queue_iterator; - -template -class concurrent_queue_iterator_rep: no_assign { - typedef typename micro_queue::padded_page padded_page; -public: - ticket head_counter; - const concurrent_queue_base_v3& my_queue; - typename concurrent_queue_base_v3::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base_v3& queue ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue) - { - for( size_t k=0; k::n_queue; ++k ) - array[k] = queue.my_rep->array[k].head_page; - } - - //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. - bool get_item( T*& item, size_t k ) ; -}; - -template -bool concurrent_queue_iterator_rep::get_item( T*& item, size_t k ) { - if( k==my_queue.my_rep->tail_counter ) { - item = NULL; - return true; - } else { - typename concurrent_queue_base_v3::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, my_queue.my_rep->items_per_page ); - item = µ_queue::get_ref(*p,i); - return (p->mask & uintptr_t(1)< -class concurrent_queue_iterator_base_v3 : no_assign { - //! Represents concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); -protected: - //! Pointer to current item - Value* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) { -#if __TBB_GCC_OPTIMIZER_ORDERING_BROKEN - __TBB_compiler_fence(); -#endif - } - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) - : no_assign(), my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) ; - - //! Assignment - void assign( const concurrent_queue_iterator_base_v3& other ) ; - - //! Advance iterator one step towards tail of queue. - void advance() ; - - //! 
Destructor - ~concurrent_queue_iterator_base_v3() { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } -}; - -template -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep(queue); - size_t k = my_rep->head_counter; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -template -void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base_v3& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -template -void concurrent_queue_iterator_base_v3::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base_v3& queue = my_rep->my_queue; -#if TBB_USE_ASSERT - Value* tmp; - my_rep->get_item(tmp,k); - __TBB_ASSERT( my_item==tmp, NULL ); -#endif /* TBB_USE_ASSERT */ - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, queue.my_rep->items_per_page ); - if( i==queue.my_rep->items_per_page-1 ) { - typename concurrent_queue_base_v3::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - // advance k - my_rep->head_counter = ++k; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -//! Similar to C++0x std::remove_cv -/** "tbb_" prefix added to avoid overload confusion with C++0x implementations. */ -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3::type>, - public std::iterator { -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - template - friend class ::tbb::strict_ppl::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3::type>(queue) - { - } - -public: - concurrent_queue_iterator() {} - - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3::type>(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - this->assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(this->my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - this->advance(); - return *this; - } - - //! 
Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal - -//! @endcond - -} // namespace strict_ppl - -//! @cond INTERNAL -namespace internal { - -class concurrent_queue_rep; -class concurrent_queue_iterator_rep; -class concurrent_queue_iterator_base_v3; -template class concurrent_queue_iterator; - -//! For internal use only. -/** Type-independent portion of concurrent_queue. - @ingroup containers */ -class concurrent_queue_base_v3: no_copy { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend class concurrent_queue_rep; - friend struct micro_queue; - friend class micro_queue_pop_finalizer; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; -protected: - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - //! Capacity of the queue - ptrdiff_t my_capacity; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - - enum copy_specifics { copy, move }; - -#if __TBB_PROTECTED_NESTED_CLASS_BROKEN -public: -#endif - template - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - -private: - virtual void copy_item( page& dst, size_t index, const void* src ) = 0; - virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; -protected: - __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size ); - virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3(); - - //! Enqueue item at tail of queue using copy operation - void __TBB_EXPORTED_METHOD internal_push( const void* src ); - - //! Dequeue item from head of queue - void __TBB_EXPORTED_METHOD internal_pop( void* dst ); - - //! Abort all pending queue operations - void __TBB_EXPORTED_METHOD internal_abort(); - - //! Attempt to enqueue item onto queue using copy operation - bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); - - //! Get size of queue - ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - - //! Check if the queue is emtpy - bool __TBB_EXPORTED_METHOD internal_empty() const; - - //! Set the queue capacity - void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); - - //! custom allocator - virtual page *allocate_page() = 0; - - //! custom de-allocator - virtual void deallocate_page( page *p ) = 0; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void __TBB_EXPORTED_METHOD internal_finish_clear() ; - - //! throw an exception - void __TBB_EXPORTED_METHOD internal_throw_exception() const; - - //! copy internal representation - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ; - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
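// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff] concurrent_queue_base_v3 above is
// the type-erased engine of the bounded queue: internal_set_capacity(),
// internal_push() (blocking), and internal_push_if_not_full() surface in the
// classic public API roughly as follows (a hedged sketch):
#include <tbb/concurrent_queue.h>
#include <iostream>

int main() {
    tbb::concurrent_bounded_queue<int> q;
    q.set_capacity(2);              // backed by internal_set_capacity()

    q.push(1);                      // blocking push; waits while full
    bool ok = q.try_push(2);        // non-blocking: internal_push_if_not_full()
    ok = q.try_push(3);             // queue full, so ok == false

    int v;
    q.pop(v);                       // blocking pop; waits while empty
    std::cout << v << ' ' << std::boolalpha << ok << '\n';   // "1 false"
    return 0;
}
// ---------------------------------------------------------------------------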
swap queues - void internal_swap( concurrent_queue_base_v3& src ) { - std::swap( my_capacity, src.my_capacity ); - std::swap( items_per_page, src.items_per_page ); - std::swap( item_size, src.item_size ); - std::swap( my_rep, src.my_rep ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Enqueues item at tail of queue using specified operation (copy or move) - void internal_insert_item( const void* src, copy_specifics op_type ); - - //! Attempts to enqueue at tail of queue using specified operation (copy or move) - bool internal_insert_if_not_full( const void* src, copy_specifics op_type ); - - //! Assigns one queue to another using specified operation (copy or move) - void internal_assign( const concurrent_queue_base_v3& src, copy_specifics op_type ); -private: - virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; -}; - -//! For internal use only. -/** Backward compatible modification of concurrent_queue_base_v3 - @ingroup containers */ -class concurrent_queue_base_v8: public concurrent_queue_base_v3 { -protected: - concurrent_queue_base_v8( size_t item_sz ) : concurrent_queue_base_v3( item_sz ) {} - - //! move items - void __TBB_EXPORTED_METHOD move_content( concurrent_queue_base_v8& src ) ; - - //! Attempt to enqueue item onto queue using move operation - bool __TBB_EXPORTED_METHOD internal_push_move_if_not_full( const void* src ); - - //! Enqueue item at tail of queue using move operation - void __TBB_EXPORTED_METHOD internal_push_move( const void* src ); -private: - friend struct micro_queue; - virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; - virtual void move_item( page& dst, size_t index, const void* src ) = 0; -}; - -//! Type-independent portion of concurrent_queue_iterator. -/** @ingroup containers */ -class concurrent_queue_iterator_base_v3 { - //! concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data ); -protected: - //! Pointer to current item - void* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {} - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Obsolete entry point for constructing iterator pointing to head of queue. - /** Does not work correctly for SSE types. */ - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ); - - //! Construct iterator pointing to head of queue. - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data ); - - //! Assignment - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i ); - - //! Advance iterator one step towards tail of queue. - void __TBB_EXPORTED_METHOD advance(); - - //! Destructor - __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3(); -}; - -typedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. 
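// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff; names here are hypothetical] A
// simplified model of the padded_page trick above: a page is a small header
// followed in place by the item array, and offsetof(padded_page, last)
// locates item 0. TBB wraps this in __TBB_offsetof because offsetof on a
// non-POD type is only conditionally supported.
#include <cstddef>
#include <cstdio>

struct page_hdr { page_hdr* next; unsigned long mask; };

template <typename T>
struct padded_page : page_hdr { T last; };   // 'last' must stay the last field

template <typename T>
T* item_address(void* page, std::size_t index) {
    char* base = static_cast<char*>(page) + offsetof(padded_page<T>, last);
    return reinterpret_cast<T*>(base) + index;
}

int main() {
    // Raw storage for the header plus four ints (illustration only).
    alignas(padded_page<int>) char raw[sizeof(padded_page<int>) + 3 * sizeof(int)];
    for (std::size_t i = 0; i < 4; ++i)
        *item_address<int>(raw, i) = int(i) * 10;
    std::printf("%d %d\n", *item_address<int>(raw, 0), *item_address<int>(raw, 3));  // 0 30
    return 0;
}
// ---------------------------------------------------------------------------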
- @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base, - public std::iterator { - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class ::tbb::concurrent_bounded_queue; -#else -public: // workaround for MSVC -#endif - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page,last)) - { - } - -public: - concurrent_queue_iterator() {} - - /** If Value==Container::value_type, then this routine is the copy constructor. - If Value==const Container::value_type, then this routine is a conversion constructor. */ - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal; - -//! @endcond - -} // namespace tbb - -#endif /* __TBB__concurrent_queue_impl_H */ diff --git a/inst/include/tbb/internal/_concurrent_unordered_impl.h b/inst/include/tbb/internal/_concurrent_unordered_impl.h deleted file mode 100644 index 77a86394f..000000000 --- a/inst/include/tbb/internal/_concurrent_unordered_impl.h +++ /dev/null @@ -1,1565 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. 
*/ - -#ifndef __TBB__concurrent_unordered_impl_H -#define __TBB__concurrent_unordered_impl_H -#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "../tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include // Need std::equal_to (in ../concurrent_unordered_*.h) -#include // For tbb_hasher -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "../atomic.h" -#include "../tbb_exception.h" -#include "../tbb_allocator.h" - -#if __TBB_INITIALIZER_LISTS_PRESENT - #include -#endif - -namespace tbb { -namespace interface5 { -//! @cond INTERNAL -namespace internal { - -template -class split_ordered_list; -template -class concurrent_unordered_base; - -// Forward list iterators (without skipping dummy elements) -template -class flist_iterator : public std::iterator -{ - template - friend class split_ordered_list; - template - friend class concurrent_unordered_base; - template - friend class flist_iterator; - - typedef typename Solist::nodeptr_t nodeptr_t; -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - flist_iterator() : my_node_ptr(0) {} - flist_iterator( const flist_iterator &other ) - : my_node_ptr(other.my_node_ptr) {} - - reference operator*() const { return my_node_ptr->my_element; } - pointer operator->() const { return &**this; } - - flist_iterator& operator++() { - my_node_ptr = my_node_ptr->my_next; - return *this; - } - - flist_iterator operator++(int) { - flist_iterator tmp = *this; - ++*this; - return tmp; - } - -protected: - flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {} - nodeptr_t get_node_ptr() const { return my_node_ptr; } - - nodeptr_t my_node_ptr; - - template - friend bool operator==( const flist_iterator &i, const flist_iterator &j ); - template - friend bool operator!=( const flist_iterator& i, const flist_iterator& j ); -}; - -template -bool operator==( const flist_iterator &i, const flist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr; -} -template -bool operator!=( const flist_iterator& i, const flist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr; -} - -// Split-order list iterators, needed to skip dummy elements -template -class solist_iterator : public flist_iterator -{ - typedef flist_iterator base_type; - typedef typename Solist::nodeptr_t nodeptr_t; - using base_type::get_node_ptr; - template - friend class split_ordered_list; - template - friend class solist_iterator; - template - friend bool operator==( const solist_iterator &i, const solist_iterator &j ); - template - friend bool operator!=( const solist_iterator& i, const solist_iterator& j ); - - const Solist *my_list_ptr; - solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {} - -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - solist_iterator() {} - 
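// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff] The split-ordered list implemented
// in this deleted header is the engine behind tbb::concurrent_unordered_map
// and _set. A usage sketch, assuming the classic TBB 4.x API:
#include <tbb/concurrent_unordered_map.h>
#include <string>
#include <iostream>

int main() {
    tbb::concurrent_unordered_map<std::string, int> counts;

    counts.insert(std::make_pair("a", 1));   // safe to call concurrently
    counts["b"] = 2;                         // operator[] inserts if absent

    tbb::concurrent_unordered_map<std::string, int>::iterator it = counts.find("a");
    if (it != counts.end())
        std::cout << it->first << " -> " << it->second << '\n';

    counts.unsafe_erase("b");   // erasure is NOT concurrency-safe, hence the prefix
    return 0;
}
// ---------------------------------------------------------------------------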
solist_iterator(const solist_iterator &other ) - : base_type(other), my_list_ptr(other.my_list_ptr) {} - - reference operator*() const { - return this->base_type::operator*(); - } - - pointer operator->() const { - return (&**this); - } - - solist_iterator& operator++() { - do ++(*(base_type *)this); - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (*this); - } - - solist_iterator operator++(int) { - solist_iterator tmp = *this; - do ++*this; - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (tmp); - } -}; - -template -bool operator==( const solist_iterator &i, const solist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr; -} -template -bool operator!=( const solist_iterator& i, const solist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr; -} - -// Forward type and class definitions -typedef size_t sokey_t; - - -// Forward list in which elements are sorted in a split-order -template -class split_ordered_list -{ -public: - typedef split_ordered_list self_type; - typedef typename Allocator::template rebind::other allocator_type; - struct node; - typedef node *nodeptr_t; - - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::value_type value_type; - - typedef solist_iterator const_iterator; - typedef solist_iterator iterator; - typedef flist_iterator raw_const_iterator; - typedef flist_iterator raw_iterator; - - // Node that holds the element in a split-ordered list - struct node : tbb::internal::no_assign - { - private: - // for compilers that try to generate default constructors though they are not needed. - node(); // VS 2008, 2010, 2012 - public: - // Initialize the node with the given order key - void init(sokey_t order_key) { - my_order_key = order_key; - my_next = NULL; - } - - // Return the order key (needed for hashing) - sokey_t get_order_key() const { // TODO: remove - return my_order_key; - } - - // Inserts the new element in the list in an atomic fashion - nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node) - { - // Try to change the next pointer on the current element to a new element, only if it still points to the cached next - nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node); - - if (exchange_node == current_node) // TODO: why this branch? - { - // Operation succeeded, return the new node - return new_node; - } - else - { - // Operation failed, return the "interfering" node - return exchange_node; - } - } - - // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets - // in the hash table to quickly index into the right subsection of the split-ordered list. 
- bool is_dummy() const { - return (my_order_key & 0x1) == 0; - } - - - nodeptr_t my_next; // Next element in the list - value_type my_element; // Element storage - sokey_t my_order_key; // Order key for this element - }; - - // Allocate a new node with the given order key and value - nodeptr_t create_node(sokey_t order_key, const T &value) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(value); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //TODO: try to combine both implementations using poor man forward - //TODO: use RAII scoped guard instead of explicit catch - // Allocate a new node with the given order key and value - nodeptr_t create_node(sokey_t order_key, T &&value) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(std::move(value)); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - // Allocate a new node with the given order key; used to allocate dummy nodes - nodeptr_t create_node(sokey_t order_key) { - nodeptr_t pnode = my_node_allocator.allocate(1); - pnode->init(order_key); - return (pnode); - } - - split_ordered_list(allocator_type a = allocator_type()) - : my_node_allocator(a), my_element_count(0) - { - // Immediately allocate a dummy node with order key of 0. This node - // will always be the head of the list. - my_head = create_node(0); - } - - ~split_ordered_list() - { - // Clear the list - clear(); - - // Remove the head element which is not cleared by clear() - nodeptr_t pnode = my_head; - my_head = NULL; - - __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node"); - - destroy_node(pnode); - } - - // Common forward list functions - - allocator_type get_allocator() const { - return (my_node_allocator); - } - - void clear() { - nodeptr_t pnext; - nodeptr_t pnode = my_head; - - __TBB_ASSERT(my_head != NULL, "Invalid head list node"); - pnext = pnode->my_next; - pnode->my_next = NULL; - pnode = pnext; - - while (pnode != NULL) - { - pnext = pnode->my_next; - destroy_node(pnode); - pnode = pnext; - } - - my_element_count = 0; - } - - // Returns a first non-dummy element in the SOL - iterator begin() { - return first_real_iterator(raw_begin()); - } - - // Returns a first non-dummy element in the SOL - const_iterator begin() const { - return first_real_iterator(raw_begin()); - } - - iterator end() { - return (iterator(0, this)); - } - - const_iterator end() const { - return (const_iterator(0, this)); - } - - const_iterator cbegin() const { - return (((const self_type *)this)->begin()); - } - - const_iterator cend() const { - return (((const self_type *)this)->end()); - } - - // Checks if the number of elements (non-dummy) is 0 - bool empty() const { - return (my_element_count == 0); - } - - // Returns the number of non-dummy elements in the list - size_type size() const { - return my_element_count; - } - - // Returns the maximum size of the list, determined by the allocator - size_type max_size() const { - return my_node_allocator.max_size(); - } - - // Swaps 'this' list with the passed in one - void swap(self_type& other) - { - if (this == &other) - { - // Nothing to do - return; - } - - std::swap(my_element_count, other.my_element_count); - std::swap(my_head, 
other.my_head); - } - - // Split-order list functions - - // Returns a first element in the SOL, which is always a dummy - raw_iterator raw_begin() { - return raw_iterator(my_head); - } - - // Returns a first element in the SOL, which is always a dummy - raw_const_iterator raw_begin() const { - return raw_const_iterator(my_head); - } - - raw_iterator raw_end() { - return raw_iterator(0); - } - - raw_const_iterator raw_end() const { - return raw_const_iterator(0); - } - - static sokey_t get_order_key(const raw_const_iterator& it) { - return it.get_node_ptr()->get_order_key(); - } - - static sokey_t get_safe_order_key(const raw_const_iterator& it) { - if( !it.get_node_ptr() ) return ~sokey_t(0); - return it.get_node_ptr()->get_order_key(); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. - iterator get_iterator(raw_iterator it) { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. - const_iterator get_iterator(raw_const_iterator it) const { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return const_iterator(it.get_node_ptr(), this); - } - - // Returns a non-const version of the raw_iterator - raw_iterator get_iterator(raw_const_iterator it) { - return raw_iterator(it.get_node_ptr()); - } - - // Returns a non-const version of the iterator - static iterator get_iterator(const_iterator it) { - return iterator(it.my_node_ptr, it.my_list_ptr); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - iterator first_real_iterator(raw_iterator it) - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - const_iterator first_real_iterator(raw_const_iterator it) const - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return const_iterator(it.get_node_ptr(), this); - } - - // Erase an element using the allocator - void destroy_node(nodeptr_t pnode) { - if (!pnode->is_dummy()) my_node_allocator.destroy(pnode); - my_node_allocator.deallocate(pnode, 1); - } - - // Try to insert a new element in the list. If insert fails, return the node that - // was inserted instead. 
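// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff; standalone sketch, not TBB code]
// The try_insert() logic that follows hinges on a single compare-and-swap:
// link the new node only if the predecessor still points at the successor we
// read earlier, otherwise report the interfering node so the caller can
// re-search. The same idea in modern C++:
#include <atomic>
#include <cstdio>

struct Node {
    int value;
    std::atomic<Node*> next;
};

// Returns true if 'nn' was linked between 'prev' and 'expected_next';
// false if another thread changed prev->next first.
bool try_link(Node* prev, Node* nn, Node* expected_next) {
    nn->next.store(expected_next, std::memory_order_relaxed);
    return prev->next.compare_exchange_strong(expected_next, nn);
}

int main() {
    Node head  = {0, {nullptr}};
    Node first = {1, {nullptr}};
    Node mid   = {2, {nullptr}};
    try_link(&head, &first, nullptr);          // head -> 1
    bool ok = try_link(&head, &mid, &first);   // head -> 2 -> 1
    std::printf("%s\n", ok ? "linked" : "lost race");
    return 0;
}
// ---------------------------------------------------------------------------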
- nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) { - new_node->my_next = current_node; - return previous->atomic_set_next(new_node, current_node); - } - - // Insert a new element between passed in iterators - std::pair try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count) - { - nodeptr_t pnode = create_node(order_key, value); - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr()); - - if (inserted_node == pnode) - { - // If the insert succeeded, check that the order is correct and increment the element count - check_range(); - *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1)); - return std::pair(iterator(pnode, this), true); - } - else - { - // If the insert failed (element already there), then delete the new one - destroy_node(pnode); - return std::pair(end(), false); - } - } - - // Insert a new dummy element, starting search at a parent dummy element - raw_iterator insert_dummy(raw_iterator it, sokey_t order_key) - { - raw_iterator last = raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - ++where; - - // Create a dummy element up front, even though it may be discarded (due to concurrent insertion) - nodeptr_t dummy_node = create_node(order_key); - - for (;;) - { - __TBB_ASSERT(it != last, "Invalid head list node"); - - // If the head iterator is at the end of the list, or past the point where this dummy - // node needs to be inserted, then try to insert it. - if (where == last || get_order_key(where) > order_key) - { - __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list"); - - // Try to insert it in the right place - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr()); - - if (inserted_node == dummy_node) - { - // Insertion succeeded, check the list for order violations - check_range(); - return raw_iterator(dummy_node); - } - else - { - // Insertion failed: either dummy node was inserted by another thread, or - // a real element was inserted at exactly the same place as dummy node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). - where = it; - ++where; - continue; - } - } - else if (get_order_key(where) == order_key) - { - // Another dummy node with the same value found, discard the new one. 
- destroy_node(dummy_node); - return where; - } - - // Move the iterator forward - it = where; - ++where; - } - - } - - // This erase function can handle both real and dummy nodes - void erase_node(raw_iterator previous, raw_const_iterator& where) - { - nodeptr_t pnode = (where++).get_node_ptr(); - nodeptr_t prevnode = previous.get_node_ptr(); - __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators"); - prevnode->my_next = pnode->my_next; - - destroy_node(pnode); - } - - // Erase the element (previous node needs to be passed because this is a forward only list) - iterator erase_node(raw_iterator previous, const_iterator where) - { - raw_const_iterator it = where; - erase_node(previous, it); - my_element_count--; - - return get_iterator(first_real_iterator(it)); - } - - // Move all elements from the passed in split-ordered list to this one - void move_all(self_type& source) - { - raw_const_iterator first = source.raw_begin(); - raw_const_iterator last = source.raw_end(); - - if (first == last) - return; - - nodeptr_t previous_node = my_head; - raw_const_iterator begin_iterator = first++; - - // Move all elements one by one, including dummy ones - for (raw_const_iterator it = first; it != last;) - { - nodeptr_t pnode = it.get_node_ptr(); - - nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element); - previous_node = try_insert(previous_node, dummy_node, NULL); - __TBB_ASSERT(previous_node != NULL, "Insertion must succeed"); - raw_const_iterator where = it++; - source.erase_node(get_iterator(begin_iterator), where); - } - check_range(); - } - - -private: - //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base - template - friend class concurrent_unordered_base; - - // Check the list for order violations - void check_range() - { -#if TBB_USE_ASSERT - for (raw_iterator it = raw_begin(); it != raw_end(); ++it) - { - raw_iterator next_iterator = it; - ++next_iterator; - - __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! 
List order inconsistency !!!"); - } -#endif - } - - typename allocator_type::template rebind::other my_node_allocator; // allocator object for nodes - size_type my_element_count; // Total item count, not counting dummy nodes - nodeptr_t my_head; // pointer to head node -}; - -// Template class for hash compare -template -class hash_compare -{ -public: - typedef Hasher hasher; - typedef Key_equality key_equal; - - hash_compare() {} - - hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {} - - hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {} - - size_t operator()(const Key& key) const { - return ((size_t)my_hash_object(key)); - } - - bool operator()(const Key& key1, const Key& key2) const { - return (!my_key_compare_object(key1, key2)); - } - - Hasher my_hash_object; // The hash object - Key_equality my_key_compare_object; // The equality comparator object -}; - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) -#pragma warning(push) -#pragma warning(disable: 4127) // warning C4127: conditional expression is constant -#endif - -template -class concurrent_unordered_base : public Traits -{ -protected: - // Type definitions - typedef concurrent_unordered_base self_type; - typedef typename Traits::value_type value_type; - typedef typename Traits::key_type key_type; - typedef typename Traits::hash_compare hash_compare; - typedef typename Traits::value_compare value_compare; - typedef typename Traits::allocator_type allocator_type; - typedef typename hash_compare::hasher hasher; - typedef typename hash_compare::key_equal key_equal; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef split_ordered_list solist_t; - typedef typename solist_t::nodeptr_t nodeptr_t; - // Iterators that walk the entire split-order list, including dummy nodes - typedef typename solist_t::raw_iterator raw_iterator; - typedef typename solist_t::raw_const_iterator raw_const_iterator; - typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets - typedef typename solist_t::const_iterator const_iterator; - typedef iterator local_iterator; - typedef const_iterator const_local_iterator; - using Traits::my_hash_compare; - using Traits::get_key; - using Traits::allow_multimapping; - - static const size_type initial_bucket_number = 8; // Initial number of buckets -private: - typedef std::pair pairii_t; - typedef std::pair paircc_t; - - static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit - static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket - - struct call_internal_clear_on_exit{ - concurrent_unordered_base* my_instance; - call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {} - void dismiss(){ my_instance = NULL;} - ~call_internal_clear_on_exit(){ - if (my_instance){ - my_instance->internal_clear(); - } - } - }; -protected: - // Constructors/Destructors - concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number, - const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type()) - : Traits(hc), my_solist(a), - my_allocator(a), 
my_maximum_bucket_size((float) initial_bucket_load) - { - if( n_of_buckets == 0) ++n_of_buckets; - my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2 - internal_init(); - } - - concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a) - : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) - { - internal_init(); - internal_copy(right); - } - - concurrent_unordered_base(const concurrent_unordered_base& right) - : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) - { - //FIXME:exception safety seems to be broken here - internal_init(); - internal_copy(right); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_base(concurrent_unordered_base&& right) - : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) - { - internal_init(); - swap(right); - } - - concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a) - : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) - { - call_internal_clear_on_exit clear_buckets_on_exception(this); - - internal_init(); - if (a == right.get_allocator()){ - this->swap(right); - }else{ - my_maximum_bucket_size = right.my_maximum_bucket_size; - my_number_of_buckets = right.my_number_of_buckets; - my_solist.my_element_count = right.my_solist.my_element_count; - - if (! right.my_solist.empty()){ - nodeptr_t previous_node = my_solist.my_head; - - // Move all elements one by one, including dummy ones - for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it) - { - const nodeptr_t pnode = it.get_node_ptr(); - nodeptr_t node; - if (pnode->is_dummy()) { - node = my_solist.create_node(pnode->get_order_key()); - size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets; - set_bucket(bucket, node); - }else{ - node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element)); - } - - previous_node = my_solist.try_insert(previous_node, node, NULL); - __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?"); - } - my_solist.check_range(); - } - } - - clear_buckets_on_exception.dismiss(); - } - -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - concurrent_unordered_base& operator=(const concurrent_unordered_base& right) { - if (this != &right) - internal_copy(right); - return (*this); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_base& operator=(concurrent_unordered_base&& other) - { - if(this != &other){ - typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; - if(pocma_t::value || this->my_allocator == other.my_allocator) { - concurrent_unordered_base trash (std::move(*this)); - swap(other); - if (pocma_t::value) { - using std::swap; - //TODO: swapping allocators here may be a problem, replace with single direction moving - swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator); - swap(this->my_allocator, other.my_allocator); - } - } else { - concurrent_unordered_base moved_copy(std::move(other),this->my_allocator); - this->swap(moved_copy); - } - } - return *this; - } - -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! 
assignment operator from initializer_list - concurrent_unordered_base& operator=(std::initializer_list il) - { - this->clear(); - this->insert(il.begin(),il.end()); - return (*this); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - - ~concurrent_unordered_base() { - // Delete all node segments - internal_clear(); - } - -public: - allocator_type get_allocator() const { - return my_solist.get_allocator(); - } - - // Size and capacity function - bool empty() const { - return my_solist.empty(); - } - - size_type size() const { - return my_solist.size(); - } - - size_type max_size() const { - return my_solist.max_size(); - } - - // Iterators - iterator begin() { - return my_solist.begin(); - } - - const_iterator begin() const { - return my_solist.begin(); - } - - iterator end() { - return my_solist.end(); - } - - const_iterator end() const { - return my_solist.end(); - } - - const_iterator cbegin() const { - return my_solist.cbegin(); - } - - const_iterator cend() const { - return my_solist.cend(); - } - - // Parallel traversal support - class const_range_type : tbb::internal::no_assign { - const concurrent_unordered_base &my_table; - raw_const_iterator my_begin_node; - raw_const_iterator my_end_node; - mutable raw_const_iterator my_midpoint_node; - public: - //! Type for size of a range - typedef typename concurrent_unordered_base::size_type size_type; - typedef typename concurrent_unordered_base::value_type value_type; - typedef typename concurrent_unordered_base::reference reference; - typedef typename concurrent_unordered_base::difference_type difference_type; - typedef typename concurrent_unordered_base::const_iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin_node == my_end_node;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint_node != my_end_node; - } - //! Split range. - const_range_type( const_range_type &r, split ) : - my_table(r.my_table), my_end_node(r.my_end_node) - { - r.my_end_node = my_begin_node = r.my_midpoint_node; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! Init range with container and grainsize specified - const_range_type( const concurrent_unordered_base &a_table ) : - my_table(a_table), my_begin_node(a_table.my_solist.begin()), - my_end_node(a_table.my_solist.end()) - { - set_midpoint(); - } - iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); } - iterator end() const { return my_table.my_solist.get_iterator(my_end_node); } - //! The grain size for this range. - size_type grainsize() const { return 1; } - - //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node. 
- void set_midpoint() const { - if( my_begin_node == my_end_node ) // not divisible - my_midpoint_node = my_end_node; - else { - sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node); - sokey_t end_key = solist_t::get_safe_order_key(my_end_node); - size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets; - while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket); - if(__TBB_ReverseBits(mid_bucket) > begin_key) { - // found a dummy_node between begin and end - my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket )); - } - else { - // didn't find a dummy node between begin and end. - my_midpoint_node = my_end_node; - } -#if TBB_USE_ASSERT - { - sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node); - __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" ); - __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" ); - } -#endif // TBB_USE_ASSERT - } - } - }; - - class range_type : public const_range_type { - public: - typedef typename concurrent_unordered_base::iterator iterator; - //! Split range. - range_type( range_type &r, split ) : const_range_type( r, split() ) {} - //! Init range with container and grainsize specified - range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {} - - iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); } - iterator end() const { return solist_t::get_iterator( const_range_type::end() ); } - }; - - range_type range() { - return range_type( *this ); - } - - const_range_type range() const { - return const_range_type( *this ); - } - - // Modifiers - std::pair insert(const value_type& value) { - return internal_insert(value); - } - - iterator insert(const_iterator, const value_type& value) { - // Ignore hint - return insert(value).first; - } - - template - void insert(Iterator first, Iterator last) { - for (Iterator it = first; it != last; ++it) - insert(*it); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! 
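// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff] const_range_type/range_type above
// exist so the container can feed tbb::parallel_for; set_midpoint() splits
// work at dummy-node (bucket) boundaries. A hedged sketch against the
// classic TBB 4.x API:
#include <tbb/concurrent_unordered_map.h>
#include <tbb/parallel_for.h>
#include <tbb/atomic.h>
#include <iostream>

struct SumBody {
    tbb::atomic<int>* total;
    template <typename Range>
    void operator()(const Range& r) const {
        int local = 0;
        for (typename Range::iterator it = r.begin(); it != r.end(); ++it)
            local += it->second;
        *total += local;   // tbb::atomic<int> supports atomic +=
    }
};

int main() {
    tbb::concurrent_unordered_map<int, int> m;
    for (int i = 0; i < 1000; ++i)
        m[i] = 1;

    tbb::atomic<int> total;
    total = 0;
    SumBody body = { &total };
    tbb::parallel_for(m.range(), body);
    std::cout << total << '\n';   // 1000
    return 0;
}
// ---------------------------------------------------------------------------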
Insert initializer list - void insert(std::initializer_list il) { - insert(il.begin(), il.end()); - } -#endif - - iterator unsafe_erase(const_iterator where) { - return internal_erase(where); - } - - iterator unsafe_erase(const_iterator first, const_iterator last) { - while (first != last) - unsafe_erase(first++); - return my_solist.get_iterator(first); - } - - size_type unsafe_erase(const key_type& key) { - pairii_t where = equal_range(key); - size_type item_count = internal_distance(where.first, where.second); - unsafe_erase(where.first, where.second); - return item_count; - } - - void swap(concurrent_unordered_base& right) { - if (this != &right) { - std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here - my_solist.swap(right.my_solist); - internal_swap_buckets(right); - std::swap(my_number_of_buckets, right.my_number_of_buckets); - std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size); - } - } - - // Observers - hasher hash_function() const { - return my_hash_compare.my_hash_object; - } - - key_equal key_eq() const { - return my_hash_compare.my_key_compare_object; - } - - void clear() { - // Clear list - my_solist.clear(); - - // Clear buckets - internal_clear(); - - // Initialize bucket 0 - __TBB_ASSERT(my_buckets[0] == NULL, NULL); - raw_iterator dummy_node = my_solist.raw_begin(); - set_bucket(0, dummy_node); - } - - // Lookup - iterator find(const key_type& key) { - return internal_find(key); - } - - const_iterator find(const key_type& key) const { - return const_cast(this)->internal_find(key); - } - - size_type count(const key_type& key) const { - if(allow_multimapping) { - paircc_t answer = equal_range(key); - size_type item_count = internal_distance(answer.first, answer.second); - return item_count; - } else { - return const_cast(this)->internal_find(key) == end()?0:1; - } - } - - std::pair equal_range(const key_type& key) { - return internal_equal_range(key); - } - - std::pair equal_range(const key_type& key) const { - return const_cast(this)->internal_equal_range(key); - } - - // Bucket interface - for debugging - size_type unsafe_bucket_count() const { - return my_number_of_buckets; - } - - size_type unsafe_max_bucket_count() const { - return segment_size(pointers_per_table-1); - } - - size_type unsafe_bucket_size(size_type bucket) { - size_type item_count = 0; - if (is_initialized(bucket)) { - raw_iterator it = get_bucket(bucket); - ++it; - for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it) - ++item_count; - } - return item_count; - } - - size_type unsafe_bucket(const key_type& key) const { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - return bucket; - } - - // If the bucket is initialized, return a first non-dummy element in it - local_iterator unsafe_begin(size_type bucket) { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // If the bucket is initialized, return a first non-dummy element in it - const_local_iterator unsafe_begin(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - local_iterator unsafe_end(size_type bucket) - { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - - // Find the end of 
the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - const_local_iterator unsafe_end(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - - // Find the end of the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - const_local_iterator unsafe_cbegin(size_type bucket) const { - return ((const self_type *) this)->unsafe_begin(bucket); - } - - const_local_iterator unsafe_cend(size_type bucket) const { - return ((const self_type *) this)->unsafe_end(bucket); - } - - // Hash policy - float load_factor() const { - return (float) size() / (float) unsafe_bucket_count(); - } - - float max_load_factor() const { - return my_maximum_bucket_size; - } - - void max_load_factor(float newmax) { - if (newmax != newmax || newmax < 0) - tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor); - my_maximum_bucket_size = newmax; - } - - // This function is a noop, because the underlying split-ordered list - // is already sorted, so an increase in the bucket number will be - // reflected next time this bucket is touched. - void rehash(size_type buckets) { - size_type current_buckets = my_number_of_buckets; - if (current_buckets >= buckets) - return; - my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2 - } - -private: - - // Initialize the hash and keep the first bucket open - void internal_init() { - // Allocate an array of segment pointers - memset(my_buckets, 0, pointers_per_table * sizeof(void *)); - - // Initialize bucket 0 - raw_iterator dummy_node = my_solist.raw_begin(); - set_bucket(0, dummy_node); - } - - void internal_clear() { - for (size_type index = 0; index < pointers_per_table; ++index) { - if (my_buckets[index] != NULL) { - size_type sz = segment_size(index); - for (size_type index2 = 0; index2 < sz; ++index2) - my_allocator.destroy(&my_buckets[index][index2]); - my_allocator.deallocate(my_buckets[index], sz); - my_buckets[index] = 0; - } - } - } - - void internal_copy(const self_type& right) { - clear(); - - my_maximum_bucket_size = right.my_maximum_bucket_size; - my_number_of_buckets = right.my_number_of_buckets; - - __TBB_TRY { - insert(right.begin(), right.end()); - my_hash_compare = right.my_hash_compare; - } __TBB_CATCH(...) { - my_solist.clear(); - __TBB_RETHROW(); - } - } - - void internal_swap_buckets(concurrent_unordered_base& right) - { - // Swap all node segments - for (size_type index = 0; index < pointers_per_table; ++index) - { - raw_iterator * iterator_pointer = my_buckets[index]; - my_buckets[index] = right.my_buckets[index]; - right.my_buckets[index] = iterator_pointer; - } - } - - //TODO: why not use std::distance? 
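// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff; standalone sketch with hypothetical
// helper names] This container uses split-ordered hashing (Shalev & Shavit):
// keys live in the list in bit-reversed order. Dummy bucket nodes reverse the
// bucket index and keep the low bit 0, while real keys set the low bit to 1,
// which is why is_dummy() above is just "(key & 1) == 0". A bucket's parent
// is found by clearing its most significant set bit, mirroring the
// get_parent() helper that appears further down:
#include <cstdint>
#include <cstdio>

static uint64_t reverse_bits(uint64_t x) {
    uint64_t r = 0;
    for (int i = 0; i < 64; ++i) { r = (r << 1) | (x & 1); x >>= 1; }
    return r;
}
static uint64_t regular_key(uint64_t hash)  { return reverse_bits(hash) | 1; }
static uint64_t dummy_key(uint64_t bucket)  { return reverse_bits(bucket); }

static uint64_t parent_bucket(uint64_t b) {
    uint64_t msb = b;
    while (msb & (msb - 1)) msb &= msb - 1;   // isolate highest set bit
    return b & ~msb;
}

int main() {
    std::printf("parent of bucket 6: %llu\n",
                (unsigned long long) parent_bucket(6));   // 2
    std::printf("dummy(1) sorts before regular(hash=1): %d\n",
                (int)(dummy_key(1) < regular_key(1)));    // 1
    return 0;
}
// ---------------------------------------------------------------------------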
- // Hash APIs - size_type internal_distance(const_iterator first, const_iterator last) const - { - size_type num = 0; - - for (const_iterator it = first; it != last; ++it) - ++num; - - return num; - } - - // Insert an element in the hash given its value - std::pair internal_insert(const value_type& value) - { - sokey_t order_key = (sokey_t) my_hash_compare(get_key(value)); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - size_type new_count = 0; - order_key = split_order_key_regular(order_key); - raw_iterator it = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) - { - if (where == last || solist_t::get_order_key(where) > order_key) - { - // Try to insert it in the right place - std::pair result = my_solist.try_insert(it, where, value, order_key, &new_count); - - if (result.second) - { - // Insertion succeeded, adjust the table size, if needed - adjust_table_size(new_count, my_number_of_buckets); - return result; - } - else - { - // Insertion failed: either the same node was inserted by another thread, or - // another element was inserted at exactly the same place as this node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). - where = it; - ++where; - continue; - } - } - else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0) - { - // Element already in the list, return it - return std::pair(my_solist.get_iterator(where), false); - } - - // Move the iterator forward - it = where; - ++where; - } - } - - // Find the element in the split-ordered list - iterator internal_find(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator last = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != last; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // If the order key is smaller than the current order key, the element - // is not in the hash. - return end(); - } - else if (solist_t::get_order_key(it) == order_key) - { - // The fact that order keys match does not mean that the element is found. - // Key function comparison has to be performed to check whether this is the - // right element. If not, keep searching while order key is the same. - if (!my_hash_compare(get_key(*it), key)) - return my_solist.get_iterator(it); - } - } - - return end(); - } - - // Erase an element from the list. This is not a concurrency safe function. 
- iterator internal_erase(const_iterator it) - { - key_type key = get_key(*it); - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - - raw_iterator previous = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = previous; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) { - if (where == last) - return end(); - else if (my_solist.get_iterator(where) == it) - return my_solist.erase_node(previous, it); - - // Move the iterator forward - previous = where; - ++where; - } - } - - // Return the [begin, end) pair of iterators with the same key values. - // This operation makes sense only if mapping is many-to-one. - pairii_t internal_equal_range(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator end_it = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != end_it; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // There is no element with the given key - return pairii_t(end(), end()); - } - else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key)) - { - iterator first = my_solist.get_iterator(it); - iterator last = first; - do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) ); - return pairii_t(first, last); - } - } - - return pairii_t(end(), end()); - } - - // Bucket APIs - void init_bucket(size_type bucket) - { - // Bucket 0 has no parent. - __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized"); - - size_type parent_bucket = get_parent(bucket); - - // All parent_bucket buckets have to be initialized before this bucket is - if (!is_initialized(parent_bucket)) - init_bucket(parent_bucket); - - raw_iterator parent = get_bucket(parent_bucket); - - // Create a dummy first node in this bucket - raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket)); - set_bucket(bucket, dummy_node); - } - - void adjust_table_size(size_type total_elements, size_type current_size) - { - // Grow the table by a factor of 2 if possible and needed - if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size ) - { - // Double the size of the hash only if size has not changed in between loads - my_number_of_buckets.compare_and_swap(2u*current_size, current_size); - //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8 - //due to overzealous compiler warnings in /Wp64 mode - } - } - - size_type get_parent(size_type bucket) const - { - // Unsets bucket's most significant turned-on bit - size_type msb = __TBB_Log2((uintptr_t)bucket); - return bucket & ~(size_type(1) << msb); - } - - - // Dynamic sized array (segments) - //! @return segment index of given index in the array - static size_type segment_index_of( size_type index ) { - return size_type( __TBB_Log2( uintptr_t(index|1) ) ); - } - - //! 
@return the first array index of given segment - static size_type segment_base( size_type k ) { - return (size_type(1)< my_number_of_buckets; // Current table size - solist_t my_solist; // List where all the elements are kept - typename allocator_type::template rebind::other my_allocator; // Allocator object for segments - float my_maximum_bucket_size; // Maximum size of the bucket - atomic my_buckets[pointers_per_table]; // The segment table -}; -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) -#pragma warning(pop) // warning 4127 is back -#endif - -//! Hash multiplier -static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value; -} // namespace internal -//! @endcond -//! Hasher functions -template -inline size_t tbb_hasher( const T& t ) { - return static_cast( t ) * internal::hash_multiplier; -} -template -inline size_t tbb_hasher( P* ptr ) { - size_t const h = reinterpret_cast( ptr ); - return (h >> 3) ^ h; -} -template -inline size_t tbb_hasher( const std::basic_string& s ) { - size_t h = 0; - for( const E* c = s.c_str(); *c; ++c ) - h = static_cast(*c) ^ (h * internal::hash_multiplier); - return h; -} -template -inline size_t tbb_hasher( const std::pair& p ) { - return tbb_hasher(p.first) ^ tbb_hasher(p.second); -} -} // namespace interface5 -using interface5::tbb_hasher; - - -// Template class for hash compare -template -class tbb_hash -{ -public: - tbb_hash() {} - - size_t operator()(const Key& key) const - { - return tbb_hasher(key); - } -}; - -} // namespace tbb -#endif// __TBB__concurrent_unordered_impl_H diff --git a/inst/include/tbb/internal/_flow_graph_impl.h b/inst/include/tbb/internal/_flow_graph_impl.h deleted file mode 100644 index 97da56df7..000000000 --- a/inst/include/tbb/internal/_flow_graph_impl.h +++ /dev/null @@ -1,757 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_impl_H -#define __TBB__flow_graph_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. 
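// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff; standalone sketch] The
// hash_multiplier defined above is the golden-ratio (Fibonacci) constant,
// floor(2^64/phi) on 64-bit targets; tbb_hasher() multiplies by it to spread
// consecutive keys, and folds strings one character at a time:
#include <cstdint>
#include <cstdio>
#include <string>

static const uint64_t kMultiplier = 11400714819323198485ULL;  // from the deleted code

static uint64_t hash_integral(uint64_t v) { return v * kMultiplier; }

static uint64_t hash_string(const std::string& s) {
    uint64_t h = 0;
    for (std::string::size_type i = 0; i < s.size(); ++i)
        h = uint64_t((unsigned char) s[i]) ^ (h * kMultiplier);
    return h;
}

int main() {
    std::printf("%llx\n%llx\n",
                (unsigned long long) hash_integral(1),
                (unsigned long long) hash_string("tbb"));
    return 0;
}
// ---------------------------------------------------------------------------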
-#endif - -namespace internal { - - namespace graph_policy_namespace { - enum graph_buffer_policy { rejecting, reserving, queueing, tag_matching }; - } - -// -------------- function_body containers ---------------------- - - //! A functor that takes no input and generates a value of type Output - template< typename Output > - class source_body : tbb::internal::no_assign { - public: - virtual ~source_body() {} - virtual bool operator()(Output &output) = 0; - virtual source_body* clone() = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void reset_body() = 0; -#endif - }; - - //! The leaf for source_body - template< typename Output, typename Body> - class source_body_leaf : public source_body { - public: - source_body_leaf( const Body &_body ) : body(_body), init_body(_body) { } - /*override*/ bool operator()(Output &output) { return body( output ); } - /*override*/ source_body_leaf* clone() { - return new source_body_leaf< Output, Body >(init_body); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - Body get_body() { return body; } - private: - Body body; - Body init_body; - }; - - //! A functor that takes an Input and generates an Output - template< typename Input, typename Output > - class function_body : tbb::internal::no_assign { - public: - virtual ~function_body() {} - virtual Output operator()(const Input &input) = 0; - virtual function_body* clone() = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void reset_body() = 0; -#endif - }; - - //! the leaf for function_body - template - class function_body_leaf : public function_body< Input, Output > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - Output operator()(const Input &i) { return body(i); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< Input, Output, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! the leaf for function_body specialized for Input and output of continue_msg - template - class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - continue_msg operator()( const continue_msg &i ) { - body(i); - return i; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< continue_msg, continue_msg, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! the leaf for function_body specialized for Output of continue_msg - template - class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - continue_msg operator()(const Input &i) { - body(i); - return continue_msg(); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< Input, continue_msg, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! 
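// ---------------------------------------------------------------------------
// [Editorial aside, not part of the diff] The function_body containers above
// type-erase user callables for the flow graph; note the continue_msg
// specializations let a body return void. A minimal pipeline, assuming the
// classic TBB 4.x tbb::flow API (tbb/flow_graph.h):
#include <tbb/flow_graph.h>
#include <iostream>

int main() {
    tbb::flow::graph g;

    tbb::flow::function_node<int, int> square(
        g, tbb::flow::unlimited, [](int v) { return v * v; });

    // Output type defaults to continue_msg, so a void-returning body is
    // wrapped by the function_body_leaf<Input, continue_msg, B> leaf above.
    tbb::flow::function_node<int> print(
        g, 1, [](int v) { std::cout << v << '\n'; });

    tbb::flow::make_edge(square, print);
    for (int i = 0; i < 4; ++i)
        square.try_put(i);
    g.wait_for_all();   // prints 0 1 4 9 (line order may vary)
    return 0;
}
// ---------------------------------------------------------------------------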
the leaf for function_body specialized for Input of continue_msg - template - class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - Output operator()(const continue_msg &i) { - return body(i); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< continue_msg, Output, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! function_body that takes an Input and a set of output ports - template - class multifunction_body : tbb::internal::no_assign { - public: - virtual ~multifunction_body () {} - virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0; - virtual multifunction_body* clone() = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void reset_body() = 0; -#endif - }; - - //! leaf for multifunction. OutputSet can be a std::tuple or a vector. - template - class multifunction_body_leaf : public multifunction_body { - public: - multifunction_body_leaf(const B &_body) : body(_body), init_body(_body) { } - void operator()(const Input &input, OutputSet &oset) { - body(input, oset); // body may explicitly put() to one or more of oset. - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ multifunction_body_leaf* clone() { - return new multifunction_body_leaf(init_body); - } - private: - B body; - B init_body; - }; - -// --------------------------- end of function_body containers ------------------------ - -// --------------------------- node task bodies --------------------------------------- - - //! A task that calls a node's forward_task function - template< typename NodeType > - class forward_task_bypass : public task { - - NodeType &my_node; - - public: - - forward_task_bypass( NodeType &n ) : my_node(n) {} - - task *execute() { - task * new_task = my_node.forward_task(); - if (new_task == SUCCESSFULLY_ENQUEUED) new_task = NULL; - return new_task; - } - }; - - //! A task that calls a node's apply_body_bypass function, passing in an input of type Input - // return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return NULL - template< typename NodeType, typename Input > - class apply_body_task_bypass : public task { - - NodeType &my_node; - Input my_input; - - public: - - apply_body_task_bypass( NodeType &n, const Input &i ) : my_node(n), my_input(i) {} - - task *execute() { - task * next_task = my_node.apply_body_bypass( my_input ); - if(next_task == SUCCESSFULLY_ENQUEUED) next_task = NULL; - return next_task; - } - }; - - //! A task that calls a node's apply_body function with no input - template< typename NodeType > - class source_task_bypass : public task { - - NodeType &my_node; - - public: - - source_task_bypass( NodeType &n ) : my_node(n) {} - - task *execute() { - task *new_task = my_node.apply_body_bypass( ); - if(new_task == SUCCESSFULLY_ENQUEUED) return NULL; - return new_task; - } - }; - -// ------------------------ end of node task bodies ----------------------------------- - - //! An empty functor that takes an Input and returns a default constructed Output - template< typename Input, typename Output > - struct empty_body { - Output operator()( const Input & ) const { return Output(); } - }; - - //! 
A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock. - template< typename T, typename M=spin_mutex > - class node_cache { - public: - - typedef size_t size_type; - - bool empty() { - typename my_mutex_type::scoped_lock lock( my_mutex ); - return internal_empty(); - } - - void add( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - internal_push(n); - } - - void remove( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - for ( size_t i = internal_size(); i != 0; --i ) { - T &s = internal_pop(); - if ( &s == &n ) return; // only remove one predecessor per request - internal_push(s); - } - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - void internal_add_built_predecessor( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.add_edge(n); - } - - void internal_delete_built_predecessor( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.delete_edge(n); - } - - void copy_predecessors( predecessor_vector_type &v) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.copy_edges(v); - } - - size_t predecessor_count() { - typename my_mutex_type::scoped_lock lock(my_mutex); - return (size_t)(my_built_predecessors.edge_count()); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - protected: - - typedef M my_mutex_type; - my_mutex_type my_mutex; - std::queue< T * > my_q; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - - // Assumes lock is held - inline bool internal_empty( ) { - return my_q.empty(); - } - - // Assumes lock is held - inline size_type internal_size( ) { - return my_q.size(); - } - - // Assumes lock is held - inline void internal_push( T &n ) { - my_q.push(&n); - } - - // Assumes lock is held - inline T &internal_pop() { - T *v = my_q.front(); - my_q.pop(); - return *v; - } - - }; - - //! A cache of predecessors that only supports try_get - template< typename T, typename M=spin_mutex > - class predecessor_cache : public node_cache< sender, M > { - public: - typedef M my_mutex_type; - typedef T output_type; - typedef sender predecessor_type; - typedef receiver successor_type; - - predecessor_cache( ) : my_owner( NULL ) { } - - void set_owner( successor_type *owner ) { my_owner = owner; } - - bool get_item( output_type &v ) { - - bool msg = false; - - do { - predecessor_type *src; - { - typename my_mutex_type::scoped_lock lock(this->my_mutex); - if ( this->internal_empty() ) { - break; - } - src = &this->internal_pop(); - } - - // Try to get from this sender - msg = src->try_get( v ); - - if (msg == false) { - // Relinquish ownership of the edge - if ( my_owner) - src->register_successor( *my_owner ); - } else { - // Retain ownership of the edge - this->add(*src); - } - } while ( msg == false ); - return msg; - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - if(my_owner) { - for(;;) { - predecessor_type *src; - { - if(this->internal_empty()) break; - src = &this->internal_pop(); - } - src->register_successor( *my_owner); - } - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if (f&rf_extract && my_owner) - my_built_predecessors.receiver_extract(*my_owner); - __TBB_ASSERT(!(f&rf_extract) || this->internal_empty(), "predecessor cache not empty"); -#endif - } - - protected: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - using node_cache< sender, M >::my_built_predecessors; -#endif - successor_type *my_owner; - }; - - //! 
An cache of predecessors that supports requests and reservations - template< typename T, typename M=spin_mutex > - class reservable_predecessor_cache : public predecessor_cache< T, M > { - public: - typedef M my_mutex_type; - typedef T output_type; - typedef sender predecessor_type; - typedef receiver successor_type; - - reservable_predecessor_cache( ) : reserved_src(NULL) { } - - bool - try_reserve( output_type &v ) { - bool msg = false; - - do { - { - typename my_mutex_type::scoped_lock lock(this->my_mutex); - if ( reserved_src || this->internal_empty() ) - return false; - - reserved_src = &this->internal_pop(); - } - - // Try to get from this sender - msg = reserved_src->try_reserve( v ); - - if (msg == false) { - typename my_mutex_type::scoped_lock lock(this->my_mutex); - // Relinquish ownership of the edge - reserved_src->register_successor( *this->my_owner ); - reserved_src = NULL; - } else { - // Retain ownership of the edge - this->add( *reserved_src ); - } - } while ( msg == false ); - - return msg; - } - - bool - try_release( ) { - reserved_src->try_release( ); - reserved_src = NULL; - return true; - } - - bool - try_consume( ) { - reserved_src->try_consume( ); - reserved_src = NULL; - return true; - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - reserved_src = NULL; - predecessor_cache::reset(__TBB_PFG_RESET_ARG(f)); - } - - private: - predecessor_type *reserved_src; - }; - - - //! An abstract cache of successors - template - class successor_cache : tbb::internal::no_copy { - protected: - - typedef M my_mutex_type; - my_mutex_type my_mutex; - - typedef receiver *pointer_type; - typedef std::list< pointer_type > my_successors_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_successors; -#endif - my_successors_type my_successors; - - sender *my_owner; - - public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; - void internal_add_built_successor( receiver &r) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.add_edge( r ); - } - - void internal_delete_built_successor( receiver &r) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.delete_edge(r); - } - - void copy_successors( successor_vector_type &v) { - typename my_mutex_type::scoped_lock l(my_mutex, false); - my_built_successors.copy_edges(v); - } - - size_t successor_count() { - typename my_mutex_type::scoped_lock l(my_mutex,false); - return my_built_successors.edge_count(); - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - if (f&rf_extract && my_owner) - my_built_successors.sender_extract(*my_owner); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - successor_cache( ) : my_owner(NULL) {} - - void set_owner( sender *owner ) { my_owner = owner; } - - virtual ~successor_cache() {} - - void register_successor( receiver &r ) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - my_successors.push_back( &r ); - } - - void remove_successor( receiver &r ) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - for ( typename my_successors_type::iterator i = my_successors.begin(); - i != my_successors.end(); ++i ) { - if ( *i == & r ) { - my_successors.erase(i); - break; - } - } - } - - bool empty() { - typename my_mutex_type::scoped_lock l(my_mutex, false); - return my_successors.empty(); - } - - void clear() { - my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_successors.clear(); -#endif - } - - virtual task * try_put_task( const T &t ) = 0; - }; - - //! 
An abstract cache of successors, specialized to continue_msg - template<> - class successor_cache< continue_msg > : tbb::internal::no_copy { - protected: - - typedef spin_rw_mutex my_mutex_type; - my_mutex_type my_mutex; - - typedef receiver *pointer_type; - typedef std::list< pointer_type > my_successors_type; - my_successors_type my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_successors; -#endif - - sender *my_owner; - - public: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; - void internal_add_built_successor( receiver &r) { - my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.add_edge( r ); - } - - void internal_delete_built_successor( receiver &r) { - my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.delete_edge(r); - } - - void copy_successors( successor_vector_type &v) { - my_mutex_type::scoped_lock l(my_mutex, false); - my_built_successors.copy_edges(v); - } - - size_t successor_count() { - my_mutex_type::scoped_lock l(my_mutex,false); - return my_built_successors.edge_count(); - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - if (f&rf_extract && my_owner) - my_built_successors.sender_extract(*my_owner); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - successor_cache( ) : my_owner(NULL) {} - - void set_owner( sender *owner ) { my_owner = owner; } - - virtual ~successor_cache() {} - - void register_successor( receiver &r ) { - my_mutex_type::scoped_lock l(my_mutex, true); - my_successors.push_back( &r ); - if ( my_owner && r.is_continue_receiver() ) { - r.register_predecessor( *my_owner ); - } - } - - void remove_successor( receiver &r ) { - my_mutex_type::scoped_lock l(my_mutex, true); - for ( my_successors_type::iterator i = my_successors.begin(); - i != my_successors.end(); ++i ) { - if ( *i == & r ) { - // TODO: Check if we need to test for continue_receiver before - // removing from r. - if ( my_owner ) - r.remove_predecessor( *my_owner ); - my_successors.erase(i); - break; - } - } - } - - bool empty() { - my_mutex_type::scoped_lock l(my_mutex, false); - return my_successors.empty(); - } - - void clear() { - my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_successors.clear(); -#endif - } - - virtual task * try_put_task( const continue_msg &t ) = 0; - - }; - - //! A cache of successors that are broadcast to - template - class broadcast_cache : public successor_cache { - typedef M my_mutex_type; - typedef std::list< receiver * > my_successors_type; - - public: - - broadcast_cache( ) {} - - // as above, but call try_put_task instead, and return the last task we received (if any) - /*override*/ task * try_put_task( const T &t ) { - task * last_task = NULL; - bool upgraded = true; - typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded); - typename my_successors_type::iterator i = this->my_successors.begin(); - while ( i != this->my_successors.end() ) { - task *new_task = (*i)->try_put_task(t); - last_task = combine_tasks(last_task, new_task); // enqueue if necessary - if(new_task) { - ++i; - } - else { // failed - if ( (*i)->register_predecessor(*this->my_owner) ) { - if (!upgraded) { - l.upgrade_to_writer(); - upgraded = true; - } - i = this->my_successors.erase(i); - } else { - ++i; - } - } - } - return last_task; - } - - }; - - //! 
A cache of successors that are put in a round-robin fashion - template - class round_robin_cache : public successor_cache { - typedef size_t size_type; - typedef M my_mutex_type; - typedef std::list< receiver * > my_successors_type; - - public: - - round_robin_cache( ) {} - - size_type size() { - typename my_mutex_type::scoped_lock l(this->my_mutex, false); - return this->my_successors.size(); - } - - /*override*/task *try_put_task( const T &t ) { - bool upgraded = true; - typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded); - typename my_successors_type::iterator i = this->my_successors.begin(); - while ( i != this->my_successors.end() ) { - task *new_task = (*i)->try_put_task(t); - if ( new_task ) { - return new_task; - } else { - if ( (*i)->register_predecessor(*this->my_owner) ) { - if (!upgraded) { - l.upgrade_to_writer(); - upgraded = true; - } - i = this->my_successors.erase(i); - } - else { - ++i; - } - } - } - return NULL; - } - }; - - template - class decrementer : public continue_receiver, tbb::internal::no_copy { - - T *my_node; - - task *execute() { - return my_node->decrement_counter(); - } - - public: - - typedef continue_msg input_type; - typedef continue_msg output_type; - decrementer( int number_of_predecessors = 0 ) : continue_receiver( number_of_predecessors ) { } - void set_owner( T *node ) { my_node = node; } - }; - -} - -#endif // __TBB__flow_graph_impl_H - diff --git a/inst/include/tbb/internal/_flow_graph_indexer_impl.h b/inst/include/tbb/internal/_flow_graph_indexer_impl.h deleted file mode 100644 index 947e1d414..000000000 --- a/inst/include/tbb/internal/_flow_graph_indexer_impl.h +++ /dev/null @@ -1,453 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_indexer_impl_H -#define __TBB__flow_graph_indexer_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "tbb/internal/_flow_graph_types_impl.h" - -namespace internal { - - // Output of the indexer_node is a tbb::flow::tagged_msg, and will be of - // the form tagged_msg - // where the value of tag will indicate which result was put to the - // successor. 
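An editorial aside, not part of the patch: the comment above describes the indexer node's output type. The sketch below illustrates the "tagged message" idea with std::variant as a stand-in — this is not TBB's tagged_msg class, just the same shape — where the index of the active alternative plays the role of the tag:

    #include <iostream>
    #include <variant>

    // A two-port "indexer" output: index 0 means the value arrived on the
    // int port, index 1 means it arrived on the double port.
    using tagged = std::variant<int, double>;

    static void successor(const tagged& msg) {
        // The tag (the active index) tells the successor which port put it.
        if (msg.index() == 0)
            std::cout << "port 0 put " << std::get<int>(msg) << '\n';
        else
            std::cout << "port 1 put " << std::get<double>(msg) << '\n';
    }

    int main() {
        successor(tagged(42));    // came in on port 0
        successor(tagged(3.14));  // came in on port 1
        return 0;
    }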
- - template - task* do_try_put(const T &v, void *p) { - typename IndexerNodeBaseType::output_type o(K, v); - return reinterpret_cast(p)->try_put_task(&o); - } - - template - struct indexer_helper { - template - static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) { - typedef typename tuple_element::type T; - task *(*indexer_node_put_task)(const T&, void *) = do_try_put; - tbb::flow::get(my_input).set_up(p, indexer_node_put_task); - indexer_helper::template set_indexer_node_pointer(my_input, p); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - template - static inline void reset_inputs(InputTuple &my_input, reset_flags f) { - join_helper::reset_inputs(my_input, f); - tbb::flow::get(my_input).reset_receiver(f); - } -#endif - }; - - template - struct indexer_helper { - template - static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) { - typedef typename tuple_element<0, TupleTypes>::type T; - task *(*indexer_node_put_task)(const T&, void *) = do_try_put; - tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - template - static inline void reset_inputs(InputTuple &my_input, reset_flags f) { - tbb::flow::get<0>(my_input).reset_receiver(f); - } -#endif - }; - - template - class indexer_input_port : public receiver { - private: - void* my_indexer_ptr; - typedef task* (* forward_function_ptr)(T const &, void* ); - forward_function_ptr my_try_put_task; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - spin_mutex my_pred_mutex; - edge_container > my_built_predecessors; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - indexer_input_port() : my_pred_mutex() {} - indexer_input_port( const indexer_input_port & /*other*/ ) : receiver(), my_pred_mutex() { - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - void set_up(void *p, forward_function_ptr f) { - my_indexer_ptr = p; - my_try_put_task = f; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector *> predecessor_vector_type; - /*override*/size_t predecessor_count() { - spin_mutex::scoped_lock l(my_pred_mutex); - return my_built_predecessors.edge_count(); - } - /*override*/void internal_add_built_predecessor(sender &p) { - spin_mutex::scoped_lock l(my_pred_mutex); - my_built_predecessors.add_edge(p); - } - /*override*/void internal_delete_built_predecessor(sender &p) { - spin_mutex::scoped_lock l(my_pred_mutex); - my_built_predecessors.delete_edge(p); - } - /*override*/void copy_predecessors( predecessor_vector_type &v) { - spin_mutex::scoped_lock l(my_pred_mutex); - return my_built_predecessors.copy_edges(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - task *try_put_task(const T &v) { - return my_try_put_task(v, my_indexer_ptr); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - public: - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { - if(f&rf_extract) my_built_predecessors.receiver_extract(*this); - } -#else - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { } -#endif - - }; - - template - class indexer_node_FE { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputType output_type; - typedef InputTuple input_type; - - input_type &input_ports() { return my_inputs; } - protected: - input_type my_inputs; - }; - - //! 
indexer_node_base - template - class indexer_node_base : public graph_node, public indexer_node_FE, - public sender { - protected: - using graph_node::my_graph; - public: - static const size_t N = tbb::flow::tuple_size::value; - typedef OutputType output_type; - typedef StructTypes tuple_types; - typedef receiver successor_type; - typedef indexer_node_FE input_ports_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; -#endif - - private: - // ----------- Aggregator ------------ - enum op_type { reg_succ, rem_succ, try__put_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_succ, del_blt_succ, - blt_succ_cnt, blt_succ_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef indexer_node_base my_class; - - class indexer_node_base_operation : public aggregated_operation { - public: - char type; - union { - output_type const *my_arg; - successor_type *my_succ; - task *bypass_t; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - successor_vector_type *succv; -#endif - }; - indexer_node_base_operation(const output_type* e, op_type t) : - type(char(t)), my_arg(e) {} - indexer_node_base_operation(const successor_type &s, op_type t) : type(char(t)), - my_succ(const_cast(&s)) {} - indexer_node_base_operation(op_type t) : type(char(t)) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - void handle_operations(indexer_node_base_operation* op_list) { - indexer_node_base_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - - case reg_succ: - my_successors.register_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - - case rem_succ: - my_successors.remove_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case try__put_task: { - current->bypass_t = my_successors.try_put_task(*(current->my_arg)); - __TBB_store_with_release(current->status, SUCCEEDED); // return of try_put_task actual return value - } - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_succ: - my_successors.internal_add_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_succ: - my_successors.internal_delete_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cnt: - current->cnt_val = my_successors.successor_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cpy: - my_successors.copy_successors(*(current->succv)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - // ---------- end aggregator ----------- - public: - indexer_node_base(graph& g) : graph_node(g), input_ports_type() { - indexer_helper::set_indexer_node_pointer(this->my_inputs, this); - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - indexer_node_base(const indexer_node_base& other) : graph_node(other.my_graph), input_ports_type(), sender() { - indexer_helper::set_indexer_node_pointer(this->my_inputs, this); - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - bool register_successor(successor_type &r) { - indexer_node_base_operation op_data(r, reg_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - bool 
remove_successor( successor_type &r) { - indexer_node_base_operation op_data(r, rem_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - task * try_put_task(output_type const *v) { - indexer_node_base_operation op_data(v, try__put_task); - my_aggregator.execute(&op_data); - return op_data.bypass_t; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void internal_add_built_successor( successor_type &r) { - indexer_node_base_operation op_data(r, add_blt_succ); - my_aggregator.execute(&op_data); - } - - void internal_delete_built_successor( successor_type &r) { - indexer_node_base_operation op_data(r, del_blt_succ); - my_aggregator.execute(&op_data); - } - - size_t successor_count() { - indexer_node_base_operation op_data(blt_succ_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - void copy_successors( successor_vector_type &v) { - indexer_node_base_operation op_data(blt_succ_cpy); - op_data.succv = &v; - my_aggregator.execute(&op_data); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - protected: - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - indexer_helper::reset_inputs(this->my_inputs, f); -#endif - } - - private: - broadcast_cache my_successors; - }; //indexer_node_base - - - template struct input_types; - - template - struct input_types<1, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<2, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<3, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<4, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<5, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<6, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<7, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - 
typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename internal::tagged_msg type; - }; - - - template - struct input_types<8, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename tuple_element<7, InputTuple>::type eighth_type; - typedef typename internal::tagged_msg type; - }; - - - template - struct input_types<9, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename tuple_element<7, InputTuple>::type eighth_type; - typedef typename tuple_element<8, InputTuple>::type nineth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<10, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename tuple_element<7, InputTuple>::type eighth_type; - typedef typename tuple_element<8, InputTuple>::type nineth_type; - typedef typename tuple_element<9, InputTuple>::type tenth_type; - typedef typename internal::tagged_msg type; - }; - - // type generators - template - struct indexer_types : public input_types::value, OutputTuple> { - static const int N = tbb::flow::tuple_size::value; - typedef typename input_types::type output_type; - typedef typename wrap_tuple_elements::type input_ports_type; - typedef internal::indexer_node_FE indexer_FE_type; - typedef internal::indexer_node_base indexer_base_type; - }; - - template - class unfolded_indexer_node : public indexer_types::indexer_base_type { - public: - typedef typename indexer_types::input_ports_type input_ports_type; - typedef OutputTuple tuple_types; - typedef typename indexer_types::output_type output_type; - private: - typedef typename indexer_types::indexer_base_type base_type; - public: - unfolded_indexer_node(graph& g) : base_type(g) {} - unfolded_indexer_node(const unfolded_indexer_node &other) : base_type(other) {} - }; - -} /* namespace internal */ - -#endif /* __TBB__flow_graph_indexer_impl_H */ diff --git a/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h 
b/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h deleted file mode 100644 index f6c0a820e..000000000 --- a/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h +++ /dev/null @@ -1,279 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_item_buffer_impl_H -#define __TBB__flow_graph_item_buffer_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "tbb/internal/_flow_graph_types_impl.h" // for aligned_pair - -// in namespace tbb::flow::interface7 (included in _flow_graph_node_impl.h) - - //! Expandable buffer of items. The possible operations are push, pop, - //* tests for empty and so forth. No mutual exclusion is built in. - //* objects are constructed into and explicitly-destroyed. get_my_item gives - // a read-only reference to the item in the buffer. set_my_item may be called - // with either an empty or occupied slot. 
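An editorial aside, not part of the patch: the item_buffer removed below keeps my_head and my_tail as monotonically increasing counters and masks them into a power-of-two array with i & (my_array_size - 1). A minimal sketch of that indexing scheme, simplified relative to the original (slots are default-constructed rather than placement-constructed into raw storage, and there are no per-slot state flags):

    #include <cassert>
    #include <cstddef>

    template <typename T, std::size_t Capacity = 4> // Capacity: power of two
    struct ring_sketch {
        static_assert((Capacity & (Capacity - 1)) == 0, "power of two");

        T data[Capacity];
        std::size_t head = 0, tail = 0; // grow forever; masked on access

        bool empty() const { return head == tail; }
        bool full()  const { return tail - head == Capacity; }

        void push_back(const T& v) { assert(!full());  data[tail++ & (Capacity - 1)] = v; }
        T    pop_front()           { assert(!empty()); return data[head++ & (Capacity - 1)]; }
    };

    int main() {
        ring_sketch<int> r;
        for (int i = 0; i < 4; ++i) r.push_back(i);
        assert(r.pop_front() == 0);
        assert(r.pop_front() == 1);
        r.push_back(4); // head has advanced, so there is room again
        assert(!r.empty());
        return 0;
    }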
- - using internal::aligned_pair; - using internal::alignment_of; - -namespace internal { - - template > - class item_buffer { - public: - typedef T item_type; - enum buffer_item_state { no_item=0, has_item=1, reserved_item=2 }; - protected: - typedef size_t size_type; - typedef typename aligned_pair::type buffer_item_type; - typedef typename A::template rebind::other allocator_type; - - buffer_item_type *my_array; - size_type my_array_size; - static const size_type initial_buffer_size = 4; - size_type my_head; - size_type my_tail; - - bool buffer_empty() { return my_head == my_tail; } - - buffer_item_type &item(size_type i) { - __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].second))%alignment_of::value),NULL); - __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].first))%alignment_of::value), NULL); - return my_array[i & (my_array_size - 1) ]; - } - - bool my_item_valid(size_type i) { return item(i).second != no_item; } - bool my_item_reserved(size_type i) { return item(i).second == reserved_item; } - - // object management in buffer - const item_type &get_my_item(size_t i) { - __TBB_ASSERT(my_item_valid(i),"attempt to get invalid item"); - item_type *itm = (tbb::internal::punned_cast(&(item(i).first))); - return *(const item_type *)itm; - } - - // may be called with an empty slot or a slot that has already been constructed into. - void set_my_item(size_t i, const item_type &o) { - if(item(i).second != no_item) { - destroy_item(i); - } - new(&(item(i).first)) item_type(o); - item(i).second = has_item; - } - - // destructively-fetch an object from the buffer - void fetch_item(size_t i, item_type &o) { - __TBB_ASSERT(my_item_valid(i), "Trying to fetch an empty slot"); - o = get_my_item(i); // could have std::move assign semantics - destroy_item(i); - } - - // move an existing item from one slot to another. The moved-to slot must be unoccupied, - // the moved-from slot must exist and not be reserved. The after, from will be empty, - // to will be occupied but not reserved - void move_item(size_t to, size_t from) { - __TBB_ASSERT(!my_item_valid(to), "Trying to move to a non-empty slot"); - __TBB_ASSERT(my_item_valid(from), "Trying to move from an empty slot"); - set_my_item(to, get_my_item(from)); // could have std::move semantics - destroy_item(from); - - } - - // put an item in an empty slot. Return true if successful, else false - bool place_item(size_t here, const item_type &me) { -#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES - if(my_item_valid(here)) return false; -#endif - set_my_item(here, me); - return true; - } - - // could be implemented with std::move semantics - void swap_items(size_t i, size_t j) { - __TBB_ASSERT(my_item_valid(i) && my_item_valid(j), "attempt to swap invalid item(s)"); - item_type temp = get_my_item(i); - set_my_item(i, get_my_item(j)); - set_my_item(j, temp); - } - - void destroy_item(size_type i) { - __TBB_ASSERT(my_item_valid(i), "destruction of invalid item"); - (tbb::internal::punned_cast(&(item(i).first)))->~item_type(); - item(i).second = no_item; - } - - // returns a copy of the front - void copy_front(item_type &v) { - __TBB_ASSERT(my_item_valid(my_head), "attempt to fetch head non-item"); - v = get_my_item(my_head); - } - // returns a copy of the back - void copy_back(item_type &v) { - __TBB_ASSERT(my_item_valid(my_tail-1), "attempt to fetch head non-item"); - v = get_my_item(my_tail-1); - } - - // following methods are for reservation of the front of a bufffer. 
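    // [Editorial note, not part of the patch: reserve_item()/release_item()
    // below implement a two-phase claim on a slot. A consumer first flips
    // the slot's state from has_item to reserved_item so no other consumer
    // can take it, then later either commits (consume_front(), which
    // destroys the slot) or backs out (release_front(), which restores
    // has_item). This is what lets a join port tentatively claim an input
    // and hand it back if a sibling port cannot supply a matching item.]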
- void reserve_item(size_type i) { __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), "item cannot be reserved"); item(i).second = reserved_item; } - void release_item(size_type i) { __TBB_ASSERT(my_item_reserved(i), "item is not reserved"); item(i).second = has_item; } - - void destroy_front() { destroy_item(my_head); ++my_head; } - void destroy_back() { destroy_item(my_tail-1); --my_tail; } - - // we have to be able to test against a new tail value without changing my_tail - // grow_array doesn't work if we change my_tail when the old array is too small - size_type size(size_t new_tail = 0) { return (new_tail ? new_tail : my_tail) - my_head; } - size_type capacity() { return my_array_size; } - // sequencer_node does not use this method, so we don't - // need a version that passes in the new_tail value. - bool buffer_full() { return size() >= capacity(); } - - //! Grows the internal array. - void grow_my_array( size_t minimum_size ) { - // test that we haven't made the structure inconsistent. - __TBB_ASSERT(capacity() >= my_tail - my_head, "total items exceed capacity"); - size_type new_size = my_array_size ? 2*my_array_size : initial_buffer_size; - while( new_size > - class reservable_item_buffer : public item_buffer { - protected: - using item_buffer::my_item_valid; - using item_buffer::my_head; - - public: - reservable_item_buffer() : item_buffer(), my_reserved(false) {} - void reset() {my_reserved = false; item_buffer::reset(); } - protected: - - bool reserve_front(T &v) { - if(my_reserved || !my_item_valid(my_head)) return false; - my_reserved = true; - // reserving the head - this->copy_front(v); - this->reserve_item(this->my_head); - return true; - } - - void consume_front() { - __TBB_ASSERT(my_reserved, "Attempt to consume a non-reserved item"); - this->destroy_front(); - my_reserved = false; - } - - void release_front() { - __TBB_ASSERT(my_reserved, "Attempt to release a non-reserved item"); - this->release_item(this->my_head); - my_reserved = false; - } - - bool my_reserved; - }; - -} // namespace internal - -#endif // __TBB__flow_graph_item_buffer_impl_H diff --git a/inst/include/tbb/internal/_flow_graph_join_impl.h b/inst/include/tbb/internal/_flow_graph_join_impl.h deleted file mode 100644 index 4ef893ef9..000000000 --- a/inst/include/tbb/internal/_flow_graph_join_impl.h +++ /dev/null @@ -1,1739 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_join_impl_H -#define __TBB__flow_graph_join_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "_flow_graph_types_impl.h" - -namespace internal { - - typedef size_t tag_value; - static const tag_value NO_TAG = tag_value(-1); - - struct forwarding_base { - forwarding_base(graph &g) : my_graph_ptr(&g), current_tag(NO_TAG) {} - virtual ~forwarding_base() {} - // decrement_port_count may create a forwarding task. If we cannot handle the task - // ourselves, ask decrement_port_count to deal with it. - virtual task * decrement_port_count(bool handle_task) = 0; - virtual void increment_port_count() = 0; - virtual task * increment_tag_count(tag_value /*t*/, bool /*handle_task*/) {return NULL;} - // moved here so input ports can queue tasks - graph* my_graph_ptr; - tag_value current_tag; // so ports can refer to FE's desired items - }; - - template< int N > - struct join_helper { - - template< typename TupleType, typename PortType > - static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { - tbb::flow::get( my_input ).set_join_node_pointer(port); - join_helper::set_join_node_pointer( my_input, port ); - } - template< typename TupleType > - static inline void consume_reservations( TupleType &my_input ) { - tbb::flow::get( my_input ).consume(); - join_helper::consume_reservations( my_input ); - } - - template< typename TupleType > - static inline void release_my_reservation( TupleType &my_input ) { - tbb::flow::get( my_input ).release(); - } - - template - static inline void release_reservations( TupleType &my_input) { - join_helper::release_reservations(my_input); - release_my_reservation(my_input); - } - - template< typename InputTuple, typename OutputTuple > - static inline bool reserve( InputTuple &my_input, OutputTuple &out) { - if ( !tbb::flow::get( my_input ).reserve( tbb::flow::get( out ) ) ) return false; - if ( !join_helper::reserve( my_input, out ) ) { - release_my_reservation( my_input ); - return false; - } - return true; - } - - template - static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { - bool res = tbb::flow::get(my_input).get_item(tbb::flow::get(out) ); // may fail - return join_helper::get_my_item(my_input, out) && res; // do get on other inputs before returning - } - - template - static inline bool get_items(InputTuple &my_input, OutputTuple &out) { - return get_my_item(my_input, out); - } - - template - static inline void reset_my_port(InputTuple &my_input) { - join_helper::reset_my_port(my_input); - tbb::flow::get(my_input).reset_port(); - } - - template - static inline void reset_ports(InputTuple& my_input) { - reset_my_port(my_input); - } - - template - static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) { - tbb::flow::get(my_input).set_my_original_tag_func(tbb::flow::get(my_tag_funcs)); - tbb::flow::get(my_input).set_my_tag_func(tbb::flow::get(my_input).my_original_func()->clone()); - tbb::flow::get(my_tag_funcs) = NULL; - join_helper::set_tag_func(my_input, my_tag_funcs); - } - - template< typename TagFuncTuple1, typename TagFuncTuple2> - static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { - if(tbb::flow::get(other_inputs).my_original_func()) { - 
tbb::flow::get(my_inputs).set_my_tag_func(tbb::flow::get(other_inputs).my_original_func()->clone()); - tbb::flow::get(my_inputs).set_my_original_tag_func(tbb::flow::get(other_inputs).my_original_func()->clone()); - } - join_helper::copy_tag_functors(my_inputs, other_inputs); - } - - template - static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) { - join_helper::reset_inputs(my_input __TBB_PFG_RESET_ARG(__TBB_COMMA f)); - tbb::flow::get(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f)); - } - }; - - template< > - struct join_helper<1> { - - template< typename TupleType, typename PortType > - static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { - tbb::flow::get<0>( my_input ).set_join_node_pointer(port); - } - - template< typename TupleType > - static inline void consume_reservations( TupleType &my_input ) { - tbb::flow::get<0>( my_input ).consume(); - } - - template< typename TupleType > - static inline void release_my_reservation( TupleType &my_input ) { - tbb::flow::get<0>( my_input ).release(); - } - - template - static inline void release_reservations( TupleType &my_input) { - release_my_reservation(my_input); - } - - template< typename InputTuple, typename OutputTuple > - static inline bool reserve( InputTuple &my_input, OutputTuple &out) { - return tbb::flow::get<0>( my_input ).reserve( tbb::flow::get<0>( out ) ); - } - - template - static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { - return tbb::flow::get<0>(my_input).get_item(tbb::flow::get<0>(out)); - } - - template - static inline bool get_items(InputTuple &my_input, OutputTuple &out) { - return get_my_item(my_input, out); - } - - template - static inline void reset_my_port(InputTuple &my_input) { - tbb::flow::get<0>(my_input).reset_port(); - } - - template - static inline void reset_ports(InputTuple& my_input) { - reset_my_port(my_input); - } - - template - static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) { - tbb::flow::get<0>(my_input).set_my_original_tag_func(tbb::flow::get<0>(my_tag_funcs)); - tbb::flow::get<0>(my_input).set_my_tag_func(tbb::flow::get<0>(my_input).my_original_func()->clone()); - tbb::flow::get<0>(my_tag_funcs) = NULL; - } - - template< typename TagFuncTuple1, typename TagFuncTuple2> - static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { - if(tbb::flow::get<0>(other_inputs).my_original_func()) { - tbb::flow::get<0>(my_inputs).set_my_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone()); - tbb::flow::get<0>(my_inputs).set_my_original_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone()); - } - } - template - static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) { - tbb::flow::get<0>(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f)); - } - }; - - //! 
The two-phase join port - template< typename T > - class reserving_port : public receiver { - public: - typedef T input_type; - typedef sender predecessor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - private: - // ----------- Aggregator ------------ - enum op_type { reg_pred, rem_pred, res_item, rel_res, con_res -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef reserving_port my_class; - - class reserving_port_operation : public aggregated_operation { - public: - char type; - union { - T *my_arg; - predecessor_type *my_pred; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - predecessor_vector_type *pvec; -#endif - }; - reserving_port_operation(const T& e, op_type t) : - type(char(t)), my_arg(const_cast(&e)) {} - reserving_port_operation(const predecessor_type &s, op_type t) : type(char(t)), - my_pred(const_cast(&s)) {} - reserving_port_operation(op_type t) : type(char(t)) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - void handle_operations(reserving_port_operation* op_list) { - reserving_port_operation *current; - bool no_predecessors; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case reg_pred: - no_predecessors = my_predecessors.empty(); - my_predecessors.add(*(current->my_pred)); - if ( no_predecessors ) { - (void) my_join->decrement_port_count(true); // may try to forward - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case rem_pred: - my_predecessors.remove(*(current->my_pred)); - if(my_predecessors.empty()) my_join->increment_port_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case res_item: - if ( reserved ) { - __TBB_store_with_release(current->status, FAILED); - } - else if ( my_predecessors.try_reserve( *(current->my_arg) ) ) { - reserved = true; - __TBB_store_with_release(current->status, SUCCEEDED); - } else { - if ( my_predecessors.empty() ) { - my_join->increment_port_count(); - } - __TBB_store_with_release(current->status, FAILED); - } - break; - case rel_res: - reserved = false; - my_predecessors.try_release( ); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case con_res: - reserved = false; - my_predecessors.try_consume( ); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: - my_predecessors.internal_add_built_predecessor(*(current->my_pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_pred: - my_predecessors.internal_delete_built_predecessor(*(current->my_pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cnt: - current->cnt_val = my_predecessors.predecessor_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_predecessors.copy_predecessors(*(current->pvec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - task *try_put_task( const T & ) { - return NULL; - } - - public: - - //! 
Constructor - reserving_port() : reserved(false) { - my_join = NULL; - my_predecessors.set_owner( this ); - my_aggregator.initialize_handler(my_handler(this)); - } - - // copy constructor - reserving_port(const reserving_port& /* other */) : receiver() { - reserved = false; - my_join = NULL; - my_predecessors.set_owner( this ); - my_aggregator.initialize_handler(my_handler(this)); - } - - void set_join_node_pointer(forwarding_base *join) { - my_join = join; - } - - //! Add a predecessor - bool register_predecessor( sender &src ) { - reserving_port_operation op_data(src, reg_pred); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - //! Remove a predecessor - bool remove_predecessor( sender &src ) { - reserving_port_operation op_data(src, rem_pred); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - //! Reserve an item from the port - bool reserve( T &v ) { - reserving_port_operation op_data(v, res_item); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - //! Release the port - void release( ) { - reserving_port_operation op_data(rel_res); - my_aggregator.execute(&op_data); - } - - //! Complete use of the port - void consume( ) { - reserving_port_operation op_data(con_res); - my_aggregator.execute(&op_data); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(predecessor_type &src) { - reserving_port_operation op_data(src, add_blt_pred); - my_aggregator.execute(&op_data); - } - - /*override*/void internal_delete_built_predecessor(predecessor_type &src) { - reserving_port_operation op_data(src, del_blt_pred); - my_aggregator.execute(&op_data); - } - - /*override*/size_t predecessor_count() { - reserving_port_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - reserving_port_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); - reserved = false; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - __TBB_ASSERT(!(f&rf_extract) || my_predecessors.empty(), "port edges not removed"); -#endif - } - - private: - forwarding_base *my_join; - reservable_predecessor_cache< T, null_mutex > my_predecessors; - bool reserved; - }; - - //! 
queueing join_port - template - class queueing_port : public receiver, public item_buffer { - public: - typedef T input_type; - typedef sender predecessor_type; - typedef queueing_port my_node_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - - // ----------- Aggregator ------------ - private: - enum op_type { get__item, res_port, try__put_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef queueing_port my_class; - - class queueing_port_operation : public aggregated_operation { - public: - char type; - T my_val; - T *my_arg; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - sender *pred; - size_t cnt_val; - predecessor_vector_type *pvec; -#endif - task * bypass_t; - // constructor for value parameter - queueing_port_operation(const T& e, op_type t) : - type(char(t)), my_val(e) - , bypass_t(NULL) - {} - // constructor for pointer parameter - queueing_port_operation(const T* p, op_type t) : - type(char(t)), my_arg(const_cast(p)) - , bypass_t(NULL) - {} - // constructor with no parameter - queueing_port_operation(op_type t) : type(char(t)) - , bypass_t(NULL) - {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - void handle_operations(queueing_port_operation* op_list) { - queueing_port_operation *current; - bool was_empty; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case try__put_task: { - task *rtask = NULL; - was_empty = this->buffer_empty(); - this->push_back(current->my_val); - if (was_empty) rtask = my_join->decrement_port_count(false); - else - rtask = SUCCESSFULLY_ENQUEUED; - current->bypass_t = rtask; - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - case get__item: - if(!this->buffer_empty()) { - this->copy_front(*(current->my_arg)); - __TBB_store_with_release(current->status, SUCCEEDED); - } - else { - __TBB_store_with_release(current->status, FAILED); - } - break; - case res_port: - __TBB_ASSERT(this->my_item_valid(this->my_head), "No item to reset"); - this->destroy_front(); - if(this->my_item_valid(this->my_head)) { - (void)my_join->decrement_port_count(true); - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: - my_built_predecessors.add_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_pred: - my_built_predecessors.delete_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cnt: - current->cnt_val = my_built_predecessors.edge_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_built_predecessors.copy_edges(*(current->pvec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - // ------------ End Aggregator --------------- - - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /*override*/task *try_put_task(const T &v) { - queueing_port_operation op_data(v, try__put_task); - my_aggregator.execute(&op_data); - __TBB_ASSERT(op_data.status == SUCCEEDED || !op_data.bypass_t, "inconsistent return from aggregator"); - if(!op_data.bypass_t) return 
SUCCESSFULLY_ENQUEUED; - return op_data.bypass_t; - } - - public: - - //! Constructor - queueing_port() : item_buffer() { - my_join = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - //! copy constructor - queueing_port(const queueing_port& /* other */) : receiver(), item_buffer() { - my_join = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - //! record parent for tallying available items - void set_join_node_pointer(forwarding_base *join) { - my_join = join; - } - - bool get_item( T &v ) { - queueing_port_operation op_data(&v, get__item); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - // reset_port is called when item is accepted by successor, but - // is initiated by join_node. - void reset_port() { - queueing_port_operation op_data(res_port); - my_aggregator.execute(&op_data); - return; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(sender &p) { - queueing_port_operation op_data(add_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/void internal_delete_built_predecessor(sender &p) { - queueing_port_operation op_data(del_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/size_t predecessor_count() { - queueing_port_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - queueing_port_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } - - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { - item_buffer::reset(); - if (f & rf_extract) - my_built_predecessors.receiver_extract(*this); - } -#else - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { item_buffer::reset(); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - private: - forwarding_base *my_join; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_predecessors; -#endif - }; - -#include "_flow_graph_tagged_buffer_impl.h" - - template< typename T > - class tag_matching_port : public receiver, public tagged_buffer< tag_value, T, NO_TAG > { - public: - typedef T input_type; - typedef sender predecessor_type; - typedef tag_matching_port my_node_type; // for forwarding, if needed - typedef function_body my_tag_func_type; - typedef tagged_buffer my_buffer_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - private: -// ----------- Aggregator ------------ - private: - enum op_type { try__put, get__item, res_port, - add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef tag_matching_port my_class; - - class tag_matching_port_operation : public aggregated_operation { - public: - char type; - T my_val; - T *my_arg; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - predecessor_type *pred; - size_t cnt_val; - predecessor_vector_type *pvec; -#endif - tag_value my_tag_value; - // constructor for value parameter - tag_matching_port_operation(const T& e, op_type t) : - type(char(t)), my_val(e) {} - // constructor for pointer parameter - tag_matching_port_operation(const T* p, op_type t) : - type(char(t)), my_arg(const_cast(p)) {} - // constructor with no parameter - tag_matching_port_operation(op_type t) : type(char(t)) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - 
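    // [Editorial note, not part of the patch: the aggregator idiom used by
    // all of these ports works as follows. Each caller wraps its request in
    // a *_operation record and passes it to my_aggregator.execute(&op);
    // records arriving concurrently are linked into a list, and one thread
    // drains the whole list through handle_operations() below on behalf of
    // everyone. Waiting callers spin on op.status, which is why each case
    // publishes its result with __TBB_store_with_release. The port thereby
    // gets mutual exclusion and batching without taking a mutex per call.]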
void handle_operations(tag_matching_port_operation* op_list) { - tag_matching_port_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case try__put: { - bool was_inserted = this->tagged_insert(current->my_tag_value, current->my_val); - // return failure if a duplicate insertion occurs - __TBB_store_with_release(current->status, was_inserted ? SUCCEEDED : FAILED); - } - break; - case get__item: - // use current_tag from FE for item - if(!this->tagged_find(my_join->current_tag, *(current->my_arg))) { - __TBB_ASSERT(false, "Failed to find item corresponding to current_tag."); - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case res_port: - // use current_tag from FE for item - this->tagged_delete(my_join->current_tag); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: - my_built_predecessors.add_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_pred: - my_built_predecessors.delete_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cnt: - current->cnt_val = my_built_predecessors.edge_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_built_predecessors.copy_edges(*(current->pvec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif - } - } - } -// ------------ End Aggregator --------------- - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /*override*/task *try_put_task(const T& v) { - tag_matching_port_operation op_data(v, try__put); - op_data.my_tag_value = (*my_tag_func)(v); - task *rtask = NULL; - my_aggregator.execute(&op_data); - if(op_data.status == SUCCEEDED) { - rtask = my_join->increment_tag_count(op_data.my_tag_value, false); // may spawn - // rtask has to reflect the return status of the try_put - if(!rtask) rtask = SUCCESSFULLY_ENQUEUED; - } - return rtask; - } - - public: - - tag_matching_port() : receiver(), tagged_buffer() { - my_join = NULL; - my_tag_func = NULL; - my_original_tag_func = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - // copy constructor - tag_matching_port(const tag_matching_port& /*other*/) : receiver(), tagged_buffer() { - my_join = NULL; - // setting the tag methods is done in the copy-constructor for the front-end. 
- my_tag_func = NULL; - my_original_tag_func = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - ~tag_matching_port() { - if (my_tag_func) delete my_tag_func; - if (my_original_tag_func) delete my_original_tag_func; - } - - void set_join_node_pointer(forwarding_base *join) { - my_join = join; - } - - void set_my_original_tag_func(my_tag_func_type *f) { - my_original_tag_func = f; - } - - void set_my_tag_func(my_tag_func_type *f) { - my_tag_func = f; - } - - bool get_item( T &v ) { - tag_matching_port_operation op_data(&v, get__item); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(sender &p) { - tag_matching_port_operation op_data(add_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/void internal_delete_built_predecessor(sender &p) { - tag_matching_port_operation op_data(del_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/size_t predecessor_count() { - tag_matching_port_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - tag_matching_port_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } -#endif - - // reset_port is called when item is accepted by successor, but - // is initiated by join_node. - void reset_port() { - tag_matching_port_operation op_data(res_port); - my_aggregator.execute(&op_data); - return; - } - - my_tag_func_type *my_func() { return my_tag_func; } - my_tag_func_type *my_original_func() { return my_original_tag_func; } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { - my_buffer_type::reset(); - if (f & rf_extract) - my_built_predecessors.receiver_extract(*this); - } -#else - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { my_buffer_type::reset(); } -#endif - - private: - // need map of tags to values - forwarding_base *my_join; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - my_tag_func_type *my_tag_func; - my_tag_func_type *my_original_tag_func; - }; // tag_matching_port - - using namespace graph_policy_namespace; - - template - class join_node_base; - - //! 
join_node_FE : implements input port policy - template - class join_node_FE; - - template - class join_node_FE : public forwarding_base { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputTuple output_type; - typedef InputTuple input_type; - typedef join_node_base my_node_type; // for forwarding - - join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) { - ports_with_no_inputs = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) { - ports_with_no_inputs = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; } - - void increment_port_count() { - ++ports_with_no_inputs; - } - - // if all input_ports have predecessors, spawn forward to try and consume tuples - task * decrement_port_count(bool handle_task) { - if(ports_with_no_inputs.fetch_and_decrement() == 1) { - task* tp = this->my_graph_ptr->root_task(); - if(tp) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - forward_task_bypass(*my_node); - if(!handle_task) return rtask; - FLOW_SPAWN(*rtask); - } - } - return NULL; - } - - input_type &input_ports() { return my_inputs; } - - protected: - - void reset( __TBB_PFG_RESET_ARG( reset_flags f)) { - // called outside of parallel contexts - ports_with_no_inputs = N; - join_helper::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f)); - } - - // all methods on input ports should be called under mutual exclusion from join_node_base. - - bool tuple_build_may_succeed() { - return !ports_with_no_inputs; - } - - bool try_to_make_tuple(output_type &out) { - if(ports_with_no_inputs) return false; - return join_helper::reserve(my_inputs, out); - } - - void tuple_accepted() { - join_helper::consume_reservations(my_inputs); - } - void tuple_rejected() { - join_helper::release_reservations(my_inputs); - } - - input_type my_inputs; - my_node_type *my_node; - atomic ports_with_no_inputs; - }; - - template - class join_node_FE : public forwarding_base { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputTuple output_type; - typedef InputTuple input_type; - typedef join_node_base my_node_type; // for forwarding - - join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) { - ports_with_no_items = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) { - ports_with_no_items = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - // needed for forwarding - void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; } - - void reset_port_count() { - ports_with_no_items = N; - } - - // if all input_ports have items, spawn forward to try and consume tuples - task * decrement_port_count(bool handle_task) - { - if(ports_with_no_items.fetch_and_decrement() == 1) { - task* tp = this->my_graph_ptr->root_task(); - if(tp) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - forward_task_bypass (*my_node); - if(!handle_task) return rtask; - FLOW_SPAWN( *rtask); - } - } - return NULL; - } - - void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called - - input_type &input_ports() { return my_inputs; } - - protected: - - void reset( __TBB_PFG_RESET_ARG( reset_flags f)) { - reset_port_count(); - join_helper::reset_inputs(my_inputs 
__TBB_PFG_RESET_ARG( __TBB_COMMA f) ); - } - - // all methods on input ports should be called under mutual exclusion from join_node_base. - - bool tuple_build_may_succeed() { - return !ports_with_no_items; - } - - bool try_to_make_tuple(output_type &out) { - if(ports_with_no_items) return false; - return join_helper::get_items(my_inputs, out); - } - - void tuple_accepted() { - reset_port_count(); - join_helper::reset_ports(my_inputs); - } - void tuple_rejected() { - // nothing to do. - } - - input_type my_inputs; - my_node_type *my_node; - atomic ports_with_no_items; - }; - - // tag_matching join input port. - template - class join_node_FE : public forwarding_base, - // buffer of tag value counts buffer of output items - public tagged_buffer, public item_buffer { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputTuple output_type; - typedef InputTuple input_type; - typedef tagged_buffer my_tag_buffer; - typedef item_buffer output_buffer_type; - typedef join_node_base my_node_type; // for forwarding - -// ----------- Aggregator ------------ - // the aggregator is only needed to serialize the access to the hash table. - // and the output_buffer_type base class - private: - enum op_type { res_count, inc_count, may_succeed, try_make }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef join_node_FE my_class; - - class tag_matching_FE_operation : public aggregated_operation { - public: - char type; - union { - tag_value my_val; - output_type* my_output; - }; - task *bypass_t; - bool enqueue_task; - // constructor for value parameter - tag_matching_FE_operation(const tag_value& e , bool q_task , op_type t) : type(char(t)), my_val(e), - bypass_t(NULL), enqueue_task(q_task) {} - tag_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(NULL), - enqueue_task(true) {} - // constructor with no parameter - tag_matching_FE_operation(op_type t) : type(char(t)), bypass_t(NULL), enqueue_task(true) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - // called from aggregator, so serialized - // construct as many output objects as possible. - // returns a task pointer if the a task would have been enqueued but we asked that - // it be returned. Otherwise returns NULL. 
- task * fill_output_buffer(tag_value t, bool should_enqueue, bool handle_task) { - output_type l_out; - task *rtask = NULL; - task* tp = this->my_graph_ptr->root_task(); - bool do_fwd = should_enqueue && this->buffer_empty() && tp; - this->current_tag = t; - this->tagged_delete(this->current_tag); // remove the tag - if(join_helper::get_items(my_inputs, l_out)) { // <== call back - this->push_back(l_out); - if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item - rtask = new ( task::allocate_additional_child_of( *tp ) ) - forward_task_bypass(*my_node); - if(handle_task) { - FLOW_SPAWN(*rtask); - rtask = NULL; - } - do_fwd = false; - } - // retire the input values - join_helper::reset_ports(my_inputs); // <== call back - this->current_tag = NO_TAG; - } - else { - __TBB_ASSERT(false, "should have had something to push"); - } - return rtask; - } - - void handle_operations(tag_matching_FE_operation* op_list) { - tag_matching_FE_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case res_count: // called from BE - { - this->destroy_front(); - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - case inc_count: { // called from input ports - size_t *p = 0; - tag_value t = current->my_val; - bool do_enqueue = current->enqueue_task; - if(!(this->tagged_find_ref(t,p))) { - this->tagged_insert(t, 0); - if(!(this->tagged_find_ref(t,p))) { - __TBB_ASSERT(false, "should find tag after inserting it"); - } - } - if(++(*p) == size_t(N)) { - task *rtask = fill_output_buffer(t, true, do_enqueue); - __TBB_ASSERT(!rtask || !do_enqueue, "task should not be returned"); - current->bypass_t = rtask; - } - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case may_succeed: // called from BE - __TBB_store_with_release(current->status, this->buffer_empty() ? FAILED : SUCCEEDED); - break; - case try_make: // called from BE - if(this->buffer_empty()) { - __TBB_store_with_release(current->status, FAILED); - } - else { - this->copy_front(*(current->my_output)); - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - } - } - } -// ------------ End Aggregator --------------- - - public: - template - join_node_FE(graph &g, FunctionTuple tag_funcs) : forwarding_base(g), my_node(NULL) { - join_helper::set_join_node_pointer(my_inputs, this); - join_helper::set_tag_func(my_inputs, tag_funcs); - my_aggregator.initialize_handler(my_handler(this)); - } - - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_tag_buffer(), - output_buffer_type() { - my_node = NULL; - join_helper::set_join_node_pointer(my_inputs, this); - join_helper::copy_tag_functors(my_inputs, const_cast(other.my_inputs)); - my_aggregator.initialize_handler(my_handler(this)); - } - - // needed for forwarding - void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; } - - void reset_port_count() { // called from BE - tag_matching_FE_operation op_data(res_count); - my_aggregator.execute(&op_data); - return; - } - - // if all input_ports have items, spawn forward to try and consume tuples - // return a task if we are asked and did create one. 
- task *increment_tag_count(tag_value t, bool handle_task) { // called from input_ports - tag_matching_FE_operation op_data(t, handle_task, inc_count); - my_aggregator.execute(&op_data); - return op_data.bypass_t; - } - - /*override*/ task *decrement_port_count(bool /*handle_task*/) { __TBB_ASSERT(false, NULL); return NULL; } - - void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called - - input_type &input_ports() { return my_inputs; } - - protected: - - void reset( __TBB_PFG_RESET_ARG( reset_flags f )) { - // called outside of parallel contexts - join_helper::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f)); - - my_tag_buffer::reset(); // have to reset the tag counts - output_buffer_type::reset(); // also the queue of outputs - my_node->current_tag = NO_TAG; - } - - // all methods on input ports should be called under mutual exclusion from join_node_base. - - bool tuple_build_may_succeed() { // called from back-end - tag_matching_FE_operation op_data(may_succeed); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - // cannot lock while calling back to input_ports. current_tag will only be set - // and reset under the aggregator, so it will remain consistent. - bool try_to_make_tuple(output_type &out) { - tag_matching_FE_operation op_data(&out,try_make); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - void tuple_accepted() { - reset_port_count(); // reset current_tag after ports reset. - } - - void tuple_rejected() { - // nothing to do. - } - - input_type my_inputs; // input ports - my_node_type *my_node; - }; // join_node_FE - - //! join_node_base - template - class join_node_base : public graph_node, public join_node_FE, - public sender { - protected: - using graph_node::my_graph; - public: - typedef OutputTuple output_type; - - typedef receiver successor_type; - typedef join_node_FE input_ports_type; - using input_ports_type::tuple_build_may_succeed; - using input_ports_type::try_to_make_tuple; - using input_ports_type::tuple_accepted; - using input_ports_type::tuple_rejected; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; -#endif - - private: - // ----------- Aggregator ------------ - enum op_type { reg_succ, rem_succ, try__get, do_fwrd, do_fwrd_bypass -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef join_node_base my_class; - - class join_node_base_operation : public aggregated_operation { - public: - char type; - union { - output_type *my_arg; - successor_type *my_succ; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - successor_vector_type *svec; -#endif - }; - task *bypass_t; - join_node_base_operation(const output_type& e, op_type t) : type(char(t)), - my_arg(const_cast(&e)), bypass_t(NULL) {} - join_node_base_operation(const successor_type &s, op_type t) : type(char(t)), - my_succ(const_cast(&s)), bypass_t(NULL) {} - join_node_base_operation(op_type t) : type(char(t)), bypass_t(NULL) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - bool forwarder_busy; - aggregator my_aggregator; - - void handle_operations(join_node_base_operation* op_list) { - join_node_base_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case reg_succ: { - my_successors.register_successor(*(current->my_succ)); - task* tp = 
this->graph_node::my_graph.root_task(); - if(tuple_build_may_succeed() && !forwarder_busy && tp) { - task *rtask = new ( task::allocate_additional_child_of(*tp) ) - forward_task_bypass - >(*this); - FLOW_SPAWN(*rtask); - forwarder_busy = true; - } - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - case rem_succ: - my_successors.remove_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case try__get: - if(tuple_build_may_succeed()) { - if(try_to_make_tuple(*(current->my_arg))) { - tuple_accepted(); - __TBB_store_with_release(current->status, SUCCEEDED); - } - else __TBB_store_with_release(current->status, FAILED); - } - else __TBB_store_with_release(current->status, FAILED); - break; - case do_fwrd_bypass: { - bool build_succeeded; - task *last_task = NULL; - output_type out; - if(tuple_build_may_succeed()) { - do { - build_succeeded = try_to_make_tuple(out); - if(build_succeeded) { - task *new_task = my_successors.try_put_task(out); - last_task = combine_tasks(last_task, new_task); - if(new_task) { - tuple_accepted(); - } - else { - tuple_rejected(); - build_succeeded = false; - } - } - } while(build_succeeded); - } - current->bypass_t = last_task; - __TBB_store_with_release(current->status, SUCCEEDED); - forwarder_busy = false; - } - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_succ: - my_successors.internal_add_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_succ: - my_successors.internal_delete_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cnt: - current->cnt_val = my_successors.successor_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cpy: - my_successors.copy_successors(*(current->svec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - // ---------- end aggregator ----------- - public: - join_node_base(graph &g) : graph_node(g), input_ports_type(g), forwarder_busy(false) { - my_successors.set_owner(this); - input_ports_type::set_my_node(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - join_node_base(const join_node_base& other) : - graph_node(other.graph_node::my_graph), input_ports_type(other), - sender(), forwarder_busy(false), my_successors() { - my_successors.set_owner(this); - input_ports_type::set_my_node(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - template - join_node_base(graph &g, FunctionTuple f) : graph_node(g), input_ports_type(g, f), forwarder_busy(false) { - my_successors.set_owner(this); - input_ports_type::set_my_node(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - bool register_successor(successor_type &r) { - join_node_base_operation op_data(r, reg_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - bool remove_successor( successor_type &r) { - join_node_base_operation op_data(r, rem_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - bool try_get( output_type &v) { - join_node_base_operation op_data(v, try__get); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor( successor_type &r) { - join_node_base_operation op_data(r, add_blt_succ); - my_aggregator.execute(&op_data); - } - - 
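Together with try_get(), the methods above form the sender side that the public join_node exposes: tuples are pushed to registered successors when possible, and can otherwise be pulled. A minimal usage sketch against the public tbb/flow_graph.h header (the types and values are arbitrary):

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        // Default (queueing) policy: each port buffers items until every port has one.
        tbb::flow::join_node< tbb::flow::tuple<int, double> > j(g);

        tbb::flow::input_port<0>(j).try_put(7);
        tbb::flow::input_port<1>(j).try_put(3.5);
        g.wait_for_all();

        // With no successor registered, the joined tuple stays available for a pull.
        tbb::flow::tuple<int, double> t;
        if (j.try_get(t))
            std::cout << tbb::flow::get<0>(t) << ", " << tbb::flow::get<1>(t) << "\n";
        return 0;
    }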
-    /*override*/void internal_delete_built_successor( successor_type &r) {
-        join_node_base_operation op_data(r, del_blt_succ);
-        my_aggregator.execute(&op_data);
-    }
-
-    /*override*/size_t successor_count() {
-        join_node_base_operation op_data(blt_succ_cnt);
-        my_aggregator.execute(&op_data);
-        return op_data.cnt_val;
-    }
-
-    /*override*/ void copy_successors(successor_vector_type &v) {
-        join_node_base_operation op_data(blt_succ_cpy);
-        op_data.svec = &v;
-        my_aggregator.execute(&op_data);
-    }
-#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
-
-    protected:
-
-    /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
-        input_ports_type::reset(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-        my_successors.reset(f);
-#endif
-    }
-
-    private:
-        broadcast_cache<output_type, null_rw_mutex> my_successors;
-
-        friend class forward_task_bypass< join_node_base<JP, InputTuple, OutputTuple> >;
-        task *forward_task() {
-            join_node_base_operation op_data(do_fwrd_bypass);
-            my_aggregator.execute(&op_data);
-            return op_data.bypass_t;
-        }
-
-    };
-
-    // join base class type generator
-    template<int N, template<class> class PT, typename OutputTuple, graph_buffer_policy JP>
-    struct join_base {
-        typedef typename internal::join_node_base<JP, typename wrap_tuple_elements<N,PT,OutputTuple>::type, OutputTuple> type;
-    };
-
-    //! unfolded_join_node : passes input_ports_type to join_node_base.  We build the input port type
-    //  using tuple_element.  The class PT is the port type (reserving_port, queueing_port, tag_matching_port)
-    //  and should match the graph_buffer_policy.
-
-    template<int M, template<class> class PT, typename OutputTuple, graph_buffer_policy JP>
-    class unfolded_join_node : public join_base<M,PT,OutputTuple,JP>::type {
-    public:
-        typedef typename wrap_tuple_elements<M, PT, OutputTuple>::type input_ports_type;
-        typedef OutputTuple output_type;
-    private:
-        typedef join_node_base<JP, input_ports_type, output_type > base_type;
-    public:
-        unfolded_join_node(graph &g) : base_type(g) {}
-        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
-    };
-
-    // tag_matching unfolded_join_node.  This must be a separate specialization because the
-    // constructors differ.
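Concretely, each specialization below takes one tag-extraction functor per input port, and items join up when their tags compare equal. A usage sketch of what those constructors enable (the Order/Price types and their fields are invented for illustration):

    #include "tbb/flow_graph.h"
    #include <iostream>

    struct Order { size_t id; int qty; };
    struct Price { size_t id; double value; };

    int main() {
        tbb::flow::graph g;
        tbb::flow::join_node< tbb::flow::tuple<Order, Price>, tbb::flow::tag_matching >
            j(g,
              [](const Order& o) { return o.id; },   // tag function for port 0
              [](const Price& p) { return p.id; });  // tag function for port 1

        tbb::flow::input_port<0>(j).try_put(Order{1, 10});
        tbb::flow::input_port<1>(j).try_put(Price{1, 9.99});
        g.wait_for_all();

        tbb::flow::tuple<Order, Price> matched;
        if (j.try_get(matched))
            std::cout << "order " << tbb::flow::get<0>(matched).id << " matched\n";
        return 0;
    }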
- - template - class unfolded_join_node<2,tag_matching_port,OutputTuple,tag_matching> : public - join_base<2,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - public: - typedef typename wrap_tuple_elements<2,tag_matching_port,OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename tbb::flow::tuple< f0_p, f1_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - template - class unfolded_join_node<3,tag_matching_port,OutputTuple,tag_matching> : public - join_base<3,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - public: - typedef typename wrap_tuple_elements<3, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - template - class unfolded_join_node<4,tag_matching_port,OutputTuple,tag_matching> : public - join_base<4,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - public: - typedef typename wrap_tuple_elements<4, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - template - class unfolded_join_node<5,tag_matching_port,OutputTuple,tag_matching> : public - join_base<5,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, 
OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - public: - typedef typename wrap_tuple_elements<5, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - -#if __TBB_VARIADIC_MAX >= 6 - template - class unfolded_join_node<6,tag_matching_port,OutputTuple,tag_matching> : public - join_base<6,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - public: - typedef typename wrap_tuple_elements<6, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 7 - template - class unfolded_join_node<7,tag_matching_port,OutputTuple,tag_matching> : public - join_base<7,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename 
tbb::flow::tuple_element<6, OutputTuple>::type T6; - public: - typedef typename wrap_tuple_elements<7, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 8 - template - class unfolded_join_node<8,tag_matching_port,OutputTuple,tag_matching> : public - join_base<8,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; - typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; - public: - typedef typename wrap_tuple_elements<8, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename internal::function_body *f7_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6), - new internal::function_body_leaf(b7) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 9 - template - class unfolded_join_node<9,tag_matching_port,OutputTuple,tag_matching> : public - join_base<9,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type 
T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; - typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; - typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8; - public: - typedef typename wrap_tuple_elements<9, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename internal::function_body *f7_p; - typedef typename internal::function_body *f8_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6), - new internal::function_body_leaf(b7), - new internal::function_body_leaf(b8) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 10 - template - class unfolded_join_node<10,tag_matching_port,OutputTuple,tag_matching> : public - join_base<10,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; - typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; - typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8; - typedef typename tbb::flow::tuple_element<9, OutputTuple>::type T9; - public: - typedef typename wrap_tuple_elements<10, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename internal::function_body *f7_p; - typedef typename internal::function_body *f8_p; - typedef typename internal::function_body *f9_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p, f9_p > 
-        func_initializer_type;
-    public:
-        template<typename B0, typename B1, typename B2, typename B3, typename B4,
-                 typename B5, typename B6, typename B7, typename B8, typename B9>
-        unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8, B9 b9) : base_type(g,
-                func_initializer_type(
-                    new internal::function_body_leaf<T0, tag_value, B0>(b0),
-                    new internal::function_body_leaf<T1, tag_value, B1>(b1),
-                    new internal::function_body_leaf<T2, tag_value, B2>(b2),
-                    new internal::function_body_leaf<T3, tag_value, B3>(b3),
-                    new internal::function_body_leaf<T4, tag_value, B4>(b4),
-                    new internal::function_body_leaf<T5, tag_value, B5>(b5),
-                    new internal::function_body_leaf<T6, tag_value, B6>(b6),
-                    new internal::function_body_leaf<T7, tag_value, B7>(b7),
-                    new internal::function_body_leaf<T8, tag_value, B8>(b8),
-                    new internal::function_body_leaf<T9, tag_value, B9>(b9)
-                ) ) {}
-        unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
-    };
-#endif
-
-    //! templated function to refer to input ports of the join node
-    template<size_t N, typename JNT>
-    typename tbb::flow::tuple_element<N, typename JNT::input_ports_type>::type &input_port(JNT &jn) {
-        return tbb::flow::get<N>(jn.input_ports());
-    }
-
-}
-#endif // __TBB__flow_graph_join_impl_H
-
diff --git a/inst/include/tbb/internal/_flow_graph_node_impl.h b/inst/include/tbb/internal/_flow_graph_node_impl.h
deleted file mode 100644
index 837d83449..000000000
--- a/inst/include/tbb/internal/_flow_graph_node_impl.h
+++ /dev/null
@@ -1,742 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB__flow_graph_node_impl_H
-#define __TBB__flow_graph_node_impl_H
-
-#ifndef __TBB_flow_graph_H
-#error Do not #include this internal file directly; use public TBB headers instead.
-#endif
-
-#include "_flow_graph_item_buffer_impl.h"
-
-//! @cond INTERNAL
-namespace internal {
-
-    using tbb::internal::aggregated_operation;
-    using tbb::internal::aggregating_functor;
-    using tbb::internal::aggregator;
-
-    template< typename T, typename A >
-    class function_input_queue : public item_buffer<T,A> {
-    public:
-        bool pop( T& t ) {
-            return this->pop_front( t );
-        }
-
-        bool push( T& t ) {
-            return this->push_back( t );
-        }
-    };
-
-    //! Input and scheduling for a function node that takes a type Input as input
-    //  The only up-ref is apply_body_impl, which should implement the function
-    //  call and any handling of the result.
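At the public-API level, the input machinery implemented by the class that follows is what enforces a function_node's concurrency cap and buffers the overflow. A small usage sketch (the body and values are arbitrary):

    #include "tbb/flow_graph.h"
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        // tbb::flow::serial caps my_max_concurrency at one body in flight;
        // further inputs wait in the node's internal queue.
        tbb::flow::function_node<int, int> square(g, tbb::flow::serial,
            [](int v) { std::printf("%d -> %d\n", v, v * v); return v * v; });

        for (int i = 0; i < 4; ++i)
            square.try_put(i);
        g.wait_for_all();
        return 0;
    }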
- template< typename Input, typename A, typename ImplType > - class function_input_base : public receiver, tbb::internal::no_assign { - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - enum op_type {reg_pred, rem_pred, app_body, try_fwd, tryput_bypass, app_body_bypass -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_pred, del_blt_pred, - blt_pred_cnt, blt_pred_cpy // create vector copies of preds and succs -#endif - }; - typedef function_input_base my_class; - - public: - - //! The input type of this receiver - typedef Input input_type; - typedef sender predecessor_type; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - - //! Constructor for function_input_base - function_input_base( graph &g, size_t max_concurrency, function_input_queue *q = NULL ) - : my_graph(g), my_max_concurrency(max_concurrency), my_concurrency(0), - my_queue(q), forwarder_busy(false) { - my_predecessors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - //! Copy constructor - function_input_base( const function_input_base& src, function_input_queue *q = NULL ) : - receiver(), tbb::internal::no_assign(), - my_graph(src.my_graph), my_max_concurrency(src.my_max_concurrency), - my_concurrency(0), my_queue(q), forwarder_busy(false) - { - my_predecessors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - //! Destructor - virtual ~function_input_base() { - if ( my_queue ) delete my_queue; - } - - //! Put to the node, returning a task if available - virtual task * try_put_task( const input_type &t ) { - if ( my_max_concurrency == 0 ) { - return create_body_task( t ); - } else { - my_operation op_data(t, tryput_bypass); - my_aggregator.execute(&op_data); - if(op_data.status == SUCCEEDED ) { - return op_data.bypass_t; - } - return NULL; - } - } - - //! Adds src to the list of cached predecessors. - /* override */ bool register_predecessor( predecessor_type &src ) { - my_operation op_data(reg_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - return true; - } - - //! Removes src from the list of cached predecessors. - /* override */ bool remove_predecessor( predecessor_type &src ) { - my_operation op_data(rem_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - //! Adds to list of predecessors added by make_edge - /*override*/ void internal_add_built_predecessor( predecessor_type &src) { - my_operation op_data(add_blt_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - } - - //! 
removes src from the list of predecessors added by make_edge (used by remove_edge)
-        /*override*/ void internal_delete_built_predecessor( predecessor_type &src) {
-            my_operation op_data(del_blt_pred);
-            op_data.r = &src;
-            my_aggregator.execute(&op_data);
-        }
-
-        /*override*/ size_t predecessor_count() {
-            my_operation op_data(blt_pred_cnt);
-            my_aggregator.execute(&op_data);
-            return op_data.cnt_val;
-        }
-
-        /*override*/ void copy_predecessors(predecessor_vector_type &v) {
-            my_operation op_data(blt_pred_cpy);
-            op_data.predv = &v;
-            my_aggregator.execute(&op_data);
-        }
-#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
-
-    protected:
-
-        void reset_function_input_base( __TBB_PFG_RESET_ARG(reset_flags f)) {
-            my_concurrency = 0;
-            if(my_queue) {
-                my_queue->reset();
-            }
-            reset_receiver(__TBB_PFG_RESET_ARG(f));
-            forwarder_busy = false;
-        }
-
-        graph& my_graph;
-        const size_t my_max_concurrency;
-        size_t my_concurrency;
-        function_input_queue<input_type, A> *my_queue;
-        predecessor_cache<input_type, null_mutex > my_predecessors;
-
-        /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {
-            my_predecessors.reset(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-            __TBB_ASSERT(!(f & rf_extract) || my_predecessors.empty(), "function_input_base reset failed");
-#endif
-        }
-
-    private:
-
-        friend class apply_body_task_bypass< my_class, input_type >;
-        friend class forward_task_bypass< my_class >;
-
-        class my_operation : public aggregated_operation< my_operation > {
-        public:
-            char type;
-            union {
-                input_type *elem;
-                predecessor_type *r;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-                size_t cnt_val;
-                predecessor_vector_type *predv;
-#endif  /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
-            };
-            tbb::task *bypass_t;
-            my_operation(const input_type& e, op_type t) :
-                type(char(t)), elem(const_cast<input_type*>(&e)) {}
-            my_operation(op_type t) : type(char(t)), r(NULL) {}
-        };
-
-        bool forwarder_busy;
-        typedef internal::aggregating_functor<my_class, my_operation> my_handler;
-        friend class internal::aggregating_functor<my_class, my_operation>;
-        aggregator< my_handler, my_operation > my_aggregator;
-
-        void handle_operations(my_operation *op_list) {
-            my_operation *tmp;
-            while (op_list) {
-                tmp = op_list;
-                op_list = op_list->next;
-                switch (tmp->type) {
-                case reg_pred:
-                    my_predecessors.add(*(tmp->r));
-                    __TBB_store_with_release(tmp->status, SUCCEEDED);
-                    if (!forwarder_busy) {
-                        forwarder_busy = true;
-                        spawn_forward_task();
-                    }
-                    break;
-                case rem_pred:
-                    my_predecessors.remove(*(tmp->r));
-                    __TBB_store_with_release(tmp->status, SUCCEEDED);
-                    break;
-                case app_body:
-                    __TBB_ASSERT(my_max_concurrency != 0, NULL);
-                    --my_concurrency;
-                    __TBB_store_with_release(tmp->status, SUCCEEDED);
-                    if (my_concurrency < my_max_concurrency) {
-                        input_type i;
-                        bool item_was_retrieved = false;
-                        if ( my_queue )
-                            item_was_retrieved = my_queue->pop(i);
-                        else
-                            item_was_retrieved = my_predecessors.get_item(i);
-                        if (item_was_retrieved) {
-                            ++my_concurrency;
-                            spawn_body_task(i);
-                        }
-                    }
-                    break;
-                case app_body_bypass: {
-                        task * new_task = NULL;
-                        __TBB_ASSERT(my_max_concurrency != 0, NULL);
-                        --my_concurrency;
-                        if (my_concurrency < my_max_concurrency) {
-                            input_type i;
-                            bool item_was_retrieved = false;
-                            if ( my_queue )
-                                item_was_retrieved = my_queue->pop(i);
-                            else
-                                item_was_retrieved = my_predecessors.get_item(i);
-                            if (item_was_retrieved) {
-                                ++my_concurrency;
-                                new_task = create_body_task(i);
-                            }
-                        }
-                        tmp->bypass_t = new_task;
-                        __TBB_store_with_release(tmp->status, SUCCEEDED);
-                    }
-                    break;
-                case tryput_bypass: internal_try_put_task(tmp);  break;
-                case try_fwd: internal_forward(tmp);  break;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-                case add_blt_pred: {
-                        my_predecessors.internal_add_built_predecessor(*(tmp->r));
-                        __TBB_store_with_release(tmp->status, SUCCEEDED);
-                    }
-                    break;
-                case del_blt_pred:
-                    my_predecessors.internal_delete_built_predecessor(*(tmp->r));
__TBB_store_with_release(tmp->status, SUCCEEDED); - break; - case blt_pred_cnt: - tmp->cnt_val = my_predecessors.predecessor_count(); - __TBB_store_with_release(tmp->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_predecessors.copy_predecessors( *(tmp->predv) ); - __TBB_store_with_release(tmp->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - - //! Put to the node, but return the task instead of enqueueing it - void internal_try_put_task(my_operation *op) { - __TBB_ASSERT(my_max_concurrency != 0, NULL); - if (my_concurrency < my_max_concurrency) { - ++my_concurrency; - task * new_task = create_body_task(*(op->elem)); - op->bypass_t = new_task; - __TBB_store_with_release(op->status, SUCCEEDED); - } else if ( my_queue && my_queue->push(*(op->elem)) ) { - op->bypass_t = SUCCESSFULLY_ENQUEUED; - __TBB_store_with_release(op->status, SUCCEEDED); - } else { - op->bypass_t = NULL; - __TBB_store_with_release(op->status, FAILED); - } - } - - //! Tries to spawn bodies if available and if concurrency allows - void internal_forward(my_operation *op) { - op->bypass_t = NULL; - if (my_concurrencypop(i); - else - item_was_retrieved = my_predecessors.get_item(i); - if (item_was_retrieved) { - ++my_concurrency; - op->bypass_t = create_body_task(i); - __TBB_store_with_release(op->status, SUCCEEDED); - return; - } - } - __TBB_store_with_release(op->status, FAILED); - forwarder_busy = false; - } - - //! Applies the body to the provided input - // then decides if more work is available - void apply_body( input_type &i ) { - task *new_task = apply_body_bypass(i); - if(!new_task) return; - if(new_task == SUCCESSFULLY_ENQUEUED) return; - FLOW_SPAWN(*new_task); - return; - } - - //! Applies the body to the provided input - // then decides if more work is available - task * apply_body_bypass( input_type &i ) { - task * new_task = static_cast(this)->apply_body_impl_bypass(i); - if ( my_max_concurrency != 0 ) { - my_operation op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body - my_aggregator.execute(&op_data); - tbb::task *ttask = op_data.bypass_t; - new_task = combine_tasks(new_task, ttask); - } - return new_task; - } - - //! allocates a task to call apply_body( input ) - inline task * create_body_task( const input_type &input ) { - - task* tp = my_graph.root_task(); - return (tp) ? - new(task::allocate_additional_child_of(*tp)) - apply_body_task_bypass < my_class, input_type >(*this, input) : - NULL; - } - - //! Spawns a task that calls apply_body( input ) - inline void spawn_body_task( const input_type &input ) { - task* tp = create_body_task(input); - // tp == NULL => g.reset(), which shouldn't occur in concurrent context - if(tp) { - FLOW_SPAWN(*tp); - } - } - - //! This is executed by an enqueued task, the "forwarder" - task *forward_task() { - my_operation op_data(try_fwd); - task *rval = NULL; - do { - op_data.status = WAIT; - my_aggregator.execute(&op_data); - if(op_data.status == SUCCEEDED) { - tbb::task *ttask = op_data.bypass_t; - rval = combine_tasks(rval, ttask); - } - } while (op_data.status == SUCCEEDED); - return rval; - } - - inline task *create_forward_task() { - task* tp = my_graph.root_task(); - return (tp) ? - new(task::allocate_additional_child_of(*tp)) forward_task_bypass< my_class >(*this) : - NULL; - } - - //! Spawns a task that calls forward() - inline void spawn_forward_task() { - task* tp = create_forward_task(); - if(tp) { - FLOW_SPAWN(*tp); - } - } - }; // function_input_base - - //! 
Implements methods for a function node that takes a type Input as input and sends - // a type Output to its successors. - template< typename Input, typename Output, typename A> - class function_input : public function_input_base > { - public: - typedef Input input_type; - typedef Output output_type; - typedef function_input my_class; - typedef function_input_base base_type; - typedef function_input_queue input_queue_type; - - - // constructor - template - function_input( graph &g, size_t max_concurrency, Body& body, function_input_queue *q = NULL ) : - base_type(g, max_concurrency, q), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { - } - - //! Copy constructor - function_input( const function_input& src, input_queue_type *q = NULL ) : - base_type(src, q), - my_body( src.my_body->clone() ) { - } - - ~function_input() { - delete my_body; - } - - template< typename Body > - Body copy_function_object() { - internal::function_body &body_ref = *this->my_body; - return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); - } - - task * apply_body_impl_bypass( const input_type &i) { -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - // There is an extra copied needed to capture the - // body execution without the try_put - tbb::internal::fgt_begin_body( my_body ); - output_type v = (*my_body)(i); - tbb::internal::fgt_end_body( my_body ); - task * new_task = successors().try_put_task( v ); -#else - task * new_task = successors().try_put_task( (*my_body)(i) ); -#endif - return new_task; - } - - protected: - - void reset_function_input(__TBB_PFG_RESET_ARG(reset_flags f)) { - base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if(f & rf_reset_bodies) my_body->reset_body(); -#endif - } - - function_body *my_body; - virtual broadcast_cache &successors() = 0; - - }; // function_input - - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // helper templates to reset the successor edges of the output ports of an multifunction_node - template - struct reset_element { - template - static void reset_this(P &p, reset_flags f) { - (void)tbb::flow::get(p).successors().reset(f); - reset_element::reset_this(p, f); - } - template - static bool this_empty(P &p) { - if(tbb::flow::get(p).successors().empty()) - return reset_element::this_empty(p); - return false; - } - }; - - template<> - struct reset_element<1> { - template - static void reset_this(P &p, reset_flags f) { - (void)tbb::flow::get<0>(p).successors().reset(f); - } - template - static bool this_empty(P &p) { - return tbb::flow::get<0>(p).successors().empty(); - } - }; -#endif - - //! Implements methods for a function node that takes a type Input as input - // and has a tuple of output ports specified. - template< typename Input, typename OutputPortSet, typename A> - class multifunction_input : public function_input_base > { - public: - static const int N = tbb::flow::tuple_size::value; - typedef Input input_type; - typedef OutputPortSet output_ports_type; - typedef multifunction_input my_class; - typedef function_input_base base_type; - typedef function_input_queue input_queue_type; - - - // constructor - template - multifunction_input( - graph &g, - size_t max_concurrency, - Body& body, - function_input_queue *q = NULL ) : - base_type(g, max_concurrency, q), - my_body( new internal::multifunction_body_leaf(body) ) { - } - - //! 
Copy constructor - multifunction_input( const multifunction_input& src, input_queue_type *q = NULL ) : - base_type(src, q), - my_body( src.my_body->clone() ) { - } - - ~multifunction_input() { - delete my_body; - } - - template< typename Body > - Body copy_function_object() { - internal::multifunction_body &body_ref = *this->my_body; - return dynamic_cast< internal::multifunction_body_leaf & >(body_ref).get_body(); - } - - // for multifunction nodes we do not have a single successor as such. So we just tell - // the task we were successful. - task * apply_body_impl_bypass( const input_type &i) { - tbb::internal::fgt_begin_body( my_body ); - (*my_body)(i, my_output_ports); - tbb::internal::fgt_end_body( my_body ); - task * new_task = SUCCESSFULLY_ENQUEUED; - return new_task; - } - - output_ports_type &output_ports(){ return my_output_ports; } - - protected: - - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { - base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - reset_element::reset_this(my_output_ports, f); - if(f & rf_reset_bodies) my_body->reset_body(); - __TBB_ASSERT(!(f & rf_extract) || reset_element::this_empty(my_output_ports), "multifunction_node reset failed"); -#endif - } - - multifunction_body *my_body; - output_ports_type my_output_ports; - - }; // multifunction_input - - // template to refer to an output port of a multifunction_node - template - typename tbb::flow::tuple_element::type &output_port(MOP &op) { - return tbb::flow::get(op.output_ports()); - } - -// helper structs for split_node - template - struct emit_element { - template - static void emit_this(const T &t, P &p) { - (void)tbb::flow::get(p).try_put(tbb::flow::get(t)); - emit_element::emit_this(t,p); - } - }; - - template<> - struct emit_element<1> { - template - static void emit_this(const T &t, P &p) { - (void)tbb::flow::get<0>(p).try_put(tbb::flow::get<0>(t)); - } - }; - - //! Implements methods for an executable node that takes continue_msg as input - template< typename Output > - class continue_input : public continue_receiver { - public: - - //! The input type of this receiver - typedef continue_msg input_type; - - //! The output type of this receiver - typedef Output output_type; - - template< typename Body > - continue_input( graph &g, Body& body ) - : my_graph_ptr(&g), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } - - template< typename Body > - continue_input( graph &g, int number_of_predecessors, Body& body ) - : continue_receiver( number_of_predecessors ), my_graph_ptr(&g), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } - - continue_input( const continue_input& src ) : continue_receiver(src), - my_graph_ptr(src.my_graph_ptr), my_body( src.my_body->clone() ) {} - - ~continue_input() { - delete my_body; - } - - template< typename Body > - Body copy_function_object() { - internal::function_body &body_ref = *my_body; - return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); - } - - /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) { - continue_receiver::reset_receiver(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if(f & rf_reset_bodies) my_body->reset_body(); -#endif - } - - protected: - - graph* my_graph_ptr; - function_body *my_body; - - virtual broadcast_cache &successors() = 0; - - friend class apply_body_task_bypass< continue_input< Output >, continue_msg >; - - //! 
Applies the body to the provided input - /* override */ task *apply_body_bypass( input_type ) { -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - // There is an extra copy needed to capture the - // body execution without the try_put - tbb::internal::fgt_begin_body( my_body ); - output_type v = (*my_body)( continue_msg() ); - tbb::internal::fgt_end_body( my_body ); - return successors().try_put_task( v ); -#else - return successors().try_put_task( (*my_body)( continue_msg() ) ); -#endif - } - - //! Spawns a task that applies the body - /* override */ task *execute( ) { - task* tp = my_graph_ptr->root_task(); - return (tp) ? - new ( task::allocate_additional_child_of( *tp ) ) - apply_body_task_bypass< continue_input< Output >, continue_msg >( *this, continue_msg() ) : - NULL; - } - - }; // continue_input - - //! Implements methods for both executable and function nodes that put Output to their successors - template< typename Output > - class function_output : public sender<Output> { - public: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - template<int N> friend struct reset_element; -#endif - typedef Output output_type; - typedef receiver<output_type> successor_type; - typedef broadcast_cache<output_type> broadcast_cache_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector<successor_type *> successor_vector_type; -#endif - - function_output() { my_successors.set_owner(this); } - function_output(const function_output & /*other*/) : sender<output_type>() { - my_successors.set_owner(this); - } - - //! Adds a new successor to this node - /* override */ bool register_successor( receiver<output_type> &r ) { - successors().register_successor( r ); - return true; - } - - //! Removes a successor from this node - /* override */ bool remove_successor( receiver<output_type> &r ) { - successors().remove_successor( r ); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void internal_add_built_successor( receiver<output_type> &r) { - successors().internal_add_built_successor( r ); - } - - /*override*/ void internal_delete_built_successor( receiver<output_type> &r) { - successors().internal_delete_built_successor( r ); - } - - /*override*/ size_t successor_count() { - return successors().successor_count(); - } - - /*override*/ void copy_successors( successor_vector_type &v) { - successors().copy_successors(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - // for multifunction_node. The function_body that implements - // the node will have an input and an output tuple of ports. To put - // an item to a successor, the body should - // - // get<I>(output_ports).try_put(output_value); - // - // the return value will be the bool returned from successors.try_put. 
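(try_put_task below is the successor-side half of that contract.) For orientation while reading this deleted code, the comment above is the whole protocol a multifunction_node body sees. A minimal user-level sketch of that pattern, assuming the tbb::flow API of this TBB vintage; the graph shape, node names, and even/odd routing are illustrative, not taken from this diff:

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        using namespace tbb::flow;
        graph g;
        // One input (int), two output ports: evens go to port 0, odds to port 1.
        typedef multifunction_node< int, tuple<int, int> > splitter_type;
        splitter_type splitter( g, unlimited,
            []( const int &v, splitter_type::output_ports_type &ports ) {
                if( v % 2 == 0 )
                    (void) tbb::flow::get<0>(ports).try_put(v);   // the get<I>(output_ports).try_put(...) pattern above
                else
                    (void) tbb::flow::get<1>(ports).try_put(v);
            } );
        function_node< int, continue_msg > evens( g, serial,
            []( const int &v ) { std::cout << "even: " << v << std::endl; return continue_msg(); } );
        function_node< int, continue_msg > odds( g, serial,
            []( const int &v ) { std::cout << "odd: " << v << std::endl; return continue_msg(); } );
        make_edge( output_port<0>(splitter), evens );
        make_edge( output_port<1>(splitter), odds );
        for( int i = 0; i < 8; ++i )
            splitter.try_put(i);
        g.wait_for_all();
        return 0;
    }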
- task *try_put_task(const output_type &i) { return my_successors.try_put_task(i); } - - protected: - broadcast_cache_type my_successors; - broadcast_cache_type &successors() { return my_successors; } - - }; // function_output - - template< typename Output > - class multifunction_output : public function_output { - public: - typedef Output output_type; - typedef function_output base_type; - using base_type::my_successors; - - multifunction_output() : base_type() {my_successors.set_owner(this);} - multifunction_output( const multifunction_output &/*other*/) : base_type() { my_successors.set_owner(this); } - - bool try_put(const output_type &i) { - task *res = my_successors.try_put_task(i); - if(!res) return false; - if(res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res); - return true; - } - }; // multifunction_output - -} // internal - -#endif // __TBB__flow_graph_node_impl_H diff --git a/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h b/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h deleted file mode 100644 index 8c13eb592..000000000 --- a/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// tagged buffer that can expand, and can support as many deletions as additions -// list-based, with elements of list held in array (for destruction management), -// multiplicative hashing (like ets). No synchronization built-in. -// - -#ifndef __TBB__flow_graph_tagged_buffer_impl_H -#define __TBB__flow_graph_tagged_buffer_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -// included in namespace tbb::flow::interface7::internal - -template -struct otherData { - T t; - U next; - otherData() : t(NoTagMark), next(NULL) {} -}; - -template -struct buffer_element_type { - // the second parameter below is void * because we can't forward-declare the type - // itself, so we just reinterpret_cast below. 
- typedef typename aligned_pair >::type type; -}; - -template - < - typename TagType, - typename ValueType, - size_t NoTagMark = 0, - typename Allocator=tbb::cache_aligned_allocator< typename buffer_element_type::type > - > -class tagged_buffer { -public: - static const size_t INITIAL_SIZE = 8; // initial size of the hash pointer table - static const TagType NO_TAG = TagType(NoTagMark); - typedef ValueType value_type; - typedef typename buffer_element_type::type element_type; - typedef value_type *pointer_type; - typedef element_type *list_array_type; // array we manage manually - typedef list_array_type *pointer_array_type; - typedef typename Allocator::template rebind::other pointer_array_allocator_type; - typedef typename Allocator::template rebind::other elements_array_allocator; -private: - size_t my_size; - size_t nelements; - pointer_array_type pointer_array; // pointer_array[my_size] - list_array_type elements_array; // elements_array[my_size / 2] - element_type* free_list; - - size_t mask() { return my_size - 1; } - - static size_t hash(TagType t) { - return uintptr_t(t)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value; - } - - void set_up_free_list( element_type **p_free_list, list_array_type la, size_t sz) { - for(size_t i=0; i < sz - 1; ++i ) { // construct free list - la[i].second.next = &(la[i+1]); - la[i].second.t = NO_TAG; - } - la[sz-1].second.next = NULL; - *p_free_list = &(la[0]); - } - - // cleanup for exceptions - struct DoCleanup { - pointer_array_type *my_pa; - list_array_type *my_elements; - size_t my_size; - - DoCleanup(pointer_array_type &pa, list_array_type &my_els, size_t sz) : - my_pa(&pa), my_elements(&my_els), my_size(sz) { } - ~DoCleanup() { - if(my_pa) { - size_t dont_care = 0; - internal_free_buffer(*my_pa, *my_elements, my_size, dont_care); - } - } - }; - - // exception-safety requires we do all the potentially-throwing operations first - void grow_array() { - size_t new_size = my_size*2; - size_t new_nelements = nelements; // internal_free_buffer zeroes this - list_array_type new_elements_array = NULL; - pointer_array_type new_pointer_array = NULL; - list_array_type new_free_list = NULL; - { - DoCleanup my_cleanup(new_pointer_array, new_elements_array, new_size); - new_elements_array = elements_array_allocator().allocate(my_size); - new_pointer_array = pointer_array_allocator_type().allocate(new_size); - for(size_t i=0; i < new_size; ++i) new_pointer_array[i] = NULL; - set_up_free_list(&new_free_list, new_elements_array, my_size ); - - for(size_t i=0; i < my_size; ++i) { - for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->second.next)) { - value_type *ov = reinterpret_cast(&(op->first)); - // could have std::move semantics - internal_tagged_insert(new_pointer_array, new_size, new_free_list, op->second.t, *ov); - } - } - my_cleanup.my_pa = NULL; - my_cleanup.my_elements = NULL; - } - - internal_free_buffer(pointer_array, elements_array, my_size, nelements); - free_list = new_free_list; - pointer_array = new_pointer_array; - elements_array = new_elements_array; - my_size = new_size; - nelements = new_nelements; - } - - // v should have perfect forwarding if std::move implemented. 
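An aside on the hash() method above: it is multiplicative (Fibonacci) hashing. The constant is 2^32 or 2^64 divided by the golden ratio, chosen so that consecutive tag values scatter widely before the result is masked down to the power-of-two table size. A self-contained sketch of the 64-bit case (the function name is illustrative):

    #include <stdint.h>
    #include <stddef.h>

    // Multiplicative (Fibonacci) hashing: multiply by 2^64 / golden-ratio,
    // then mask to a power-of-two table size. On 32-bit targets the deleted
    // code selects the 32-bit constant 0x9E3779B9 instead.
    static inline size_t fib_hash( uintptr_t tag, size_t table_size /* power of two */ ) {
        return (size_t)( tag * (uintptr_t)0x9E3779B97F4A7C15ULL ) & ( table_size - 1 );
    }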
- // we use this method to move elements in grow_array, so can't use class fields - void internal_tagged_insert( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list, - const TagType t, const value_type &v) { - size_t l_mask = p_sz-1; - size_t h = hash(t) & l_mask; - __TBB_ASSERT(p_free_list, "Error: free list not set up."); - element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->second.next); - my_elem->second.t = t; - (void) new(&(my_elem->first)) value_type(v); - my_elem->second.next = p_pointer_array[h]; - p_pointer_array[h] = my_elem; - } - - void internal_initialize_buffer() { - pointer_array = pointer_array_allocator_type().allocate(my_size); - for(size_t i = 0; i < my_size; ++i) pointer_array[i] = NULL; - elements_array = elements_array_allocator().allocate(my_size / 2); - set_up_free_list(&free_list, elements_array, my_size / 2); - } - - // made static so an enclosed class can use to properly dispose of the internals - static void internal_free_buffer( pointer_array_type &pa, list_array_type &el, size_t &sz, size_t &ne ) { - if(pa) { - for(size_t i = 0; i < sz; ++i ) { - element_type *p_next; - for( element_type *p = pa[i]; p; p = p_next) { - p_next = (element_type *)p->second.next; - value_type *vp = reinterpret_cast(&(p->first)); - vp->~value_type(); - } - } - pointer_array_allocator_type().deallocate(pa, sz); - pa = NULL; - } - // Separate test (if allocation of pa throws, el may be allocated. - // but no elements will be constructed.) - if(el) { - elements_array_allocator().deallocate(el, sz / 2); - el = NULL; - } - sz = INITIAL_SIZE; - ne = 0; - } - -public: - tagged_buffer() : my_size(INITIAL_SIZE), nelements(0) { - internal_initialize_buffer(); - } - - ~tagged_buffer() { - internal_free_buffer(pointer_array, elements_array, my_size, nelements); - } - - void reset() { - internal_free_buffer(pointer_array, elements_array, my_size, nelements); - internal_initialize_buffer(); - } - - bool tagged_insert(const TagType t, const value_type &v) { - pointer_type p; - if(tagged_find_ref(t, p)) { - p->~value_type(); - (void) new(p) value_type(v); // copy-construct into the space - return false; - } - ++nelements; - if(nelements*2 > my_size) grow_array(); - internal_tagged_insert(pointer_array, my_size, free_list, t, v); - return true; - } - - // returns reference to array element.v - bool tagged_find_ref(const TagType t, pointer_type &v) { - size_t i = hash(t) & mask(); - for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->second.next)) { - if(p->second.t == t) { - v = reinterpret_cast(&(p->first)); - return true; - } - } - return false; - } - - bool tagged_find( const TagType t, value_type &v) { - value_type *p; - if(tagged_find_ref(t, p)) { - v = *p; - return true; - } - else - return false; - } - - void tagged_delete(const TagType t) { - size_t h = hash(t) & mask(); - element_type* prev = NULL; - for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->second.next)) { - if(p->second.t == t) { - value_type *vp = reinterpret_cast(&(p->first)); - vp->~value_type(); - p->second.t = NO_TAG; - if(prev) prev->second.next = p->second.next; - else pointer_array[h] = (element_type *)(p->second.next); - p->second.next = free_list; - free_list = p; - --nelements; - return; - } - } - __TBB_ASSERT(false, "tag not found for delete"); - } -}; -#endif // __TBB__flow_graph_tagged_buffer_impl_H diff --git a/inst/include/tbb/internal/_flow_graph_trace_impl.h b/inst/include/tbb/internal/_flow_graph_trace_impl.h deleted 
file mode 100644 index 43efc7c8e..000000000 --- a/inst/include/tbb/internal/_flow_graph_trace_impl.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _FGT_GRAPH_TRACE_IMPL_H -#define _FGT_GRAPH_TRACE_IMPL_H - -#include "../tbb_profiling.h" - -namespace tbb { - namespace internal { - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - -static inline void fgt_internal_create_input_port( void *node, void *p, string_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); -} - -static inline void fgt_internal_create_output_port( void *node, void *p, string_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index ); -} - -template < typename TypesTuple, typename PortsTuple, int N > -struct fgt_internal_input_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element::type > * >(&(tbb::flow::get(ports))), - static_cast(FLOW_INPUT_PORT_0 + N - 1) ); - fgt_internal_input_helper::register_port( node, ports ); - } -}; - -template < typename TypesTuple, typename PortsTuple > -struct fgt_internal_input_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))), - FLOW_INPUT_PORT_0 ); - } -}; - -template < typename TypesTuple, typename PortsTuple, int N > -struct fgt_internal_output_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< typename tbb::flow::tuple_element::type > * >(&(tbb::flow::get(ports))), - static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); - fgt_internal_output_helper::register_port( node, ports ); - } -}; - -template < typename TypesTuple, typename PortsTuple > -struct fgt_internal_output_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< typename 
tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))), - FLOW_OUTPUT_PORT_0 ); - } -}; - -template< typename NodeType > -void fgt_multioutput_node_desc( const NodeType *node, const char *desc ) { - void *addr = (void *)( static_cast< tbb::flow::interface7::receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) ); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); -} - -template< typename NodeType > -static inline void fgt_node_desc( const NodeType *node, const char *desc ) { - void *addr = (void *)( static_cast< tbb::flow::interface7::sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) ); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); -} - -static inline void fgt_graph_desc( void *g, const char *desc ) { - itt_metadata_str_add( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, FLOW_OBJECT_NAME, desc ); -} - -static inline void fgt_body( void *node, void *body ) { - itt_relation_add( ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE ); -} - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node( string_index t, void *g, void *input_port, PortsTuple &ports ) { - itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); - fgt_internal_output_helper::register_port( input_port, ports ); -} - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node_with_body( string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) { - itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); - fgt_internal_output_helper::register_port( input_port, ports ); - fgt_body( input_port, body ); -} - - -template< typename InputTuple, int N, typename PortsTuple > -static inline void fgt_multiinput_node( string_index t, void *g, PortsTuple &ports, void *output_port) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); - fgt_internal_input_helper::register_port( output_port, ports ); -} - -static inline void fgt_node( string_index t, void *g, void *output_port ) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); -} - -static inline void fgt_node_with_body( string_index t, void *g, void *output_port, void *body ) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); - fgt_body( output_port, body ); -} - - -static inline void fgt_node( string_index t, void *g, void *input_port, void *output_port ) { - fgt_node( t, g, output_port ); - fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); -} - -static inline void fgt_node_with_body( string_index t, void *g, void *input_port, void *output_port, void *body ) { - fgt_node_with_body( t, g, output_port, body ); - fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); -} - - -static inline void fgt_node( string_index t, void *g, void *input_port, void *decrement_port, void *output_port ) { - fgt_node( t, g, input_port, output_port ); 
- fgt_internal_create_input_port( output_port, decrement_port, FLOW_INPUT_PORT_1 ); -} - -static inline void fgt_make_edge( void *output_port, void *input_port ) { - itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT); -} - -static inline void fgt_remove_edge( void *output_port, void *input_port ) { - itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT); -} - -static inline void fgt_graph( void *g ) { - itt_make_task_group( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, NULL, FLOW_NULL, FLOW_GRAPH ); -} - -static inline void fgt_begin_body( void *body ) { - itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, NULL, FLOW_NULL, FLOW_NULL ); -} - -static inline void fgt_end_body( void * ) { - itt_task_end( ITT_DOMAIN_FLOW ); -} - -#else // TBB_PREVIEW_FLOW_GRAPH_TRACE - -static inline void fgt_graph( void * /*g*/ ) { } - -template< typename NodeType > -static inline void fgt_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } - -template< typename NodeType > -static inline void fgt_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } - -static inline void fgt_graph_desc( void * /*g*/, const char * /*desc*/ ) { } - -static inline void fgt_body( void * /*node*/, void * /*body*/ ) { } - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { } - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { } - -template< typename InputTuple, int N, typename PortsTuple > -static inline void fgt_multiinput_node( string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { } - -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { } - -static inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { } -static inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { } - -static inline void fgt_make_edge( void * /*output_port*/, void * /*input_port*/ ) { } -static inline void fgt_remove_edge( void * /*output_port*/, void * /*input_port*/ ) { } - -static inline void fgt_begin_body( void * /*body*/ ) { } -static inline void fgt_end_body( void * /*body*/) { } - -#endif // TBB_PREVIEW_FLOW_GRAPH_TRACE - - } // namespace internal -} // namespace tbb - -#endif diff --git a/inst/include/tbb/internal/_flow_graph_types_impl.h b/inst/include/tbb/internal/_flow_graph_types_impl.h deleted file mode 100644 index 28a525a4d..000000000 --- a/inst/include/tbb/internal/_flow_graph_types_impl.h +++ /dev/null @@ -1,497 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_types_impl_H -#define __TBB__flow_graph_types_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -// included in namespace tbb::flow::interface7 - -namespace internal { -// wrap each element of a tuple in a template, and make a tuple of the result. - - template class PT, typename TypeTuple> - struct wrap_tuple_elements; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<1, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<2, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<3, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<4, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<5, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; - -#if __TBB_VARIADIC_MAX >= 6 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<6, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 7 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<7, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 8 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<8, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 9 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<9, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 10 - template class PT, typename TypeTuple> - struct 
wrap_tuple_elements<10, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -//! type mimicking std::pair but with trailing fill to ensure each element of an array -//* will have the correct alignment - template - struct type_plus_align { - char first[sizeof(T1)]; - T2 second; - char fill1[REM]; - }; - - template - struct type_plus_align { - char first[sizeof(T1)]; - T2 second; - }; - - template struct alignment_of { - typedef struct { char t; U padded; } test_alignment; - static const size_t value = sizeof(test_alignment) - sizeof(U); - }; - - // T1, T2 are actual types stored. The space defined for T1 in the type returned - // is a char array of the correct size. Type T2 should be trivially-constructible, - // T1 must be explicitly managed. - template - struct aligned_pair { - static const size_t t1_align = alignment_of::value; - static const size_t t2_align = alignment_of::value; - typedef type_plus_align just_pair; - static const size_t max_align = t1_align < t2_align ? t2_align : t1_align; - static const size_t extra_bytes = sizeof(just_pair) % max_align; - static const size_t remainder = extra_bytes ? max_align - extra_bytes : 0; - public: - typedef type_plus_align type; - }; // aligned_pair - -// support for variant type -// type we use when we're not storing a value -struct default_constructed { }; - -// type which contains another type, tests for what type is contained, and references to it. -// internal::Wrapper -// void CopyTo( void *newSpace) : builds a Wrapper copy of itself in newSpace - -// struct to allow us to copy and test the type of objects -struct WrapperBase { - virtual ~WrapperBase() {} - virtual void CopyTo(void* /*newSpace*/) const { } -}; - -// Wrapper contains a T, with the ability to test what T is. The Wrapper can be -// constructed from a T, can be copy-constructed from another Wrapper, and can be -// examined via value(), but not modified. -template -struct Wrapper: public WrapperBase { - typedef T value_type; - typedef T* pointer_type; -private: - T value_space; -public: - const value_type &value() const { return value_space; } - -private: - Wrapper(); - - // on exception will ensure the Wrapper will contain only a trivially-constructed object - struct _unwind_space { - pointer_type space; - _unwind_space(pointer_type p) : space(p) {} - ~_unwind_space() { - if(space) (void) new (space) Wrapper(default_constructed()); - } - }; -public: - explicit Wrapper( const T& other ) : value_space(other) { } - explicit Wrapper(const Wrapper& other) : value_space(other.value_space) { } - - /*override*/void CopyTo(void* newSpace) const { - _unwind_space guard((pointer_type)newSpace); - (void) new(newSpace) Wrapper(value_space); - guard.space = NULL; - } - /*override*/~Wrapper() { } -}; - -// specialization for array objects -template -struct Wrapper : public WrapperBase { - typedef T value_type; - typedef T* pointer_type; - // space must be untyped. - typedef T ArrayType[N]; -private: - // The space is not of type T[N] because when copy-constructing, it would be - // default-initialized and then copied to in some fashion, resulting in two - // constructions and one destruction per element. If the type is char[ ], we - // placement new into each element, resulting in one construction per element. 
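(The space_size/value_space members below implement exactly that.) The rationale just above, keep raw bytes, placement-new each element exactly once, then destroy in reverse, is a general C++ idiom worth seeing in isolation. A standalone sketch, not the deleted Wrapper itself; alignment handling is deliberately simplified and assumed adequate for T:

    #include <cstddef>
    #include <new>

    // Fixed-capacity storage that copy-constructs each element exactly once,
    // instead of default-constructing T[N] and then assigning over it.
    template< typename T, std::size_t N >
    class once_built_array {
        char space[ sizeof(T) * N ];     // raw bytes; production code must also guarantee alignment
        std::size_t built;
    public:
        once_built_array() : built(0) {}
        void push_back( const T &v ) {
            if( built < N )
                new ( space + built++ * sizeof(T) ) T(v);   // one copy-construction per element
        }
        T &operator[]( std::size_t i ) { return reinterpret_cast<T*>(space)[i]; }
        ~once_built_array() {
            for( std::size_t i = built; i > 0; --i )        // destroy in reverse order
                reinterpret_cast<T*>(space)[i-1].~T();
        }
    };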
- static const size_t space_size = sizeof(ArrayType) / sizeof(char); - char value_space[space_size]; - - - // on exception will ensure the already-built objects will be destructed - // (the value_space is a char array, so it is already trivially-destructible.) - struct _unwind_class { - pointer_type space; - int already_built; - _unwind_class(pointer_type p) : space(p), already_built(0) {} - ~_unwind_class() { - if(space) { - for(size_t i = already_built; i > 0 ; --i ) space[i-1].~value_type(); - (void) new(space) Wrapper(default_constructed()); - } - } - }; -public: - const ArrayType &value() const { - char *vp = const_cast(value_space); - return reinterpret_cast(*vp); - } - -private: - Wrapper(); -public: - // have to explicitly construct because other decays to a const value_type* - explicit Wrapper(const ArrayType& other) { - _unwind_class guard((pointer_type)value_space); - pointer_type vp = reinterpret_cast(&value_space); - for(size_t i = 0; i < N; ++i ) { - (void) new(vp++) value_type(other[i]); - ++(guard.already_built); - } - guard.space = NULL; - } - explicit Wrapper(const Wrapper& other) : WrapperBase() { - // we have to do the heavy lifting to copy contents - _unwind_class guard((pointer_type)value_space); - pointer_type dp = reinterpret_cast(value_space); - pointer_type sp = reinterpret_cast(const_cast(other.value_space)); - for(size_t i = 0; i < N; ++i, ++dp, ++sp) { - (void) new(dp) value_type(*sp); - ++(guard.already_built); - } - guard.space = NULL; - } - - /*override*/void CopyTo(void* newSpace) const { - (void) new(newSpace) Wrapper(*this); // exceptions handled in copy constructor - } - - /*override*/~Wrapper() { - // have to destroy explicitly in reverse order - pointer_type vp = reinterpret_cast(&value_space); - for(size_t i = N; i > 0 ; --i ) vp[i-1].~value_type(); - } -}; - -// given a tuple, return the type of the element that has the maximum alignment requirement. -// Given a tuple and that type, return the number of elements of the object with the max -// alignment requirement that is at least as big as the largest object in the tuple. - -template struct pick_one; -template struct pick_one { typedef T1 type; }; -template struct pick_one { typedef T2 type; }; - -template< template class Selector, typename T1, typename T2 > -struct pick_max { - typedef typename pick_one< (Selector::value > Selector::value), T1, T2 >::type type; -}; - -template struct size_of { static const int value = sizeof(T); }; - -template< size_t N, class Tuple, template class Selector > struct pick_tuple_max { - typedef typename pick_tuple_max::type LeftMaxType; - typedef typename tbb::flow::tuple_element::type ThisType; - typedef typename pick_max::type type; -}; - -template< class Tuple, template class Selector > struct pick_tuple_max<0, Tuple, Selector> { - typedef typename tbb::flow::tuple_element<0, Tuple>::type type; -}; - -// is the specified type included in a tuple? - -template struct is_same_type { static const bool value = false; }; -template struct is_same_type { static const bool value = true; }; - -template -struct is_element_of { - typedef typename tbb::flow::tuple_element::type T_i; - static const bool value = is_same_type::value || is_element_of::value; -}; - -template -struct is_element_of { - typedef typename tbb::flow::tuple_element<0, Tuple>::type T_i; - static const bool value = is_same_type::value; -}; - -// allow the construction of types that are listed tuple. If a disallowed type -// construction is written, a method involving this type is created. 
The -// type has no definition, so a syntax error is generated. -template struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple; - -template struct do_if; -template -struct do_if { - static void construct(void *mySpace, const T& x) { - (void) new(mySpace) Wrapper(x); - } -}; -template -struct do_if { - static void construct(void * /*mySpace*/, const T& x) { - // This method is instantiated when the type T does not match any of the - // element types in the Tuple in variant. - ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple::bad_type(x); - } -}; - -// Tuple tells us the allowed types that variant can hold. It determines the alignment of the space in -// Wrapper, and how big Wrapper is. -// -// the object can only be tested for type, and a read-only reference can be fetched by cast_to(). - -using tbb::internal::punned_cast; -struct tagged_null_type {}; -template -class tagged_msg { - typedef tbb::flow::tuple= 6 - , T5 - #endif - #if __TBB_VARIADIC_MAX >= 7 - , T6 - #endif - #if __TBB_VARIADIC_MAX >= 8 - , T7 - #endif - #if __TBB_VARIADIC_MAX >= 9 - , T8 - #endif - #if __TBB_VARIADIC_MAX >= 10 - , T9 - #endif - > Tuple; - -private: - class variant { - static const size_t N = tbb::flow::tuple_size::value; - typedef typename pick_tuple_max::type AlignType; - typedef typename pick_tuple_max::type MaxSizeType; - static const size_t MaxNBytes = (sizeof(Wrapper)+sizeof(AlignType)-1); - static const size_t MaxNElements = MaxNBytes/sizeof(AlignType); - typedef typename tbb::aligned_space SpaceType; - SpaceType my_space; - static const size_t MaxSize = sizeof(SpaceType); - - public: - variant() { (void) new(&my_space) Wrapper(default_constructed()); } - - template - variant( const T& x ) { - do_if::value>::construct(&my_space,x); - } - - variant(const variant& other) { - const WrapperBase * h = punned_cast(&(other.my_space)); - h->CopyTo(&my_space); - } - - // assignment must destroy and re-create the Wrapper type, as there is no way - // to create a Wrapper-to-Wrapper assign even if we find they agree in type. 
- void operator=( const variant& rhs ) { - if(&rhs != this) { - WrapperBase *h = punned_cast(&my_space); - h->~WrapperBase(); - const WrapperBase *ch = punned_cast(&(rhs.my_space)); - ch->CopyTo(&my_space); - } - } - - template - const U& variant_cast_to() const { - const Wrapper *h = dynamic_cast*>(punned_cast(&my_space)); - if(!h) { - tbb::internal::throw_exception(tbb::internal::eid_bad_tagged_msg_cast); - } - return h->value(); - } - template - bool variant_is_a() const { return dynamic_cast*>(punned_cast(&my_space)) != NULL; } - - bool variant_is_default_constructed() const {return variant_is_a();} - - ~variant() { - WrapperBase *h = punned_cast(&my_space); - h->~WrapperBase(); - } - }; //class variant - - TagType my_tag; - variant my_msg; - -public: - tagged_msg(): my_tag(TagType(~0)), my_msg(){} - - template - tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {} - - #if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN - template - tagged_msg(T const &index, R (&value)[N]) : my_tag(index), my_msg(value) {} - #endif - - void set_tag(TagType const &index) {my_tag = index;} - TagType tag() const {return my_tag;} - - template - const V& cast_to() const {return my_msg.template variant_cast_to();} - - template - bool is_a() const {return my_msg.template variant_is_a();} - - bool is_default_constructed() const {return my_msg.variant_is_default_constructed();} -}; //class tagged_msg - -// template to simplify cast and test for tagged_msg in template contexts -template -const T& cast_to(V const &v) { return v.template cast_to(); } - -template -bool is_a(V const &v) { return v.template is_a(); } - -} // namespace internal - -#endif /* __TBB__flow_graph_types_impl_H */ diff --git a/inst/include/tbb/internal/_mutex_padding.h b/inst/include/tbb/internal/_mutex_padding.h deleted file mode 100644 index ae07599c0..000000000 --- a/inst/include/tbb/internal/_mutex_padding.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_mutex_padding_H -#define __TBB_mutex_padding_H - -// wrapper for padding mutexes to be alone on a cache line, without requiring they be allocated -// from a pool. Because we allow them to be defined anywhere they must be two cache lines in size. 
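Before the implementation resumes below, the padding idea in that prologue can be shown in a few lines: oversize the object by two cache lines and construct the real mutex at the first 64-byte boundary inside the buffer, so it never shares a line with neighbouring data. A generic sketch, assuming a 64-byte line and sizeof(M) <= 64; the class name is illustrative:

    #include <stdint.h>
    #include <new>

    template< typename M >               // M: any default-constructible mutex type
    class line_padded {
        char pad[128];                   // two cache lines of raw space
        M *impl() {
            // (addr | 63) + 1 rounds up to the next 64-byte boundary inside pad
            return reinterpret_cast<M*>( (uintptr_t(pad) | 63) + 1 );
        }
    public:
        line_padded()  { new ( impl() ) M(); }
        ~line_padded() { impl()->~M(); }
        M &mutex() { return *impl(); }
    };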
- - -namespace tbb { -namespace interface7 { -namespace internal { - -static const size_t cache_line_size = 64; - -// Pad a mutex to occupy a number of full cache lines sufficient to avoid false sharing -// with other data; space overhead is up to 2*cache_line_size-1. -template class padded_mutex; - -template -class padded_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { - typedef long pad_type; - pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)]; - - Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);} - -public: - static const bool is_rw_mutex = Mutex::is_rw_mutex; - static const bool is_recursive_mutex = Mutex::is_recursive_mutex; - static const bool is_fair_mutex = Mutex::is_fair_mutex; - - padded_mutex() { new(impl()) Mutex(); } - ~padded_mutex() { impl()->~Mutex(); } - - //! Represents acquisition of a mutex. - class scoped_lock : tbb::internal::no_copy { - typename Mutex::scoped_lock my_scoped_lock; - public: - scoped_lock() : my_scoped_lock() {} - scoped_lock( padded_mutex& m ) : my_scoped_lock(*m.impl()) { } - ~scoped_lock() { } - - void acquire( padded_mutex& m ) { my_scoped_lock.acquire(*m.impl()); } - bool try_acquire( padded_mutex& m ) { return my_scoped_lock.try_acquire(*m.impl()); } - void release() { my_scoped_lock.release(); } - }; -}; - -template -class padded_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { - typedef long pad_type; - pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)]; - - Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);} - -public: - static const bool is_rw_mutex = Mutex::is_rw_mutex; - static const bool is_recursive_mutex = Mutex::is_recursive_mutex; - static const bool is_fair_mutex = Mutex::is_fair_mutex; - - padded_mutex() { new(impl()) Mutex(); } - ~padded_mutex() { impl()->~Mutex(); } - - //! Represents acquisition of a mutex. - class scoped_lock : tbb::internal::no_copy { - typename Mutex::scoped_lock my_scoped_lock; - public: - scoped_lock() : my_scoped_lock() {} - scoped_lock( padded_mutex& m, bool write = true ) : my_scoped_lock(*m.impl(),write) { } - ~scoped_lock() { } - - void acquire( padded_mutex& m, bool write = true ) { my_scoped_lock.acquire(*m.impl(),write); } - bool try_acquire( padded_mutex& m, bool write = true ) { return my_scoped_lock.try_acquire(*m.impl(),write); } - bool upgrade_to_writer() { return my_scoped_lock.upgrade_to_writer(); } - bool downgrade_to_reader() { return my_scoped_lock.downgrade_to_reader(); } - void release() { my_scoped_lock.release(); } - }; -}; - -} // namespace internal -} // namespace interface7 -} // namespace tbb - -#endif /* __TBB_mutex_padding_H */ diff --git a/inst/include/tbb/internal/_range_iterator.h b/inst/include/tbb/internal/_range_iterator.h deleted file mode 100644 index 0622c4ffb..000000000 --- a/inst/include/tbb/internal/_range_iterator.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_range_iterator_H -#define __TBB_range_iterator_H - -#include "../tbb_stddef.h" - -#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT - #include -#endif - -namespace tbb { - // iterators to first and last elements of container - namespace internal { - -#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT - using std::begin; - using std::end; - template - auto first(Container& c)-> decltype(begin(c)) {return begin(c);} - - template - auto first(const Container& c)-> decltype(begin(c)) {return begin(c);} - - template - auto last(Container& c)-> decltype(begin(c)) {return end(c);} - - template - auto last(const Container& c)-> decltype(begin(c)) {return end(c);} -#else - template - typename Container::iterator first(Container& c) {return c.begin();} - - template - typename Container::const_iterator first(const Container& c) {return c.begin();} - - template - typename Container::iterator last(Container& c) {return c.end();} - - template - typename Container::const_iterator last(const Container& c) {return c.end();} -#endif - - template - T* first(T (&arr) [size]) {return arr;} - - template - T* last(T (&arr) [size]) {return arr + size;} - } //namespace internal -} //namespace tbb - -#endif // __TBB_range_iterator_H diff --git a/inst/include/tbb/internal/_tbb_strings.h b/inst/include/tbb/internal/_tbb_strings.h deleted file mode 100644 index ccffe1d36..000000000 --- a/inst/include/tbb/internal/_tbb_strings.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -TBB_STRING_RESOURCE(FLOW_BROADCAST_NODE, "broadcast_node") -TBB_STRING_RESOURCE(FLOW_BUFFER_NODE, "buffer_node") -TBB_STRING_RESOURCE(FLOW_CONTINUE_NODE, "continue_node") -TBB_STRING_RESOURCE(FLOW_FUNCTION_NODE, "function_node") -TBB_STRING_RESOURCE(FLOW_JOIN_NODE_QUEUEING, "join_node (queueing)") -TBB_STRING_RESOURCE(FLOW_JOIN_NODE_RESERVING, "join_node (reserving)") -TBB_STRING_RESOURCE(FLOW_JOIN_NODE_TAG_MATCHING, "join_node (tag_matching)") -TBB_STRING_RESOURCE(FLOW_LIMITER_NODE, "limiter_node") -TBB_STRING_RESOURCE(FLOW_MULTIFUNCTION_NODE, "multifunction_node") -TBB_STRING_RESOURCE(FLOW_OR_NODE, "or_node") //no longer in use, kept for backward compatibility -TBB_STRING_RESOURCE(FLOW_OVERWRITE_NODE, "overwrite_node") -TBB_STRING_RESOURCE(FLOW_PRIORITY_QUEUE_NODE, "priority_queue_node") -TBB_STRING_RESOURCE(FLOW_QUEUE_NODE, "queue_node") -TBB_STRING_RESOURCE(FLOW_SEQUENCER_NODE, "sequencer_node") -TBB_STRING_RESOURCE(FLOW_SOURCE_NODE, "source_node") -TBB_STRING_RESOURCE(FLOW_SPLIT_NODE, "split_node") -TBB_STRING_RESOURCE(FLOW_WRITE_ONCE_NODE, "write_once_node") -TBB_STRING_RESOURCE(FLOW_BODY, "body") -TBB_STRING_RESOURCE(FLOW_GRAPH, "graph") -TBB_STRING_RESOURCE(FLOW_NODE, "node") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT, "input_port") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_0, "input_port_0") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_1, "input_port_1") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_2, "input_port_2") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_3, "input_port_3") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_4, "input_port_4") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_5, "input_port_5") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_6, "input_port_6") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_7, "input_port_7") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_8, "input_port_8") -TBB_STRING_RESOURCE(FLOW_INPUT_PORT_9, "input_port_9") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT, "output_port") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_0, "output_port_0") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_1, "output_port_1") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_2, "output_port_2") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_3, "output_port_3") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_4, "output_port_4") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_5, "output_port_5") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_6, "output_port_6") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_7, "output_port_7") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_8, "output_port_8") -TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_9, "output_port_9") -TBB_STRING_RESOURCE(FLOW_OBJECT_NAME, "object_name") -TBB_STRING_RESOURCE(FLOW_NULL, "null") -TBB_STRING_RESOURCE(FLOW_INDEXER_NODE, "indexer_node") diff --git a/inst/include/tbb/internal/_tbb_windef.h b/inst/include/tbb/internal/_tbb_windef.h deleted file mode 100644 index 551dc2b0b..000000000 --- a/inst/include/tbb/internal/_tbb_windef.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_windef_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif /* __TBB_tbb_windef_H */ - -// Check that the target Windows version has all API calls required for TBB. -// Do not increase the version in condition beyond 0x0500 without prior discussion! -#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501 -#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater. -#endif - -#if !defined(_MT) -#error TBB requires linkage with multithreaded C/C++ runtime library. \ - Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch. -#endif - -// Workaround for the problem with MSVC headers failing to define namespace std -namespace std { - using ::size_t; using ::ptrdiff_t; -} - -#define __TBB_STRING_AUX(x) #x -#define __TBB_STRING(x) __TBB_STRING_AUX(x) - -// Default setting of TBB_USE_DEBUG -#ifdef TBB_USE_DEBUG -# if TBB_USE_DEBUG -# if !defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0") -# endif -# else -# if defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0") -# endif -# endif -#endif - -#if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#endif - -#if _MSC_VER - #if !__TBB_NO_IMPLICIT_LINKAGE - #ifdef __TBB_LIB_NAME - #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) - #else - #ifdef _DEBUG - #pragma comment(lib, "tbb_debug.lib") - #else - #pragma comment(lib, "tbb.lib") - #endif - #endif - #endif -#endif diff --git a/inst/include/tbb/internal/_x86_eliding_mutex_impl.h b/inst/include/tbb/internal/_x86_eliding_mutex_impl.h deleted file mode 100644 index d73877aa8..000000000 --- a/inst/include/tbb/internal/_x86_eliding_mutex_impl.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__x86_eliding_mutex_impl_H -#define __TBB__x86_eliding_mutex_impl_H - -#ifndef __TBB_spin_mutex_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if ( __TBB_x86_32 || __TBB_x86_64 ) - -namespace tbb { -namespace interface7 { -namespace internal { - -template -class padded_mutex; - -//! An eliding lock that occupies a single byte. -/** A x86_eliding_mutex is an HLE-enabled spin mutex. It is recommended to - put the mutex on a cache line that is not shared by the data it protects. - It should be used for locking short critical sections where the lock is - contended but the data it protects are not. If zero-initialized, the - mutex is considered unheld. - @ingroup synchronization */ -class x86_eliding_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { - //! 0 if lock is released, 1 if lock is acquired. - __TBB_atomic_flag flag; - - friend class padded_mutex; - -public: - //! Construct unacquired lock. - /** Equivalent to zero-initialization of *this. */ - x86_eliding_mutex() : flag(0) {} - -// bug in gcc 3.x.x causes syntax error in spite of the friend declaration above. -// Make the scoped_lock public in that case. -#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 -#else - // by default we will not provide the scoped_lock interface. The user - // should use the padded version of the mutex. scoped_lock is used in - // padded_mutex template. -private: -#endif - // scoped_lock in padded_mutex<> is the interface to use. - //! Represents acquisition of a mutex. - class scoped_lock : tbb::internal::no_copy { - private: - //! Points to currently held mutex, or NULL if no lock is held. - x86_eliding_mutex* my_mutex; - - public: - //! Construct without acquiring a mutex. - scoped_lock() : my_mutex(NULL) {} - - //! Construct and acquire lock on a mutex. - scoped_lock( x86_eliding_mutex& m ) : my_mutex(NULL) { acquire(m); } - - //! Acquire lock. - void acquire( x86_eliding_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock" ); - - my_mutex=&m; - my_mutex->lock(); - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_acquire( x86_eliding_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock" ); - - bool result = m.try_lock(); - if( result ) { - my_mutex = &m; - } - return result; - } - - //! 
Release lock - void release() { - __TBB_ASSERT( my_mutex, "release on scoped_lock that is not holding a lock" ); - - my_mutex->unlock(); - my_mutex = NULL; - } - - //! Destroy lock. If holding a lock, releases the lock first. - ~scoped_lock() { - if( my_mutex ) { - release(); - } - } - }; -#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 -#else -public: -#endif /* __TBB_USE_X86_ELIDING_MUTEX */ - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { - __TBB_LockByteElided(flag); - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { - return __TBB_TryLockByteElided(flag); - } - - //! Release lock - void unlock() { - __TBB_UnlockByteElided( flag ); - } -}; // end of x86_eliding_mutex - -} // namespace internal -} // namespace interface7 -} // namespace tbb - -#endif /* ( __TBB_x86_32 || __TBB_x86_64 ) */ - -#endif /* __TBB__x86_eliding_mutex_impl_H */ diff --git a/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h b/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h deleted file mode 100644 index 9fb8c82f3..000000000 --- a/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h +++ /dev/null @@ -1,225 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__x86_rtm_rw_mutex_impl_H -#define __TBB__x86_rtm_rw_mutex_impl_H - -#ifndef __TBB_spin_rw_mutex_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if __TBB_TSX_AVAILABLE - -#include "../tbb_stddef.h" -#include "../tbb_machine.h" -#include "../tbb_profiling.h" -#include "../spin_rw_mutex.h" - -namespace tbb { -namespace interface8 { -namespace internal { - -enum RTM_type { - RTM_not_in_mutex, - RTM_transacting_reader, - RTM_transacting_writer, - RTM_real_reader, - RTM_real_writer -}; - -static const unsigned long speculation_granularity = 64; - -//! 
Fast, unfair, spinning speculation-enabled reader-writer lock with backoff and -// writer-preference -/** @ingroup synchronization */ -class x86_rtm_rw_mutex: private spin_rw_mutex { -#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000 -// bug in gcc 3.x.x causes syntax error in spite of the friend declaration below. -// Make the scoped_lock public in that case. -public: -#else -private: -#endif - friend class interface7::internal::padded_mutex; - class scoped_lock; // should be private - friend class scoped_lock; -private: - //! @cond INTERNAL - - //! Internal construct unacquired mutex. - void __TBB_EXPORTED_METHOD internal_construct(); - - //! Internal acquire write lock. - // only_speculate == true if we're doing a try_lock, else false. - void __TBB_EXPORTED_METHOD internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false); - - //! Internal acquire read lock. - // only_speculate == true if we're doing a try_lock, else false. - void __TBB_EXPORTED_METHOD internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false); - - //! Internal upgrade reader to become a writer. - bool __TBB_EXPORTED_METHOD internal_upgrade( x86_rtm_rw_mutex::scoped_lock& ); - - //! Out of line code for downgrading a writer to a reader. - bool __TBB_EXPORTED_METHOD internal_downgrade( x86_rtm_rw_mutex::scoped_lock& ); - - //! Internal try_acquire write lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_writer( x86_rtm_rw_mutex::scoped_lock& ); - - //! Internal release lock. - void __TBB_EXPORTED_METHOD internal_release( x86_rtm_rw_mutex::scoped_lock& ); - - static x86_rtm_rw_mutex* internal_get_mutex( const spin_rw_mutex::scoped_lock& lock ) - { - return static_cast( lock.internal_get_mutex() ); - } - static void internal_set_mutex( spin_rw_mutex::scoped_lock& lock, spin_rw_mutex* mtx ) - { - lock.internal_set_mutex( mtx ); - } - //! @endcond -public: - //! Construct unacquired mutex. - x86_rtm_rw_mutex() { - w_flag = false; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - -#if TBB_USE_ASSERT - //! Empty destructor. - ~x86_rtm_rw_mutex() {} -#endif /* TBB_USE_ASSERT */ - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - -#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000 -#else - // by default we will not provide the scoped_lock interface. The user - // should use the padded version of the mutex. scoped_lock is used in - // padded_mutex template. -private: -#endif - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - // Speculation-enabled scoped lock for spin_rw_mutex - // The idea is to be able to reuse the acquire/release methods of spin_rw_mutex - // and its scoped lock wherever possible. The only way to use a speculative lock is to use - // a scoped_lock. (because transaction_state must be local) - - class scoped_lock : tbb::internal::no_copy { - friend class x86_rtm_rw_mutex; - spin_rw_mutex::scoped_lock my_scoped_lock; - - RTM_type transaction_state; - - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : my_scoped_lock(), transaction_state(RTM_not_in_mutex) { - } - - //! Acquire lock on given mutex. 
- scoped_lock( x86_rtm_rw_mutex& m, bool write = true ) : my_scoped_lock(), - transaction_state(RTM_not_in_mutex) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if(transaction_state != RTM_not_in_mutex) release(); - } - - //! Acquire lock on given mutex. - void acquire( x86_rtm_rw_mutex& m, bool write = true ) { - if( write ) m.internal_acquire_writer(*this); - else m.internal_acquire_reader(*this); - } - - //! Release lock - void release() { - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state!=RTM_not_in_mutex, "lock is not acquired" ); - return mutex->internal_release(*this); - } - - //! Upgrade reader to become a writer. - /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer() { - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state==RTM_transacting_reader || transaction_state==RTM_real_reader, "Invalid state for upgrade" ); - return mutex->internal_upgrade(*this); - } - - //! Downgrade writer to become a reader. - /** Returns whether the downgrade happened without releasing and re-acquiring the lock */ - bool downgrade_to_reader() { - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state==RTM_transacting_writer || transaction_state==RTM_real_writer, "Invalid state for downgrade" ); - return mutex->internal_downgrade(*this); - } - - //! Attempt to acquire mutex. - /** returns true if successful. */ - bool try_acquire( x86_rtm_rw_mutex& m, bool write = true ) { -#if TBB_USE_ASSERT - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( !mutex, "lock is already acquired" ); -#endif - // have to assign m to our mutex. - // cannot set the mutex, because try_acquire in spin_rw_mutex depends on it being NULL. - if(write) return m.internal_try_acquire_writer(*this); - // speculatively acquire the lock. If this fails, do try_acquire on the spin_rw_mutex. - m.internal_acquire_reader(*this, /*only_speculate=*/true); - if(transaction_state == RTM_transacting_reader) return true; - if( my_scoped_lock.try_acquire(m, false)) { - transaction_state = RTM_real_reader; - return true; - } - return false; - } - - }; // class x86_rtm_rw_mutex::scoped_lock - - // ISO C++0x compatibility methods not provided because we cannot maintain - // state about whether a thread is in a transaction. - -private: - char pad[speculation_granularity-sizeof(spin_rw_mutex)]; // padding - - // If true, writer holds the spin_rw_mutex. - tbb::atomic w_flag; // want this on a separate cache line - -}; // x86_rtm_rw_mutex - -} // namespace internal -} // namespace interface8 -} // namespace tbb - -#endif /* __TBB_TSX_AVAILABLE */ -#endif /* __TBB__x86_rtm_rw_mutex_impl_H */ diff --git a/inst/include/tbb/machine/gcc_armv7.h b/inst/include/tbb/machine/gcc_armv7.h deleted file mode 100644 index 83f5c55e6..000000000 --- a/inst/include/tbb/machine/gcc_armv7.h +++ /dev/null @@ -1,217 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-/*
-    Platform isolation layer for the ARMv7-a architecture.
-*/
-
-#ifndef __TBB_machine_H
-#error Do not include this file directly; include tbb_machine.h instead
-#endif
-
-// TODO: is ARMv7-a the only architecture version we will ever need to support?
-#if !(__ARM_ARCH_7A__)
-#error compilation requires an ARMv7-a architecture.
-#endif
-
-#include <sys/param.h>
-#include <unistd.h>
-
-#define __TBB_WORDSIZE 4
-
-// Traditionally ARM is little-endian.
-// Note that, since only the layout of aligned 32-bit words is of interest,
-// any apparent PDP-endianness of 32-bit words at half-word alignment or
-// any little-endian ordering of big-endian 32-bit words in 64-bit quantities
-// may be disregarded for this setting.
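The `#if` chain that follows resolves `__TBB_ENDIANNESS` at preprocessing time from compiler-provided byte-order macros. For intuition, the same question can be probed at run time by inspecting how an aligned 32-bit word is laid out in memory; a standalone sketch (not TBB code, helper name invented):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical probe mirroring what __TBB_ENDIANNESS encodes statically:
// on a little-endian target the least significant byte is stored first.
static bool is_little_endian() {
    const std::uint32_t probe = 0x01020304u;
    const unsigned char* bytes = reinterpret_cast<const unsigned char*>(&probe);
    return bytes[0] == 0x04;
}

int main() {
    std::printf("%s-endian\n", is_little_endian() ? "little" : "big");
    return 0;
}
```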
-#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#elif defined(__BYTE_ORDER__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT -#endif - - -#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__("dmb ish": : :"memory") -#define __TBB_control_consistency_helper() __TBB_full_memory_fence() -#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence() -#define __TBB_release_consistency_helper() __TBB_full_memory_fence() - -//-------------------------------------------------- -// Compare and swap -//-------------------------------------------------- - -/** - * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - * @return value originally in memory at ptr, regardless of success -*/ -static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ) -{ - int32_t oldval, res; - - __TBB_full_memory_fence(); - - do { - __asm__ __volatile__( - "ldrex %1, [%3]\n" - "mov %0, #0\n" - "cmp %1, %4\n" - "it eq\n" - "strexeq %0, %5, [%3]\n" - : "=&r" (res), "=&r" (oldval), "+Qo" (*(volatile int32_t*)ptr) - : "r" ((int32_t *)ptr), "Ir" (comparand), "r" (value) - : "cc"); - } while (res); - - __TBB_full_memory_fence(); - - return oldval; -} - -/** - * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - * @return value originally in memory at ptr, regardless of success - */ -static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t oldval; - int32_t res; - - __TBB_full_memory_fence(); - - do { - __asm__ __volatile__( - "mov %0, #0\n" - "ldrexd %1, %H1, [%3]\n" - "cmp %1, %4\n" - "it eq\n" - "cmpeq %H1, %H4\n" - "it eq\n" - "strexdeq %0, %5, %H5, [%3]" - : "=&r" (res), "=&r" (oldval), "+Qo" (*(volatile int64_t*)ptr) - : "r" ((int64_t *)ptr), "r" (comparand), "r" (value) - : "cc"); - } while (res); - - __TBB_full_memory_fence(); - - return oldval; -} - -static inline int32_t __TBB_machine_fetchadd4(volatile void* ptr, int32_t addend) -{ - unsigned long tmp; - int32_t result, tmp2; - - __TBB_full_memory_fence(); - - __asm__ __volatile__( -"1: ldrex %0, [%4]\n" -" add %3, %0, %5\n" -" strex %1, %3, [%4]\n" -" cmp %1, #0\n" -" bne 1b\n" - : "=&r" (result), "=&r" (tmp), "+Qo" (*(volatile int32_t*)ptr), "=&r"(tmp2) - : "r" ((int32_t *)ptr), "Ir" (addend) - : "cc"); - - __TBB_full_memory_fence(); - - return result; -} - -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend) -{ - unsigned long tmp; - int64_t result, tmp2; - - __TBB_full_memory_fence(); - - __asm__ __volatile__( -"1: ldrexd %0, %H0, [%4]\n" -" adds %3, %0, %5\n" -" adc %H3, %H0, %H5\n" -" strexd %1, %3, %H3, [%4]\n" -" cmp %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (*(volatile int64_t*)ptr), "=&r"(tmp2) - : "r" ((int64_t 
*)ptr), "r" (addend) - : "cc"); - - - __TBB_full_memory_fence(); - - return result; -} - -inline void __TBB_machine_pause (int32_t delay ) -{ - while(delay>0) - { - __TBB_compiler_fence(); - delay--; - } -} - -namespace tbb { -namespace internal { - template - struct machine_load_store_relaxed { - static inline T load ( const volatile T& location ) { - const T value = location; - - /* - * An extra memory barrier is required for errata #761319 - * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a - */ - __TBB_acquire_consistency_helper(); - return value; - } - - static inline void store ( volatile T& location, T value ) { - location = value; - } - }; -}} // namespaces internal, tbb - -// Machine specific atomic operations - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_Pause(V) __TBB_machine_pause(V) - -// Use generics for some things -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 diff --git a/inst/include/tbb/machine/gcc_generic.h b/inst/include/tbb/machine/gcc_generic.h deleted file mode 100644 index be80ed47f..000000000 --- a/inst/include/tbb/machine/gcc_generic.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
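The LDREX/STREX sequences above implement compare-and-swap and fetch-and-add as retry loops around an exclusive monitor: load, compute, attempt to publish, retry if another thread intervened. The same structure expressed with the portable `__sync` builtins that the generic port relies on (illustrative sketch, hypothetical function name):

```cpp
#include <stdint.h>

// Fetch-and-add built from a CAS retry loop, mirroring the LDREX/STREX
// assembly above. Returns the value observed before the addition.
static inline int32_t fetch_add_via_cas(volatile int32_t* ptr, int32_t addend) {
    int32_t oldval;
    do {
        oldval = *ptr;
        // __sync_val_compare_and_swap returns the prior value; the swap
        // succeeded only if that prior value is the one we computed with.
    } while (__sync_val_compare_and_swap(ptr, oldval, oldval + addend) != oldval);
    return oldval;
}
```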
-#endif - -#define __TBB_machine_gcc_generic_H - -#include -#include - -#define __TBB_WORDSIZE __SIZEOF_POINTER__ - -#if __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN - #define __TBB_64BIT_ATOMICS 0 -#endif - -/** FPU control setting not available for non-Intel architectures on Android **/ -#if __ANDROID__ && __TBB_generic_arch - #define __TBB_CPU_CTL_ENV_PRESENT 0 -#endif - -// __BYTE_ORDER__ is used in accordance with http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html, -// but __BIG_ENDIAN__ or __LITTLE_ENDIAN__ may be more commonly found instead. -#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#elif defined(__BYTE_ORDER__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT -#endif - -/** As this generic implementation has absolutely no information about underlying - hardware, its performance most likely will be sub-optimal because of full memory - fence usages where a more lightweight synchronization means (or none at all) - could suffice. Thus if you use this header to enable TBB on a new platform, - consider forking it and relaxing below helpers as appropriate. **/ -#define __TBB_acquire_consistency_helper() __sync_synchronize() -#define __TBB_release_consistency_helper() __sync_synchronize() -#define __TBB_full_memory_fence() __sync_synchronize() -#define __TBB_control_consistency_helper() __sync_synchronize() - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T) \ -inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ - return __sync_val_compare_and_swap(reinterpret_cast(ptr),comparand,value); \ -} \ - \ -inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ - return __sync_fetch_and_add(reinterpret_cast(ptr),value); \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t) -__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t) -__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t) -__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t) - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -namespace tbb{ namespace internal { namespace gcc_builtins { - inline int clz(unsigned int x){ return __builtin_clz(x);}; - inline int clz(unsigned long int x){ return __builtin_clzl(x);}; - inline int clz(unsigned long long int x){ return __builtin_clzll(x);}; -}}} -//gcc __builtin_clz builtin count _number_ of leading zeroes -static inline intptr_t __TBB_machine_lg( uintptr_t x ) { - return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) -1 ; -} - -static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) { - __sync_fetch_and_or(reinterpret_cast(ptr),addend); -} - -static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) { - __sync_fetch_and_and(reinterpret_cast(ptr),addend); -} - - -typedef unsigned char __TBB_Flag; - -typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; - -inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) { - return __sync_lock_test_and_set(&flag,1)==0; -} - -inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) { - __sync_lock_release(&flag); -} - -// Machine specific atomic operations -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -#define __TBB_TryLockByte __TBB_machine_try_lock_byte -#define __TBB_UnlockByte __TBB_machine_unlock_byte - -// Definition of other functions -#define __TBB_Log2(V) 
__TBB_machine_lg(V) - -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#if __TBB_WORDSIZE==4 - #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#endif - -#if __TBB_x86_32 || __TBB_x86_64 -#include "gcc_itsx.h" -#endif diff --git a/inst/include/tbb/machine/gcc_ia32_common.h b/inst/include/tbb/machine/gcc_ia32_common.h deleted file mode 100644 index db276310a..000000000 --- a/inst/include/tbb/machine/gcc_ia32_common.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_machine_gcc_ia32_common_H -#define __TBB_machine_gcc_ia32_common_H - -//TODO: Add a higher-level function, e.g. tbb::interal::log2(), into tbb_stddef.h, which -//uses __TBB_Log2 and contains the assert and remove the assert from here and all other -//platform-specific headers. -//TODO: Check if use of gcc intrinsic gives a better chance for cross call optimizations -template -static inline intptr_t __TBB_machine_lg( T x ) { - __TBB_ASSERT(x>0, "The logarithm of a non-positive value is undefined."); - uintptr_t j; - __asm__("bsr %1,%0" : "=r"(j) : "r"((uintptr_t)x)); - return j; -} -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#ifndef __TBB_Pause -//TODO: check if raising a ratio of pause instructions to loop control instructions -//(via e.g. loop unrolling) gives any benefit for HT. E.g, the current implementation -//does about 2 CPU-consuming instructions for every pause instruction. Perhaps for -//high pause counts it should use an unrolled loop to raise the ratio, and thus free -//up more integer cycles for the other hyperthread. On the other hand, if the loop is -//unrolled too far, it won't fit in the core's loop cache, and thus take away -//instruction decode slots from the other hyperthread. 
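The `__TBB_machine_pause` loop defined next is only the primitive; in practice it is invoked with an exponentially growing delay while spinning on a contended flag, similar in spirit to TBB's `atomic_backoff` helper. A minimal standalone sketch of that usage pattern (illustrative names; x86 assumed for the `pause` instruction):

```cpp
#include <atomic>
#include <cstdint>

#if defined(__i386__) || defined(__x86_64__)
static inline void cpu_pause() { __asm__ __volatile__("pause"); }
#else
static inline void cpu_pause() {}  // no-op fallback on other architectures
#endif

// Spin until the flag is acquired, doubling the pause count after each
// failed attempt so a contended waiter consumes progressively less
// pipeline bandwidth (the cap of 1024 is arbitrary for this sketch).
static void spin_acquire(std::atomic_flag& flag) {
    std::int32_t delay = 1;
    while (flag.test_and_set(std::memory_order_acquire)) {
        for (std::int32_t i = 0; i < delay; ++i)
            cpu_pause();
        if (delay < 1024)
            delay *= 2;
    }
}
```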
- -//TODO: check if use of gcc __builtin_ia32_pause intrinsic gives a "some how" better performing code -static inline void __TBB_machine_pause( int32_t delay ) { - for (int32_t i = 0; i < delay; i++) { - __asm__ __volatile__("pause;"); - } - return; -} -#define __TBB_Pause(V) __TBB_machine_pause(V) -#endif /* !__TBB_Pause */ - -// API to retrieve/update FPU control setting -#ifndef __TBB_CPU_CTL_ENV_PRESENT -#define __TBB_CPU_CTL_ENV_PRESENT 1 -namespace tbb { -namespace internal { -class cpu_ctl_env { -private: - int mxcsr; - short x87cw; - static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */ -public: - bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; } - void get_env() { - #if __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN - cpu_ctl_env loc_ctl; - __asm__ __volatile__ ( - "stmxcsr %0\n\t" - "fstcw %1" - : "=m"(loc_ctl.mxcsr), "=m"(loc_ctl.x87cw) - ); - *this = loc_ctl; - #else - __asm__ __volatile__ ( - "stmxcsr %0\n\t" - "fstcw %1" - : "=m"(mxcsr), "=m"(x87cw) - ); - #endif - mxcsr &= MXCSR_CONTROL_MASK; - } - void set_env() const { - __asm__ __volatile__ ( - "ldmxcsr %0\n\t" - "fldcw %1" - : : "m"(mxcsr), "m"(x87cw) - ); - } -}; -} // namespace internal -} // namespace tbb -#endif /* !__TBB_CPU_CTL_ENV_PRESENT */ - -#include "gcc_itsx.h" - -#endif /* __TBB_machine_gcc_ia32_common_H */ diff --git a/inst/include/tbb/machine/gcc_itsx.h b/inst/include/tbb/machine/gcc_itsx.h deleted file mode 100644 index 87971659a..000000000 --- a/inst/include/tbb/machine/gcc_itsx.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_itsx_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
-#endif - -#define __TBB_machine_gcc_itsx_H - -#define __TBB_OP_XACQUIRE 0xF2 -#define __TBB_OP_XRELEASE 0xF3 -#define __TBB_OP_LOCK 0xF0 - -#define __TBB_STRINGIZE_INTERNAL(arg) #arg -#define __TBB_STRINGIZE(arg) __TBB_STRINGIZE_INTERNAL(arg) - -#ifdef __TBB_x86_64 -#define __TBB_r_out "=r" -#else -#define __TBB_r_out "=q" -#endif - -inline static uint8_t __TBB_machine_try_lock_elided( volatile uint8_t* lk ) -{ - uint8_t value = 1; - __asm__ volatile (".byte " __TBB_STRINGIZE(__TBB_OP_XACQUIRE)"; lock; xchgb %0, %1;" - : __TBB_r_out(value), "=m"(*lk) : "0"(value), "m"(*lk) : "memory" ); - return uint8_t(value^1); -} - -inline static void __TBB_machine_try_lock_elided_cancel() -{ - // 'pause' instruction aborts HLE/RTM transactions - __asm__ volatile ("pause\n" : : : "memory" ); -} - -inline static void __TBB_machine_unlock_elided( volatile uint8_t* lk ) -{ - __asm__ volatile (".byte " __TBB_STRINGIZE(__TBB_OP_XRELEASE)"; movb $0, %0" - : "=m"(*lk) : "m"(*lk) : "memory" ); -} - -#if __TBB_TSX_INTRINSICS_PRESENT -#include - -#define __TBB_machine_is_in_transaction _xtest -#define __TBB_machine_begin_transaction _xbegin -#define __TBB_machine_end_transaction _xend -#define __TBB_machine_transaction_conflict_abort() _xabort(0xff) - -#else - -/*! - * Check if the instruction is executed in a transaction or not - */ -inline static bool __TBB_machine_is_in_transaction() -{ - int8_t res = 0; -#if __TBB_x86_32 - __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD6;\n" - "setz %0" : "=q"(res) : : "memory" ); -#else - __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD6;\n" - "setz %0" : "=r"(res) : : "memory" ); -#endif - return res==0; -} - -/*! - * Enter speculative execution mode. - * @return -1 on success - * abort cause ( or 0 ) on abort - */ -inline static uint32_t __TBB_machine_begin_transaction() -{ - uint32_t res = ~uint32_t(0); // success value - __asm__ volatile ("1: .byte 0xC7; .byte 0xF8;\n" // XBEGIN - " .long 2f-1b-6\n" // 2f-1b == difference in addresses of start - // of XBEGIN and the MOVL - // 2f - 1b - 6 == that difference minus the size of the - // XBEGIN instruction. This is the abort offset to - // 2: below. - " jmp 3f\n" // success (leave -1 in res) - "2: movl %%eax,%0\n" // store failure code in res - "3:" - :"=r"(res):"0"(res):"memory","%eax"); - return res; -} - -/*! - * Attempt to commit/end transaction - */ -inline static void __TBB_machine_end_transaction() -{ - __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD5" :::"memory"); // XEND -} - -/* - * aborts with code 0xFF (lock already held) - */ -inline static void __TBB_machine_transaction_conflict_abort() -{ - __asm__ volatile (".byte 0xC6; .byte 0xF8; .byte 0xFF" :::"memory"); -} - -#endif /* __TBB_TSX_INTRINSICS_PRESENT */ diff --git a/inst/include/tbb/machine/ibm_aix51.h b/inst/include/tbb/machine/ibm_aix51.h deleted file mode 100644 index 57dfeb336..000000000 --- a/inst/include/tbb/machine/ibm_aix51.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// TODO: revise by comparing with mac_ppc.h - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_ibm_aix51_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_ibm_aix51_H - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG // assumption based on operating system - -#include -#include -#include - -extern "C" { -int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand); -int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand); -void __TBB_machine_flush (); -void __TBB_machine_lwsync (); -void __TBB_machine_isync (); -} - -// Mapping of old entry point names retained for the sake of backward binary compatibility -#define __TBB_machine_cmpswp4 __TBB_machine_cas_32 -#define __TBB_machine_cmpswp8 __TBB_machine_cas_64 - -#define __TBB_Yield() sched_yield() - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#if __GNUC__ - #define __TBB_control_consistency_helper() __asm__ __volatile__( "isync": : :"memory") - #define __TBB_acquire_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") - #define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") - #define __TBB_full_memory_fence() __asm__ __volatile__( "sync": : :"memory") -#else - // IBM C++ Compiler does not support inline assembly - // TODO: Since XL 9.0 or earlier GCC syntax is supported. Replace with more - // lightweight implementation (like in mac_ppc.h) - #define __TBB_control_consistency_helper() __TBB_machine_isync () - #define __TBB_acquire_consistency_helper() __TBB_machine_lwsync () - #define __TBB_release_consistency_helper() __TBB_machine_lwsync () - #define __TBB_full_memory_fence() __TBB_machine_flush () -#endif diff --git a/inst/include/tbb/machine/icc_generic.h b/inst/include/tbb/machine/icc_generic.h deleted file mode 100644 index c31a5a3d5..000000000 --- a/inst/include/tbb/machine/icc_generic.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT - #error "Intel C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port" -#endif - -#define __TBB_machine_icc_generic_H - -//ICC mimics the "native" target compiler -#if _MSC_VER - #include "msvc_ia32_common.h" -#else - #include "gcc_ia32_common.h" -#endif - -//TODO: Make __TBB_WORDSIZE macro optional for ICC intrinsics port. -//As compiler intrinsics are used for all the operations it is possible to do. - -#if __TBB_x86_32 - #define __TBB_WORDSIZE 4 -#else - #define __TBB_WORDSIZE 8 -#endif -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -//__TBB_compiler_fence() defined just in case, as it seems not to be used on its own anywhere else -#if _MSC_VER - //TODO: any way to use same intrinsics on windows and linux? - #pragma intrinsic(_ReadWriteBarrier) - #define __TBB_compiler_fence() _ReadWriteBarrier() -#else - #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#endif - -#ifndef __TBB_full_memory_fence -#if _MSC_VER - //TODO: any way to use same intrinsics on windows and linux? - #pragma intrinsic(_mm_mfence) - #define __TBB_full_memory_fence() _mm_mfence() -#else - #define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") -#endif -#endif - -#define __TBB_control_consistency_helper() __TBB_compiler_fence() - -namespace tbb { namespace internal { -//TODO: is there any way to reuse definition of memory_order enum from ICC instead of copy paste. -//however it seems unlikely that ICC will silently change exact enum values, as they are defined -//in the ISO exactly like this. -//TODO: add test that exact values of the enum are same as in the ISO C++11 -typedef enum memory_order { - memory_order_relaxed, memory_order_consume, memory_order_acquire, - memory_order_release, memory_order_acq_rel, memory_order_seq_cst -} memory_order; - -namespace icc_intrinsics_port { - template - T convert_argument(T value){ - return value; - } - //The overload below is needed to have explicit conversion of pointer to void* in argument list. - //compiler bug? 
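The `machine_load_store*` templates that follow route TBB's fenced loads and stores through ICC's `__atomic_*_explicit` intrinsics parameterized by the `memory_order` enum defined above. In portable C++11 the same semantics are spelled with `std::atomic`; a rough correspondence (sketch only; TBB's templates operate on raw `volatile T`, not `std::atomic<T>`):

```cpp
#include <atomic>

// std::atomic counterparts of machine_load_store / machine_load_store_relaxed.
template <typename T>
T load_with_acquire(const std::atomic<T>& location) {
    return location.load(std::memory_order_acquire);
}

template <typename T>
void store_with_release(std::atomic<T>& location, T value) {
    location.store(value, std::memory_order_release);
}

template <typename T>
T load_relaxed(const std::atomic<T>& location) {
    return location.load(std::memory_order_relaxed);
}
```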
- //TODO: add according broken macro and recheck with ICC 13.0 if the overload is still needed - template - void* convert_argument(T* value){ - return (void*)value; - } -} -//TODO: code below is a bit repetitive, consider simplifying it -template -struct machine_load_store { - static T load_with_acquire ( const volatile T& location ) { - return __atomic_load_explicit(&location, memory_order_acquire); - } - static void store_with_release ( volatile T &location, T value ) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release); - } -}; - -template -struct machine_load_store_relaxed { - static inline T load ( const T& location ) { - return __atomic_load_explicit(&location, memory_order_relaxed); - } - static inline void store ( T& location, T value ) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed); - } -}; - -template -struct machine_load_store_seq_cst { - static T load ( const volatile T& location ) { - return __atomic_load_explicit(&location, memory_order_seq_cst); - } - - static void store ( volatile T &location, T value ) { - __atomic_store_explicit(&location, value, memory_order_seq_cst); - } -}; - -}} // namespace tbb::internal - -namespace tbb{ namespace internal { namespace icc_intrinsics_port{ - typedef enum memory_order_map { - relaxed = memory_order_relaxed, - acquire = memory_order_acquire, - release = memory_order_release, - full_fence= memory_order_seq_cst - } memory_order_map; -}}}// namespace tbb::internal - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,M) \ -inline T __TBB_machine_cmpswp##S##M( volatile void *ptr, T value, T comparand ) { \ - __atomic_compare_exchange_strong_explicit( \ - (T*)ptr \ - ,&comparand \ - ,value \ - , tbb::internal::icc_intrinsics_port::M \ - , tbb::internal::icc_intrinsics_port::M); \ - return comparand; \ -} \ - \ -inline T __TBB_machine_fetchstore##S##M(volatile void *ptr, T value) { \ - return __atomic_exchange_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ -} \ - \ -inline T __TBB_machine_fetchadd##S##M(volatile void *ptr, T value) { \ - return __atomic_fetch_add_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, relaxed) - -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, relaxed) - -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, relaxed) - -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, relaxed) - - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -#define __TBB_USE_FENCED_ATOMICS 1 - -namespace tbb { namespace internal { -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence) 
-__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence) - -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(acquire) -__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(release) - -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(relaxed) -__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(relaxed) - -template -struct machine_load_store { - static T load_with_acquire ( const volatile T& location ) { - if( tbb::internal::is_aligned(&location,8)) { - return __atomic_load_explicit(&location, memory_order_acquire); - } else { - return __TBB_machine_generic_load8acquire(&location); - } - } - static void store_with_release ( volatile T &location, T value ) { - if( tbb::internal::is_aligned(&location,8)) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release); - } else { - return __TBB_machine_generic_store8release(&location,value); - } - } -}; - -template -struct machine_load_store_relaxed { - static T load( const volatile T& location ) { - if( tbb::internal::is_aligned(&location,8)) { - return __atomic_load_explicit(&location, memory_order_relaxed); - } else { - return __TBB_machine_generic_load8relaxed(&location); - } - } - static void store( volatile T &location, T value ) { - if( tbb::internal::is_aligned(&location,8)) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed); - } else { - return __TBB_machine_generic_store8relaxed(&location,value); - } - } -}; - -template -struct machine_load_store_seq_cst { - static T load ( const volatile T& location ) { - if( tbb::internal::is_aligned(&location,8)) { - return __atomic_load_explicit(&location, memory_order_seq_cst); - } else { - return __TBB_machine_generic_load8full_fence(&location); - } - - } - - static void store ( volatile T &location, T value ) { - if( tbb::internal::is_aligned(&location,8)) { - __atomic_store_explicit(&location, value, memory_order_seq_cst); - } else { - return __TBB_machine_generic_store8full_fence(&location,value); - } - - } -}; - -#endif -}} // namespace tbb::internal -template -inline void __TBB_machine_OR( T *operand, T addend ) { - __atomic_fetch_or_explicit(operand, addend, tbb::internal::memory_order_seq_cst); -} - -template -inline void __TBB_machine_AND( T *operand, T addend ) { - __atomic_fetch_and_explicit(operand, addend, tbb::internal::memory_order_seq_cst); -} - diff --git a/inst/include/tbb/machine/linux_common.h b/inst/include/tbb/machine/linux_common.h deleted file mode 100644 index 53eeeafc3..000000000 --- a/inst/include/tbb/machine/linux_common.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include -#define __TBB_Yield() sched_yield() - -#include -/* Futex definitions */ -#include - -#if defined(SYS_futex) - -#define __TBB_USE_FUTEX 1 -#include -#include -// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE. - -#ifdef FUTEX_WAIT -#define __TBB_FUTEX_WAIT FUTEX_WAIT -#else -#define __TBB_FUTEX_WAIT 0 -#endif - -#ifdef FUTEX_WAKE -#define __TBB_FUTEX_WAKE FUTEX_WAKE -#else -#define __TBB_FUTEX_WAKE 1 -#endif - -#ifndef __TBB_ASSERT -#error machine specific headers must be included after tbb_stddef.h -#endif - -namespace tbb { - -namespace internal { - -inline int futex_wait( void *futex, int comparand ) { - int r = syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 ); -#if TBB_USE_ASSERT - int e = errno; - __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), "futex_wait failed." ); -#endif /* TBB_USE_ASSERT */ - return r; -} - -inline int futex_wakeup_one( void *futex ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 ); - __TBB_ASSERT( r==0||r==1, "futex_wakeup_one: more than one thread woken up?" ); - return r; -} - -inline int futex_wakeup_all( void *futex ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 ); - __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" ); - return r; -} - -} /* namespace internal */ - -} /* namespace tbb */ - -#endif /* SYS_futex */ diff --git a/inst/include/tbb/machine/linux_ia32.h b/inst/include/tbb/machine/linux_ia32.h deleted file mode 100644 index 27def2ff5..000000000 --- a/inst/include/tbb/machine/linux_ia32.h +++ /dev/null @@ -1,232 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
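The `futex_wait` / `futex_wakeup_*` wrappers above are the Linux building blocks for parking a thread until shared state changes. A minimal sketch of the usual pattern built on the same syscall (illustrative, not TBB code):

```cpp
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <climits>

static long futex(int* uaddr, int op, int val) {
    return syscall(SYS_futex, uaddr, op, val, nullptr, nullptr, 0);
}

// One-shot event: waiters sleep while *state == 0. The kernel re-checks
// the value atomically before sleeping, so a wake cannot be lost, and
// the loop absorbs spurious wakeups.
static void event_wait(int* state) {
    while (__atomic_load_n(state, __ATOMIC_ACQUIRE) == 0)
        futex(state, FUTEX_WAIT, 0);   // sleeps only if *state is still 0
}

static void event_signal(int* state) {
    __atomic_store_n(state, 1, __ATOMIC_RELEASE);
    futex(state, FUTEX_WAKE, INT_MAX); // wake all waiters
}
```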
-*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_linux_ia32_H - -#include -#include "gcc_ia32_common.h" - -#define __TBB_WORDSIZE 4 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() -#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") - -#if __TBB_ICC_ASM_VOLATILE_BROKEN -#define __TBB_VOLATILE -#else -#define __TBB_VOLATILE volatile -#endif - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X,R) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"","=q") -__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"","=r") -__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"l","=r") - -#if __INTEL_COMPILER -#pragma warning( push ) -// reference to EBX in a function requiring stack alignment -#pragma warning( disable: 998 ) -#endif - -#if __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN -#define __TBB_IA32_CAS8_NOINLINE __attribute__ ((noinline)) -#else -#define __TBB_IA32_CAS8_NOINLINE -#endif - -static inline __TBB_IA32_CAS8_NOINLINE int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) { -//TODO: remove the extra part of condition once __TBB_GCC_BUILTIN_ATOMICS_PRESENT is lowered to gcc version 4.1.2 -#if (__TBB_GCC_BUILTIN_ATOMICS_PRESENT || (__TBB_GCC_VERSION >= 40102)) && !__TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN - return __sync_val_compare_and_swap( reinterpret_cast(ptr), comparand, value ); -#else /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */ - //TODO: look like ICC 13.0 has some issues with this code, investigate it more deeply - int64_t result; - union { - int64_t i64; - int32_t i32[2]; - }; - i64 = value; -#if __PIC__ - /* compiling position-independent code */ - // EBX register preserved for compliance with position-independent code rules on IA32 - int32_t tmp; - __asm__ __volatile__ ( - "movl %%ebx,%2\n\t" - "movl %5,%%ebx\n\t" -#if __GNUC__==3 - "lock\n\t cmpxchg8b %1\n\t" -#else - "lock\n\t cmpxchg8b (%3)\n\t" -#endif - "movl %2,%%ebx" - : "=A"(result) - , "=m"(*(__TBB_VOLATILE int64_t *)ptr) - , "=m"(tmp) -#if __GNUC__==3 - : "m"(*(__TBB_VOLATILE int64_t *)ptr) -#else - : "SD"(ptr) -#endif - , "0"(comparand) - , "m"(i32[0]), "c"(i32[1]) - : "memory" -#if __INTEL_COMPILER - ,"ebx" -#endif - ); -#else /* !__PIC__ */ - __asm__ __volatile__ ( - "lock\n\t cmpxchg8b %1\n\t" - : "=A"(result), "=m"(*(__TBB_VOLATILE 
int64_t *)ptr) - : "m"(*(__TBB_VOLATILE int64_t *)ptr) - , "0"(comparand) - , "b"(i32[0]), "c"(i32[1]) - : "memory" - ); -#endif /* __PIC__ */ - return result; -#endif /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */ -} - -#undef __TBB_IA32_CAS8_NOINLINE - -#if __INTEL_COMPILER -#pragma warning( pop ) -#endif // warning 998 is back - -static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\norl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\nandl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -} - -//TODO: Check if it possible and profitable for IA-32 architecture on (Linux* and Windows*) -//to use of 64-bit load/store via floating point registers together with full fence -//for sequentially consistent load/store, instead of CAS. - -#if __clang__ -#define __TBB_fildq "fildll" -#define __TBB_fistpq "fistpll" -#else -#define __TBB_fildq "fildq" -#define __TBB_fistpq "fistpq" -#endif - -static inline int64_t __TBB_machine_aligned_load8 (const volatile void *ptr) { - __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),"__TBB_machine_aligned_load8 should be used with 8 byte aligned locations only \n"); - int64_t result; - __asm__ __volatile__ ( __TBB_fildq " %1\n\t" - __TBB_fistpq " %0" : "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" ); - return result; -} - -static inline void __TBB_machine_aligned_store8 (volatile void *ptr, int64_t value ) { - __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),"__TBB_machine_aligned_store8 should be used with 8 byte aligned locations only \n"); - // Aligned store - __asm__ __volatile__ ( __TBB_fildq " %1\n\t" - __TBB_fistpq " %0" : "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" ); -} - -static inline int64_t __TBB_machine_load8 (const volatile void *ptr) { -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - if( tbb::internal::is_aligned(ptr,8)) { -#endif - return __TBB_machine_aligned_load8(ptr); -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - } else { - // Unaligned load - return __TBB_machine_cmpswp8(const_cast(ptr),0,0); - } -#endif -} - -//! 
Handles misaligned 8-byte store -/** Defined in tbb_misc.cpp */ -extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ); -extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ); - -static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) { -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - if( tbb::internal::is_aligned(ptr,8)) { -#endif - __TBB_machine_aligned_store8(ptr,value); -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - } else { - // Unaligned store -#if TBB_USE_PERFORMANCE_WARNINGS - __TBB_machine_store8_slow_perf_warning(ptr); -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - __TBB_machine_store8_slow(ptr,value); - } -#endif -} - -// Machine specific atomic operations -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -#define __TBB_USE_GENERIC_DWORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_DWORD_FETCH_STORE 1 -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - diff --git a/inst/include/tbb/machine/linux_ia64.h b/inst/include/tbb/machine/linux_ia64.h deleted file mode 100644 index a9f386acc..000000000 --- a/inst/include/tbb/machine/linux_ia64.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia64_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_linux_ia64_H - -#include -#include - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#if __INTEL_COMPILER - #define __TBB_compiler_fence() - #define __TBB_control_consistency_helper() __TBB_compiler_fence() - #define __TBB_acquire_consistency_helper() - #define __TBB_release_consistency_helper() - #define __TBB_full_memory_fence() __mf() -#else - #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") - #define __TBB_control_consistency_helper() __TBB_compiler_fence() - // Even though GCC imbues volatile loads with acquire semantics, it sometimes moves - // loads over the acquire fence. 
The following helpers stop such incorrect code motion. - #define __TBB_acquire_consistency_helper() __TBB_compiler_fence() - #define __TBB_release_consistency_helper() __TBB_compiler_fence() - #define __TBB_full_memory_fence() __asm__ __volatile__("mf": : :"memory") -#endif /* !__INTEL_COMPILER */ - -// Most of the functions will be in a .s file -// TODO: revise dynamic_link, memory pools and etc. if the library dependency is removed. - -extern "C" { - int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend); - - int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend); - - int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend); - int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend); - - int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend); - int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend); - - int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value); - int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value); - int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value); - - int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value); - int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value); - int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value); - - int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value); - - int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value); - - int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); - - int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); - - int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); - - int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_cmpswp8release(volatile void *ptr, 
int64_t value, int64_t comparand); - - int64_t __TBB_machine_lg(uint64_t value); - void __TBB_machine_pause(int32_t delay); - bool __TBB_machine_trylockbyte( volatile unsigned char &ptr ); - int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr ); - - //! Retrieves the current RSE backing store pointer. IA64 specific. - void* __TBB_get_bsp(); - - int32_t __TBB_machine_load1_relaxed(const void *ptr); - int32_t __TBB_machine_load2_relaxed(const void *ptr); - int32_t __TBB_machine_load4_relaxed(const void *ptr); - int64_t __TBB_machine_load8_relaxed(const void *ptr); - - void __TBB_machine_store1_relaxed(void *ptr, int32_t value); - void __TBB_machine_store2_relaxed(void *ptr, int32_t value); - void __TBB_machine_store4_relaxed(void *ptr, int32_t value); - void __TBB_machine_store8_relaxed(void *ptr, int64_t value); -} // extern "C" - -// Mapping old entry points to the names corresponding to the new full_fence identifier. -#define __TBB_machine_fetchadd1full_fence __TBB_machine_fetchadd1__TBB_full_fence -#define __TBB_machine_fetchadd2full_fence __TBB_machine_fetchadd2__TBB_full_fence -#define __TBB_machine_fetchadd4full_fence __TBB_machine_fetchadd4__TBB_full_fence -#define __TBB_machine_fetchadd8full_fence __TBB_machine_fetchadd8__TBB_full_fence -#define __TBB_machine_fetchstore1full_fence __TBB_machine_fetchstore1__TBB_full_fence -#define __TBB_machine_fetchstore2full_fence __TBB_machine_fetchstore2__TBB_full_fence -#define __TBB_machine_fetchstore4full_fence __TBB_machine_fetchstore4__TBB_full_fence -#define __TBB_machine_fetchstore8full_fence __TBB_machine_fetchstore8__TBB_full_fence -#define __TBB_machine_cmpswp1full_fence __TBB_machine_cmpswp1__TBB_full_fence -#define __TBB_machine_cmpswp2full_fence __TBB_machine_cmpswp2__TBB_full_fence -#define __TBB_machine_cmpswp4full_fence __TBB_machine_cmpswp4__TBB_full_fence -#define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8__TBB_full_fence - -// Mapping relaxed operations to the entry points implementing them. -/** On IA64 RMW operations implicitly have acquire semantics. Thus one cannot - actually have completely relaxed RMW operation here. 
**/ -#define __TBB_machine_fetchadd1relaxed __TBB_machine_fetchadd1acquire -#define __TBB_machine_fetchadd2relaxed __TBB_machine_fetchadd2acquire -#define __TBB_machine_fetchadd4relaxed __TBB_machine_fetchadd4acquire -#define __TBB_machine_fetchadd8relaxed __TBB_machine_fetchadd8acquire -#define __TBB_machine_fetchstore1relaxed __TBB_machine_fetchstore1acquire -#define __TBB_machine_fetchstore2relaxed __TBB_machine_fetchstore2acquire -#define __TBB_machine_fetchstore4relaxed __TBB_machine_fetchstore4acquire -#define __TBB_machine_fetchstore8relaxed __TBB_machine_fetchstore8acquire -#define __TBB_machine_cmpswp1relaxed __TBB_machine_cmpswp1acquire -#define __TBB_machine_cmpswp2relaxed __TBB_machine_cmpswp2acquire -#define __TBB_machine_cmpswp4relaxed __TBB_machine_cmpswp4acquire -#define __TBB_machine_cmpswp8relaxed __TBB_machine_cmpswp8acquire - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,V) \ - template <typename T> \ - struct machine_load_store_relaxed<T,S> { \ - static inline T load ( const T& location ) { \ - return (T)__TBB_machine_load##S##_relaxed(&location); \ - } \ - static inline void store ( T& location, T value ) { \ - __TBB_machine_store##S##_relaxed(&location, (V)value); \ - } \ - } - -namespace tbb { -namespace internal { - __TBB_MACHINE_DEFINE_ATOMICS(1,int8_t); - __TBB_MACHINE_DEFINE_ATOMICS(2,int16_t); - __TBB_MACHINE_DEFINE_ATOMICS(4,int32_t); - __TBB_MACHINE_DEFINE_ATOMICS(8,int64_t); -}} // namespaces internal, tbb - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -#define __TBB_USE_FENCED_ATOMICS 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -// Definition of Lock functions -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) -#define __TBB_LockByte(P) __TBB_machine_lockbyte(P) - -// Definition of other utility functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) diff --git a/inst/include/tbb/machine/linux_intel64.h b/inst/include/tbb/machine/linux_intel64.h deleted file mode 100644 index 6fe018b83..000000000 --- a/inst/include/tbb/machine/linux_intel64.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_linux_intel64_H - -#include <stdint.h> -#include "gcc_ia32_common.h" - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() - -#ifndef __TBB_full_memory_fence -#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") -#endif - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(volatile T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(addend), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(value), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"") -__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"") -__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"") -__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t,"q") - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - diff --git a/inst/include/tbb/machine/mac_ppc.h b/inst/include/tbb/machine/mac_ppc.h deleted file mode 100644 index 2f12c9817..000000000 --- a/inst/include/tbb/machine/mac_ppc.h +++ /dev/null @@ -1,313 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_power_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_gcc_power_H - -#include <stdint.h> -#include <unistd.h> - -// TODO: rename to gcc_power.h? -// This file is for Power Architecture with compilers supporting GNU inline-assembler syntax (currently GNU g++ and IBM XL). -// Note that XL V9.0 (sometimes?) has trouble dealing with empty input and/or clobber lists, so they should be avoided. - -#if __powerpc64__ || __ppc64__ - // IBM XL documents __powerpc64__ (and __PPC64__). - // Apple documents __ppc64__ (with __ppc__ only on 32-bit). - #define __TBB_WORDSIZE 8 -#else - #define __TBB_WORDSIZE 4 -#endif - -// Traditionally Power Architecture is big-endian. -// Little-endian could be just an address manipulation (compatibility with TBB not verified), -// or normal little-endian (on more recent systems). Embedded PowerPC systems may support -// page-specific endianness, but then one endianness must be hidden from TBB so that it still sees only one. -#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#elif defined(__BYTE_ORDER__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT -#endif - -// On Power Architecture, (lock-free) 64-bit atomics require 64-bit hardware: -#if __TBB_WORDSIZE==8 - // Do not change the following definition, because TBB itself will use 64-bit atomics in 64-bit builds. - #define __TBB_64BIT_ATOMICS 1 -#elif __bgp__ - // Do not change the following definition, because this is known 32-bit hardware. - #define __TBB_64BIT_ATOMICS 0 -#else - // To enable 64-bit atomics in 32-bit builds, set the value below to 1 instead of 0. - // You must make certain that the program will only use them on actual 64-bit hardware - // (which typically means that the entire program is only executed on such hardware), - // because their implementation involves machine instructions that are illegal elsewhere. - // The setting can be chosen independently per compilation unit, - // which also means that TBB itself does not need to be rebuilt. - // Alternatively (but only for the current architecture and TBB version), - // override the default as a predefined macro when invoking the compiler. 
- #ifndef __TBB_64BIT_ATOMICS - #define __TBB_64BIT_ATOMICS 0 - #endif -#endif - -inline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand ) -{ - int32_t result; - - __asm__ __volatile__("sync\n" - "0:\n\t" - "lwarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ - "cmpw %[res],%[cmp]\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stwcx. %[val],0,%[ptr]\n\t" /* store new value */ - "bne- 0b\n" /* retry if reservation lost */ - "1:\n\t" /* the exit */ - "isync" - : [res]"=&r"(result) - , "+m"(* (int32_t*) ptr) /* redundant with "memory" */ - : [ptr]"r"(ptr) - , [val]"r"(value) - , [cmp]"r"(comparand) - : "memory" /* compiler full fence */ - , "cr0" /* clobbered by cmp and/or stwcx. */ - ); - return result; -} - -#if __TBB_WORDSIZE==8 - -inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; - __asm__ __volatile__("sync\n" - "0:\n\t" - "ldarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ - "cmpd %[res],%[cmp]\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stdcx. %[val],0,%[ptr]\n\t" /* store new value */ - "bne- 0b\n" /* retry if reservation lost */ - "1:\n\t" /* the exit */ - "isync" - : [res]"=&r"(result) - , "+m"(* (int64_t*) ptr) /* redundant with "memory" */ - : [ptr]"r"(ptr) - , [val]"r"(value) - , [cmp]"r"(comparand) - : "memory" /* compiler full fence */ - , "cr0" /* clobbered by cmp and/or stdcx. */ - ); - return result; -} - -#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */ - -inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; - int64_t value_register, comparand_register, result_register; // dummy variables to allocate registers - __asm__ __volatile__("sync\n\t" - "ld %[val],%[valm]\n\t" - "ld %[cmp],%[cmpm]\n" - "0:\n\t" - "ldarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ - "cmpd %[res],%[cmp]\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stdcx. %[val],0,%[ptr]\n\t" /* store new value */ - "bne- 0b\n" /* retry if reservation lost */ - "1:\n\t" /* the exit */ - "std %[res],%[resm]\n\t" - "isync" - : [resm]"=m"(result) - , [res] "=&r"( result_register) - , [val] "=&r"( value_register) - , [cmp] "=&r"(comparand_register) - , "+m"(* (int64_t*) ptr) /* redundant with "memory" */ - : [ptr] "r"(ptr) - , [valm]"m"(value) - , [cmpm]"m"(comparand) - : "memory" /* compiler full fence */ - , "cr0" /* clobbered by cmpd and/or stdcx. 
*/ - ); - return result; -} - -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ - -#define __TBB_MACHINE_DEFINE_LOAD_STORE(S,ldx,stx,cmpx) \ - template <typename T> \ - struct machine_load_store<T,S> { \ - static inline T load_with_acquire(const volatile T& location) { \ - T result; \ - __asm__ __volatile__(ldx " %[res],0(%[ptr])\n" \ - "0:\n\t" \ - cmpx " %[res],%[res]\n\t" \ - "bne- 0b\n\t" \ - "isync" \ - : [res]"=r"(result) \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , "m"(location) /* redundant with "memory" */ \ - : "memory" /* compiler acquire fence */ \ - , "cr0" /* clobbered by cmpw/cmpd */); \ - return result; \ - } \ - static inline void store_with_release(volatile T &location, T value) { \ - __asm__ __volatile__("lwsync\n\t" \ - stx " %[val],0(%[ptr])" \ - : "=m"(location) /* redundant with "memory" */ \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , [val]"r"(value) \ - : "memory"/*compiler release fence*/ /*(cr0 not affected)*/); \ - } \ - }; \ - \ - template <typename T> \ - struct machine_load_store_relaxed<T,S> { \ - static inline T load (const __TBB_atomic T& location) { \ - T result; \ - __asm__ __volatile__(ldx " %[res],0(%[ptr])" \ - : [res]"=r"(result) \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , "m"(location) \ - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ \ - return result; \ - } \ - static inline void store (__TBB_atomic T &location, T value) { \ - __asm__ __volatile__(stx " %[val],0(%[ptr])" \ - : "=m"(location) \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , [val]"r"(value) \ - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ \ - } \ - }; - -namespace tbb { -namespace internal { - __TBB_MACHINE_DEFINE_LOAD_STORE(1,"lbz","stb","cmpw") - __TBB_MACHINE_DEFINE_LOAD_STORE(2,"lhz","sth","cmpw") - __TBB_MACHINE_DEFINE_LOAD_STORE(4,"lwz","stw","cmpw") - -#if __TBB_WORDSIZE==8 - - __TBB_MACHINE_DEFINE_LOAD_STORE(8,"ld" ,"std","cmpd") - -#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */ - - template <typename T> - struct machine_load_store<T,8> { - static inline T load_with_acquire(const volatile T& location) { - T result; - T result_register; // dummy variable to allocate a register - __asm__ __volatile__("ld %[res],0(%[ptr])\n\t" - "std %[res],%[resm]\n" - "0:\n\t" - "cmpd %[res],%[res]\n\t" - "bne- 0b\n\t" - "isync" - : [resm]"=m"(result) - , [res]"=&r"(result_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , "m"(location) /* redundant with "memory" */ - : "memory" /* compiler acquire fence */ - , "cr0" /* clobbered by cmpd */); - return result; - } - - static inline void store_with_release(volatile T &location, T value) { - T value_register; // dummy variable to allocate a register - __asm__ __volatile__("lwsync\n\t" - "ld %[val],%[valm]\n\t" - "std %[val],0(%[ptr])" - : "=m"(location) /* redundant with "memory" */ - , [val]"=&r"(value_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , [valm]"m"(value) - : "memory"/*compiler release fence*/ /*(cr0 not affected)*/); - } - }; - - template <typename T> - struct machine_load_store_relaxed<T,8> { - static inline T load (const volatile T& location) { - T result; - T result_register; // dummy variable to allocate a register - __asm__ __volatile__("ld %[res],0(%[ptr])\n\t" - "std %[res],%[resm]" - : [resm]"=m"(result) - , [res]"=&r"(result_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , "m"(location) - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ - return result; - } - - static inline void store (volatile T &location, T value) { - T value_register; // 
dummy variable to allocate a register - __asm__ __volatile__("ld %[val],%[valm]\n\t" - "std %[val],0(%[ptr])" - : "=m"(location) - , [val]"=&r"(value_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , [valm]"m"(value) - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ - } - }; - #define __TBB_machine_load_store_relaxed_8 - -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ - -}} // namespaces internal, tbb - -#undef __TBB_MACHINE_DEFINE_LOAD_STORE - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#define __TBB_control_consistency_helper() __asm__ __volatile__("isync": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__( "sync": : :"memory") - -static inline intptr_t __TBB_machine_lg( uintptr_t x ) { - __TBB_ASSERT(x, "__TBB_Log2(0) undefined"); - // cntlzd/cntlzw starts counting at 2^63/2^31 (ignoring any higher-order bits), and does not affect cr0 -#if __TBB_WORDSIZE==8 - __asm__ __volatile__ ("cntlzd %0,%0" : "+r"(x)); - return 63-static_cast<intptr_t>(x); -#else - __asm__ __volatile__ ("cntlzw %0,%0" : "+r"(x)); - return 31-static_cast<intptr_t>(x); -#endif -} -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Assumes implicit alignment for any 32-bit value -typedef uint32_t __TBB_Flag; -#define __TBB_Flag __TBB_Flag - -inline bool __TBB_machine_trylockbyte( __TBB_atomic __TBB_Flag &flag ) { - return __TBB_machine_cmpswp4(&flag,1,0)==0; -} -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/inst/include/tbb/machine/macos_common.h b/inst/include/tbb/machine/macos_common.h deleted file mode 100644 index dffcea5d7..000000000 --- a/inst/include/tbb/machine/macos_common.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_macos_common_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
-#endif - -#define __TBB_machine_macos_common_H - -#include <sched.h> -#define __TBB_Yield() sched_yield() - -// __TBB_HardwareConcurrency - -#include <sys/types.h> -#include <sys/sysctl.h> - -static inline int __TBB_macos_available_cpu() { - int name[2] = {CTL_HW, HW_AVAILCPU}; - int ncpu; - size_t size = sizeof(ncpu); - sysctl( name, 2, &ncpu, &size, NULL, 0 ); - return ncpu; -} - -#define __TBB_HardwareConcurrency() __TBB_macos_available_cpu() - -#ifndef __TBB_full_memory_fence - // TBB has not recognized the architecture (none of the architecture abstraction - // headers was included). - #define __TBB_UnknownArchitecture 1 -#endif - -#if __TBB_UnknownArchitecture -// Implementation of atomic operations based on OS provided primitives -#include <libkern/OSAtomic.h> - -static inline int64_t __TBB_machine_cmpswp8_OsX(volatile void *ptr, int64_t value, int64_t comparand) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), "address not properly aligned for OS X* atomics"); - int64_t* address = (int64_t*)ptr; - while( !OSAtomicCompareAndSwap64Barrier(comparand, value, address) ){ -#if __TBB_WORDSIZE==8 - int64_t snapshot = *address; -#else - int64_t snapshot = OSAtomicAdd64( 0, address ); -#endif - if( snapshot!=comparand ) return snapshot; - } - return comparand; -} - -#define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8_OsX - -#endif /* __TBB_UnknownArchitecture */ - -#if __TBB_UnknownArchitecture - -#ifndef __TBB_WORDSIZE -#define __TBB_WORDSIZE 4 -#endif - -#ifdef __TBB_ENDIANNESS - // Already determined based on hardware architecture. -#elif __BIG_ENDIAN__ - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#endif - -/** As this generic implementation has absolutely no information about underlying - hardware, its performance most likely will be sub-optimal because of full memory - fence usages where a more lightweight synchronization means (or none at all) - could suffice. Thus if you use this header to enable TBB on a new platform, - consider forking it and relaxing below helpers as appropriate. 
**/ -#define __TBB_control_consistency_helper() OSMemoryBarrier() -#define __TBB_acquire_consistency_helper() OSMemoryBarrier() -#define __TBB_release_consistency_helper() OSMemoryBarrier() -#define __TBB_full_memory_fence() OSMemoryBarrier() - -static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), "address not properly aligned for OS X* atomics"); - int32_t* address = (int32_t*)ptr; - while( !OSAtomicCompareAndSwap32Barrier(comparand, value, address) ){ - int32_t snapshot = *address; - if( snapshot!=comparand ) return snapshot; - } - return comparand; -} - -static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), "address not properly aligned for OS X* atomics"); - return OSAtomicAdd32Barrier(addend, (int32_t*)ptr) - addend; -} - -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), "address not properly aligned for OS X* atomics"); - return OSAtomicAdd64Barrier(addend, (int64_t*)ptr) - addend; -} - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#if __TBB_WORDSIZE == 4 - #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#endif -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#endif /* __TBB_UnknownArchitecture */ diff --git a/inst/include/tbb/machine/mic_common.h b/inst/include/tbb/machine/mic_common.h deleted file mode 100644 index 1f522da1f..000000000 --- a/inst/include/tbb/machine/mic_common.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_mic_common_H -#define __TBB_mic_common_H - -#ifndef __TBB_machine_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if ! 
__TBB_DEFINE_MIC - #error mic_common.h should be included only when building for Intel(R) Many Integrated Core Architecture -#endif - -#ifndef __TBB_PREFETCHING -#define __TBB_PREFETCHING 1 -#endif -#if __TBB_PREFETCHING -#include <immintrin.h> -#define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1) -#define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1) -#endif - -/** Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/ -#define __TBB_full_memory_fence() __asm__ __volatile__("lock; addl $0,(%%rsp)":::"memory") -#define __TBB_Pause(x) _mm_delay_32(16*(x)) -#define __TBB_STEALING_PAUSE 1500/16 -#include <sched.h> -#define __TBB_Yield() sched_yield() - -// low-level timing intrinsic and its type -#define __TBB_machine_time_stamp() _rdtsc() -typedef uint64_t machine_tsc_t; - -/** Specifics **/ -#define __TBB_STEALING_ABORT_ON_CONTENTION 1 -#define __TBB_YIELD2P 1 -#define __TBB_HOARD_NONLOCAL_TASKS 1 - -#if ! ( __FreeBSD__ || __linux__ ) - #error Intel(R) Many Integrated Core Compiler does not define __FreeBSD__ or __linux__ anymore. Check for the __TBB_XXX_BROKEN defined under __FreeBSD__ or __linux__. -#endif /* ! ( __FreeBSD__ || __linux__ ) */ - -#endif /* __TBB_mic_common_H */ diff --git a/inst/include/tbb/machine/msvc_armv7.h b/inst/include/tbb/machine/msvc_armv7.h deleted file mode 100644 index b96511c75..000000000 --- a/inst/include/tbb/machine/msvc_armv7.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_msvc_armv7_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
-#endif - -#define __TBB_msvc_armv7_H - -#include <intrin.h> -#include <float.h> - -#define __TBB_WORDSIZE 4 - -#define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED - -#if defined(TBB_WIN32_USE_CL_BUILTINS) -// We can test this on _M_IX86 -#pragma intrinsic(_ReadWriteBarrier) -#pragma intrinsic(_mm_mfence) -#define __TBB_compiler_fence() _ReadWriteBarrier() -#define __TBB_full_memory_fence() _mm_mfence() -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() -#else -//Now __dmb(_ARM_BARRIER_SY) is used for both compiler and memory fences -//This might be changed later after testing -#define __TBB_compiler_fence() __dmb(_ARM_BARRIER_SY) -#define __TBB_full_memory_fence() __dmb(_ARM_BARRIER_SY) -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence() -#define __TBB_release_consistency_helper() __TBB_full_memory_fence() -#endif - -//-------------------------------------------------- -// Compare and swap -//-------------------------------------------------- - -/** - * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - * @return value originally in memory at ptr, regardless of success -*/ - -#define __TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(S,T,F) \ -inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ - return _InterlockedCompareExchange##F(reinterpret_cast<volatile T*>(ptr),value,comparand); \ -} \ - -#define __TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(S,T,F) \ -inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ - return _InterlockedExchangeAdd##F(reinterpret_cast<volatile T*>(ptr),value); \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(1,char,8) -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(2,short,16) -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(4,long,) -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(8,__int64,64) -__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(4,long,) -#if defined(TBB_WIN32_USE_CL_BUILTINS) -// No _InterlockedExchangeAdd64 intrinsic on _M_IX86 -#define __TBB_64BIT_ATOMICS 0 -#else -__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(8,__int64,64) -#endif - -inline void __TBB_machine_pause (int32_t delay ) -{ - while(delay>0) - { - __TBB_compiler_fence(); - delay--; - } -} - -// API to retrieve/update FPU control setting -#define __TBB_CPU_CTL_ENV_PRESENT 1 - -namespace tbb { -namespace internal { - -template <typename T, size_t S> -struct machine_load_store_relaxed { - static inline T load ( const volatile T& location ) { - const T value = location; - - /* - * An extra memory barrier is required for errata #761319 - * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a - */ - __TBB_acquire_consistency_helper(); - return value; - } - - static inline void store ( volatile T& location, T value ) { - location = value; - } -}; - -class cpu_ctl_env { -private: - unsigned int my_ctl; -public: - bool operator!=( const cpu_ctl_env& ctl ) const { return my_ctl != ctl.my_ctl; } - void get_env() { my_ctl = _control87(0, 0); } - void set_env() const { _control87( my_ctl, ~0U ); } -}; - -} // namespace internal -} // namespaces tbb - -// Machine specific atomic operations -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_Pause(V) __TBB_machine_pause(V) - -// Use generics for some things -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#if defined(TBB_WIN32_USE_CL_BUILTINS) -#if !__TBB_WIN8UI_SUPPORT -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#else -#include <thread> -#define __TBB_Yield() std::this_thread::yield() -#endif -#else -#define __TBB_Yield() __yield() -#endif - -// Machine specific atomic operations -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -template <typename T1, typename T2> -inline void __TBB_machine_OR( T1 *operand, T2 addend ) { - _InterlockedOr((long volatile *)operand, (long)addend); -} - -template <typename T1, typename T2> -inline void __TBB_machine_AND( T1 *operand, T2 addend ) { - _InterlockedAnd((long volatile *)operand, (long)addend); -} - diff --git a/inst/include/tbb/machine/msvc_ia32_common.h b/inst/include/tbb/machine/msvc_ia32_common.h deleted file mode 100644 index 184c3dac3..000000000 --- a/inst/include/tbb/machine/msvc_ia32_common.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_msvc_ia32_common_H -#define __TBB_machine_msvc_ia32_common_H - -#include - -//TODO: consider moving this macro to tbb_config.h and used there MSVC asm is used -#if !_M_X64 || __INTEL_COMPILER - #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 1 - - #if _M_X64 - #define __TBB_r(reg_name) r##reg_name - #else - #define __TBB_r(reg_name) e##reg_name - #endif -#else - //MSVC in x64 mode does not accept inline assembler - #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 0 -#endif - -#define __TBB_NO_X86_MSVC_INLINE_ASM_MSG "The compiler being used is not supported (outdated?)" - -#if (_MSC_VER >= 1300) || (__INTEL_COMPILER) //Use compiler intrinsic when available - #define __TBB_PAUSE_USE_INTRINSIC 1 - #pragma intrinsic(_mm_pause) - namespace tbb { namespace internal { namespace intrinsics { namespace msvc { - static inline void __TBB_machine_pause (uintptr_t delay ) { - for (;delay>0; --delay ) - _mm_pause(); - } - }}}} -#else - #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE - #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG - #endif - - namespace tbb { namespace internal { namespace inline_asm { namespace msvc { - static inline void __TBB_machine_pause (uintptr_t delay ) { - _asm - { - mov __TBB_r(ax), delay - __TBB_L1: - pause - add __TBB_r(ax), -1 - jne __TBB_L1 - } - return; - } - }}}} -#endif - -static inline void __TBB_machine_pause (uintptr_t delay ){ - #if __TBB_PAUSE_USE_INTRINSIC - tbb::internal::intrinsics::msvc::__TBB_machine_pause(delay); - #else - tbb::internal::inline_asm::msvc::__TBB_machine_pause(delay); - #endif -} - -//TODO: move this function to windows_api.h or to place where it is used -#if (_MSC_VER<1400) && (!_WIN64) && (__TBB_X86_MSVC_INLINE_ASM_AVAILABLE) - static inline void* __TBB_machine_get_current_teb () { - void* pteb; - __asm mov eax, fs:[0x18] - __asm mov pteb, eax - return pteb; - } -#endif - -#if ( _MSC_VER>=1400 && !defined(__INTEL_COMPILER) ) || (__INTEL_COMPILER>=1200) -// MSVC did not have this intrinsic prior to VC8. -// ICL 11.1 fails to compile a TBB example if __TBB_Log2 uses the intrinsic. 
- #define __TBB_LOG2_USE_BSR_INTRINSIC 1 - #if _M_X64 - #define __TBB_BSR_INTRINSIC _BitScanReverse64 - #else - #define __TBB_BSR_INTRINSIC _BitScanReverse - #endif - #pragma intrinsic(__TBB_BSR_INTRINSIC) - - namespace tbb { namespace internal { namespace intrinsics { namespace msvc { - inline uintptr_t __TBB_machine_lg( uintptr_t i ){ - unsigned long j; - __TBB_BSR_INTRINSIC( &j, i ); - return j; - } - }}}} -#else - #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE - #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG - #endif - - namespace tbb { namespace internal { namespace inline_asm { namespace msvc { - inline uintptr_t __TBB_machine_lg( uintptr_t i ){ - uintptr_t j; - __asm - { - bsr __TBB_r(ax), i - mov j, __TBB_r(ax) - } - return j; - } - }}}} -#endif - -static inline intptr_t __TBB_machine_lg( uintptr_t i ) { -#if __TBB_LOG2_USE_BSR_INTRINSIC - return tbb::internal::intrinsics::msvc::__TBB_machine_lg(i); -#else - return tbb::internal::inline_asm::msvc::__TBB_machine_lg(i); -#endif -} - -// API to retrieve/update FPU control setting -#define __TBB_CPU_CTL_ENV_PRESENT 1 - -namespace tbb { namespace internal { class cpu_ctl_env; } } -#if __TBB_X86_MSVC_INLINE_ASM_AVAILABLE - inline void __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ctl ) { - __asm { - __asm mov __TBB_r(ax), ctl - __asm stmxcsr [__TBB_r(ax)] - __asm fstcw [__TBB_r(ax)+4] - } - } - inline void __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ctl ) { - __asm { - __asm mov __TBB_r(ax), ctl - __asm ldmxcsr [__TBB_r(ax)] - __asm fldcw [__TBB_r(ax)+4] - } - } -#else - extern "C" { - void __TBB_EXPORTED_FUNC __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ); - void __TBB_EXPORTED_FUNC __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ); - } -#endif - -namespace tbb { -namespace internal { -class cpu_ctl_env { -private: - int mxcsr; - short x87cw; - static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */ -public: - bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; } - void get_env() { - __TBB_get_cpu_ctl_env( this ); - mxcsr &= MXCSR_CONTROL_MASK; - } - void set_env() const { __TBB_set_cpu_ctl_env( this ); } -}; -} // namespace internal -} // namespace tbb - -#if !__TBB_WIN8UI_SUPPORT -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#else -#include -#define __TBB_Yield() std::this_thread::yield() -#endif - -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#undef __TBB_r - -extern "C" { - __int8 __TBB_EXPORTED_FUNC __TBB_machine_try_lock_elided (volatile void* ptr); - void __TBB_EXPORTED_FUNC __TBB_machine_unlock_elided (volatile void* ptr); - - // 'pause' instruction aborts HLE/RTM transactions -#if __TBB_PAUSE_USE_INTRINSIC - inline static void __TBB_machine_try_lock_elided_cancel() { _mm_pause(); } -#else - inline static void __TBB_machine_try_lock_elided_cancel() { _asm pause; } -#endif - -#if __TBB_TSX_INTRINSICS_PRESENT - #define __TBB_machine_is_in_transaction _xtest - #define __TBB_machine_begin_transaction _xbegin - #define __TBB_machine_end_transaction _xend - // The value (0xFF) below comes from the - // Intel(R) 64 and IA-32 Architectures Optimization Reference Manual 12.4.5 lock not free - #define __TBB_machine_transaction_conflict_abort() _xabort(0xFF) -#else - __int8 __TBB_EXPORTED_FUNC __TBB_machine_is_in_transaction(); - unsigned __int32 __TBB_EXPORTED_FUNC __TBB_machine_begin_transaction(); - void __TBB_EXPORTED_FUNC 
__TBB_machine_end_transaction(); - void __TBB_EXPORTED_FUNC __TBB_machine_transaction_conflict_abort(); -#endif /* __TBB_TSX_INTRINSICS_PRESENT */ -} - -#endif /* __TBB_machine_msvc_ia32_common_H */ diff --git a/inst/include/tbb/machine/sunos_sparc.h b/inst/include/tbb/machine/sunos_sparc.h deleted file mode 100644 index 1d4fbf744..000000000 --- a/inst/include/tbb/machine/sunos_sparc.h +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_sunos_sparc_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_sunos_sparc_H - -#include <stdint.h> -#include <unistd.h> - -#define __TBB_WORDSIZE 8 -// Big endian is assumed for SPARC. -// While hardware may support page-specific bi-endianness, only big endian pages may be exposed to TBB -#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG - -/** To those working on SPARC hardware. Consider relaxing acquire and release - consistency helpers to no-op (as this port covers TSO mode only). 
**/ -#define __TBB_compiler_fence() __asm__ __volatile__ ("": : :"memory") -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() -#define __TBB_full_memory_fence() __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad": : : "memory") - -//-------------------------------------------------- -// Compare and swap -//-------------------------------------------------- - -/** - * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - ( @return value originally in memory at ptr, regardless of success -*/ -static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){ - int32_t result; - __asm__ __volatile__( - "cas\t[%5],%4,%1" - : "=m"(*(int32_t *)ptr), "=r"(result) - : "m"(*(int32_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) - : "memory"); - return result; -} - -/** - * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - ( @return value originally in memory at ptr, regardless of success - */ -static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){ - int64_t result; - __asm__ __volatile__( - "casx\t[%5],%4,%1" - : "=m"(*(int64_t *)ptr), "=r"(result) - : "m"(*(int64_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) - : "memory"); - return result; -} - -//--------------------------------------------------- -// Fetch and add -//--------------------------------------------------- - -/** - * Atomic fetch and add for 32 bit values, in this case implemented by continuously checking success of atomicity - * @param ptr pointer to value to add addend to - * @param addened value to add to *ptr - * @return value at ptr before addened was added - */ -static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){ - int32_t result; - __asm__ __volatile__ ( - "0:\t add\t %3, %4, %0\n" // do addition - "\t cas\t [%2], %3, %0\n" // cas to store result in memory - "\t cmp\t %3, %0\n" // check if value from memory is original - "\t bne,a,pn\t %%icc, 0b\n" // if not try again - "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added - : "=&r"(result), "=m"(*(int32_t *)ptr) - : "r"(ptr), "r"(*(int32_t *)ptr), "r"(addend), "m"(*(int32_t *)ptr) - : "ccr", "memory"); - return result; -} - -/** - * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity - * @param ptr pointer to value to add addend to - * @param addened value to add to *ptr - * @return value at ptr before addened was added - */ -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){ - int64_t result; - __asm__ __volatile__ ( - "0:\t add\t %3, %4, %0\n" // do addition - "\t casx\t [%2], %3, %0\n" // cas to store result in memory - "\t cmp\t %3, %0\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added - : "=&r"(result), 
"=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "memory"); - return result; -} - -//-------------------------------------------------------- -// Logarithm (base two, integer) -//-------------------------------------------------------- - -static inline int64_t __TBB_machine_lg( uint64_t x ) { - __TBB_ASSERT(x, "__TBB_Log2(0) undefined"); - uint64_t count; - // one hot encode - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); - x |= (x >> 32); - // count 1's - __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) ); - return count-1; -} - -//-------------------------------------------------------- - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__ ( - "0:\t or\t %2, %3, %%g1\n" // do operation - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(value), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__ ( - "0:\t and\t %2, %3, %%g1\n" // do operation - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(value), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - - -static inline void __TBB_machine_pause( int32_t delay ) { - // do nothing, inlined, doesn't matter -} - -// put 0xff in memory location, return memory value, -// generic trylockbyte puts 0x01, however this is fine -// because all that matters is that 0 is unlocked -static inline bool __TBB_machine_trylockbyte(unsigned char &flag){ - unsigned char result; - __asm__ __volatile__ ( - "ldstub\t [%2], %0\n" - : "=r"(result), "=m"(flag) - : "r"(&flag), "m"(flag) - : "memory"); - return result == 0; -} - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -// Definition of other functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/inst/include/tbb/machine/windows_api.h b/inst/include/tbb/machine/windows_api.h deleted file mode 100644 index c0ccc24c5..000000000 --- a/inst/include/tbb/machine/windows_api.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_machine_windows_api_H -#define __TBB_machine_windows_api_H - -#if _WIN32 || _WIN64 - -#if _XBOX - -#define NONET -#define NOD3D -#include <xtl.h> - -#else // Assume "usual" Windows - -#include <windows.h> - -#endif // _XBOX - -#if _WIN32_WINNT < 0x0600 -// The following Windows API function is declared explicitly; -// otherwise it fails to compile by VS2005. -#if !defined(WINBASEAPI) || (_WIN32_WINNT < 0x0501 && _MSC_VER == 1400) -#define __TBB_WINBASEAPI extern "C" -#else -#define __TBB_WINBASEAPI WINBASEAPI -#endif -__TBB_WINBASEAPI BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION ); -__TBB_WINBASEAPI BOOL WINAPI InitializeCriticalSectionAndSpinCount( LPCRITICAL_SECTION, DWORD ); -// Overloading WINBASEAPI macro and using local functions missing in Windows XP/2003 -#define InitializeCriticalSectionEx inlineInitializeCriticalSectionEx -#define CreateSemaphoreEx inlineCreateSemaphoreEx -#define CreateEventEx inlineCreateEventEx -inline BOOL WINAPI inlineInitializeCriticalSectionEx( LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD ) -{ - return InitializeCriticalSectionAndSpinCount( lpCriticalSection, dwSpinCount ); -} -inline HANDLE WINAPI inlineCreateSemaphoreEx( LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, LONG lInitialCount, LONG lMaximumCount, LPCTSTR lpName, DWORD, DWORD ) -{ - return CreateSemaphore( lpSemaphoreAttributes, lInitialCount, lMaximumCount, lpName ); -} -inline HANDLE WINAPI inlineCreateEventEx( LPSECURITY_ATTRIBUTES lpEventAttributes, LPCTSTR lpName, DWORD dwFlags, DWORD ) -{ - BOOL manual_reset = dwFlags&0x00000001 ? TRUE : FALSE; // CREATE_EVENT_MANUAL_RESET - BOOL initial_set = dwFlags&0x00000002 ? TRUE : FALSE; // CREATE_EVENT_INITIAL_SET - return CreateEvent( lpEventAttributes, manual_reset, initial_set, lpName ); -} -#endif - -#if defined(RTL_SRWLOCK_INIT) -#ifndef __TBB_USE_SRWLOCK -// TODO: turn it on when bug 1952 will be fixed -#define __TBB_USE_SRWLOCK 0 -#endif -#endif - -#else -#error tbb/machine/windows_api.h should only be used for Windows based platforms -#endif // _WIN32 || _WIN64 - -#endif // __TBB_machine_windows_api_H diff --git a/inst/include/tbb/machine/windows_ia32.h b/inst/include/tbb/machine/windows_ia32.h deleted file mode 100644 index 3a38634c9..000000000 --- a/inst/include/tbb/machine/windows_ia32.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_ia32_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_windows_ia32_H - -#include "msvc_ia32_common.h" - -#define __TBB_WORDSIZE 4 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100) - #define __TBB_compiler_fence() __asm { __asm nop } - #define __TBB_full_memory_fence() __asm { __asm mfence } -#elif _MSC_VER >= 1300 || __INTEL_COMPILER - #pragma intrinsic(_ReadWriteBarrier) - #pragma intrinsic(_mm_mfence) - #define __TBB_compiler_fence() _ReadWriteBarrier() - #define __TBB_full_memory_fence() _mm_mfence() -#else - #error Unsupported compiler - need to define __TBB_{control,acquire,release}_consistency_helper to support it -#endif - -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4244 4267) -#endif - -extern "C" { - __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ); - void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr); -} - -//TODO: use _InterlockedXXX intrinsics as they available since VC 2005 -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,U,A,C) \ -static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov C , value \ - __asm mov A , comparand \ - __asm lock cmpxchg [edx], C \ - __asm mov result, A \ - } \ - return result; \ -} \ -\ -static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, addend \ - __asm lock xadd [edx], 
A \ - __asm mov result, A \ - } \ - return result; \ -}\ -\ -static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, value \ - __asm lock xchg [edx], A \ - __asm mov result, A \ - } \ - return result; \ -} - - -__TBB_MACHINE_DEFINE_ATOMICS(1, __int8, __int8, al, cl) -__TBB_MACHINE_DEFINE_ATOMICS(2, __int16, __int16, ax, cx) -__TBB_MACHINE_DEFINE_ATOMICS(4, ptrdiff_t, ptrdiff_t, eax, ecx) - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -static inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock or [edx], eax - } -} - -static inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock and [edx], eax - } -} - -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -//TODO: Check if it possible and profitable for IA-32 architecture on (Linux and Windows) -//to use of 64-bit load/store via floating point registers together with full fence -//for sequentially consistent load/store, instead of CAS. -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - diff --git a/inst/include/tbb/machine/windows_intel64.h b/inst/include/tbb/machine/windows_intel64.h deleted file mode 100644 index 03795efd9..000000000 --- a/inst/include/tbb/machine/windows_intel64.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_intel64_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
-#endif
-
-#define __TBB_machine_windows_intel64_H
-
-#define __TBB_WORDSIZE 8
-#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
-
-#include <intrin.h>
-#include "msvc_ia32_common.h"
-
-//TODO: Use _InterlockedXXX16 intrinsics for 2 byte operations
-#if !__INTEL_COMPILER
-    #pragma intrinsic(_InterlockedOr64)
-    #pragma intrinsic(_InterlockedAnd64)
-    #pragma intrinsic(_InterlockedCompareExchange)
-    #pragma intrinsic(_InterlockedCompareExchange64)
-    #pragma intrinsic(_InterlockedExchangeAdd)
-    #pragma intrinsic(_InterlockedExchangeAdd64)
-    #pragma intrinsic(_InterlockedExchange)
-    #pragma intrinsic(_InterlockedExchange64)
-#endif /* !(__INTEL_COMPILER) */
-
-#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100)
-    #define __TBB_compiler_fence()    __asm { __asm nop }
-    #define __TBB_full_memory_fence() __asm { __asm mfence }
-#elif _MSC_VER >= 1300 || __INTEL_COMPILER
-    #pragma intrinsic(_ReadWriteBarrier)
-    #pragma intrinsic(_mm_mfence)
-    #define __TBB_compiler_fence()    _ReadWriteBarrier()
-    #define __TBB_full_memory_fence() _mm_mfence()
-#endif
-
-#define __TBB_control_consistency_helper() __TBB_compiler_fence()
-#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
-#define __TBB_release_consistency_helper() __TBB_compiler_fence()
-
-// ATTENTION: if you ever change argument types in machine-specific primitives,
-// please take care of atomic_word<> specializations in tbb/atomic.h
-extern "C" {
-    __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand );
-    __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend );
-    __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value );
-    __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand );
-    __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend );
-    __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value );
-}
-
-inline long __TBB_machine_cmpswp4 (volatile void *ptr, __int32 value, __int32 comparand ) {
-    return _InterlockedCompareExchange( (long*)ptr, value, comparand );
-}
-inline long __TBB_machine_fetchadd4 (volatile void *ptr, __int32 addend ) {
-    return _InterlockedExchangeAdd( (long*)ptr, addend );
-}
-inline long __TBB_machine_fetchstore4 (volatile void *ptr, __int32 value ) {
-    return _InterlockedExchange( (long*)ptr, value );
-}
-
-inline __int64 __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ) {
-    return _InterlockedCompareExchange64( (__int64*)ptr, value, comparand );
-}
-inline __int64 __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ) {
-    return _InterlockedExchangeAdd64( (__int64*)ptr, addend );
-}
-inline __int64 __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ) {
-    return _InterlockedExchange64( (__int64*)ptr, value );
-}
-
-#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1
-#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1
-#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1
-#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1
-
-inline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) {
-    _InterlockedOr64((__int64*)operand, addend);
-}
-
-inline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) {
-    _InterlockedAnd64((__int64*)operand, addend);
-}
-
-#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)
-#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)
-
diff --git a/inst/include/tbb/machine/xbox360_ppc.h
b/inst/include/tbb/machine/xbox360_ppc.h deleted file mode 100644 index 148e5b1d0..000000000 --- a/inst/include/tbb/machine/xbox360_ppc.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// TODO: revise by comparing with mac_ppc.h - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_xbox360_ppc_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_xbox360_ppc_H - -#define NONET -#define NOD3D -#include "xtl.h" -#include "ppcintrinsics.h" - -#if _MSC_VER >= 1300 -extern "C" void _MemoryBarrier(); -#pragma intrinsic(_MemoryBarrier) -#define __TBB_control_consistency_helper() __isync() -#define __TBB_acquire_consistency_helper() _MemoryBarrier() -#define __TBB_release_consistency_helper() _MemoryBarrier() -#endif - -#define __TBB_full_memory_fence() __sync() - -#define __TBB_WORDSIZE 4 -#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG - -//todo: define __TBB_USE_FENCED_ATOMICS and define acquire/release primitives to maximize performance - -inline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand ) { - __sync(); - __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand); - __isync(); - return result; -} - -inline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand ) -{ - __sync(); - __int64 result = InterlockedCompareExchange64((volatile LONG64*)ptr, value, comparand); - __isync(); - return result; -} - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#pragma optimize( "", off ) -inline void __TBB_machine_pause (__int32 delay ) -{ - for (__int32 i=0; i> 0) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 1) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1; // +1 accomodates for the master thread -} - 
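// (For orientation: the unrolled expression above is simply a population
//  count over the bits of __TBB_XBOX360_HARDWARE_THREAD_MASK, plus one slot
//  for the master thread. A compact equivalent, offered as an illustrative
//  sketch only; countWorkers and mask are invented names, not part of the
//  original header.)
static inline int countWorkers(int mask)
{
    int n = 0;
    while (mask != 0) {
        n += mask & 1;   // count one hardware-thread bit
        mask >>= 1;
    }
    return n + 1;        // +1 accommodates the master thread
}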
-static inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex) -{ - workerThreadIndex %= __TBB_XBOX360_DetectNumberOfWorkers()-1; - int m = __TBB_XBOX360_HARDWARE_THREAD_MASK; - int index = 0; - int skipcount = workerThreadIndex; - while (true) - { - if ((m & 1)!=0) - { - if (skipcount==0) break; - skipcount--; - } - m >>= 1; - index++; - } - return index; -} - -#define __TBB_HardwareConcurrency() __TBB_XBOX360_DetectNumberOfWorkers() diff --git a/inst/include/tbb/mutex.h b/inst/include/tbb/mutex.h deleted file mode 100644 index 32340f8b1..000000000 --- a/inst/include/tbb/mutex.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_mutex_H -#define __TBB_mutex_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif /* _WIN32||_WIN64 */ - -#include -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Wrapper around the platform's native reader-writer lock. -/** For testing purposes only. - @ingroup synchronization */ -class mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired mutex. - mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSectionEx(&impl, 4000, 0); - #else - int error_code = pthread_mutex_init(&impl,NULL); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( mutex& mutex ) { - acquire( mutex ); - } - - //! Release lock (if lock is held). 
- ~scoped_lock() {
- if( my_mutex )
- release();
- }
-
- //! Acquire lock on given mutex.
- void acquire( mutex& mutex ) {
-#if TBB_USE_ASSERT
- internal_acquire(mutex);
-#else
- mutex.lock();
- my_mutex = &mutex;
-#endif /* TBB_USE_ASSERT */
- }
-
- //! Try acquire lock on given mutex.
- bool try_acquire( mutex& mutex ) {
-#if TBB_USE_ASSERT
- return internal_try_acquire (mutex);
-#else
- bool result = mutex.try_lock();
- if( result )
- my_mutex = &mutex;
- return result;
-#endif /* TBB_USE_ASSERT */
- }
-
- //! Release lock
- void release() {
-#if TBB_USE_ASSERT
- internal_release ();
-#else
- my_mutex->unlock();
- my_mutex = NULL;
-#endif /* TBB_USE_ASSERT */
- }
-
- private:
- //! The pointer to the current mutex to work
- mutex* my_mutex;
-
- //! All checks from acquire using mutex.state were moved here
- void __TBB_EXPORTED_METHOD internal_acquire( mutex& m );
-
- //! All checks from try_acquire using mutex.state were moved here
- bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m );
-
- //! All checks from release using mutex.state were moved here
- void __TBB_EXPORTED_METHOD internal_release();
-
- friend class mutex;
- };
-
- // Mutex traits
- static const bool is_rw_mutex = false;
- static const bool is_recursive_mutex = false;
- static const bool is_fair_mutex = false;
-
- // ISO C++0x compatibility methods
-
- //! Acquire lock
- void lock() {
-#if TBB_USE_ASSERT
- aligned_space<scoped_lock,1> tmp;
- new(tmp.begin()) scoped_lock(*this);
-#else
- #if _WIN32||_WIN64
- EnterCriticalSection(&impl);
- #else
- int error_code = pthread_mutex_lock(&impl);
- if( error_code )
- tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_lock failed");
- #endif /* _WIN32||_WIN64 */
-#endif /* TBB_USE_ASSERT */
- }
-
- //! Try acquiring lock (non-blocking)
- /** Return true if lock acquired; false otherwise. */
- bool try_lock() {
-#if TBB_USE_ASSERT
- aligned_space<scoped_lock,1> tmp;
- scoped_lock& s = *tmp.begin();
- s.my_mutex = NULL;
- return s.internal_try_acquire(*this);
-#else
- #if _WIN32||_WIN64
- return TryEnterCriticalSection(&impl)!=0;
- #else
- return pthread_mutex_trylock(&impl)==0;
- #endif /* _WIN32||_WIN64 */
-#endif /* TBB_USE_ASSERT */
- }
-
- //! Release lock
- void unlock() {
-#if TBB_USE_ASSERT
- aligned_space<scoped_lock,1> tmp;
- scoped_lock& s = *tmp.begin();
- s.my_mutex = this;
- s.internal_release();
-#else
- #if _WIN32||_WIN64
- LeaveCriticalSection(&impl);
- #else
- pthread_mutex_unlock(&impl);
- #endif /* _WIN32||_WIN64 */
-#endif /* TBB_USE_ASSERT */
- }
-
- //! Return native_handle
- #if _WIN32||_WIN64
- typedef LPCRITICAL_SECTION native_handle_type;
- #else
- typedef pthread_mutex_t* native_handle_type;
- #endif
- native_handle_type native_handle() { return (native_handle_type) &impl; }
-
- enum state_t {
- INITIALIZED=0x1234,
- DESTROYED=0x789A,
- HELD=0x56CD
- };
-private:
-#if _WIN32||_WIN64
- CRITICAL_SECTION impl;
- enum state_t state;
-#else
- pthread_mutex_t impl;
-#endif /* _WIN32||_WIN64 */
-
- //! All checks from mutex constructor using mutex.state were moved here
- void __TBB_EXPORTED_METHOD internal_construct();
-
- //! All checks from mutex destructor using mutex.state were moved here
- void __TBB_EXPORTED_METHOD internal_destroy();
-
-#if _WIN32||_WIN64
-public:
- //!
Set the internal state - void set_state( state_t to ) { state = to; } -#endif -}; - -__TBB_DEFINE_PROFILING_SET_NAME(mutex) - -} // namespace tbb - -#endif /* __TBB_mutex_H */ diff --git a/inst/include/tbb/null_mutex.h b/inst/include/tbb/null_mutex.h deleted file mode 100644 index 240e9bdcc..000000000 --- a/inst/include/tbb/null_mutex.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_null_mutex_H -#define __TBB_null_mutex_H - -#include "tbb_stddef.h" - -namespace tbb { - -//! A mutex which does nothing -/** A null_mutex does no operation and simulates success. - @ingroup synchronization */ -class null_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Represents acquisition of a mutex. - class scoped_lock : internal::no_copy { - public: - scoped_lock() {} - scoped_lock( null_mutex& ) {} - ~scoped_lock() {} - void acquire( null_mutex& ) {} - bool try_acquire( null_mutex& ) { return true; } - void release() {} - }; - - null_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} - -#endif /* __TBB_null_mutex_H */ diff --git a/inst/include/tbb/null_rw_mutex.h b/inst/include/tbb/null_rw_mutex.h deleted file mode 100644 index 813f79f39..000000000 --- a/inst/include/tbb/null_rw_mutex.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
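// (A note on the two mutex headers deleted above: tbb::mutex wraps the
//  platform primitive behind the RAII scoped_lock pattern, and null_mutex is
//  a no-op stand-in with the same interface, so templated code can compile
//  locking away entirely. A minimal usage sketch against this classic
//  pre-oneTBB API; increment, counter, and guard are illustrative names,
//  not part of the sources.)
#include "tbb/mutex.h"
#include "tbb/null_mutex.h"

template <typename Mutex>
void increment(Mutex& m, long& counter)
{
    typename Mutex::scoped_lock guard(m);  // released when guard leaves scope
    ++counter;
}

tbb::mutex real_lock;     // actual mutual exclusion
tbb::null_mutex no_lock;  // "does no operation and simulates success"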
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_null_rw_mutex_H -#define __TBB_null_rw_mutex_H - -#include "tbb_stddef.h" - -namespace tbb { - -//! A rw mutex which does nothing -/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation. - @ingroup synchronization */ -class null_rw_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Represents acquisition of a mutex. - class scoped_lock : internal::no_copy { - public: - scoped_lock() {} - scoped_lock( null_rw_mutex& , bool = true ) {} - ~scoped_lock() {} - void acquire( null_rw_mutex& , bool = true ) {} - bool upgrade_to_writer() { return true; } - bool downgrade_to_reader() { return true; } - bool try_acquire( null_rw_mutex& , bool = true ) { return true; } - void release() {} - }; - - null_rw_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} - -#endif /* __TBB_null_rw_mutex_H */ diff --git a/inst/include/tbb/parallel_do.h b/inst/include/tbb/parallel_do.h deleted file mode 100644 index 8173a9715..000000000 --- a/inst/include/tbb/parallel_do.h +++ /dev/null @@ -1,522 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_do_H -#define __TBB_parallel_do_H - -#include "internal/_range_iterator.h" -#include "task.h" -#include "aligned_space.h" -#include - -namespace tbb { - -//! 
@cond INTERNAL -namespace internal { - template class parallel_do_feeder_impl; - template class do_group_task; - - //! Strips its template type argument from 'cv' and '&' qualifiers - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - // Most of the compilers remove cv-qualifiers from non-reference function argument types. - // But unfortunately there are those that don't. - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; -} // namespace internal -//! @endcond - -//! Class the user supplied algorithm body uses to add new tasks -/** \param Item Work item type **/ -template -class parallel_do_feeder: internal::no_copy -{ - parallel_do_feeder() {} - virtual ~parallel_do_feeder () {} - virtual void internal_add( const Item& item ) = 0; - template friend class internal::parallel_do_feeder_impl; -public: - //! Add a work item to a running parallel_do. - void add( const Item& item ) {internal_add(item);} -}; - -//! @cond INTERNAL -namespace internal { - //! For internal use only. - /** Selects one of the two possible forms of function call member operator. - @ingroup algorithms **/ - template - class parallel_do_operator_selector - { - typedef parallel_do_feeder Feeder; - template - static void internal_call( const Body& obj, A1& arg1, A2&, void (Body::*)(CvItem) const ) { - obj(arg1); - } - template - static void internal_call( const Body& obj, A1& arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder&) const ) { - obj(arg1, arg2); - } - - public: - template - static void call( const Body& obj, A1& arg1, A2& arg2 ) - { - internal_call( obj, arg1, arg2, &Body::operator() ); - } - }; - - //! For internal use only. - /** Executes one iteration of a do. - @ingroup algorithms */ - template - class do_iteration_task: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Item my_value; - feeder_type& my_feeder; - - do_iteration_task( const Item& value, feeder_type& feeder ) : - my_value(value), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, my_value, my_feeder); - return NULL; - } - - template friend class parallel_do_feeder_impl; - }; // class do_iteration_task - - template - class do_iteration_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Iterator my_iter; - feeder_type& my_feeder; - - do_iteration_task_iter( const Iterator& iter, feeder_type& feeder ) : - my_iter(iter), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, *my_iter, my_feeder); - return NULL; - } - - template friend class do_group_task_forward; - template friend class do_group_task_input; - template friend class do_task_iter; - }; // class do_iteration_task_iter - - //! For internal use only. - /** Implements new task adding procedure. 
- @ingroup algorithms **/ - template - class parallel_do_feeder_impl : public parallel_do_feeder - { - /*override*/ - void internal_add( const Item& item ) - { - typedef do_iteration_task iteration_type; - - iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); - - t.spawn( t ); - } - public: - const Body* my_body; - empty_task* my_barrier; - - parallel_do_feeder_impl() - { - my_barrier = new( task::allocate_root() ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } - -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl(tbb::task_group_context &context) - { - my_barrier = new( task::allocate_root(context) ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } -#endif - - ~parallel_do_feeder_impl() - { - my_barrier->destroy(*my_barrier); - } - }; // class parallel_do_feeder_impl - - - //! For internal use only - /** Unpacks a block of iterations. - @ingroup algorithms */ - - template - class do_group_task_forward: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - Iterator my_first; - size_t my_size; - - do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) - : my_feeder(feeder), my_first(first), my_size(size) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_first, my_feeder ); - ++my_first; - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - template friend class do_task_iter; - }; // class do_group_task_forward - - template - class do_group_task_input: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - size_t my_size; - aligned_space my_arg; - - do_group_task_input( feeder_type& feeder ) - : my_feeder(feeder), my_size(0) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder ); - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - ~do_group_task_input(){ - for( size_t k=0; k~Item(); - } - - template friend class do_task_iter; - }; // class do_group_task_input - - //! For internal use only. - /** Gets block of iterations and packages them into a do_group_task. - @ingroup algorithms */ - template - class do_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - public: - do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : - my_first(first), my_last(last), my_feeder(feeder) - {} - - private: - Iterator my_first; - Iterator my_last; - feeder_type& my_feeder; - - /* Do not merge run(xxx) and run_xxx() methods. They are separated in order - to make sure that compilers will eliminate unused argument of type xxx - (that is will not put it on stack). The sole purpose of this argument - is overload resolution. - - An alternative could be using template functions, but explicit specialization - of member function templates is not supported for non specialized class - templates. 
Besides template functions would always fall back to the least - efficient variant (the one for input iterators) in case of iterators having - custom tags derived from basic ones. */ - /*override*/ task* execute() - { - typedef typename std::iterator_traits::iterator_category iterator_tag; - return run( (iterator_tag*)NULL ); - } - - /** This is the most restricted variant that operates on input iterators or - iterators with unknown tags (tags not derived from the standard ones). **/ - inline task* run( void* ) { return run_for_input_iterator(); } - - task* run_for_input_iterator() { - typedef do_group_task_input block_type; - - block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder); - size_t k=0; - while( !(my_first == my_last) ) { - new (t.my_arg.begin() + k) Item(*my_first); - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first == my_last) ) - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.my_size = k; - return &t; - } - } - - inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); } - - task* run_for_forward_iterator() { - typedef do_group_task_forward block_type; - - Iterator first = my_first; - size_t k=0; - while( !(my_first==my_last) ) { - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first==my_last) ) - recycle_to_reexecute(); - break; - } - } - return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder); - } - - inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); } - - task* run_for_random_access_iterator() { - typedef do_group_task_forward block_type; - typedef do_iteration_task_iter iteration_type; - - size_t k = static_cast(my_last-my_first); - if( k > block_type::max_arg_size ) { - Iterator middle = my_first + k/2; - - empty_task& c = *new( allocate_continuation() ) empty_task; - do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder); - recycle_as_child_of(c); - - my_last = middle; - c.set_ref_count(2); - c.spawn(b); - return this; - }else if( k != 0 ) { - task_list list; - task* t; - size_t k1=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_first, my_feeder); - ++my_first; - if( ++k1==k ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - } - return NULL; - } - }; // class do_task_iter - - //! For internal use only. - /** Implements parallel iteration over a range. - @ingroup algorithms */ - template - void run_parallel_do( Iterator first, Iterator last, const Body& body -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif - ) - { - typedef do_task_iter root_iteration_task; -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl feeder(context); -#else - parallel_do_feeder_impl feeder; -#endif - feeder.my_body = &body; - - root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder); - - feeder.my_barrier->set_ref_count(2); - feeder.my_barrier->spawn_and_wait_for_all(t); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. 
- @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. - @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - -} // namespace internal -//! @endcond - - -/** \page parallel_do_body_req Requirements on parallel_do body - Class \c Body implementing the concept of parallel_do body must define: - - \code - B::operator()( - cv_item_type item, - parallel_do_feeder& feeder - ) const - - OR - - B::operator()( cv_item_type& item ) const - \endcode Process item. - May be invoked concurrently for the same \c this but different \c item. - - - \code item_type( const item_type& ) \endcode - Copy a work item. - - \code ~item_type() \endcode Destroy a work item -**/ - -/** \name parallel_do - See also requirements on \ref parallel_do_body_req "parallel_do Body". **/ -//@{ -//! Parallel iteration over a range, with optional addition of more work. -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body ) -{ - if ( first == last ) - return; -#if __TBB_TASK_GROUP_CONTEXT - task_group_context context; -#endif // __TBB_TASK_GROUP_CONTEXT - internal::select_parallel_do( first, last, body, &Body::operator() -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); -} - -template -void parallel_do(Range& rng, const Body& body) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body); -} - -template -void parallel_do(const Range& rng, const Body& body) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over a range, with optional addition of more work and user-supplied context -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context ) -{ - if ( first == last ) - return; - internal::select_parallel_do( first, last, body, &Body::operator(), context ); -} - -template -void parallel_do(Range& rng, const Body& body, task_group_context& context) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context); -} - -template -void parallel_do(const Range& rng, const Body& body, task_group_context& context) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context); -} - -#endif // __TBB_TASK_GROUP_CONTEXT - -//@} - -} // namespace - -#endif /* __TBB_parallel_do_H */ diff --git a/inst/include/tbb/parallel_for.h b/inst/include/tbb/parallel_for.h deleted file mode 100644 index 4dc499cf0..000000000 --- a/inst/include/tbb/parallel_for.h +++ /dev/null @@ -1,373 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
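// (Recapping the parallel_do interface removed above, before the
//  parallel_for.h hunk: a Body is paired with an optional parallel_do_feeder,
//  which lets the body enqueue newly discovered work items while the
//  algorithm runs. A minimal sketch against this classic API; CrawlBody,
//  run, and seeds are illustrative names.)
#include <vector>
#include "tbb/parallel_do.h"

struct CrawlBody {
    // May run concurrently; the feeder lets the body add new work items.
    void operator()(int item, tbb::parallel_do_feeder<int>& feeder) const
    {
        if (item > 0)
            feeder.add(item - 1);  // dynamically extend the workload
    }
};

void run(std::vector<int>& seeds)
{
    tbb::parallel_do(seeds.begin(), seeds.end(), CrawlBody());
}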
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_H -#define __TBB_parallel_for_H - -#include -#include "task.h" -#include "partitioner.h" -#include "blocked_range.h" -#include "tbb_exception.h" - -namespace tbb { - -namespace interface7 { -//! @cond INTERNAL -namespace internal { - - //! allocate right task with new parent - void* allocate_sibling(task* start_for_task, size_t bytes); - - //! Task type used in parallel_for - /** @ingroup algorithms */ - template - class start_for: public task { - Range my_range; - const Body my_body; - typename Partitioner::task_partition_type my_partition; - /*override*/ task* execute(); - - //! Update affinity info, if any. - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - - public: - //! Constructor for root task. - start_for( const Range& range, const Body& body, Partitioner& partitioner ) : - my_range(range), - my_body(body), - my_partition(partitioner) - { - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_for( start_for& parent_, typename Partitioner::split_type& split_obj) : - my_range(parent_.my_range, split_obj), - my_body(parent_.my_body), - my_partition(parent_.my_partition, split_obj) - { - my_partition.set_affinity(*this); - } - //! Construct right child from the given range as response to the demand. - /** parent_ remains left child. Newly constructed object is right child. */ - start_for( start_for& parent_, const Range& r, depth_t d ) : - my_range(r), - my_body(parent_.my_body), - my_partition(parent_.my_partition, split()) - { - my_partition.set_affinity(*this); - my_partition.align_depth( d ); - } - static void run( const Range& range, const Body& body, Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - start_for& a = *new(task::allocate_root()) start_for(range,body,partitioner); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. 
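// (Illustrative aside, not part of the original source: at the call site,
//  the bound context described above means a user may write
//      try {
//          tbb::parallel_for(range, body);   // body may throw
//      } catch (std::exception& e) {
//          // handle error; sibling and enclosing algorithms are unaffected
//      }
//  and handle the exception locally.)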
- task_group_context context; - start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - task::spawn_root_and_wait(a); - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, const Body& body, Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) { - start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); - task::spawn_root_and_wait(a); - } - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - //! Run body for range, serves as callback for partitioner - void run_body( Range &r ) { my_body( r ); } - - //! spawn right task, serves as callback for partitioner - void offer_work(typename Partitioner::split_type& split_obj) { - spawn( *new( allocate_sibling(static_cast(this), sizeof(start_for)) ) start_for(*this, split_obj) ); - } - //! spawn right task, serves as callback for partitioner - void offer_work(const Range& r, depth_t d = 0) { - spawn( *new( allocate_sibling(static_cast(this), sizeof(start_for)) ) start_for(*this, r, d) ); - } - }; - - //! allocate right task with new parent - // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined - inline void* allocate_sibling(task* start_for_task, size_t bytes) { - task* parent_ptr = new( start_for_task->allocate_continuation() ) flag_task(); - start_for_task->set_parent(parent_ptr); - parent_ptr->set_ref_count(2); - return &parent_ptr->allocate_child().allocate(bytes); - } - - //! execute task for parallel_for - template - task* start_for::execute() { - my_partition.check_being_stolen( *this ); - my_partition.execute(*this, my_range); - return NULL; - } -} // namespace internal -//! @endcond -} // namespace interfaceX - -//! @cond INTERNAL -namespace internal { - using interface7::internal::start_for; - - //! Calls the function with values from range [begin, end) with a step provided - template - class parallel_for_body : internal::no_assign { - const Function &my_func; - const Index my_begin; - const Index my_step; - public: - parallel_for_body( const Function& _func, Index& _begin, Index& _step ) - : my_func(_func), my_begin(_begin), my_step(_step) {} - - void operator()( const tbb::blocked_range& r ) const { - // A set of local variables to help the compiler with vectorization of the following loop. - Index b = r.begin(); - Index e = r.end(); - Index ms = my_step; - Index k = my_begin + b*ms; - -#if __INTEL_COMPILER -#pragma ivdep -#if __TBB_ASSERT_ON_VECTORIZATION_FAILURE -#pragma vector always assert -#endif -#endif - for ( Index i = b; i < e; ++i, k += ms ) { - my_func( k ); - } - } - }; -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_for_body_req Requirements on parallel_for body - Class \c Body implementing the concept of parallel_for body must define: - - \code Body::Body( const Body& ); \endcode Copy constructor - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ) const; \endcode Function call operator applying the body to range \c r. -**/ - -/** \name parallel_for - See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/ -//@{ - -//! Parallel iteration over range with default partitioner. 
-/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body ) { - internal::start_for::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} - -//! Parallel iteration over range with simple partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -//! Parallel iteration over range with auto_partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -//! Parallel iteration over range with affinity_partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over range with default partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, task_group_context& context ) { - internal::start_for::run(range, body, __TBB_DEFAULT_PARTITIONER(), context); -} - -//! Parallel iteration over range with simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range, body, partitioner, context); -} - -//! Parallel iteration over range with auto_partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range, body, partitioner, context); -} - -//! Parallel iteration over range with affinity_partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range,body,partitioner, context); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -namespace strict_ppl { - -//@{ -//! Implementation of parallel iteration over stepped range of integers with explicit step and partitioner -template -void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner) { - if (step <= 0 ) - internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument - else if (last > first) { - // Above "else" avoids "potential divide by zero" warning on some platforms - Index end = (last - first - Index(1)) / step + Index(1); - tbb::blocked_range range(static_cast(0), end); - internal::parallel_for_body body(f, first, step); - tbb::parallel_for(range, body, partitioner); - } -} - -//! Parallel iteration over a range of integers with a step provided and default partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f) { - parallel_for_impl(first, last, step, f, auto_partitioner()); -} -//! Parallel iteration over a range of integers with a step provided and simple partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner) { - parallel_for_impl(first, last, step, f, partitioner); -} -//! 
Parallel iteration over a range of integers with a step provided and auto partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner) { - parallel_for_impl(first, last, step, f, partitioner); -} -//! Parallel iteration over a range of integers with a step provided and affinity partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner) { - parallel_for_impl(first, last, step, f, partitioner); -} - -//! Parallel iteration over a range of integers with a default step value and default partitioner -template -void parallel_for(Index first, Index last, const Function& f) { - parallel_for_impl(first, last, static_cast(1), f, auto_partitioner()); -} -//! Parallel iteration over a range of integers with a default step value and simple partitioner -template -void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner) { - parallel_for_impl(first, last, static_cast(1), f, partitioner); -} -//! Parallel iteration over a range of integers with a default step value and auto partitioner -template -void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner) { - parallel_for_impl(first, last, static_cast(1), f, partitioner); -} -//! Parallel iteration over a range of integers with a default step value and affinity partitioner -template -void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner) { - parallel_for_impl(first, last, static_cast(1), f, partitioner); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Implementation of parallel iteration over stepped range of integers with explicit step, task group context, and partitioner -template -void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner, tbb::task_group_context &context) { - if (step <= 0 ) - internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument - else if (last > first) { - // Above "else" avoids "potential divide by zero" warning on some platforms - Index end = (last - first - Index(1)) / step + Index(1); - tbb::blocked_range range(static_cast(0), end); - internal::parallel_for_body body(f, first, step); - tbb::parallel_for(range, body, partitioner, context); - } -} - -//! Parallel iteration over a range of integers with explicit step, task group context, and default partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) { - parallel_for_impl(first, last, step, f, auto_partitioner(), context); -} -//! Parallel iteration over a range of integers with explicit step, task group context, and simple partitioner - template -void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, step, f, partitioner, context); -} -//! Parallel iteration over a range of integers with explicit step, task group context, and auto partitioner - template -void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, step, f, partitioner, context); -} -//! 
Parallel iteration over a range of integers with explicit step, task group context, and affinity partitioner - template -void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, step, f, partitioner, context); -} - - -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and default partitioner -template -void parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, auto_partitioner(), context); -} -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and simple partitioner - template -void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, partitioner, context); -} -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and auto partitioner - template -void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, partitioner, context); -} -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and affinity_partitioner - template -void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, partitioner, context); -} - -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -} // namespace strict_ppl - -using strict_ppl::parallel_for; - -} // namespace tbb - -#if TBB_PREVIEW_SERIAL_SUBSET -#define __TBB_NORMAL_EXECUTION -#include "../serial/tbb/parallel_for.h" -#undef __TBB_NORMAL_EXECUTION -#endif - -#endif /* __TBB_parallel_for_H */ diff --git a/inst/include/tbb/parallel_for_each.h b/inst/include/tbb/parallel_for_each.h deleted file mode 100644 index c7dc39f4d..000000000 --- a/inst/include/tbb/parallel_for_each.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
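// (Summing up the parallel_for.h interface removed above: the range form
//  takes a splittable Range plus a Body, while the strict_ppl overloads take
//  integer bounds plus a function. A minimal sketch against this classic API;
//  Scale, scaleAll, data, and n are illustrative names.)
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

struct Scale {
    double* data;
    void operator()(const tbb::blocked_range<size_t>& r) const
    {
        for (size_t i = r.begin(); i != r.end(); ++i)
            data[i] *= 2.0;  // body is applied to each subrange
    }
};

void scaleAll(double* data, size_t n)
{
    Scale body = { data };
    // The library recursively splits [0, n) across worker threads.
    tbb::parallel_for(tbb::blocked_range<size_t>(0, n), body);
}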
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_each_H -#define __TBB_parallel_for_each_H - -#include "parallel_do.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - // The class calls user function in operator() - template - class parallel_for_each_body : internal::no_assign { - const Function &my_func; - public: - parallel_for_each_body(const Function &_func) : my_func(_func) {} - parallel_for_each_body(const parallel_for_each_body &_caller) : my_func(_caller.my_func) {} - - void operator() ( typename std::iterator_traits::reference value ) const { - my_func(value); - } - }; -} // namespace internal -//! @endcond - -/** \name parallel_for_each - **/ -//@{ -//! Calls function f for all items from [first, last) interval using user-supplied context -/** @ingroup algorithms */ -#if __TBB_TASK_GROUP_CONTEXT -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f, task_group_context &context) { - internal::parallel_for_each_body body(f); - tbb::parallel_do (first, last, body, context); -} - -//! Calls function f for all items from rng using user-supplied context -/** @ingroup algorithms */ -template -void parallel_for_each(Range& rng, const Function& f, task_group_context& context) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context); -} - -//! Calls function f for all items from const rng user-supplied context -/** @ingroup algorithms */ -template -void parallel_for_each(const Range& rng, const Function& f, task_group_context& context) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Uses default context -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f) { - internal::parallel_for_each_body body(f); - tbb::parallel_do (first, last, body); -} - -//! Uses default context -template -void parallel_for_each(Range& rng, const Function& f) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f); -} - -//! Uses default context -template -void parallel_for_each(const Range& rng, const Function& f) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f); -} - -//@} - -} // namespace - -#endif /* __TBB_parallel_for_each_H */ diff --git a/inst/include/tbb/parallel_invoke.h b/inst/include/tbb/parallel_invoke.h deleted file mode 100644 index b0d79da2c..000000000 --- a/inst/include/tbb/parallel_invoke.h +++ /dev/null @@ -1,456 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
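// (parallel_for_each, whose removal begins above, is a thin wrapper that
//  forwards each dereferenced iterator value to parallel_do. A minimal
//  sketch against this classic API; Square and squareAll are illustrative
//  names, not part of the sources.)
#include <vector>
#include "tbb/parallel_for_each.h"

struct Square {
    void operator()(double& x) const { x *= x; }  // invoked once per element
};

void squareAll(std::vector<double>& v)
{
    tbb::parallel_for_each(v.begin(), v.end(), Square());
}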
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_invoke_H -#define __TBB_parallel_invoke_H - -#include "task.h" - -#if __TBB_VARIADIC_PARALLEL_INVOKE - #include -#endif - -namespace tbb { - -#if !__TBB_TASK_GROUP_CONTEXT - /** Dummy to avoid cluttering the bulk of the header with enormous amount of ifdefs. **/ - struct task_group_context {}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! @cond INTERNAL -namespace internal { - // Simple task object, executing user method - template - class function_invoker : public task{ - public: - function_invoker(const function& _function) : my_function(_function) {} - private: - const function &my_function; - /*override*/ - task* execute() - { - my_function(); - return NULL; - } - }; - - // The class spawns two or three child tasks - template - class spawner : public task { - private: - const function1& my_func1; - const function2& my_func2; - const function3& my_func3; - bool is_recycled; - - task* execute (){ - if(is_recycled){ - return NULL; - }else{ - __TBB_ASSERT(N==2 || N==3, "Number of arguments passed to spawner is wrong"); - set_ref_count(N); - recycle_as_safe_continuation(); - internal::function_invoker* invoker2 = new (allocate_child()) internal::function_invoker(my_func2); - __TBB_ASSERT(invoker2, "Child task allocation failed"); - spawn(*invoker2); - size_t n = N; // To prevent compiler warnings - if (n>2) { - internal::function_invoker* invoker3 = new (allocate_child()) internal::function_invoker(my_func3); - __TBB_ASSERT(invoker3, "Child task allocation failed"); - spawn(*invoker3); - } - my_func1(); - is_recycled = true; - return NULL; - } - } // execute - - public: - spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {} - }; - - // Creates and spawns child tasks - class parallel_invoke_helper : public empty_task { - public: - // Dummy functor class - class parallel_invoke_noop { - public: - void operator() () const {} - }; - // Creates a helper object with user-defined number of children expected - parallel_invoke_helper(int number_of_children) - { - set_ref_count(number_of_children + 1); - } - -#if __TBB_VARIADIC_PARALLEL_INVOKE - void add_children() {} - void add_children(tbb::task_group_context&) {} - - template - void add_children(function&& _func) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(std::forward(_func)); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn(*invoker); - } - - template - void add_children(function&& _func, tbb::task_group_context&) - { - add_children(std::forward(_func)); - } - - // Adds child(ren) task(s) and spawns them - template - void add_children(function1&& _func1, function2&& _func2, function&&... _func) - { - // The third argument is dummy, it is ignored actually. 
- parallel_invoke_noop noop; - typedef internal::spawner<2, function1, function2, parallel_invoke_noop> spawner_type; - spawner_type & sub_root = *new(allocate_child()) spawner_type(std::forward(_func1), std::forward(_func2), noop); - spawn(sub_root); - add_children(std::forward(_func)...); - } -#else - // Adds child task and spawns it - template - void add_children (const function &_func) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(_func); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn(*invoker); - } - - // Adds a task with multiple child tasks and spawns it - // two arguments - template - void add_children (const function1& _func1, const function2& _func2) - { - // The third argument is dummy, it is ignored actually. - parallel_invoke_noop noop; - internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop); - spawn(sub_root); - } - // three arguments - template - void add_children (const function1& _func1, const function2& _func2, const function3& _func3) - { - internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3); - spawn(sub_root); - } -#endif // __TBB_VARIADIC_PARALLEL_INVOKE - - // Waits for all child tasks - template - void run_and_finish(const F0& f0) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(f0); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn_and_wait_for_all(*invoker); - } - }; - // The class destroys root if exception occurred as well as in normal case - class parallel_invoke_cleaner: internal::no_copy { - public: -#if __TBB_TASK_GROUP_CONTEXT - parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context) - : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children)) -#else - parallel_invoke_cleaner(int number_of_children, tbb::task_group_context&) - : root(*new(task::allocate_root()) internal::parallel_invoke_helper(number_of_children)) -#endif /* !__TBB_TASK_GROUP_CONTEXT */ - {} - - ~parallel_invoke_cleaner(){ - root.destroy(root); - } - internal::parallel_invoke_helper& root; - }; - -#if __TBB_VARIADIC_PARALLEL_INVOKE -// Determine whether the last parameter in a pack is task_group_context - template struct impl_selector; // to workaround a GCC bug - - template struct impl_selector { - typedef typename impl_selector::type type; - }; - - template struct impl_selector { - typedef false_type type; - }; - template<> struct impl_selector { - typedef true_type type; - }; - - // Select task_group_context parameter from the back of a pack - task_group_context& get_context( task_group_context& tgc ) { return tgc; } - - template - task_group_context& get_context( T1&& /*ignored*/, T&&... t ) - { return get_context( std::forward(t)... ); } - - // task_group_context is known to be at the back of the parameter pack - template - void parallel_invoke_impl(true_type, F0&& f0, F1&& f1, F&&... 
f) { - __TBB_STATIC_ASSERT(sizeof...(F)>0, "Variadic parallel_invoke implementation broken?"); - // # of child tasks: f0, f1, and a task for each two elements of the pack except the last - const size_t number_of_children = 2 + sizeof...(F)/2; - parallel_invoke_cleaner cleaner(number_of_children, get_context(std::forward(f)...)); - parallel_invoke_helper& root = cleaner.root; - - root.add_children(std::forward(f)...); - root.add_children(std::forward(f1)); - root.run_and_finish(std::forward(f0)); - } - - // task_group_context is not in the pack, needs to be added - template - void parallel_invoke_impl(false_type, F0&& f0, F1&& f1, F&&... f) { - tbb::task_group_context context; - // Add context to the arguments, and redirect to the other overload - parallel_invoke_impl(true_type(), std::forward(f0), std::forward(f1), std::forward(f)..., context); - } -#endif -} // namespace internal -//! @endcond - -/** \name parallel_invoke - **/ -//@{ -//! Executes a list of tasks in parallel and waits for all tasks to complete. -/** @ingroup algorithms */ - -#if __TBB_VARIADIC_PARALLEL_INVOKE - -// parallel_invoke for two or more arguments via variadic templates -// presence of task_group_context is defined automatically -template -void parallel_invoke(F0&& f0, F1&& f1, F&&... f) { - typedef typename internal::impl_selector::type selector_type; - internal::parallel_invoke_impl(selector_type(), std::forward(f0), std::forward(f1), std::forward(f)...); -} - -#else - -// parallel_invoke with user-defined context -// two arguments -template -void parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) { - internal::parallel_invoke_cleaner cleaner(2, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f1); - - root.run_and_finish(f0); -} - -// three arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) { - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f2); - root.add_children(f1); - - root.run_and_finish(f0); -} - -// four arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f3); - root.add_children(f2); - root.add_children(f1); - - root.run_and_finish(f0); -} - -// five arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// six arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f5, f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// seven arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = 
cleaner.root; - - root.add_children(f6, f5, f4); - root.add_children(f3, f2, f1); - - root.run_and_finish(f0); -} - -// eight arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f7, f6, f5); - root.add_children(f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// nine arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f8, f7, f6); - root.add_children(f5, f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// ten arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f9, f8, f7); - root.add_children(f6, f5, f4); - root.add_children(f3, f2, f1); - - root.run_and_finish(f0); -} - -// two arguments -template -void parallel_invoke(const F0& f0, const F1& f1) { - task_group_context context; - parallel_invoke(f0, f1, context); -} -// three arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) { - task_group_context context; - parallel_invoke(f0, f1, f2, context); -} -// four arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, context); -} -// five arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, context); -} -// six arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, context); -} -// seven arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, context); -} -// eight arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, context); -} -// nine arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, context); -} -// ten arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context); -} -#endif // 
__TBB_VARIADIC_PARALLEL_INVOKE -//@} - -} // namespace - -#endif /* __TBB_parallel_invoke_H */ diff --git a/inst/include/tbb/parallel_reduce.h b/inst/include/tbb/parallel_reduce.h deleted file mode 100644 index 17fba2019..000000000 --- a/inst/include/tbb/parallel_reduce.h +++ /dev/null @@ -1,533 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_reduce_H -#define __TBB_parallel_reduce_H - -#include -#include "task.h" -#include "aligned_space.h" -#include "partitioner.h" -#include "tbb_profiling.h" - -namespace tbb { - -namespace interface7 { -//! @cond INTERNAL -namespace internal { - - using namespace tbb::internal; - - /** Values for reduction_context. */ - enum { - root_task, left_child, right_child - }; - - /** Represented as a char, not enum, for compactness. */ - typedef char reduction_context; - - //! Task type used to combine the partial results of parallel_reduce. - /** @ingroup algorithms */ - template - class finish_reduce: public flag_task { - //! Pointer to body, or NULL if the left child has not yet finished. - bool has_right_zombie; - const reduction_context my_context; - Body* my_body; - aligned_space zombie_space; - finish_reduce( reduction_context context_ ) : - has_right_zombie(false), // TODO: substitute by flag_task::child_stolen? - my_context(context_), - my_body(NULL) - { - } - ~finish_reduce() { - if( has_right_zombie ) - zombie_space.begin()->~Body(); - } - task* execute() { - if( has_right_zombie ) { - // Right child was stolen. - Body* s = zombie_space.begin(); - my_body->join( *s ); - // Body::join() won't be called if canceled. Defer destruction to destructor - } - if( my_context==left_child ) - itt_store_word_with_release( static_cast(parent())->my_body, my_body ); - return NULL; - } - template - friend class start_reduce; - }; - - //! allocate right task with new parent - void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes); - - //! Task type used to split the work of parallel_reduce. 
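
A usage sketch for the parallel_invoke overloads whose removal ends above: each functor argument runs as its own task, and a trailing task_group_context enables group-wide cancellation. The function pointers are illustrative:

    #include "tbb/parallel_invoke.h"

    void run_three(void (*f)(), void (*g)(), void (*h)()) {
        // Executes f, g, and h in parallel, then waits for all three.
        tbb::parallel_invoke(f, g, h);
    }

    void run_two_cancellable(void (*f)(), void (*g)()) {
        tbb::task_group_context ctx;      // allows cancelling the whole group
        tbb::parallel_invoke(f, g, ctx);  // context-taking overload above
    }
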
- /** @ingroup algorithms */ - template - class start_reduce: public task { - typedef finish_reduce finish_type; - Body* my_body; - Range my_range; - typename Partitioner::task_partition_type my_partition; - reduction_context my_context; - /*override*/ task* execute(); - //! Update affinity info, if any - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - template - friend class finish_reduce; - -public: - //! Constructor used for root task - start_reduce( const Range& range, Body* body, Partitioner& partitioner ) : - my_body(body), - my_range(range), - my_partition(partitioner), - my_context(root_task) - { - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj ) : - my_body(parent_.my_body), - my_range(parent_.my_range, split_obj), - my_partition(parent_.my_partition, split_obj), - my_context(right_child) - { - my_partition.set_affinity(*this); - parent_.my_context = left_child; - } - //! Construct right child from the given range as response to the demand. - /** parent_ remains left child. Newly constructed object is right child. */ - start_reduce( start_reduce& parent_, const Range& r, depth_t d ) : - my_body(parent_.my_body), - my_range(r), - my_partition(parent_.my_partition, split()), - my_context(right_child) - { - my_partition.set_affinity(*this); - my_partition.align_depth( d ); // TODO: move into constructor of partitioner - parent_.my_context = left_child; - } - static void run( const Range& range, Body& body, Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) ); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - //! Run body for range - void run_body( Range &r ) { (*my_body)( r ); } - - //! spawn right task, serves as callback for partitioner - // TODO: remove code duplication from 'offer_work' methods - void offer_work(typename Partitioner::split_type& split_obj) { - task *tasks[2]; - allocate_sibling(static_cast(this), tasks, sizeof(start_reduce), sizeof(finish_type)); - new((void*)tasks[0]) finish_type(my_context); - new((void*)tasks[1]) start_reduce(*this, split_obj); - spawn(*tasks[1]); - } - //! spawn right task, serves as callback for partitioner - void offer_work(const Range& r, depth_t d = 0) { - task *tasks[2]; - allocate_sibling(static_cast(this), tasks, sizeof(start_reduce), sizeof(finish_type)); - new((void*)tasks[0]) finish_type(my_context); - new((void*)tasks[1]) start_reduce(*this, r, d); - spawn(*tasks[1]); - } - }; - - //! 
allocate right task with new parent - // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined - inline void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes) { - tasks[0] = &start_reduce_task->allocate_continuation().allocate(finish_bytes); - start_reduce_task->set_parent(tasks[0]); - tasks[0]->set_ref_count(2); - tasks[1] = &tasks[0]->allocate_child().allocate(start_bytes); - } - - template - task* start_reduce::execute() { - my_partition.check_being_stolen( *this ); - if( my_context==right_child ) { - finish_type* parent_ptr = static_cast(parent()); - if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2??? - my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split()); - parent_ptr->has_right_zombie = true; - } - } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling - my_partition.execute(*this, my_range); - if( my_context==left_child ) { - finish_type* parent_ptr = static_cast(parent()); - __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL); - itt_store_word_with_release(parent_ptr->my_body, my_body ); - } - return NULL; - } - - //! Task type used to combine the partial results of parallel_deterministic_reduce. - /** @ingroup algorithms */ - template - class finish_deterministic_reduce: public task { - Body &my_left_body; - Body my_right_body; - - finish_deterministic_reduce( Body &body ) : - my_left_body( body ), - my_right_body( body, split() ) - { - } - task* execute() { - my_left_body.join( my_right_body ); - return NULL; - } - template - friend class start_deterministic_reduce; - }; - - //! Task type used to split the work of parallel_deterministic_reduce. - /** @ingroup algorithms */ - template - class start_deterministic_reduce: public task { - typedef finish_deterministic_reduce finish_type; - Body &my_body; - Range my_range; - /*override*/ task* execute(); - - //! Constructor used for root task - start_deterministic_reduce( const Range& range, Body& body ) : - my_body( body ), - my_range( range ) - { - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c ) : - my_body( c.my_right_body ), - my_range( parent_.my_range, split() ) - { - } - -public: - static void run( const Range& range, Body& body ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body) ); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. 
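
For orientation, a minimal Body satisfying the reduction concept that start_reduce and finish_reduce drive; the formal requirements are documented further below in this header. SumBody and parallel_sum are illustrative names:

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    struct SumBody {
        const double* data;
        double sum;
        SumBody(const double* d) : data(d), sum(0) {}
        // Splitting constructor: must be able to run concurrently
        // with operator() and join() of the original body.
        SumBody(SumBody& other, tbb::split) : data(other.data), sum(0) {}
        void operator()(const tbb::blocked_range<size_t>& r) {
            for (size_t i = r.begin(); i != r.end(); ++i) sum += data[i];
        }
        void join(SumBody& rhs) { sum += rhs.sum; }  // merge partial result
    };

    double parallel_sum(const double* d, size_t n) {
        SumBody body(d);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, n), body);
        return body.sum;
    }
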
- task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) ); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, task_group_context& context ) { - if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - - template - task* start_deterministic_reduce::execute() { - if( !my_range.is_divisible() ) { - my_body( my_range ); - return NULL; - } else { - finish_type& c = *new( allocate_continuation() ) finish_type( my_body ); - recycle_as_child_of(c); - c.set_ref_count(2); - start_deterministic_reduce& b = *new( c.allocate_child() ) start_deterministic_reduce( *this, c ); - task::spawn(b); - return this; - } - } -} // namespace internal -//! @endcond -} //namespace interfaceX - -//! @cond INTERNAL -namespace internal { - using interface7::internal::start_reduce; - using interface7::internal::start_deterministic_reduce; - //! Auxiliary class for parallel_reduce; for internal use only. - /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body" - using given \ref parallel_reduce_lambda_req "anonymous function objects". - **/ - /** @ingroup algorithms */ - template - class lambda_reduce_body { - -//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced -// (might require some performance measurements) - - const Value& identity_element; - const RealBody& my_real_body; - const Reduction& my_reduction; - Value my_value; - lambda_reduce_body& operator= ( const lambda_reduce_body& other ); - public: - lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction ) - : identity_element(identity) - , my_real_body(body) - , my_reduction(reduction) - , my_value(identity) - { } - lambda_reduce_body( const lambda_reduce_body& other ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.my_value) - { } - lambda_reduce_body( lambda_reduce_body& other, tbb::split ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.identity_element) - { } - void operator()(Range& range) { - my_value = my_real_body(range, const_cast(my_value)); - } - void join( lambda_reduce_body& rhs ) { - my_value = my_reduction(const_cast(my_value), const_cast(rhs.my_value)); - } - Value result() const { - return my_value; - } - }; - -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_reduce_body_req Requirements on parallel_reduce body - Class \c Body implementing the concept of parallel_reduce body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Must be able to run concurrently with operator() and method \c join - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r - and accumulating the result - - \code void Body::join( Body& b ); \endcode Join results. 
- The result in \c b should be merged into the result of \c this -**/ - -/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions) - TO BE DOCUMENTED -**/ - -/** \name parallel_reduce - See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/ -//@{ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body ) { - internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER() ); -} - -//! Parallel iteration with reduction and simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/** parallel_reduce overloads that work with anonymous function objects - (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> - ::run(range, body, __TBB_DEFAULT_PARTITIONER() ); - return body.result(); -} - -//! Parallel iteration with reduction and simple_partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run(range, body, partitioner ); - return body.result(); -} - -//! 
Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Parallel iteration with deterministic reduction and default partitioner. -/** @ingroup algorithms **/ -template -void parallel_deterministic_reduce( const Range& range, Body& body ) { - internal::start_deterministic_reduce::run( range, body ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) { - internal::start_deterministic_reduce::run( range, body, context ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/** parallel_reduce overloads that work with anonymous function objects - (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ - -//! Parallel iteration with deterministic reduction and default partitioner. 
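
The functional-form overloads above take an identity value, a range body, and a reduction, avoiding a hand-written Body class; a minimal sketch:

    #include <functional>   // std::plus
    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    double parallel_sum(const double* d, size_t n) {
        return tbb::parallel_reduce(
            tbb::blocked_range<size_t>(0, n),
            0.0,                                   // identity
            [=](const tbb::blocked_range<size_t>& r, double acc) {
                for (size_t i = r.begin(); i != r.end(); ++i) acc += d[i];
                return acc;                        // RealBody
            },
            std::plus<double>());                  // Reduction
    }
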
-/** @ingroup algorithms **/ -template -Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_deterministic_reduce > - ::run(range, body); - return body.result(); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_deterministic_reduce > - ::run( range, body, context ); - return body.result(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_reduce_H */ diff --git a/inst/include/tbb/parallel_scan.h b/inst/include/tbb/parallel_scan.h deleted file mode 100644 index e9d8c692e..000000000 --- a/inst/include/tbb/parallel_scan.h +++ /dev/null @@ -1,346 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_scan_H -#define __TBB_parallel_scan_H - -#include "task.h" -#include "aligned_space.h" -#include -#include "partitioner.h" - -namespace tbb { - -//! Used to indicate that the initial scan is being performed. -/** @ingroup algorithms */ -struct pre_scan_tag { - static bool is_final_scan() {return false;} -}; - -//! Used to indicate that the final scan is being performed. -/** @ingroup algorithms */ -struct final_scan_tag { - static bool is_final_scan() {return true;} -}; - -//! @cond INTERNAL -namespace internal { - - //! Performs final scan for a leaf - /** @ingroup algorithms */ - template - class final_sum: public task { - public: - Body my_body; - private: - aligned_space my_range; - //! Where to put result of last subrange, or NULL if not last subrange. 
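
The parallel_deterministic_reduce overloads deleted just above always split the range and join partial results in the same order, trading some speed for reproducible (for instance, bitwise-stable floating-point) results; a sketch of the functional form:

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    double reproducible_sum(const double* d, size_t n) {
        // Same split/join tree on every run, so the result is stable.
        return tbb::parallel_deterministic_reduce(
            tbb::blocked_range<size_t>(0, n), 0.0,
            [=](const tbb::blocked_range<size_t>& r, double acc) {
                for (size_t i = r.begin(); i != r.end(); ++i) acc += d[i];
                return acc;
            },
            [](double a, double b) { return a + b; });
    }
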
- Body* my_stuff_last; - public: - final_sum( Body& body_ ) : - my_body(body_,split()) - { - poison_pointer(my_stuff_last); - } - ~final_sum() { - my_range.begin()->~Range(); - } - void finish_construction( const Range& range_, Body* stuff_last_ ) { - new( my_range.begin() ) Range(range_); - my_stuff_last = stuff_last_; - } - private: - /*override*/ task* execute() { - my_body( *my_range.begin(), final_scan_tag() ); - if( my_stuff_last ) - my_stuff_last->assign(my_body); - return NULL; - } - }; - - //! Split work to be done in the scan. - /** @ingroup algorithms */ - template - class sum_node: public task { - typedef final_sum final_sum_type; - public: - final_sum_type *my_incoming; - final_sum_type *my_body; - Body *my_stuff_last; - private: - final_sum_type *my_left_sum; - sum_node *my_left; - sum_node *my_right; - bool my_left_is_final; - Range my_range; - sum_node( const Range range_, bool left_is_final_ ) : - my_left_sum(NULL), - my_left(NULL), - my_right(NULL), - my_left_is_final(left_is_final_), - my_range(range_) - { - // Poison fields that will be set by second pass. - poison_pointer(my_body); - poison_pointer(my_incoming); - } - task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) { - if( !n ) { - f.recycle_as_child_of( *this ); - f.finish_construction( range_, stuff_last_ ); - return &f; - } else { - n->my_body = &f; - n->my_incoming = incoming_; - n->my_stuff_last = stuff_last_; - return n; - } - } - /*override*/ task* execute() { - if( my_body ) { - if( my_incoming ) - my_left_sum->my_body.reverse_join( my_incoming->my_body ); - recycle_as_continuation(); - sum_node& c = *this; - task* b = c.create_child(Range(my_range,split()),*my_left_sum,my_right,my_left_sum,my_stuff_last); - task* a = my_left_is_final ? NULL : c.create_child(my_range,*my_body,my_left,my_incoming,NULL); - set_ref_count( (a!=NULL)+(b!=NULL) ); - my_body = NULL; - if( a ) spawn(*b); - else a = b; - return a; - } else { - return NULL; - } - } - template - friend class start_scan; - - template - friend class finish_scan; - }; - - //! Combine partial results - /** @ingroup algorithms */ - template - class finish_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type** const my_sum; - sum_node_type*& my_return_slot; - public: - final_sum_type* my_right_zombie; - sum_node_type& my_result; - - /*override*/ task* execute() { - __TBB_ASSERT( my_result.ref_count()==(my_result.my_left!=NULL)+(my_result.my_right!=NULL), NULL ); - if( my_result.my_left ) - my_result.my_left_is_final = false; - if( my_right_zombie && my_sum ) - ((*my_sum)->my_body).reverse_join(my_result.my_left_sum->my_body); - __TBB_ASSERT( !my_return_slot, NULL ); - if( my_right_zombie || my_result.my_right ) { - my_return_slot = &my_result; - } else { - destroy( my_result ); - } - if( my_right_zombie && !my_sum && !my_result.my_right ) { - destroy(*my_right_zombie); - my_right_zombie = NULL; - } - return NULL; - } - - finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : - my_sum(sum_), - my_return_slot(return_slot_), - my_right_zombie(NULL), - my_result(result_) - { - __TBB_ASSERT( !my_return_slot, NULL ); - } - }; - - //! Initial task to split the work - /** @ingroup algorithms */ - template - class start_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type* my_body; - /** Non-null if caller is requesting total. 
*/ - final_sum_type** my_sum; - sum_node_type** my_return_slot; - /** Null if computing root. */ - sum_node_type* my_parent_sum; - bool my_is_final; - bool my_is_right_child; - Range my_range; - typename Partitioner::partition_type my_partition; - /*override*/ task* execute(); - public: - start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) : - my_body(parent_.my_body), - my_sum(parent_.my_sum), - my_return_slot(&return_slot_), - my_parent_sum(parent_sum_), - my_is_final(parent_.my_is_final), - my_is_right_child(false), - my_range(parent_.my_range,split()), - my_partition(parent_.my_partition,split()) - { - __TBB_ASSERT( !*my_return_slot, NULL ); - } - - start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) : - my_body(&body_), - my_sum(NULL), - my_return_slot(&return_slot_), - my_parent_sum(NULL), - my_is_final(true), - my_is_right_child(false), - my_range(range_), - my_partition(partitioner_) - { - __TBB_ASSERT( !*my_return_slot, NULL ); - } - - static void run( const Range& range_, Body& body_, const Partitioner& partitioner_ ) { - if( !range_.empty() ) { - typedef internal::start_scan start_pass1_type; - internal::sum_node* root = NULL; - typedef internal::final_sum final_sum_type; - final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body_ ); - start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type( - /*my_return_slot=*/root, - range_, - *temp_body, - partitioner_ ); - task::spawn_root_and_wait( pass1 ); - if( root ) { - root->my_body = temp_body; - root->my_incoming = NULL; - root->my_stuff_last = &body_; - task::spawn_root_and_wait( *root ); - } else { - body_.assign(temp_body->my_body); - temp_body->finish_construction( range_, NULL ); - temp_body->destroy(*temp_body); - } - } - } - }; - - template - task* start_scan::execute() { - typedef internal::finish_scan finish_pass1_type; - finish_pass1_type* p = my_parent_sum ? static_cast( parent() ) : NULL; - // Inspecting p->result.left_sum would ordinarily be a race condition. - // But we inspect it only if we are not a stolen task, in which case we - // know that task assigning to p->result.left_sum has completed. - bool treat_as_stolen = my_is_right_child && (is_stolen_task() || my_body!=p->my_result.my_left_sum); - if( treat_as_stolen ) { - // Invocation is for right child that has been really stolen or needs to be virtually stolen - p->my_right_zombie = my_body = new( allocate_root() ) final_sum_type(my_body->my_body); - my_is_final = false; - } - task* next_task = NULL; - if( (my_is_right_child && !treat_as_stolen) || !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { - if( my_is_final ) - (my_body->my_body)( my_range, final_scan_tag() ); - else if( my_sum ) - (my_body->my_body)( my_range, pre_scan_tag() ); - if( my_sum ) - *my_sum = my_body; - __TBB_ASSERT( !*my_return_slot, NULL ); - } else { - sum_node_type* result; - if( my_parent_sum ) - result = new(allocate_additional_child_of(*my_parent_sum)) sum_node_type(my_range,/*my_left_is_final=*/my_is_final); - else - result = new(task::allocate_root()) sum_node_type(my_range,/*my_left_is_final=*/my_is_final); - finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*my_return_slot,my_sum,*result); - // Split off right child - start_scan& b = *new( c.allocate_child() ) start_scan( /*my_return_slot=*/result->my_right, *this, result ); - b.my_is_right_child = true; - // Left child is recycling of *this. 
Must recycle this before spawning b, - // otherwise b might complete and decrement c.ref_count() to zero, which - // would cause c.execute() to run prematurely. - recycle_as_child_of(c); - c.set_ref_count(2); - c.spawn(b); - my_sum = &result->my_left_sum; - my_return_slot = &result->my_left; - my_is_right_child = false; - next_task = this; - my_parent_sum = result; - __TBB_ASSERT( !*my_return_slot, NULL ); - } - return next_task; - } -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_scan_body_req Requirements on parallel_scan body - Class \c Body implementing the concept of parallel_scan body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Split \c b so that \c this and \c b can accumulate separately - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode - Preprocess iterations for range \c r - - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode - Do final processing for iterations of range \c r - - \code void Body::reverse_join( Body& a ); \endcode - Merge preprocessing state of \c a into \c this, where \c a was - created earlier from \c b by b's splitting constructor -**/ - -/** \name parallel_scan - See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". **/ -//@{ - -//! Parallel prefix with default partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body ) { - internal::start_scan::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} - -//! Parallel prefix with simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} - -//! Parallel prefix with auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_scan_H */ - diff --git a/inst/include/tbb/parallel_sort.h b/inst/include/tbb/parallel_sort.h deleted file mode 100644 index 1d33c1f70..000000000 --- a/inst/include/tbb/parallel_sort.h +++ /dev/null @@ -1,253 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
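
A sketch of the two-pass parallel_scan Body contract removed above, computing a running (prefix) sum; the pre-scan pass only accumulates, while the final pass also writes output. ScanBody and prefix_sum are illustrative names:

    #include "tbb/parallel_scan.h"
    #include "tbb/blocked_range.h"

    struct ScanBody {
        const double* in;
        double* out;
        double sum;
        ScanBody(const double* i, double* o) : in(i), out(o), sum(0) {}
        ScanBody(ScanBody& b, tbb::split) : in(b.in), out(b.out), sum(0) {}
        template <typename Tag>   // pre_scan_tag or final_scan_tag
        void operator()(const tbb::blocked_range<size_t>& r, Tag) {
            double s = sum;
            for (size_t i = r.begin(); i != r.end(); ++i) {
                s += in[i];
                if (Tag::is_final_scan()) out[i] = s;  // write on final pass only
            }
            sum = s;
        }
        void reverse_join(ScanBody& a) { sum = a.sum + sum; }  // prepend left sum
        void assign(ScanBody& b) { sum = b.sum; }
    };

    void prefix_sum(const double* in, double* out, size_t n) {
        ScanBody body(in, out);
        tbb::parallel_scan(tbb::blocked_range<size_t>(0, n), body);
    }
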
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_sort_H -#define __TBB_parallel_sort_H - -#include "parallel_for.h" -#include "blocked_range.h" -#include "internal/_range_iterator.h" -#include -#include -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - -//! Range used in quicksort to split elements into subranges based on a value. -/** The split operation selects a splitter and places all elements less than or equal - to the value in the first range and the remaining elements in the second range. - @ingroup algorithms */ -template -class quick_sort_range: private no_assign { - - inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const { - return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) - : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) ); - } - - inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const { - size_t offset = range.size/8u; - return median_of_three(array, - median_of_three(array, 0, offset, offset*2), - median_of_three(array, offset*3, offset*4, offset*5), - median_of_three(array, offset*6, offset*7, range.size - 1) ); - - } - -public: - - static const size_t grainsize = 500; - const Compare ∁ - RandomAccessIterator begin; - size_t size; - - quick_sort_range( RandomAccessIterator begin_, size_t size_, const Compare &comp_ ) : - comp(comp_), begin(begin_), size(size_) {} - - bool empty() const {return size==0;} - bool is_divisible() const {return size>=grainsize;} - - quick_sort_range( quick_sort_range& range, split ) : comp(range.comp) { - using std::swap; - RandomAccessIterator array = range.begin; - RandomAccessIterator key0 = range.begin; - size_t m = pseudo_median_of_nine(array, range); - if (m) swap ( array[0], array[m] ); - - size_t i=0; - size_t j=range.size; - // Partition interval [i+1,j-1] with key *key0. - for(;;) { - __TBB_ASSERT( i -class quick_sort_pretest_body : internal::no_assign { - const Compare ∁ - -public: - quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {} - - void operator()( const blocked_range& range ) const { - task &my_task = task::self(); - RandomAccessIterator my_end = range.end(); - - int i = 0; - for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) { - if ( i%64 == 0 && my_task.is_cancelled() ) break; - - // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1 - if ( comp( *(k), *(k-1) ) ) { - my_task.cancel_group_execution(); - break; - } - } - } - -}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Body class used to sort elements in a range that is smaller than the grainsize. -/** @ingroup algorithms */ -template -struct quick_sort_body { - void operator()( const quick_sort_range& range ) const { - //SerialQuickSort( range.begin, range.size, range.comp ); - std::sort( range.begin, range.begin + range.size, range.comp ); - } -}; - -//! Wrapper method to initiate the sort by calling parallel_for. 
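
The public entry points follow just below; in use, the header amounted to this sketch. Note the pretest above: if the input turns out to be already sorted, the parallel quicksort is skipped entirely. The comparator choice is illustrative:

    #include <vector>
    #include <functional>   // std::greater
    #include "tbb/parallel_sort.h"

    void sort_descending(std::vector<double>& v) {
        // Falls back to std::sort below the 500-element threshold,
        // otherwise runs the parallel quicksort defined above.
        tbb::parallel_sort(v.begin(), v.end(), std::greater<double>());
    }
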
-/** @ingroup algorithms */ -template -void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { -#if __TBB_TASK_GROUP_CONTEXT - task_group_context my_context; - const int serial_cutoff = 9; - - __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" ); - RandomAccessIterator k; - for ( k = begin ; k != begin + serial_cutoff; ++k ) { - if ( comp( *(k+1), *k ) ) { - goto do_parallel_quick_sort; - } - } - - parallel_for( blocked_range(k+1, end), - quick_sort_pretest_body(comp), - auto_partitioner(), - my_context); - - if (my_context.is_group_execution_cancelled()) -do_parallel_quick_sort: -#endif /* __TBB_TASK_GROUP_CONTEXT */ - parallel_for( quick_sort_range(begin, end-begin, comp ), - quick_sort_body(), - auto_partitioner() ); -} - -} // namespace internal -//! @endcond - -/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort - Requirements on value type \c T of \c RandomAccessIterator for \c parallel_sort: - - \code void swap( T& x, T& y ) \endcode Swaps \c x and \c y - - \code bool Compare::operator()( const T& x, const T& y ) \endcode - True if x comes before y; -**/ - -/** \name parallel_sort - See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/ -//@{ - -//! Sorts the data in [begin,end) using the given comparator -/** The compare function object is used for all comparisons between elements during sorting. - The compare object must define a bool operator() function. - @ingroup algorithms **/ -template -void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { - const int min_parallel_size = 500; - if( end > begin ) { - if (end - begin < min_parallel_size) { - std::sort(begin, end, comp); - } else { - internal::parallel_quick_sort(begin, end, comp); - } - } -} - -//! Sorts the data in [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { - parallel_sort( begin, end, std::less< typename std::iterator_traits::value_type >() ); -} - -//! Sorts the data in rng using the given comparator -/** @ingroup algorithms **/ -template -void parallel_sort(Range& rng, const Compare& comp) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); -} - -//! Sorts the data in const rng using the given comparator -/** @ingroup algorithms **/ -template -void parallel_sort(const Range& rng, const Compare& comp) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); -} - -//! Sorts the data in rng with a default comparator \c std::less -/** @ingroup algorithms **/ -template -void parallel_sort(Range& rng) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); -} - -//! Sorts the data in const rng with a default comparator \c std::less -/** @ingroup algorithms **/ -template -void parallel_sort(const Range& rng) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); -} - -//! 
Sorts the data in the range \c [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( T * begin, T * end ) { - parallel_sort( begin, end, std::less< T >() ); -} -//@} - - -} // namespace tbb - -#endif - diff --git a/inst/include/tbb/parallel_while.h b/inst/include/tbb/parallel_while.h deleted file mode 100644 index a1db83413..000000000 --- a/inst/include/tbb/parallel_while.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_while -#define __TBB_parallel_while - -#include "task.h" -#include - -namespace tbb { - -template -class parallel_while; - -//! @cond INTERNAL -namespace internal { - - template class while_task; - - //! For internal use only. - /** Executes one iteration of a while. - @ingroup algorithms */ - template - class while_iteration_task: public task { - const Body& my_body; - typename Body::argument_type my_value; - /*override*/ task* execute() { - my_body(my_value); - return NULL; - } - while_iteration_task( const typename Body::argument_type& value, const Body& body ) : - my_body(body), my_value(value) - {} - template friend class while_group_task; - friend class tbb::parallel_while; - }; - - //! For internal use only - /** Unpacks a block of iterations. - @ingroup algorithms */ - template - class while_group_task: public task { - static const size_t max_arg_size = 4; - const Body& my_body; - size_t size; - typename Body::argument_type my_arg[max_arg_size]; - while_group_task( const Body& body ) : my_body(body), size(0) {} - /*override*/ task* execute() { - typedef while_iteration_task iteration_type; - __TBB_ASSERT( size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_arg[k],my_body); - if( ++k==size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - template friend class while_task; - }; - - //! For internal use only. - /** Gets block of iterations from a stream and packages them into a while_group_task. 
- @ingroup algorithms */ - template - class while_task: public task { - Stream& my_stream; - const Body& my_body; - empty_task& my_barrier; - /*override*/ task* execute() { - typedef while_group_task block_type; - block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body); - size_t k=0; - while( my_stream.pop_if_present(t.my_arg[k]) ) { - if( ++k==block_type::max_arg_size ) { - // There might be more iterations. - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.size = k; - return &t; - } - } - while_task( Stream& stream, const Body& body, empty_task& barrier ) : - my_stream(stream), - my_body(body), - my_barrier(barrier) - {} - friend class tbb::parallel_while; - }; - -} // namespace internal -//! @endcond - -//! Parallel iteration over a stream, with optional addition of more work. -/** The Body b has the requirement: \n - "b(v)" \n - "b.argument_type" \n - where v is an argument_type - @ingroup algorithms */ -template -class parallel_while: internal::no_copy { -public: - //! Construct empty non-running parallel while. - parallel_while() : my_body(NULL), my_barrier(NULL) {} - - //! Destructor cleans up data members before returning. - ~parallel_while() { - if( my_barrier ) { - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - } - } - - //! Type of items - typedef typename Body::argument_type value_type; - - //! Apply body.apply to each item in the stream. - /** A Stream s has the requirements \n - "S::value_type" \n - "s.pop_if_present(value) is convertible to bool */ - template - void run( Stream& stream, const Body& body ); - - //! Add a work item while running. - /** Should be executed only by body.apply or a thread spawned therefrom. */ - void add( const value_type& item ); - -private: - const Body* my_body; - empty_task* my_barrier; -}; - -template -template -void parallel_while::run( Stream& stream, const Body& body ) { - using namespace internal; - empty_task& barrier = *new( task::allocate_root() ) empty_task(); - my_body = &body; - my_barrier = &barrier; - my_barrier->set_ref_count(2); - while_task& w = *new( my_barrier->allocate_child() ) while_task( stream, body, barrier ); - my_barrier->spawn_and_wait_for_all(w); - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - my_body = NULL; -} - -template -void parallel_while::add( const value_type& item ) { - __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not running"); - typedef internal::while_iteration_task iteration_type; - iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body); - task::self().spawn( i ); -} - -} // namespace - -#endif /* __TBB_parallel_while */ diff --git a/inst/include/tbb/partitioner.h b/inst/include/tbb/partitioner.h deleted file mode 100644 index c008e8ce8..000000000 --- a/inst/include/tbb/partitioner.h +++ /dev/null @@ -1,630 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
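To make the Stream/Body contract above concrete, a hedged sketch of driving parallel_while with a counting stream; Counter and PrintSquare are invented names, and a running body could also feed extra items back in through add():

    #include <cstdio>
    #include "tbb/parallel_while.h"

    // Stream: must expose value_type and pop_if_present(value_type&).
    class Counter {
        int next_, last_;
    public:
        typedef int value_type;
        explicit Counter(int n) : next_(1), last_(n) {}
        bool pop_if_present(int& item) {
            if (next_ > last_) return false;
            item = next_++;
            return true;
        }
    };

    // Body: must expose argument_type and a const call operator.
    struct PrintSquare {
        typedef int argument_type;
        void operator()(int x) const { std::printf("%d\n", x * x); }
    };

    int main() {
        Counter stream(1000);
        PrintSquare body;
        tbb::parallel_while<PrintSquare> w;
        w.run(stream, body);  // returns once the stream is drained
        return 0;
    }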
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_partitioner_H -#define __TBB_partitioner_H - -#ifndef __TBB_INITIAL_CHUNKS -// initial task divisions per thread -#define __TBB_INITIAL_CHUNKS 2 -#endif -#ifndef __TBB_RANGE_POOL_CAPACITY -// maximum number of elements in range pool -#define __TBB_RANGE_POOL_CAPACITY 8 -#endif -#ifndef __TBB_INIT_DEPTH -// initial value for depth of range pool -#define __TBB_INIT_DEPTH 5 -#endif -#ifndef __TBB_DEMAND_DEPTH_ADD -// when imbalance is found range splits this value times more -#define __TBB_DEMAND_DEPTH_ADD 2 -#endif -#ifndef __TBB_STATIC_THRESHOLD -// necessary number of clocks for the work to be distributed among all tasks -#define __TBB_STATIC_THRESHOLD 40000 -#endif -#if __TBB_DEFINE_MIC -#define __TBB_NONUNIFORM_TASK_CREATION 1 -#ifdef __TBB_machine_time_stamp -#define __TBB_USE_MACHINE_TIME_STAMPS 1 -#define __TBB_task_duration() __TBB_STATIC_THRESHOLD -#endif // __TBB_machine_time_stamp -#endif // __TBB_DEFINE_MIC - -#include "task.h" -#include "aligned_space.h" -#include "atomic.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244) -#endif - -namespace tbb { - -class auto_partitioner; -class simple_partitioner; -class affinity_partitioner; -namespace interface7 { - namespace internal { - class affinity_partition_type; - } -} - -namespace internal { //< @cond INTERNAL -size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); - -//! Defines entry point for affinity partitioner into tbb run-time library. -class affinity_partitioner_base_v3: no_copy { - friend class tbb::affinity_partitioner; - friend class tbb::interface7::internal::affinity_partition_type; - //! Array that remembers affinities of tree positions to affinity_id. - /** NULL if my_size==0. */ - affinity_id* my_array; - //! Number of elements in my_array. - size_t my_size; - //! Zeros the fields. - affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} - //! Deallocates my_array. - ~affinity_partitioner_base_v3() {resize(0);} - //! Resize my_array. - /** Retains values if resulting size is the same. */ - void __TBB_EXPORTED_METHOD resize( unsigned factor ); -}; - -//! Provides backward-compatible methods for partition objects without affinity. -class partition_type_base { -public: - void set_affinity( task & ) {} - void note_affinity( task::affinity_id ) {} - task* continue_after_execute_range() {return NULL;} - bool decide_whether_to_delay() {return false;} - void spawn_or_delay( bool, task& b ) { - task::spawn(b); - } -}; - -template class start_scan; - -} //< namespace internal @endcond - -namespace serial { -namespace interface7 { -template class start_for; -} -} - -namespace interface7 { -//! 
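Because each tuning knob above is wrapped in #ifndef, a translation unit can, hypothetically, retune the partitioner before pulling in the header; the values shown here are arbitrary, not recommendations:

    // Must precede the include; otherwise the defaults above apply.
    #define __TBB_INITIAL_CHUNKS 4        // more initial divisions per thread
    #define __TBB_RANGE_POOL_CAPACITY 16  // deeper range pool per task
    #include "tbb/partitioner.h"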
@cond INTERNAL -namespace internal { -using namespace tbb::internal; -template class start_for; -template class start_reduce; - -//! Join task node that contains shared flag for stealing feedback -class flag_task: public task { -public: - tbb::atomic my_child_stolen; - flag_task() { my_child_stolen = false; } - task* execute() { return NULL; } - static void mark_task_stolen(task &t) { - tbb::atomic &flag = static_cast(t.parent())->my_child_stolen; -#if TBB_USE_THREADING_TOOLS - // Threading tools respect lock prefix but report false-positive data-race via plain store - flag.fetch_and_store(true); -#else - flag = true; -#endif //TBB_USE_THREADING_TOOLS - } - static bool is_peer_stolen(task &t) { - return static_cast(t.parent())->my_child_stolen; - } -}; - -//! Depth is a relative depth of recursive division inside a range pool. Relative depth allows -//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented -//! by a number that cannot fit into machine word. -typedef unsigned char depth_t; - -//! Range pool stores ranges of type T in a circular buffer with MaxCapacity -template -class range_vector { - depth_t my_head; - depth_t my_tail; - depth_t my_size; - depth_t my_depth[MaxCapacity]; // relative depths of stored ranges - tbb::aligned_space my_pool; - -public: - //! initialize via first range in pool - range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) { - my_depth[0] = 0; - new( static_cast(my_pool.begin()) ) T(elem);//TODO: std::move? - } - ~range_vector() { - while( !empty() ) pop_back(); - } - bool empty() const { return my_size == 0; } - depth_t size() const { return my_size; } - //! Populates range pool via ranges up to max depth or while divisible - //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool up to two 1/4 pieces - void split_to_fill(depth_t max_depth) { - while( my_size < MaxCapacity && is_divisible(max_depth) ) { - depth_t prev = my_head; - my_head = (my_head + 1) % MaxCapacity; - new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move? - my_pool.begin()[prev].~T(); // instead of assignment - new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split - my_depth[my_head] = ++my_depth[prev]; - my_size++; - } - } - void pop_back() { - __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size"); - my_pool.begin()[my_head].~T(); - my_size--; - my_head = (my_head + MaxCapacity - 1) % MaxCapacity; - } - void pop_front() { - __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size"); - my_pool.begin()[my_tail].~T(); - my_size--; - my_tail = (my_tail + 1) % MaxCapacity; - } - T& back() { - __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size"); - return my_pool.begin()[my_head]; - } - T& front() { - __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size"); - return my_pool.begin()[my_tail]; - } - //! similarly to front(), returns depth of the first range in the pool - depth_t front_depth() { - __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size"); - return my_depth[my_tail]; - } - depth_t back_depth() { - __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size"); - return my_depth[my_head]; - } - bool is_divisible(depth_t max_depth) { - return back_depth() < max_depth && back().is_divisible(); - } -}; - -//! Provides default methods for partition objects and common algorithm blocks. 
-template -struct partition_type_base { - typedef split split_type; - // decision makers - void set_affinity( task & ) {} - void note_affinity( task::affinity_id ) {} - bool check_being_stolen(task &) { return false; } // part of old should_execute_range() - bool check_for_demand(task &) { return false; } - bool is_divisible() { return true; } // part of old should_execute_range() - depth_t max_depth() { return 0; } - void align_depth(depth_t) { } - template split_type get_split() { return split(); } - - // common function blocks - Partition& self() { return *static_cast(this); } // CRTP helper - template - void execute(StartType &start, Range &range) { - // The algorithm in a few words ([]-denotes calls to decision methods of partitioner): - // [If this task is stolen, adjust depth and divisions if necessary, set flag]. - // If range is divisible { - // Spread the work while [initial divisions left]; - // Create trap task [if necessary]; - // } - // If not divisible or [max depth is reached], execute, else do the range pool part - if ( range.is_divisible() ) { - if ( self().is_divisible() ) { - do { // split until is divisible - typename Partition::split_type split_obj = self().template get_split(); - start.offer_work( split_obj ); - } while ( range.is_divisible() && self().is_divisible() ); - } - } - if( !range.is_divisible() || !self().max_depth() ) - start.run_body( range ); // simple partitioner goes always here - else { // do range pool - internal::range_vector range_pool(range); - do { - range_pool.split_to_fill(self().max_depth()); // fill range pool - if( self().check_for_demand( start ) ) { - if( range_pool.size() > 1 ) { - start.offer_work( range_pool.front(), range_pool.front_depth() ); - range_pool.pop_front(); - continue; - } - if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task - continue; // note: next split_to_fill() should split range at least once - } - start.run_body( range_pool.back() ); - range_pool.pop_back(); - } while( !range_pool.empty() && !start.is_cancelled() ); - } - } -}; - -//! Provides default methods for auto (adaptive) partition objects. -template -struct adaptive_partition_type_base : partition_type_base { - size_t my_divisor; - depth_t my_max_depth; - adaptive_partition_type_base() : my_max_depth(__TBB_INIT_DEPTH) { - my_divisor = tbb::internal::get_initial_auto_partitioner_divisor() / 4; - __TBB_ASSERT(my_divisor, "initial value of get_initial_auto_partitioner_divisor() is not valid"); - } - adaptive_partition_type_base(adaptive_partition_type_base &src, split) { - my_max_depth = src.my_max_depth; -#if TBB_USE_ASSERT - size_t old_divisor = src.my_divisor; -#endif - -#if __TBB_INITIAL_TASK_IMBALANCE - if( src.my_divisor <= 1 ) my_divisor = 0; - else my_divisor = src.my_divisor = (src.my_divisor + 1u) / 2u; -#else - my_divisor = src.my_divisor / 2u; - src.my_divisor = src.my_divisor - my_divisor; // TODO: check the effect separately - if (my_divisor) src.my_max_depth += static_cast(__TBB_Log2(src.my_divisor / my_divisor)); -#endif - // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves. - // A task which has only one index must produce the right split without reserved index in order to avoid - // it to be overwritten in note_affinity() of the created (right) task. - // I.e. 
a task created deeper than the affinity array can remember must not save its affinity (LIFO order) - __TBB_ASSERT( (old_divisor <= 1 && my_divisor == 0) || - (old_divisor > 1 && my_divisor != 0), NULL); - } - adaptive_partition_type_base(adaptive_partition_type_base &src, const proportional_split& split_obj) { - my_max_depth = src.my_max_depth; - my_divisor = size_t(float(src.my_divisor) * float(split_obj.right()) - / float(split_obj.left() + split_obj.right())); - src.my_divisor -= my_divisor; - } - bool check_being_stolen( task &t) { // part of old should_execute_range() - if( !my_divisor ) { // if not from the top P tasks of binary tree - my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)? - if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task -#if TBB_USE_EXCEPTIONS - // RTTI is available, check whether the cast is valid - __TBB_ASSERT(dynamic_cast(t.parent()), 0); - // correctness of the cast relies on avoiding the root task for which: - // - initial value of my_divisor != 0 (protected by separate assertion) - // - is_stolen_task() always returns false for the root task. -#endif - flag_task::mark_task_stolen(t); - if( !my_max_depth ) my_max_depth++; - my_max_depth += __TBB_DEMAND_DEPTH_ADD; - return true; - } - } - return false; - } - void align_depth(depth_t base) { - __TBB_ASSERT(base <= my_max_depth, 0); - my_max_depth -= base; - } - depth_t max_depth() { return my_max_depth; } -}; - -//! Helper that enables one or the other code branches (see example in is_range_divisible_in_proportion) -template struct enable_if { typedef T type; }; -template struct enable_if { }; - -//! Class determines whether template parameter has static boolean -//! constant 'is_divisible_in_proportion' initialized with value of -//! 'true' or not. -/** If template parameter has such field that has been initialized - * with non-zero value then class field will be set to 'true', - * otherwise - 'false' - */ -template -class is_range_divisible_in_proportion { -private: - typedef char yes[1]; - typedef char no [2]; - - template static yes& decide(typename enable_if::type *); - template static no& decide(...); -public: - // equals to 'true' if and only if static const variable 'is_divisible_in_proportion' of template parameter - // initialized with the value of 'true' - static const bool value = (sizeof(decide(0)) == sizeof(yes)); -}; - -//! Provides default methods for affinity (adaptive) partition objects. 
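The detection trick above is the classic sizeof/SFINAE idiom; a self-contained restatement (the type names here are invented) showing how the presence of a true-valued is_divisible_in_proportion member is probed at compile time:

    #include <cstdio>

    template <bool C, typename T = void> struct enable_if_ { typedef T type; };
    template <typename T> struct enable_if_<false, T> {};

    template <typename Range>
    struct has_proportional_split {
    private:
        typedef char yes[1];
        typedef char no[2];
        // Viable only when Range::is_divisible_in_proportion exists and is true.
        template <typename R>
        static yes& decide(typename enable_if_<R::is_divisible_in_proportion>::type*);
        template <typename R>
        static no& decide(...);
    public:
        static const bool value = sizeof(decide<Range>(0)) == sizeof(yes);
    };

    struct plain_range {};
    struct proportional_range { static const bool is_divisible_in_proportion = true; };

    int main() {
        std::printf("%d %d\n",
                    int(has_proportional_split<plain_range>::value),          // 0
                    int(has_proportional_split<proportional_range>::value));  // 1
        return 0;
    }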
-class affinity_partition_type : public adaptive_partition_type_base { - static const unsigned factor_power = 4; - static const unsigned factor = 1<(), - my_delay(start) -#ifdef __TBB_USE_MACHINE_TIME_STAMPS - , my_dst_tsc(0) -#endif - { - __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); - my_divisor *= factor; - ap.resize(factor); - my_array = ap.my_array; - my_begin = 0; - my_max_depth = factor_power + 1; // the first factor_power ranges will be spawned, and >=1 ranges should be left - __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 ); - } - affinity_partition_type(affinity_partition_type& p, split) - : adaptive_partition_type_base(p, split()), - my_delay(pass), -#ifdef __TBB_USE_MACHINE_TIME_STAMPS - my_dst_tsc(0), -#endif - my_array(p.my_array) { - // the sum of the divisors represents original value of p.my_divisor before split - __TBB_ASSERT(my_divisor + p.my_divisor <= factor, NULL); - my_begin = p.my_begin + p.my_divisor; - } - affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj) - : adaptive_partition_type_base(p, split_obj), - my_delay(start), -#ifdef __TBB_USE_MACHINE_TIME_STAMPS - my_dst_tsc(0), -#endif - my_array(p.my_array) { - size_t total_divisor = my_divisor + p.my_divisor; - __TBB_ASSERT(total_divisor % factor == 0, NULL); - my_divisor = (my_divisor + factor/2) & (0u - factor); - if (!my_divisor) - my_divisor = factor; - else if (my_divisor == total_divisor) - my_divisor = total_divisor - factor; - p.my_divisor = total_divisor - my_divisor; - __TBB_ASSERT(my_divisor && p.my_divisor, NULL); - my_begin = p.my_begin + p.my_divisor; - } - void set_affinity( task &t ) { - if( my_divisor ) { - if( !my_array[my_begin] ) { - // TODO: consider code reuse for static_paritioner - my_array[my_begin] = affinity_id(my_begin / factor + 1); - } - t.set_affinity( my_array[my_begin] ); - } - } - void note_affinity( task::affinity_id id ) { - if( my_divisor ) - my_array[my_begin] = id; - } - bool check_for_demand( task &t ) { - if( pass == my_delay ) { - if( my_divisor > 1 ) // produce affinitized tasks while they have slot in array - return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more - else if( my_divisor && my_max_depth ) { // make balancing task - my_divisor = 0; // once for each task; depth will be decreased in align_depth() - return true; - } - else if( flag_task::is_peer_stolen(t) ) { - my_max_depth += __TBB_DEMAND_DEPTH_ADD; - return true; - } - } else if( start == my_delay ) { -#ifndef __TBB_USE_MACHINE_TIME_STAMPS - my_delay = pass; -#else - my_dst_tsc = __TBB_machine_time_stamp() + __TBB_task_duration(); - my_delay = run; - } else if( run == my_delay ) { - if( __TBB_machine_time_stamp() < my_dst_tsc ) { - __TBB_ASSERT(my_max_depth > 0, NULL); - return false; - } - my_delay = pass; - return true; -#endif // __TBB_USE_MACHINE_TIME_STAMPS - } - return false; - } - bool is_divisible() { // part of old should_execute_range() - return my_divisor > factor; - } - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress "conditional expression is constant" warning. 
- #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - template - split_type get_split() { - if (is_range_divisible_in_proportion::value) { - size_t size = my_divisor / factor; -#if __TBB_NONUNIFORM_TASK_CREATION - size_t right = (size + 2) / 3; -#else - size_t right = size / 2; -#endif - size_t left = size - right; - return split_type(left, right); - } else { - return split_type(1, 1); - } - } -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back - - static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; -}; - -class auto_partition_type: public adaptive_partition_type_base { -public: - auto_partition_type( const auto_partitioner& ) { - my_divisor *= __TBB_INITIAL_CHUNKS; - } - auto_partition_type( auto_partition_type& src, split) - : adaptive_partition_type_base(src, split()) {} - - bool is_divisible() { // part of old should_execute_range() - if( my_divisor > 1 ) return true; - if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead - // keep same fragmentation while splitting for the local task pool - my_max_depth--; - my_divisor = 0; // decrease max_depth once per task - return true; - } else return false; - } - bool check_for_demand(task &t) { - if( flag_task::is_peer_stolen(t) ) { - my_max_depth += __TBB_DEMAND_DEPTH_ADD; - return true; - } else return false; - } - - static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; -}; - -class simple_partition_type: public partition_type_base { -public: - simple_partition_type( const simple_partitioner& ) {} - simple_partition_type( const simple_partition_type&, split ) {} - //! simplified algorithm - template - void execute(StartType &start, Range &range) { - split_type split_obj = split(); // start.offer_work accepts split_type as reference - while( range.is_divisible() ) - start.offer_work( split_obj ); - start.run_body( range ); - } - //static const unsigned range_pool_size = 1; - not necessary because execute() is overridden -}; - -//! Backward-compatible partition for auto and affinity partition objects. -class old_auto_partition_type: public tbb::internal::partition_type_base { - size_t num_chunks; - static const size_t VICTIM_CHUNKS = 4; -public: - bool should_execute_range(const task &t) { - if( num_chunks friend class serial::interface7::start_for; - template friend class interface7::internal::start_for; - template friend class interface7::internal::start_reduce; - template friend class internal::start_scan; - // backward compatibility - class partition_type: public internal::partition_type_base { - public: - bool should_execute_range(const task& ) {return false;} - partition_type( const simple_partitioner& ) {} - partition_type( const partition_type&, split ) {} - }; - // new implementation just extends existing interface - typedef interface7::internal::simple_partition_type task_partition_type; - - // TODO: consider to make split_type public - typedef interface7::internal::simple_partition_type::split_type split_type; -}; - -//! An auto partitioner -/** The range is initial divided into several large chunks. - Chunks are further subdivided into smaller pieces if demand detected and they are divisible. 
- @ingroup algorithms */ -class auto_partitioner { -public: - auto_partitioner() {} - -private: - template friend class serial::interface7::start_for; - template friend class interface7::internal::start_for; - template friend class interface7::internal::start_reduce; - template friend class internal::start_scan; - // backward compatibility - typedef interface7::internal::old_auto_partition_type partition_type; - // new implementation just extends existing interface - typedef interface7::internal::auto_partition_type task_partition_type; - - // TODO: consider to make split_type public - typedef interface7::internal::auto_partition_type::split_type split_type; -}; - -//! An affinity partitioner -class affinity_partitioner: internal::affinity_partitioner_base_v3 { -public: - affinity_partitioner() {} - -private: - template friend class serial::interface7::start_for; - template friend class interface7::internal::start_for; - template friend class interface7::internal::start_reduce; - template friend class internal::start_scan; - // backward compatibility - for parallel_scan only - typedef interface7::internal::old_auto_partition_type partition_type; - // new implementation just extends existing interface - typedef interface7::internal::affinity_partition_type task_partition_type; - - // TODO: consider to make split_type public - typedef interface7::internal::affinity_partition_type::split_type split_type; -}; - -} // namespace tbb - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4244 is back -#undef __TBB_INITIAL_CHUNKS -#undef __TBB_RANGE_POOL_CAPACITY -#undef __TBB_INIT_DEPTH -#endif /* __TBB_partitioner_H */ diff --git a/inst/include/tbb/pipeline.h b/inst/include/tbb/pipeline.h deleted file mode 100644 index 3a1d3d899..000000000 --- a/inst/include/tbb/pipeline.h +++ /dev/null @@ -1,664 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_pipeline_H -#define __TBB_pipeline_H - -#include "atomic.h" -#include "task.h" -#include "tbb_allocator.h" -#include - -#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT || __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT -#include -#endif - -namespace tbb { - -class pipeline; -class filter; - -//! 
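For reference, how these three partitioners are typically handed to parallel_for; a sketch assuming the companion blocked_range.h and parallel_for.h headers from this same TBB copy, with Scale and scale_all as invented names:

    #include <cstddef>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/partitioner.h"

    struct Scale {
        double* a;
        void operator()(const tbb::blocked_range<std::size_t>& r) const {
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                a[i] *= 2.0;
        }
    };

    void scale_all(double* a, std::size_t n) {
        Scale body = { a };
        tbb::blocked_range<std::size_t> range(0, n);

        // simple_partitioner: recursively split until !is_divisible().
        tbb::parallel_for(range, body, tbb::simple_partitioner());

        // auto_partitioner: few large initial chunks, further splits on demand.
        tbb::parallel_for(range, body, tbb::auto_partitioner());

        // affinity_partitioner: kept across calls so task-to-thread
        // affinity can be replayed for cache reuse.
        static tbb::affinity_partitioner ap;
        tbb::parallel_for(range, body, ap);
    }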
@cond INTERNAL -namespace internal { - -// The argument for PIPELINE_VERSION should be an integer between 2 and 9 -#define __TBB_PIPELINE_VERSION(x) ((unsigned char)(x-2)<<1) - -typedef unsigned long Token; -typedef long tokendiff_t; -class stage_task; -class input_buffer; -class pipeline_root_task; -class pipeline_cleaner; - -} // namespace internal - -namespace interface6 { - template class filter_t; - - namespace internal { - class pipeline_proxy; - } -} - -//! @endcond - -//! A stage in a pipeline. -/** @ingroup algorithms */ -class filter: internal::no_copy { -private: - //! Value used to mark "not in pipeline" - static filter* not_in_pipeline() {return reinterpret_cast(intptr_t(-1));} -protected: - //! The lowest bit 0 is for parallel vs. serial - static const unsigned char filter_is_serial = 0x1; - - //! 4th bit distinguishes ordered vs unordered filters. - /** The bit was not set for parallel filters in TBB 2.1 and earlier, - but is_ordered() function always treats parallel filters as out of order. */ - static const unsigned char filter_is_out_of_order = 0x1<<4; - - //! 5th bit distinguishes thread-bound and regular filters. - static const unsigned char filter_is_bound = 0x1<<5; - - //! 6th bit marks input filters emitting small objects - static const unsigned char filter_may_emit_null = 0x1<<6; - - //! 7th bit defines exception propagation mode expected by the application. - static const unsigned char exact_exception_propagation = -#if TBB_USE_CAPTURED_EXCEPTION - 0x0; -#else - 0x1<<7; -#endif /* TBB_USE_CAPTURED_EXCEPTION */ - - static const unsigned char current_version = __TBB_PIPELINE_VERSION(5); - static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version -public: - enum mode { - //! processes multiple items in parallel and in no particular order - parallel = current_version | filter_is_out_of_order, - //! processes items one at a time; all such filters process items in the same order - serial_in_order = current_version | filter_is_serial, - //! processes items one at a time and in no particular order - serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order, - //! @deprecated use serial_in_order instead - serial = serial_in_order - }; -protected: - filter( bool is_serial_ ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast((is_serial_ ? serial : parallel) | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - - filter( mode filter_mode ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast(filter_mode | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - - // signal end-of-input for concrete_filters - void __TBB_EXPORTED_METHOD set_end_of_input(); - -public: - //! True if filter is serial. - bool is_serial() const { - return bool( my_filter_mode & filter_is_serial ); - } - - //! True if filter must receive stream in order. - bool is_ordered() const { - return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial; - } - - //! True if filter is thread-bound. - bool is_bound() const { - return ( my_filter_mode & filter_is_bound )==filter_is_bound; - } - - //! true if an input filter can emit null - bool object_may_be_null() { - return ( my_filter_mode & filter_may_emit_null ) == filter_may_emit_null; - } - - //! 
Operate on an item from the input stream, and return item for output stream. - /** Returns NULL if filter is a sink. */ - virtual void* operator()( void* item ) = 0; - - //! Destroy filter. - /** If the filter was added to a pipeline, the pipeline must be destroyed first. */ - virtual __TBB_EXPORTED_METHOD ~filter(); - -#if __TBB_TASK_GROUP_CONTEXT - //! Destroys item if pipeline was cancelled. - /** Required to prevent memory leaks. - Note it can be called concurrently even for serial filters.*/ - virtual void finalize( void* /*item*/ ) {}; -#endif - -private: - //! Pointer to next filter in the pipeline. - filter* next_filter_in_pipeline; - - //! has the filter not yet processed all the tokens it will ever see? - // (pipeline has not yet reached end_of_input or this filter has not yet - // seen the last token produced by input_filter) - bool has_more_work(); - - //! Buffer for incoming tokens, or NULL if not required. - /** The buffer is required if the filter is serial or follows a thread-bound one. */ - internal::input_buffer* my_input_buffer; - - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class pipeline; - friend class thread_bound_filter; - - //! Storage for filter mode and dynamically checked implementation version. - const unsigned char my_filter_mode; - - //! Pointer to previous filter in the pipeline. - filter* prev_filter_in_pipeline; - - //! Pointer to the pipeline. - pipeline* my_pipeline; - - //! Pointer to the next "segment" of filters, or NULL if not required. - /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */ - filter* next_segment; -}; - -//! A stage in a pipeline served by a user thread. -/** @ingroup algorithms */ -class thread_bound_filter: public filter { -public: - enum result_type { - // item was processed - success, - // item is currently not available - item_not_available, - // there are no more items to process - end_of_stream - }; -protected: - thread_bound_filter(mode filter_mode): - filter(static_cast(filter_mode | filter::filter_is_bound)) - { - __TBB_ASSERT(filter_mode & filter::filter_is_serial, "thread-bound filters must be serial"); - } -public: - //! If a data item is available, invoke operator() on that item. - /** This interface is non-blocking. - Returns 'success' if an item was processed. - Returns 'item_not_available' if no item can be processed now - but more may arrive in the future, or if token limit is reached. - Returns 'end_of_stream' if there are no more items to process. */ - result_type __TBB_EXPORTED_METHOD try_process_item(); - - //! Wait until a data item becomes available, and invoke operator() on that item. - /** This interface is blocking. - Returns 'success' if an item was processed. - Returns 'end_of_stream' if there are no more items to process. - Never returns 'item_not_available', as it blocks until another return condition applies. */ - result_type __TBB_EXPORTED_METHOD process_item(); - -private: - //! Internal routine for item processing - result_type internal_process_item(bool is_blocking); -}; - -//! A processing pipeline that applies filters to items. -/** @ingroup algorithms */ -class pipeline { -public: - //! Construct empty pipeline. - __TBB_EXPORTED_METHOD pipeline(); - - /** Though the current implementation declares the destructor virtual, do not rely on this - detail. The virtualness is deprecated and may disappear in future versions of TBB. */ - virtual __TBB_EXPORTED_METHOD ~pipeline(); - - //! Add filter to end of pipeline. 
- void __TBB_EXPORTED_METHOD add_filter( filter& filter_ ); - - //! Run the pipeline to completion. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Run the pipeline to completion with user-supplied context. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context ); -#endif - - //! Remove all filters from the pipeline. - void __TBB_EXPORTED_METHOD clear(); - -private: - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class filter; - friend class thread_bound_filter; - friend class internal::pipeline_cleaner; - friend class tbb::interface6::internal::pipeline_proxy; - - //! Pointer to first filter in the pipeline. - filter* filter_list; - - //! Pointer to location where address of next filter to be added should be stored. - filter* filter_end; - - //! task who's reference count is used to determine when all stages are done. - task* end_counter; - - //! Number of idle tokens waiting for input stage. - atomic input_tokens; - - //! Global counter of tokens - atomic token_counter; - - //! False until fetch_input returns NULL. - bool end_of_input; - - //! True if the pipeline contains a thread-bound filter; false otherwise. - bool has_thread_bound_filters; - - //! Remove filter from pipeline. - void remove_filter( filter& filter_ ); - - //! Not used, but retained to satisfy old export files. - void __TBB_EXPORTED_METHOD inject_token( task& self ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Does clean up if pipeline is cancelled or exception occurred - void clear_filters(); -#endif -}; - -//------------------------------------------------------------------------ -// Support for lambda-friendly parallel_pipeline interface -//------------------------------------------------------------------------ - -namespace interface6 { - -namespace internal { - template class concrete_filter; -} - -//! input_filter control to signal end-of-input for parallel_pipeline -class flow_control { - bool is_pipeline_stopped; - flow_control() { is_pipeline_stopped = false; } - template friend class internal::concrete_filter; -public: - void stop() { is_pipeline_stopped = true; } -}; - -//! @cond INTERNAL -namespace internal { - -template struct tbb_large_object {enum { value = sizeof(T) > sizeof(void *) }; }; - -// Obtain type properties in one or another way -#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT -template struct tbb_trivially_copyable { enum { value = std::is_trivially_copyable::value }; }; -#elif __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT -template struct tbb_trivially_copyable { enum { value = std::has_trivial_copy_constructor::value }; }; -#else -// Explicitly list the types we wish to be placed as-is in the pipeline input_buffers. 
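Putting the filter modes and the pipeline class together, a sketch of the classic, pre-lambda usage: derive stages from tbb::filter, chain them, and bound the number of in-flight tokens. SourceFilter and PrintFilter are invented names, and tokens are packed into void* as this interface requires:

    #include <cstdio>
    #include <stdint.h>
    #include "tbb/pipeline.h"

    // Input stage: serial_in_order; returning NULL signals end of input.
    class SourceFilter : public tbb::filter {
        intptr_t next_;
    public:
        SourceFilter() : tbb::filter(tbb::filter::serial_in_order), next_(1) {}
        void* operator()(void*) {
            return next_ <= 10 ? reinterpret_cast<void*>(next_++) : NULL;
        }
    };

    // Final stage: parallel; consumes a token and returns NULL.
    class PrintFilter : public tbb::filter {
    public:
        PrintFilter() : tbb::filter(tbb::filter::parallel) {}
        void* operator()(void* item) {
            std::printf("%ld\n", long(reinterpret_cast<intptr_t>(item)));
            return NULL;
        }
    };

    int main() {
        SourceFilter src;
        PrintFilter sink;
        tbb::pipeline p;
        p.add_filter(src);
        p.add_filter(sink);
        p.run(4);   // at most 4 tokens live at once
        p.clear();  // detach filters before they go out of scope
        return 0;
    }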
-template struct tbb_trivially_copyable { enum { value = false }; }; -template struct tbb_trivially_copyable { enum { value = true }; }; -template<> struct tbb_trivially_copyable { enum { value = true }; }; -template<> struct tbb_trivially_copyable { enum { value = true }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -#endif // Obtaining type properties - -template struct is_large_object {enum { value = tbb_large_object::value || !tbb_trivially_copyable::value }; }; - -template class token_helper; - -// large object helper (uses tbb_allocator) -template -class token_helper { - public: - typedef typename tbb::tbb_allocator allocator; - typedef T* pointer; - typedef T value_type; - static pointer create_token(const value_type & source) { - pointer output_t = allocator().allocate(1); - return new (output_t) T(source); - } - static value_type & token(pointer & t) { return *t;} - static void * cast_to_void_ptr(pointer ref) { return (void *) ref; } - static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; } - static void destroy_token(pointer token) { - allocator().destroy(token); - allocator().deallocate(token,1); - } -}; - -// pointer specialization -template -class token_helper { - public: - typedef T* pointer; - typedef T* value_type; - static pointer create_token(const value_type & source) { return source; } - static value_type & token(pointer & t) { return t;} - static void * cast_to_void_ptr(pointer ref) { return (void *)ref; } - static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; } - static void destroy_token( pointer /*token*/) {} -}; - -// small object specialization (converts void* to the correct type, passes objects directly.) -template -class token_helper { - typedef union { - T actual_value; - void * void_overlay; - } type_to_void_ptr_map; - public: - typedef T pointer; // not really a pointer in this case. 
- typedef T value_type; - static pointer create_token(const value_type & source) { - return source; } - static value_type & token(pointer & t) { return t;} - static void * cast_to_void_ptr(pointer ref) { - type_to_void_ptr_map mymap; - mymap.void_overlay = NULL; - mymap.actual_value = ref; - return mymap.void_overlay; - } - static pointer cast_from_void_ptr(void * ref) { - type_to_void_ptr_map mymap; - mymap.void_overlay = ref; - return mymap.actual_value; - } - static void destroy_token( pointer /*token*/) {} -}; - -template -class concrete_filter: public tbb::filter { - const Body& my_body; - typedef token_helper::value > t_helper; - typedef typename t_helper::pointer t_pointer; - typedef token_helper::value > u_helper; - typedef typename u_helper::pointer u_pointer; - - /*override*/ void* operator()(void* input) { - t_pointer temp_input = t_helper::cast_from_void_ptr(input); - u_pointer output_u = u_helper::create_token(my_body(t_helper::token(temp_input))); - t_helper::destroy_token(temp_input); - return u_helper::cast_to_void_ptr(output_u); - } - - /*override*/ void finalize(void * input) { - t_pointer temp_input = t_helper::cast_from_void_ptr(input); - t_helper::destroy_token(temp_input); - } - -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -// input -template -class concrete_filter: public filter { - const Body& my_body; - typedef token_helper::value > u_helper; - typedef typename u_helper::pointer u_pointer; - - /*override*/void* operator()(void*) { - flow_control control; - u_pointer output_u = u_helper::create_token(my_body(control)); - if(control.is_pipeline_stopped) { - u_helper::destroy_token(output_u); - set_end_of_input(); - return NULL; - } - return u_helper::cast_to_void_ptr(output_u); - } - -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : - filter(static_cast(filter_mode | filter_may_emit_null)), - my_body(body) - {} -}; - -template -class concrete_filter: public filter { - const Body& my_body; - typedef token_helper::value > t_helper; - typedef typename t_helper::pointer t_pointer; - - /*override*/ void* operator()(void* input) { - t_pointer temp_input = t_helper::cast_from_void_ptr(input); - my_body(t_helper::token(temp_input)); - t_helper::destroy_token(temp_input); - return NULL; - } - /*override*/ void finalize(void* input) { - t_pointer temp_input = t_helper::cast_from_void_ptr(input); - t_helper::destroy_token(temp_input); - } - -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - const Body& my_body; - - /** Override privately because it is always called virtually */ - /*override*/ void* operator()(void*) { - flow_control control; - my_body(control); - void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1; - return output; - } -public: - concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -//! The class that represents an object of the pipeline for parallel_pipeline(). -/** It primarily serves as RAII class that deletes heap-allocated filter instances. */ -class pipeline_proxy { - tbb::pipeline my_pipe; -public: - pipeline_proxy( const filter_t& filter_chain ); - ~pipeline_proxy() { - while( filter* f = my_pipe.filter_list ) - delete f; // filter destructor removes it from the pipeline - } - tbb::pipeline* operator->() { return &my_pipe; } -}; - -//! 
Abstract base class that represents a node in a parse tree underlying a filter_t. -/** These nodes are always heap-allocated and can be shared by filter_t objects. */ -class filter_node: tbb::internal::no_copy { - /** Count must be atomic because it is hidden state for user, but might be shared by threads. */ - tbb::atomic ref_count; -protected: - filter_node() { - ref_count = 0; -#ifdef __TBB_TEST_FILTER_NODE_COUNT - ++(__TBB_TEST_FILTER_NODE_COUNT); -#endif - } -public: - //! Add concrete_filter to pipeline - virtual void add_to( pipeline& ) = 0; - //! Increment reference count - void add_ref() {++ref_count;} - //! Decrement reference count and delete if it becomes zero. - void remove_ref() { - __TBB_ASSERT(ref_count>0,"ref_count underflow"); - if( --ref_count==0 ) - delete this; - } - virtual ~filter_node() { -#ifdef __TBB_TEST_FILTER_NODE_COUNT - --(__TBB_TEST_FILTER_NODE_COUNT); -#endif - } -}; - -//! Node in parse tree representing result of make_filter. -template -class filter_node_leaf: public filter_node { - const tbb::filter::mode mode; - const Body body; - /*override*/void add_to( pipeline& p ) { - concrete_filter* f = new concrete_filter(mode,body); - p.add_filter( *f ); - } -public: - filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {} -}; - -//! Node in parse tree representing join of two filters. -class filter_node_join: public filter_node { - friend class filter_node; // to suppress GCC 3.2 warnings - filter_node& left; - filter_node& right; - /*override*/~filter_node_join() { - left.remove_ref(); - right.remove_ref(); - } - /*override*/void add_to( pipeline& p ) { - left.add_to(p); - right.add_to(p); - } -public: - filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) { - left.add_ref(); - right.add_ref(); - } -}; - -} // namespace internal -//! @endcond - -//! Create a filter to participate in parallel_pipeline -template -filter_t make_filter(tbb::filter::mode mode, const Body& body) { - return new internal::filter_node_leaf(mode, body); -} - -template -filter_t operator& (const filter_t& left, const filter_t& right) { - __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'"); - __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'"); - return new internal::filter_node_join(*left.root,*right.root); -} - -//! Class representing a chain of type-safe pipeline filters -template -class filter_t { - typedef internal::filter_node filter_node; - filter_node* root; - filter_t( filter_node* root_ ) : root(root_) { - root->add_ref(); - } - friend class internal::pipeline_proxy; - template - friend filter_t make_filter(tbb::filter::mode, const Body& ); - template - friend filter_t operator& (const filter_t& , const filter_t& ); -public: - filter_t() : root(NULL) {} - filter_t( const filter_t& rhs ) : root(rhs.root) { - if( root ) root->add_ref(); - } - template - filter_t( tbb::filter::mode mode, const Body& body ) : - root( new internal::filter_node_leaf(mode, body) ) { - root->add_ref(); - } - - void operator=( const filter_t& rhs ) { - // Order of operations below carefully chosen so that reference counts remain correct - // in unlikely event that remove_ref throws exception. - filter_node* old = root; - root = rhs.root; - if( root ) root->add_ref(); - if( old ) old->remove_ref(); - } - ~filter_t() { - if( root ) root->remove_ref(); - } - void clear() { - // Like operator= with filter_t() on right side. 
- if( root ) { - filter_node* old = root; - root = NULL; - old->remove_ref(); - } - } -}; - -inline internal::pipeline_proxy::pipeline_proxy( const filter_t& filter_chain ) : my_pipe() { - __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t" ); - filter_chain.root->add_to(my_pipe); -} - -inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain -#if __TBB_TASK_GROUP_CONTEXT - , tbb::task_group_context& context -#endif - ) { - internal::pipeline_proxy pipe(filter_chain); - // tbb::pipeline::run() is called via the proxy - pipe->run(max_number_of_live_tokens -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif - ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain) { - tbb::task_group_context context; - parallel_pipeline(max_number_of_live_tokens, filter_chain, context); -} -#endif // __TBB_TASK_GROUP_CONTEXT - -} // interface6 - -using interface6::flow_control; -using interface6::filter_t; -using interface6::make_filter; -using interface6::parallel_pipeline; - -} // tbb - -#endif /* __TBB_pipeline_H */ diff --git a/inst/include/tbb/queuing_mutex.h b/inst/include/tbb/queuing_mutex.h deleted file mode 100644 index 7986b0b45..000000000 --- a/inst/include/tbb/queuing_mutex.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_queuing_mutex_H -#define __TBB_queuing_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Queuing mutex with local-only spinning. -/** @ingroup synchronization */ -class queuing_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired mutex. - queuing_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! 
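And the lambda-friendly interface6 equivalent defined above: compose type-safe stages with make_filter and operator&, with the input stage signalling completion through flow_control::stop(). Source, Transform, and Sink are invented functor names:

    #include <cmath>
    #include <cstdio>
    #include "tbb/pipeline.h"

    struct Source {
        mutable long n;
        Source() : n(0) {}
        double operator()(tbb::flow_control& fc) const {
            if (n >= 1000) { fc.stop(); return 0.0; }  // end of input
            return double(n++);
        }
    };
    struct Transform {
        double operator()(double x) const { return std::sqrt(x); }
    };
    struct Sink {
        void operator()(double x) const { std::printf("%g\n", x); }
    };

    int main() {
        tbb::parallel_pipeline(
            8,  // max_number_of_live_tokens
            tbb::make_filter<void, double>(tbb::filter::serial_in_order, Source()) &
            tbb::make_filter<double, double>(tbb::filter::parallel, Transform()) &
            tbb::make_filter<double, void>(tbb::filter::serial_in_order, Sink()));
        return 0;
    }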
The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields to mean "no lock held". - void initialize() { - mutex = NULL; -#if TBB_USE_ASSERT - internal::poison_pointer(next); -#endif /* TBB_USE_ASSERT */ - } - - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_mutex& m ) { - initialize(); - acquire(m); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m ); - - //! Acquire lock on given mutex if free (i.e. non-blocking) - bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m ); - - //! Release lock. - void __TBB_EXPORTED_METHOD release(); - - private: - //! The pointer to the mutex owned, or NULL if not holding a mutex. - queuing_mutex* mutex; - - //! The pointer to the next competitor for a mutex - scoped_lock *next; - - //! The local spin-wait variable - /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of - zero-initialization. Defining it as an entire word instead of - a byte seems to help performance slightly. */ - uintptr_t going; - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) - -} // namespace tbb - -#endif /* __TBB_queuing_mutex_H */ diff --git a/inst/include/tbb/queuing_rw_mutex.h b/inst/include/tbb/queuing_rw_mutex.h deleted file mode 100644 index 76df16290..000000000 --- a/inst/include/tbb/queuing_rw_mutex.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
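A short sketch of the scoped-lock pattern this class documents; the guard object doubles as the queue node, so a waiting thread spins only on its own node. The names counter and counter_mutex are invented:

    #include "tbb/queuing_mutex.h"

    tbb::queuing_mutex counter_mutex;
    long counter = 0;

    void increment() {
        // RAII guard and queue node in one; released by the destructor.
        tbb::queuing_mutex::scoped_lock lock(counter_mutex);
        ++counter;
    }

    bool try_increment() {
        tbb::queuing_mutex::scoped_lock lock;  // not yet holding anything
        if (!lock.try_acquire(counter_mutex))
            return false;                      // mutex busy; no blocking
        ++counter;
        return true;                           // released by destructor
    }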
-*/ - -#ifndef __TBB_queuing_rw_mutex_H -#define __TBB_queuing_rw_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Queuing reader-writer mutex with local-only spinning. -/** Adapted from Krieger, Stumm, et al. pseudocode at - http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93 - @ingroup synchronization */ -class queuing_rw_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired mutex. - queuing_rw_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL - ~queuing_rw_mutex() { -#if TBB_USE_ASSERT - __TBB_ASSERT( !q_tail, "destruction of an acquired mutex"); -#endif - } - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields to mean "no lock held". - void initialize() { - my_mutex = NULL; -#if TBB_USE_ASSERT - my_state = 0xFF; // Set to invalid state - internal::poison_pointer(my_next); - internal::poison_pointer(my_prev); -#endif /* TBB_USE_ASSERT */ - } - - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_rw_mutex& m, bool write=true ) { - initialize(); - acquire(m,write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( queuing_rw_mutex& m, bool write=true ); - - //! Acquire lock on given mutex if free (i.e. non-blocking) - bool try_acquire( queuing_rw_mutex& m, bool write=true ); - - //! Release lock. - void release(); - - //! Upgrade reader to become a writer. - /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer(); - - //! Downgrade writer to become a reader. - bool downgrade_to_reader(); - - private: - //! The pointer to the mutex owned, or NULL if not holding a mutex. - queuing_rw_mutex* my_mutex; - - //! The pointer to the previous and next competitors for a mutex - scoped_lock *__TBB_atomic my_prev, *__TBB_atomic my_next; - - typedef unsigned char state_t; - - //! State of the request: reader, writer, active reader, other service states - atomic my_state; - - //! The local spin-wait variable - /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */ - unsigned char __TBB_atomic my_going; - - //! A tiny internal lock - unsigned char my_internal_lock; - - //! Acquire the internal lock - void acquire_internal_lock(); - - //! Try to acquire the internal lock - /** Returns true if lock was successfully acquired. */ - bool try_acquire_internal_lock(); - - //! Release the internal lock - void release_internal_lock(); - - //! Wait for internal lock to be released - void wait_for_release_of_internal_lock(); - - //! 
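A hedged sketch of the read-mostly pattern these methods support; find() and insert() are hypothetical helpers standing in for real table operations:

    #include "tbb/queuing_rw_mutex.h"

    int find(int key);    // hypothetical lookup, returns -1 if absent
    int insert(int key);  // hypothetical insert, returns the new value

    tbb::queuing_rw_mutex table_mutex;

    int lookup_or_insert(int key) {
        // Enter as a reader; upgrade only if a write turns out to be needed.
        tbb::queuing_rw_mutex::scoped_lock lock(table_mutex, /*write=*/ false);
        int value = find(key);
        if (value < 0) {
            // upgrade_to_writer() returns false if the lock was released and
            // re-acquired along the way, so the lookup must be re-checked.
            if (!lock.upgrade_to_writer())
                value = find(key);
            if (value < 0)
                value = insert(key);
        }
        return value;
    }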
A helper function - void unblock_or_wait_on_internal_lock( uintptr_t ); - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex) - -} // namespace tbb - -#endif /* __TBB_queuing_rw_mutex_H */ diff --git a/inst/include/tbb/reader_writer_lock.h b/inst/include/tbb/reader_writer_lock.h deleted file mode 100644 index 60d24f3dc..000000000 --- a/inst/include/tbb/reader_writer_lock.h +++ /dev/null @@ -1,232 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_reader_writer_lock_H -#define __TBB_reader_writer_lock_H - -#include "tbb_thread.h" -#include "tbb_allocator.h" -#include "atomic.h" - -namespace tbb { -namespace interface5 { -//! Writer-preference reader-writer lock with local-only spinning on readers. -/** Loosely adapted from Mellor-Crummey and Scott pseudocode at - http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp - @ingroup synchronization */ - class reader_writer_lock : tbb::internal::no_copy { - public: - friend class scoped_lock; - friend class scoped_lock_read; - //! Status type for nodes associated with lock instances - /** waiting_nonblocking: the wait state for nonblocking lock - instances; for writes, these transition straight to active - states; for reads, these are unused. - - waiting: the start and spin state for all lock instances; these will - transition to active state when appropriate. Non-blocking write locks - transition from this state to waiting_nonblocking immediately. - - active: the active state means that the lock instance holds - the lock; it will transition to invalid state during node deletion - - invalid: the end state for all nodes; this is set in the - destructor so if we encounter this state, we are looking at - memory that has already been freed - - The state diagrams below describe the status transitions. - Single arrows indicate that the thread that owns the node is - responsible for the transition; double arrows indicate that - any thread could make the transition. 
- - State diagram for scoped_lock status: - - waiting ----------> waiting_nonblocking - | _____________/ | - V V V - active -----------------> invalid - - State diagram for scoped_lock_read status: - - waiting - | - V - active ----------------->invalid - - */ - enum status_t { waiting_nonblocking, waiting, active, invalid }; - - //! Constructs a new reader_writer_lock - reader_writer_lock() { - internal_construct(); - } - - //! Destructs a reader_writer_lock object - ~reader_writer_lock() { - internal_destroy(); - } - - //! The scoped lock pattern for write locks - /** Scoped locks help avoid the common problem of forgetting to release the lock. - This type also serves as the node for queuing locks. */ - class scoped_lock : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire write lock on the passed-in lock - scoped_lock(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the write lock - ~scoped_lock() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock* next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock that is not holding lock - scoped_lock(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! The scoped lock pattern for read locks - class scoped_lock_read : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire read lock on the passed-in lock - scoped_lock_read(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the read lock - ~scoped_lock_read() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock_read *next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock_read that is not holding lock - scoped_lock_read(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! Acquires the reader_writer_lock for write. - /** If the lock is currently held in write mode by another - context, the writer will block by spinning on a local - variable. Exceptions thrown: improper_lock The context tries - to acquire a reader_writer_lock that it already has write - ownership of.*/ - void __TBB_EXPORTED_METHOD lock(); - - //! Tries to acquire the reader_writer_lock for write. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. If the lock - is already held by this acquiring context, try_lock() returns - false. */ - bool __TBB_EXPORTED_METHOD try_lock(); - - //! Acquires the reader_writer_lock for read. - /** If the lock is currently held by a writer, this reader will - block and wait until the writers are done. 
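
// A short sketch of the scoped guards declared above, assuming the legacy
// TBB 4.x headers; `cache`, `lookup`, and `store` are hypothetical names.
#include "tbb/reader_writer_lock.h"
#include <map>
#include <string>

namespace example {

tbb::reader_writer_lock cache_lock;
std::map<std::string, int> cache;

int lookup(const std::string& key) {
    // Readers spin only while a writer is active or pending.
    tbb::reader_writer_lock::scoped_lock_read guard(cache_lock);
    std::map<std::string, int>::const_iterator it = cache.find(key);
    return it == cache.end() ? -1 : it->second;
}

void store(const std::string& key, int value) {
    // Writer preference: a queued writer holds back newly arriving readers.
    tbb::reader_writer_lock::scoped_lock guard(cache_lock);
    cache[key] = value;
}

} // namespace example
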
Exceptions thrown: - improper_lock The context tries to acquire a - reader_writer_lock that it already has write ownership of. */ - void __TBB_EXPORTED_METHOD lock_read(); - - //! Tries to acquire the reader_writer_lock for read. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. */ - bool __TBB_EXPORTED_METHOD try_lock_read(); - - //! Releases the reader_writer_lock - void __TBB_EXPORTED_METHOD unlock(); - - private: - void __TBB_EXPORTED_METHOD internal_construct(); - void __TBB_EXPORTED_METHOD internal_destroy(); - - //! Attempts to acquire write lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - bool start_write(scoped_lock *); - //! Sets writer_head to w and attempts to unblock - void set_next_writer(scoped_lock *w); - //! Relinquishes write lock to next waiting writer or group of readers - void end_write(scoped_lock *); - //! Checks if current thread holds write lock - bool is_current_writer(); - - //! Attempts to acquire read lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - void start_read(scoped_lock_read *); - //! Unblocks pending readers - void unblock_readers(); - //! Relinquishes read lock by decrementing counter; last reader wakes pending writer - void end_read(); - - //! The list of pending readers - atomic reader_head; - //! The list of pending writers - atomic writer_head; - //! The last node in the list of pending writers - atomic writer_tail; - //! Writer that owns the mutex; tbb_thread::id() otherwise. - tbb_thread::id my_current_writer; - //! Status of mutex - atomic rdr_count_and_flags; // used with __TBB_AtomicOR, which assumes uintptr_t -}; - -} // namespace interface5 - -using interface5::reader_writer_lock; - -} // namespace tbb - -#endif /* __TBB_reader_writer_lock_H */ diff --git a/inst/include/tbb/recursive_mutex.h b/inst/include/tbb/recursive_mutex.h deleted file mode 100644 index 1e41d90a5..000000000 --- a/inst/include/tbb/recursive_mutex.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_recursive_mutex_H -#define __TBB_recursive_mutex_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif /* _WIN32||_WIN64 */ - -#include -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { -//! Mutex that allows recursive mutex acquisition. -/** Mutex that allows recursive mutex acquisition. - @ingroup synchronization */ -class recursive_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired recursive_mutex. - recursive_mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSectionEx(&impl, 4000, 0); - #else - pthread_mutexattr_t mtx_attr; - int error_code = pthread_mutexattr_init( &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); - - pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); - error_code = pthread_mutex_init( &impl, &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); - - pthread_mutexattr_destroy( &mtx_attr ); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~recursive_mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - public: - //! Construct lock that has not acquired a recursive_mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire( mutex ); -#else - my_mutex = &mutex; - mutex.lock(); -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given recursive_mutex. - bool try_acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire( mutex ); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release(); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current recursive_mutex to work - recursive_mutex* my_mutex; - - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m ); - - //! All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class recursive_mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = false; - - // C++0x compatibility interface - - //! 
Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - int error_code = pthread_mutex_lock(&impl); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_lock failed"); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - } state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) - -} // namespace tbb - -#endif /* __TBB_recursive_mutex_H */ diff --git a/inst/include/tbb/runtime_loader.h b/inst/include/tbb/runtime_loader.h deleted file mode 100644 index f5cd412ec..000000000 --- a/inst/include/tbb/runtime_loader.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_runtime_loader_H -#define __TBB_runtime_loader_H - -#if ! 
TBB_PREVIEW_RUNTIME_LOADER - #error Set TBB_PREVIEW_RUNTIME_LOADER to include runtime_loader.h -#endif - -#include "tbb_stddef.h" -#include - -#if _MSC_VER - #if ! __TBB_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment( linker, "/nodefaultlib:tbb_debug.lib" ) - #pragma comment( linker, "/defaultlib:tbbproxy_debug.lib" ) - #else - #pragma comment( linker, "/nodefaultlib:tbb.lib" ) - #pragma comment( linker, "/defaultlib:tbbproxy.lib" ) - #endif - #endif -#endif - -namespace tbb { - -namespace interface6 { - -//! Load TBB at runtime. -/*! - -\b Usage: - -In source code: - -\code -#include "tbb/runtime_loader.h" - -char const * path[] = { "/lib/ia32", NULL }; -tbb::runtime_loader loader( path ); - -// Now use TBB. -\endcode - -Link with \c tbbproxy.lib (or \c libtbbproxy.a) instead of \c tbb.lib (\c libtbb.dylib, -\c libtbb.so). - -TBB library will be loaded at runtime from \c /lib/ia32 directory. - -\b Attention: - -All \c runtime_loader objects (in the same module, i.e. exe or dll) share some global state. -The most noticeable piece of global state is loaded TBB library. -There are some implications: - - - Only one TBB library can be loaded per module. - - - If one object has already loaded TBB library, another object will not load TBB. - If the loaded TBB library is suitable for the second object, both will use TBB - cooperatively, otherwise the second object will report an error. - - - \c runtime_loader objects will not work (correctly) in parallel due to absence of - synchronization. - -*/ - -class runtime_loader : tbb::internal::no_copy { - - public: - - //! Error mode constants. - enum error_mode { - em_status, //!< Save status of operation and continue. - em_throw, //!< Throw an exception of tbb::runtime_loader::error_code type. - em_abort //!< Print message to \c stderr and call \c abort(). - }; // error_mode - - //! Error codes. - enum error_code { - ec_ok, //!< No errors. - ec_bad_call, //!< Invalid function call (e. g. load() called when TBB is already loaded). - ec_bad_arg, //!< Invalid argument passed. - ec_bad_lib, //!< Invalid library found (e. g. \c TBB_runtime_version symbol not found). - ec_bad_ver, //!< TBB found but version is not suitable. - ec_no_lib //!< No suitable TBB library found. - }; // error_code - - //! Initialize object but do not load TBB. - runtime_loader( error_mode mode = em_abort ); - - //! Initialize object and load TBB. - /*! - See load() for details. - - If error mode is \c em_status, call status() to check whether TBB was loaded or not. - */ - runtime_loader( - char const * path[], //!< List of directories to search TBB in. - int min_ver = TBB_INTERFACE_VERSION, //!< Minimal suitable version of TBB. - int max_ver = INT_MAX, //!< Maximal suitable version of TBB. - error_mode mode = em_abort //!< Error mode for this object. - ); - - //! Destroy object. - ~runtime_loader(); - - //! Load TBB. - /*! - The method searches the directories specified in \c path[] array for the TBB library. - When the library is found, it is loaded and its version is checked. If the version is - not suitable, the library is unloaded, and the search continues. - - \b Note: - - For security reasons, avoid using relative directory names. For example, never load - TBB from current (\c "."), parent (\c "..") or any other relative directory (like - \c "lib" ). Use only absolute directory names (e. g. "/usr/local/lib"). - - For the same security reasons, avoid using system default directories (\c "") on - Windows. 
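
// A sketch of the status-checking mode (em_status) declared above, assuming
// the legacy tbbproxy-based loader; the search directory is hypothetical.
// (Link against tbbproxy instead of tbb, as described above.)
#define TBB_PREVIEW_RUNTIME_LOADER 1
#include "tbb/runtime_loader.h"
#include <climits>
#include <cstdio>

int main() {
    char const * path[] = { "/opt/tbb/lib", NULL };   // hypothetical location
    tbb::runtime_loader loader( path, TBB_INTERFACE_VERSION, INT_MAX,
                                tbb::runtime_loader::em_status );
    if ( loader.status() != tbb::runtime_loader::ec_ok ) {
        std::fprintf( stderr, "no suitable TBB runtime found\n" );
        return 1;
    }
    // TBB entry points now resolve through the proxy library.
    return 0;
}
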
(See http://www.microsoft.com/technet/security/advisory/2269637.mspx for - details.) - - Neglecting these rules may cause your program to execute 3-rd party malicious code. - - \b Errors: - - \c ec_bad_call - TBB already loaded by this object. - - \c ec_bad_arg - \p min_ver and/or \p max_ver negative or zero, - or \p min_ver > \p max_ver. - - \c ec_bad_ver - TBB of unsuitable version already loaded by another object. - - \c ec_no_lib - No suitable library found. - */ - error_code - load( - char const * path[], //!< List of directories to search TBB in. - int min_ver = TBB_INTERFACE_VERSION, //!< Minimal suitable version of TBB. - int max_ver = INT_MAX //!< Maximal suitable version of TBB. - - ); - - - //! Report status. - /*! - If error mode is \c em_status, the function returns status of the last operation. - */ - error_code status(); - - private: - - error_mode const my_mode; - error_code my_status; - bool my_loaded; - -}; // class runtime_loader - -} // namespace interface6 - -using interface6::runtime_loader; - -} // namespace tbb - -#endif /* __TBB_runtime_loader_H */ - diff --git a/inst/include/tbb/scalable_allocator.h b/inst/include/tbb/scalable_allocator.h deleted file mode 100644 index 3683aa27f..000000000 --- a/inst/include/tbb/scalable_allocator.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_scalable_allocator_H -#define __TBB_scalable_allocator_H -/** @file */ - -#include /* Need ptrdiff_t and size_t from here. */ -#if !_MSC_VER -#include /* Need intptr_t from here. */ -#endif - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (push) - #pragma warning (disable: 991) -#endif - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#if _MSC_VER >= 1400 -#define __TBB_EXPORTED_FUNC __cdecl -#else -#define __TBB_EXPORTED_FUNC -#endif - -/** The "malloc" analogue to allocate block of memory of size bytes. - * @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_malloc (size_t size); - -/** The "free" analogue to discard a previously allocated piece of memory. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_free (void* ptr); - -/** The "realloc" analogue complementing scalable_malloc. 
- @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size); - -/** The "calloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size); - -/** The "posix_memalign" analogue. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size); - -/** The "_aligned_malloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment); - -/** The "_aligned_realloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment); - -/** The "_aligned_free" analogue. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); - -/** The analogue of _msize/malloc_size/malloc_usable_size. - Returns the usable size of a memory block previously allocated by scalable_*, - or 0 (zero) if ptr does not point to such a block. - @ingroup memory_allocation */ -size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); - -/* Results for scalable_allocation_* functions */ -typedef enum { - TBBMALLOC_OK, - TBBMALLOC_INVALID_PARAM, - TBBMALLOC_UNSUPPORTED, - TBBMALLOC_NO_MEMORY, - TBBMALLOC_NO_EFFECT -} ScalableAllocationResult; - -/* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages. - scalable_allocation_mode call has priority over environment variable. */ -typedef enum { - TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */ - /* deprecated, kept for backward compatibility only */ - USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES, - /* try to limit memory consumption value Bytes, clean internal buffers - if limit is exceeded, but not prevents from requesting memory from OS */ - TBBMALLOC_SET_SOFT_HEAP_LIMIT -} AllocationModeParam; - -/** Set TBB allocator-specific allocation modes. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value); - -typedef enum { - /* Clean internal allocator buffers for all threads. - Returns TBBMALLOC_NO_EFFECT if no buffers cleaned, - TBBMALLOC_OK if some memory released from buffers. */ - TBBMALLOC_CLEAN_ALL_BUFFERS, - /* Clean internal allocator buffer for current thread only. - Return values same as for TBBMALLOC_CLEAN_ALL_BUFFERS. */ - TBBMALLOC_CLEAN_THREAD_BUFFERS -} ScalableAllocationCmd; - -/** Call TBB allocator-specific commands. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_allocation_command(int cmd, void *param); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - -#ifdef __cplusplus - -//! The namespace rml contains components of low-level memory pool interface. -namespace rml { -class MemoryPool; - -typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes); -typedef int (*rawFreeType)(intptr_t pool_id, void* raw_ptr, size_t raw_bytes); - -/* -MemPoolPolicy extension must be compatible with such structure fields layout - -struct MemPoolPolicy { - rawAllocType pAlloc; - rawFreeType pFree; - size_t granularity; // granularity of pAlloc allocations -}; -*/ - -struct MemPoolPolicy { - enum { - TBBMALLOC_POOL_VERSION = 1 - }; - - rawAllocType pAlloc; - rawFreeType pFree; - // granularity of pAlloc allocations. 0 means default used. 
-    size_t granularity;
-    int version;
-    // all memory consumed at 1st pAlloc call and never returned,
-    // no more pAlloc calls after 1st
-    unsigned fixedPool : 1,
-    // memory consumed but returned only at pool termination
-        keepAllMemory : 1,
-        reserved : 30;
-
-    MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_,
-                  size_t granularity_ = 0, bool fixedPool_ = false,
-                  bool keepAllMemory_ = false) :
-        pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION),
-        fixedPool(fixedPool_), keepAllMemory(keepAllMemory_),
-        reserved(0) {}
-};
-
-// enums have same values as appropriate enums from ScalableAllocationResult
-// TODO: use ScalableAllocationResult in pool_create directly
-enum MemPoolError {
-    // pool created successfully
-    POOL_OK = TBBMALLOC_OK,
-    // invalid policy parameters found
-    INVALID_POLICY = TBBMALLOC_INVALID_PARAM,
-    // requested pool policy is not supported by allocator library
-    UNSUPPORTED_POLICY = TBBMALLOC_UNSUPPORTED,
-    // lack of memory during pool creation
-    NO_MEMORY = TBBMALLOC_NO_MEMORY,
-    // action takes no effect
-    NO_EFFECT = TBBMALLOC_NO_EFFECT
-};
-
-MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy,
-                            rml::MemoryPool **pool);
-
-bool pool_destroy(MemoryPool* memPool);
-void *pool_malloc(MemoryPool* memPool, size_t size);
-void *pool_realloc(MemoryPool* memPool, void *object, size_t size);
-void *pool_aligned_malloc(MemoryPool* mPool, size_t size, size_t alignment);
-void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_t alignment);
-bool pool_reset(MemoryPool* memPool);
-bool pool_free(MemoryPool *memPool, void *object);
-}
-
-#include <new> /* To use new with the placement argument */
-
-/* Ensure that including this header does not cause implicit linkage with TBB */
-#ifndef __TBB_NO_IMPLICIT_LINKAGE
-    #define __TBB_NO_IMPLICIT_LINKAGE 1
-    #include "tbb_stddef.h"
-    #undef __TBB_NO_IMPLICIT_LINKAGE
-#else
-    #include "tbb_stddef.h"
-#endif
-
-#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-    #include <utility> // std::forward
-#endif
-
-namespace tbb {
-
-#if _MSC_VER && !defined(__INTEL_COMPILER)
-    // Workaround for erroneous "unreferenced parameter" warning in method destroy.
-    #pragma warning (push)
-    #pragma warning (disable: 4100)
-#endif
-
-//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
-/** The members are ordered the same way they are in section 20.4.1
-    of the ISO C++ standard.
-    @ingroup memory_allocation */
-template<typename T>
-class scalable_allocator {
-public:
-    typedef typename internal::allocator_type<T>::value_type value_type;
-    typedef value_type* pointer;
-    typedef const value_type* const_pointer;
-    typedef value_type& reference;
-    typedef const value_type& const_reference;
-    typedef size_t size_type;
-    typedef ptrdiff_t difference_type;
-    template<class U> struct rebind {
-        typedef scalable_allocator<U> other;
-    };
-
-    scalable_allocator() throw() {}
-    scalable_allocator( const scalable_allocator& ) throw() {}
-    template<typename U> scalable_allocator(const scalable_allocator<U>&) throw() {}
-
-    pointer address(reference x) const {return &x;}
-    const_pointer address(const_reference x) const {return &x;}
-
-    //! Allocate space for n objects.
-    pointer allocate( size_type n, const void* /*hint*/ =0 ) {
-        return static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) );
-    }
-
-    //! Free previously allocated block of memory
-    void deallocate( pointer p, size_type ) {
-        scalable_free( p );
-    }
-
-    //! Largest value for which method allocate might succeed.
-    size_type max_size() const throw() {
-        size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_type);
-        return (absolutemax > 0 ? absolutemax : 1);
-    }
-#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-    template<typename U, typename... Args>
-    void construct(U *p, Args&&... args)
-        { ::new((void *)p) U(std::forward<Args>(args)...); }
-#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-    void construct( pointer p, value_type&& value ) { ::new((void*)(p)) value_type( std::move( value ) ); }
-#endif
-    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}
-#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-    void destroy( pointer p ) {p->~value_type();}
-};
-
-#if _MSC_VER && !defined(__INTEL_COMPILER)
-    #pragma warning (pop)
-#endif // warning 4100 is back
-
-//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
-/** @ingroup memory_allocation */
-template<>
-class scalable_allocator<void> {
-public:
-    typedef void* pointer;
-    typedef const void* const_pointer;
-    typedef void value_type;
-    template<class U> struct rebind {
-        typedef scalable_allocator<U> other;
-    };
-};
-
-template<typename T, typename U>
-inline bool operator==( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return true;}
-
-template<typename T, typename U>
-inline bool operator!=( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return false;}
-
-} // namespace tbb
-
-#if _MSC_VER
-    #if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE)
-        #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1
-    #endif
-
-    #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE
-        #ifdef _DEBUG
-            #pragma comment(lib, "tbbmalloc_debug.lib")
-        #else
-            #pragma comment(lib, "tbbmalloc.lib")
-        #endif
-    #endif
-
-#endif
-
-#endif /* __cplusplus */
-
-#if !defined(__cplusplus) && __ICC==1100
-    #pragma warning (pop)
-#endif // ICC 11.0 warning 991 is back
-
-#endif /* __TBB_scalable_allocator_H */
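
// Taken together, the C entry points and the scalable_allocator template
// deleted above were used like this; a sketch assuming the legacy tbbmalloc
// library is linked in.
#include "tbb/scalable_allocator.h"
#include <cstdio>
#include <vector>

int main() {
    // C-style interface: malloc/realloc/free analogues.
    void* raw = scalable_malloc( 1024 );
    if ( !raw ) return 1;
    std::printf( "usable size: %u bytes\n", (unsigned) scalable_msize( raw ) );
    void* grown = scalable_realloc( raw, 4096 );
    scalable_free( grown ? grown : raw );

    // STL-style interface: a drop-in allocator for standard containers.
    std::vector<int, tbb::scalable_allocator<int> > v;
    for ( int i = 0; i != 100; ++i )
        v.push_back( i );

    // Release the allocator's internal per-thread buffers (declared above).
    scalable_allocation_command( TBBMALLOC_CLEAN_ALL_BUFFERS, NULL );
    return 0;
}
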
diff --git a/inst/include/tbb/spin_mutex.h b/inst/include/tbb/spin_mutex.h
deleted file mode 100644
index a7ed31be4..000000000
--- a/inst/include/tbb/spin_mutex.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_spin_mutex_H
-#define __TBB_spin_mutex_H
-
-#include <cstddef>
-#include <new>
-#include "aligned_space.h"
-#include "tbb_stddef.h"
-#include "tbb_machine.h"
-#include "tbb_profiling.h"
-#include "internal/_mutex_padding.h"
-
-namespace tbb {
-
-//! A lock that occupies a single byte.
-/** A spin_mutex is a spin mutex that fits in a single byte.
-    It should be used only for locking short critical sections
-    (typically less than 20 instructions) when fairness is not an issue.
-    If zero-initialized, the mutex is considered unheld.
-    @ingroup synchronization */
-class spin_mutex : internal::mutex_copy_deprecated_and_disabled {
-    //! 0 if lock is released, 1 if lock is acquired.
-    __TBB_atomic_flag flag;
-
-public:
-    //! Construct unacquired lock.
-    /** Equivalent to zero-initialization of *this. */
-    spin_mutex() : flag(0) {
-#if TBB_USE_THREADING_TOOLS
-        internal_construct();
-#endif
-    }
-
-    //! Represents acquisition of a mutex.
-    class scoped_lock : internal::no_copy {
-    private:
-        //! Points to currently held mutex, or NULL if no lock is held.
-        spin_mutex* my_mutex;
-
-        //! Value to store into spin_mutex::flag to unlock the mutex.
-        /** This variable is no longer used. Instead, 0 and 1 are used to
-            represent that the lock is free and acquired, respectively.
-            We keep the member variable here to ensure backward compatibility */
-        __TBB_Flag my_unlock_value;
-
-        //! Like acquire, but with ITT instrumentation.
-        void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );
-
-        //! Like try_acquire, but with ITT instrumentation.
-        bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );
-
-        //! Like release, but with ITT instrumentation.
-        void __TBB_EXPORTED_METHOD internal_release();
-
-        friend class spin_mutex;
-
-    public:
-        //! Construct without acquiring a mutex.
-        scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}
-
-        //! Construct and acquire lock on a mutex.
-        scoped_lock( spin_mutex& m ) : my_unlock_value(0) {
-            internal::suppress_unused_warning(my_unlock_value);
-#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
-            my_mutex=NULL;
-            internal_acquire(m);
-#else
-            my_mutex=&m;
-            __TBB_LockByte(m.flag);
-#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
-        }
-
-        //! Acquire lock.
-        void acquire( spin_mutex& m ) {
-#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
-            internal_acquire(m);
-#else
-            my_mutex = &m;
-            __TBB_LockByte(m.flag);
-#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
-        }
-
-        //! Try acquiring lock (non-blocking)
-        /** Return true if lock acquired; false otherwise. */
-        bool try_acquire( spin_mutex& m ) {
-#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
-            return internal_try_acquire(m);
-#else
-            bool result = __TBB_TryLockByte(m.flag);
-            if( result )
-                my_mutex = &m;
-            return result;
-#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
-        }
-
-        //! Release lock
-        void release() {
-#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
-            internal_release();
-#else
-            __TBB_UnlockByte(my_mutex->flag);
-            my_mutex = NULL;
-#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
-        }
-
-        //! Destroy lock. If holding a lock, releases the lock first.
-        ~scoped_lock() {
-            if( my_mutex ) {
-#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
-                internal_release();
-#else
-                __TBB_UnlockByte(my_mutex->flag);
-#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
-            }
-        }
-    };
-
-    //! Internal constructor with ITT instrumentation.
-    void __TBB_EXPORTED_METHOD internal_construct();
-
-    // Mutex traits
-    static const bool is_rw_mutex = false;
-    static const bool is_recursive_mutex = false;
-    static const bool is_fair_mutex = false;
-
-    // ISO C++0x compatibility methods
-
-    //! Acquire lock
-    void lock() {
-#if TBB_USE_THREADING_TOOLS
-        aligned_space<scoped_lock,1> tmp;
-        new(tmp.begin()) scoped_lock(*this);
-#else
-        __TBB_LockByte(flag);
-#endif /* TBB_USE_THREADING_TOOLS*/
-    }
-
-    //! Try acquiring lock (non-blocking)
-    /** Return true if lock acquired; false otherwise. */
-    bool try_lock() {
-#if TBB_USE_THREADING_TOOLS
-        aligned_space<scoped_lock,1> tmp;
-        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
-#else
-        return __TBB_TryLockByte(flag);
-#endif /* TBB_USE_THREADING_TOOLS*/
-    }
-
-    //! Release lock
-    void unlock() {
-#if TBB_USE_THREADING_TOOLS
-        aligned_space<scoped_lock,1> tmp;
-        scoped_lock& s = *tmp.begin();
-        s.my_mutex = this;
-        s.internal_release();
-#else
-        __TBB_store_with_release(flag, 0);
-#endif /* TBB_USE_THREADING_TOOLS */
-    }
-
-    friend class scoped_lock;
-}; // end of spin_mutex
-
-__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)
-
-} // namespace tbb
-
-#if ( __TBB_x86_32 || __TBB_x86_64 )
-#include "internal/_x86_eliding_mutex_impl.h"
-#endif
-
-namespace tbb {
-//! A cross-platform spin mutex with speculative lock acquisition.
-/** On platforms with proper HW support, this lock may speculatively execute
-    its critical sections, using HW mechanisms to detect real data races and
-    ensure atomicity of the critical sections. In particular, it uses
-    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).
-    Without such HW support, it behaves like a spin_mutex.
-    It should be used for locking short critical sections where the lock is
-    contended but the data it protects are not. If zero-initialized, the
-    mutex is considered unheld.
-    @ingroup synchronization */
-
-#if ( __TBB_x86_32 || __TBB_x86_64 )
-typedef interface7::internal::padded_mutex<internal::x86_eliding_mutex,false> speculative_spin_mutex;
-#else
-typedef interface7::internal::padded_mutex<spin_mutex,false> speculative_spin_mutex;
-#endif
-__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex)
-
-} // namespace tbb
-
-#endif /* __TBB_spin_mutex_H */
diff --git a/inst/include/tbb/spin_rw_mutex.h b/inst/include/tbb/spin_rw_mutex.h
deleted file mode 100644
index 61151c179..000000000
--- a/inst/include/tbb/spin_rw_mutex.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_spin_rw_mutex_H -#define __TBB_spin_rw_mutex_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" -#include "internal/_mutex_padding.h" - -namespace tbb { - -#if __TBB_TSX_AVAILABLE -namespace interface8 { namespace internal { - class x86_rtm_rw_mutex; -}} -#endif - -class spin_rw_mutex_v3; -typedef spin_rw_mutex_v3 spin_rw_mutex; - -//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference -/** @ingroup synchronization */ -class spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled { - //! @cond INTERNAL - - //! Internal acquire write lock. - bool __TBB_EXPORTED_METHOD internal_acquire_writer(); - - //! Out of line code for releasing a write lock. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_release_writer(); - - //! Internal acquire read lock. - void __TBB_EXPORTED_METHOD internal_acquire_reader(); - - //! Internal upgrade reader to become a writer. - bool __TBB_EXPORTED_METHOD internal_upgrade(); - - //! Out of line code for downgrading a writer to a reader. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_downgrade(); - - //! Internal release read lock. - void __TBB_EXPORTED_METHOD internal_release_reader(); - - //! Internal try_acquire write lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_writer(); - - //! Internal try_acquire read lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_reader(); - - //! @endcond -public: - //! Construct unacquired mutex. - spin_rw_mutex_v3() : state(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - -#if TBB_USE_ASSERT - //! Destructor asserts if the mutex is acquired, i.e. state is zero. - ~spin_rw_mutex_v3() { - __TBB_ASSERT( !state, "destruction of an acquired mutex"); - }; -#endif /* TBB_USE_ASSERT */ - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { -#if __TBB_TSX_AVAILABLE - friend class tbb::interface8::internal::x86_rtm_rw_mutex; - // helper methods for x86_rtm_rw_mutex - spin_rw_mutex *internal_get_mutex() const { return mutex; } - void internal_set_mutex(spin_rw_mutex* m) { mutex = m; } -#endif - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : mutex(NULL), is_writer(false) {} - - //! Acquire lock on given mutex. - scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. 
- void acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - is_writer = write; - mutex = &m; - if( write ) mutex->internal_acquire_writer(); - else mutex->internal_acquire_reader(); - } - - //! Upgrade reader to become a writer. - /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); - is_writer = true; - return mutex->internal_upgrade(); - } - - //! Release lock. - void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - spin_rw_mutex *m = mutex; - mutex = NULL; -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( is_writer ) m->internal_release_writer(); - else m->internal_release_reader(); -#else - if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); - else __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Downgrade writer to become a reader. - bool downgrade_to_reader() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - mutex->internal_downgrade(); -#else - __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER)); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - is_writer = false; - return true; - } - - //! Try acquire lock on given mutex. - bool try_acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - bool result; - is_writer = write; - result = write? m.internal_try_acquire_writer() - : m.internal_try_acquire_reader(); - if( result ) - mutex = &m; - return result; - } - - protected: - - //! The pointer to the current mutex that is held, or NULL if no mutex is held. - spin_rw_mutex* mutex; - - //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock. - /** Not defined if not holding a lock. */ - bool is_writer; - }; - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire writer lock - void lock() {internal_acquire_writer();} - - //! Try acquiring writer lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() {return internal_try_acquire_writer();} - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( state&WRITER ) internal_release_writer(); - else internal_release_reader(); -#else - if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); - else __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - // Methods for reader locks that resemble ISO C++0x compatibility methods. - - //! Acquire reader lock - void lock_read() {internal_acquire_reader();} - - //! Try acquiring reader lock (non-blocking) - /** Return true if reader lock acquired; false otherwise. */ - bool try_lock_read() {return internal_try_acquire_reader();} - -protected: - typedef intptr_t state_t; - static const state_t WRITER = 1; - static const state_t WRITER_PENDING = 2; - static const state_t READERS = ~(WRITER | WRITER_PENDING); - static const state_t ONE_READER = 4; - static const state_t BUSY = WRITER | READERS; - //! 
State of lock
-    /** Bit 0 = writer is holding lock
-        Bit 1 = request by a writer to acquire lock (hint to readers to wait)
-        Bit 2..N = number of readers holding lock */
-    state_t state;
-
-private:
-    void __TBB_EXPORTED_METHOD internal_construct();
-};
-
-__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex)
-
-} // namespace tbb
-
-#if __TBB_TSX_AVAILABLE
-#include "internal/_x86_rtm_rw_mutex_impl.h"
-#endif
-
-namespace tbb {
-namespace interface8 {
-//! A cross-platform spin reader/writer mutex with speculative lock acquisition.
-/** On platforms with proper HW support, this lock may speculatively execute
-    its critical sections, using HW mechanisms to detect real data races and
-    ensure atomicity of the critical sections. In particular, it uses
-    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).
-    Without such HW support, it behaves like a spin_rw_mutex.
-    It should be used for locking short critical sections where the lock is
-    contended but the data it protects are not.
-    @ingroup synchronization */
-#if __TBB_TSX_AVAILABLE
-typedef interface7::internal::padded_mutex<tbb::interface8::internal::x86_rtm_rw_mutex,true> speculative_spin_rw_mutex;
-#else
-typedef interface7::internal::padded_mutex<spin_rw_mutex,true> speculative_spin_rw_mutex;
-#endif
-} // namespace interface8
-
-using interface8::speculative_spin_rw_mutex;
-__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_rw_mutex)
-} // namespace tbb
-#endif /* __TBB_spin_rw_mutex_H */
diff --git a/inst/include/tbb/task.h b/inst/include/tbb/task.h
deleted file mode 100644
index 5dabcd97a..000000000
--- a/inst/include/tbb/task.h
+++ /dev/null
@@ -1,1007 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_task_H
-#define __TBB_task_H
-
-#include "tbb_stddef.h"
-#include "tbb_machine.h"
-#include <climits>
-
-typedef struct ___itt_caller *__itt_caller;
-
-namespace tbb {
-
-class task;
-class task_list;
-class task_group_context;
-
-// MSVC does not allow taking the address of a member that was defined
-// privately in task_base and made public in class task via a using declaration.
-#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
-#define __TBB_TASK_BASE_ACCESS public
-#else
-#define __TBB_TASK_BASE_ACCESS private
-#endif
-
-namespace internal { //< @cond INTERNAL
-
-    class allocate_additional_child_of_proxy: no_assign {
-        //!
No longer used, but retained for binary layout compatibility. Always NULL. - task* self; - task& parent; - public: - explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; }; -} //< namespace internal @endcond - -namespace interface5 { - namespace internal { - //! Base class for methods that became static in TBB 3.0. - /** TBB's evolution caused the "this" argument for several methods to become obsolete. - However, for backwards binary compatibility, the new methods need distinct names, - otherwise the One Definition Rule would be broken. Hence the new methods are - defined in this private base class, and then exposed in class task via - using declarations. */ - class task_base: tbb::internal::no_copy { - __TBB_TASK_BASE_ACCESS: - friend class tbb::task; - - //! Schedule task for execution when a worker becomes available. - static void spawn( task& t ); - - //! Spawn multiple tasks and clear list. - static void spawn( task_list& list ); - - //! Like allocate_child, except that task's parent becomes "t", not this. - /** Typically used in conjunction with schedule_to_reexecute to implement while loops. - Atomically increments the reference count of t.parent() */ - static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) { - return tbb::internal::allocate_additional_child_of_proxy(t); - } - - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - static void __TBB_EXPORTED_FUNC destroy( task& victim ); - }; - } // internal -} // interface5 - -//! @cond INTERNAL -namespace internal { - - class scheduler: no_copy { - public: - //! For internal use only - virtual void spawn( task& first, task*& next ) = 0; - - //! For internal use only - virtual void wait_for_all( task& parent, task* child ) = 0; - - //! For internal use only - virtual void spawn_root_and_wait( task& first, task*& next ) = 0; - - //! Pure virtual destructor; - // Have to have it just to shut up overzealous compilation warnings - virtual ~scheduler() = 0; - - //! For internal use only - virtual void enqueue( task& t, void* reserved ) = 0; - }; - - //! A reference count - /** Should always be non-negative. A signed type is used so that underflow can be detected. */ - typedef intptr_t reference_count; - - //! An id as used for specifying affinity. 
- typedef unsigned short affinity_id; - -#if __TBB_TASK_GROUP_CONTEXT - class generic_scheduler; - - struct context_list_node_t { - context_list_node_t *my_prev, - *my_next; - }; - - class allocate_root_with_context_proxy: no_assign { - task_group_context& my_context; - public: - allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - class allocate_root_proxy: no_assign { - public: - static task& __TBB_EXPORTED_FUNC allocate( size_t size ); - static void __TBB_EXPORTED_FUNC free( task& ); - }; - - class allocate_continuation_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - class allocate_child_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - //! Memory prefix to a task object. - /** This class is internal to the library. - Do not reference it directly, except within the library itself. - Fields are ordered in way that preserves backwards compatibility and yields - good packing on typical 32-bit and 64-bit platforms. - - In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64 - architectures correspondingly, consider dynamic setting of task_alignment - and task_prefix_reservation_size based on the maximal operand size supported - by the current CPU. - - @ingroup task_scheduling */ - class task_prefix { - private: - friend class tbb::task; - friend class tbb::interface5::internal::task_base; - friend class tbb::task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_additional_child_of_proxy; - -#if __TBB_TASK_GROUP_CONTEXT - //! Shared context that is used to communicate asynchronous state changes - /** Currently it is used to broadcast cancellation requests generated both - by users and as the result of unhandled exceptions in the task::execute() - methods. */ - task_group_context *context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! The scheduler that allocated the task, or NULL if the task is big. - /** Small tasks are pooled by the scheduler that allocated the task. - If a scheduler needs to free a small task allocated by another scheduler, - it returns the task to that other scheduler. This policy avoids - memory space blowup issues for memory allocators that allocate from - thread-specific pools. */ - scheduler* origin; - -#if __TBB_TASK_PRIORITY - union { -#endif /* __TBB_TASK_PRIORITY */ - //! Obsolete. The scheduler that owns the task. - /** Retained only for the sake of backward binary compatibility. - Still used by inline methods in the task.h header. **/ - scheduler* owner; - -#if __TBB_TASK_PRIORITY - //! Pointer to the next offloaded lower priority task. - /** Used to maintain a list of offloaded tasks inside the scheduler. **/ - task* next_offloaded; - }; -#endif /* __TBB_TASK_PRIORITY */ - - //! The task whose reference count includes me. - /** In the "blocking style" of programming, this field points to the parent task. - In the "continuation-passing style" of programming, this field points to the - continuation of the parent. */ - tbb::task* parent; - - //! Reference count used for synchronization. 
- /** In the "continuation-passing style" of programming, this field is - the difference of the number of allocated children minus the - number of children that have completed. - In the "blocking style" of programming, this field is one more than the difference. */ - __TBB_atomic reference_count ref_count; - - //! Obsolete. Used to be scheduling depth before TBB 2.2 - /** Retained only for the sake of backward binary compatibility. - Not used by TBB anymore. **/ - int depth; - - //! A task::state_type, stored as a byte for compactness. - /** This state is exposed to users via method task::state(). */ - unsigned char state; - - //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness. - /** 0x0 -> version 1.0 task - 0x1 -> version >=2.1 task - 0x10 -> task was enqueued - 0x20 -> task_proxy - 0x40 -> task has live ref_count - 0x80 -> a stolen task */ - unsigned char extra_state; - - affinity_id affinity; - - //! "next" field for list of task - tbb::task* next; - - //! The task corresponding to this task_prefix. - tbb::task& task() {return *reinterpret_cast(this+1);} - }; - -} // namespace internal -//! @endcond - -#if __TBB_TASK_GROUP_CONTEXT - -#if __TBB_TASK_PRIORITY -namespace internal { - static const int priority_stride_v4 = INT_MAX / 4; -} - -enum priority_t { - priority_normal = internal::priority_stride_v4 * 2, - priority_low = priority_normal - internal::priority_stride_v4, - priority_high = priority_normal + internal::priority_stride_v4 -}; - -#endif /* __TBB_TASK_PRIORITY */ - -#if TBB_USE_CAPTURED_EXCEPTION - class tbb_exception; -#else - namespace internal { - class tbb_exception_ptr; - } -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -class task_scheduler_init; -namespace interface7 { class task_arena; } - -//! Used to form groups of tasks -/** @ingroup task_scheduling - The context services explicit cancellation requests from user code, and unhandled - exceptions intercepted during tasks execution. Intercepting an exception results - in generating internal cancellation requests (which is processed in exactly the - same way as external ones). - - The context is associated with one or more root tasks and defines the cancellation - group that includes all the descendants of the corresponding root task(s). Association - is established when a context object is passed as an argument to the task::allocate_root() - method. See task_group_context::task_group_context for more details. - - The context can be bound to another one, and other contexts can be bound to it, - forming a tree-like structure: parent -> this -> children. Arrows here designate - cancellation propagation direction. If a task in a cancellation group is cancelled - all the other tasks in this group and groups bound to it (as children) get cancelled too. - - IMPLEMENTATION NOTE: - When adding new members to task_group_context or changing types of existing ones, - update the size of both padding buffers (_leading_padding and _trailing_padding) - appropriately. See also VERSIONING NOTE at the constructor definition below. 
**/ -class task_group_context : internal::no_copy { -private: - friend class internal::generic_scheduler; - friend class task_scheduler_init; - friend class interface7::task_arena; - -#if TBB_USE_CAPTURED_EXCEPTION - typedef tbb_exception exception_container_type; -#else - typedef internal::tbb_exception_ptr exception_container_type; -#endif - - enum version_traits_word_layout { - traits_offset = 16, - version_mask = 0xFFFF, - traits_mask = 0xFFFFul << traits_offset - }; - -public: - enum kind_type { - isolated, - bound - }; - - enum traits_type { - exact_exception = 0x0001ul << traits_offset, -#if __TBB_FP_CONTEXT - fp_settings = 0x0002ul << traits_offset, -#endif - concurrent_wait = 0x0004ul << traits_offset, -#if TBB_USE_CAPTURED_EXCEPTION - default_traits = 0 -#else - default_traits = exact_exception -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - }; - -private: - enum state { - may_have_children = 1, - // the following enumerations must be the last, new 2^x values must go above - next_state_value, low_unused_state_bit = (next_state_value-1)*2 - }; - - union { - //! Flavor of this context: bound or isolated. - // TODO: describe asynchronous use, and whether any memory semantics are needed - __TBB_atomic kind_type my_kind; - uintptr_t _my_kind_aligner; - }; - - //! Pointer to the context of the parent cancellation group. NULL for isolated contexts. - task_group_context *my_parent; - - //! Used to form the thread specific list of contexts without additional memory allocation. - /** A context is included into the list of the current thread when its binding to - its parent happens. Any context can be present in the list of one thread only. **/ - internal::context_list_node_t my_node; - - //! Used to set and maintain stack stitching point for Intel Performance Tools. - __itt_caller itt_caller; - - //! Leading padding protecting accesses to frequently used members from false sharing. - /** Read accesses to the field my_cancellation_requested are on the hot path inside - the scheduler. This padding ensures that this field never shares the same cache - line with a local variable that is frequently written to. **/ - char _leading_padding[internal::NFS_MaxLineSize - - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t) - - sizeof(__itt_caller) -#if __TBB_FP_CONTEXT - - sizeof(internal::cpu_ctl_env_space) -#endif - ]; - -#if __TBB_FP_CONTEXT - //! Space for platform-specific FPU settings. - /** Must only be accessed inside TBB binaries, and never directly in user - code or inline methods. */ - internal::cpu_ctl_env_space my_cpu_ctl_env; -#endif - - //! Specifies whether cancellation was requested for this task group. - uintptr_t my_cancellation_requested; - - //! Version for run-time checks and behavioral traits of the context. - /** Version occupies low 16 bits, and traits (zero or more ORed enumerators - from the traits_type enumerations) take the next 16 bits. - Original (zeroth) version of the context did not support any traits. **/ - uintptr_t my_version_and_traits; - - //! Pointer to the container storing exception being propagated across this task group. - exception_container_type *my_exception; - - //! Scheduler instance that registered this context in its thread specific list. - internal::generic_scheduler *my_owner; - - //! Internal state (combination of state flags, currently only may_have_children). - uintptr_t my_state; - -#if __TBB_TASK_PRIORITY - //! 
Priority level of the task group (in normalized representation) - intptr_t my_priority; -#endif /* __TBB_TASK_PRIORITY */ - - //! Trailing padding protecting accesses to frequently used members from false sharing - /** \sa _leading_padding **/ - char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*) -#if __TBB_TASK_PRIORITY - - sizeof(intptr_t) -#endif /* __TBB_TASK_PRIORITY */ - ]; - -public: - //! Default & binding constructor. - /** By default a bound context is created. That is this context will be bound - (as child) to the context of the task calling task::allocate_root(this_context) - method. Cancellation requests passed to the parent context are propagated - to all the contexts bound to it. Similarly priority change is propagated - from the parent context to its children. - - If task_group_context::isolated is used as the argument, then the tasks associated - with this context will never be affected by events in any other context. - - Creating isolated contexts involve much less overhead, but they have limited - utility. Normally when an exception occurs in an algorithm that has nested - ones running, it is desirably to have all the nested algorithms cancelled - as well. Such a behavior requires nested algorithms to use bound contexts. - - There is one good place where using isolated algorithms is beneficial. It is - a master thread. That is if a particular algorithm is invoked directly from - the master thread (not from a TBB task), supplying it with explicitly - created isolated context will result in a faster algorithm startup. - - VERSIONING NOTE: - Implementation(s) of task_group_context constructor(s) cannot be made - entirely out-of-line because the run-time version must be set by the user - code. This will become critically important for binary compatibility, if - we ever have to change the size of the context object. - - Boosting the runtime version will also be necessary if new data fields are - introduced in the currently unused padding areas and these fields are updated - by inline methods. **/ - task_group_context ( kind_type relation_with_parent = bound, - uintptr_t traits = default_traits ) - : my_kind(relation_with_parent) - , my_version_and_traits(2 | traits) - { - init(); - } - - // Do not introduce standalone unbind method since it will break state propagation assumptions - __TBB_EXPORTED_METHOD ~task_group_context (); - - //! Forcefully reinitializes the context after the task tree it was associated with is completed. - /** Because the method assumes that all the tasks that used to be associated with - this context have already finished, calling it while the context is still - in use somewhere in the task hierarchy leads to undefined behavior. - - IMPORTANT: This method is not thread safe! - - The method does not change the context's parent if it is set. **/ - void __TBB_EXPORTED_METHOD reset (); - - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. - - Note that canceling never fails. When false is returned, it just means that - another thread (or this one) has already sent cancellation request to this - context or to one of its ancestors (if this context is bound). It is guaranteed - that when this method is concurrently called on the same not yet cancelled - context, true will be returned by one and only one invocation. **/ - bool __TBB_EXPORTED_METHOD cancel_group_execution (); - - //! 
Returns true if the context received cancellation request.
-    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;
-
-    //! Records the pending exception, and cancels the task group.
-    /** May be called only from inside a catch-block. If the context is already
-        cancelled, does nothing.
-        The method brings the task group associated with this context exactly into
-        the state it would be in, if one of its tasks threw the currently pending
-        exception during its execution. In other words, it emulates the actions
-        of the scheduler's dispatch loop exception handler. **/
-    void __TBB_EXPORTED_METHOD register_pending_exception ();
-
-#if __TBB_FP_CONTEXT
-    //! Captures the current FPU control settings to the context.
-    /** Because the method assumes that all the tasks that used to be associated with
-        this context have already finished, calling it while the context is still
-        in use somewhere in the task hierarchy leads to undefined behavior.
-
-        IMPORTANT: This method is not thread safe!
-
-        The method does not change the FPU control settings of the context's parent. **/
-    void __TBB_EXPORTED_METHOD capture_fp_settings ();
-#endif
-
-#if __TBB_TASK_PRIORITY
-    //! Changes priority of the task group
-    void set_priority ( priority_t );
-
-    //! Retrieves current priority of the current task group
-    priority_t priority () const;
-#endif /* __TBB_TASK_PRIORITY */
-
-protected:
-    //! Out-of-line part of the constructor.
-    /** Singled out to ensure backward binary compatibility of the future versions. **/
-    void __TBB_EXPORTED_METHOD init ();
-
-private:
-    friend class task;
-    friend class internal::allocate_root_with_context_proxy;
-
-    static const kind_type binding_required = bound;
-    static const kind_type binding_completed = kind_type(bound+1);
-    static const kind_type detached = kind_type(binding_completed+1);
-    static const kind_type dying = kind_type(detached+1);
-
-    //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.
-    template <typename T>
-    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
-
-    //! Registers this context with the local scheduler and binds it to its parent context
-    void bind_to ( internal::generic_scheduler *local_sched );
-
-    //! Registers this context with the local scheduler
-    void register_with ( internal::generic_scheduler *local_sched );
-
-#if __TBB_FP_CONTEXT
-    //! Copies FPU control setting from another context
-    // TODO: Consider adding #else stub in order to omit #if sections in other code
-    void copy_fp_settings( const task_group_context &src );
-#endif /* __TBB_FP_CONTEXT */
-}; // class task_group_context
-
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-//! Base class for user-defined tasks.
-/** @ingroup task_scheduling */
-class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {
-
-    //! Set reference count
-    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );
-
-    //! Decrement reference count and return its new value.
-    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
-
-protected:
-    //! Default constructor.
-    task() {prefix().extra_state=1;}
-
-public:
-    //! Destructor.
-    virtual ~task() {}
-
-    //! Should be overridden by derived classes.
-    virtual task* execute() = 0;
-
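A hedged sketch of the minimal way the task base class above was implemented and driven; HelloTask and the driver function are illustrative names, not code from this header:

    #include "tbb/task.h"

    class HelloTask : public tbb::task {
        /*override*/ tbb::task* execute() {
            // ... do some work ...
            return NULL;                 // no task to bypass to
        }
    };

    void run_hello() {
        // allocate_root() + placement new is the idiom this header expects
        HelloTask& root = *new(tbb::task::allocate_root()) HelloTask;
        tbb::task::spawn_root_and_wait(root);   // run and block until done
    }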
-    //! Enumeration of task states that the scheduler considers.
-    enum state_type {
-        //! task is running, and will be destroyed after method execute() completes.
-        executing,
-        //! task to be rescheduled.
-        reexecute,
-        //! task is in ready pool, or is going to be put there, or was just taken off.
-        ready,
-        //! task object is freshly allocated or recycled.
-        allocated,
-        //! task object is on free list, or is going to be put there, or was just taken off.
-        freed,
-        //! task to be recycled as continuation
-        recycle
-#if __TBB_RECYCLE_TO_ENQUEUE
-        //! task to be scheduled for starvation-resistant execution
-        ,to_enqueue
-#endif
-    };
-
-    //------------------------------------------------------------------------
-    // Allocating tasks
-    //------------------------------------------------------------------------
-
-    //! Returns proxy for overloaded new that allocates a root task.
-    static internal::allocate_root_proxy allocate_root() {
-        return internal::allocate_root_proxy();
-    }
-
-#if __TBB_TASK_GROUP_CONTEXT
-    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
-    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
-        return internal::allocate_root_with_context_proxy(ctx);
-    }
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-    //! Returns proxy for overloaded new that allocates a continuation task of *this.
-    /** The continuation's parent becomes the parent of *this. */
-    internal::allocate_continuation_proxy& allocate_continuation() {
-        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
-    }
-
-    //! Returns proxy for overloaded new that allocates a child task of *this.
-    internal::allocate_child_proxy& allocate_child() {
-        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
-    }
-
-    //! Define recommended static form via import from base class.
-    using task_base::allocate_additional_child_of;
-
-#if __TBB_DEPRECATED_TASK_INTERFACE
-    //! Destroy a task.
-    /** Usually, calling this method is unnecessary, because a task is
-        implicitly deleted after its execute() method runs. However,
-        sometimes a task needs to be explicitly deallocated, such as
-        when a root task is used as the parent in spawn_and_wait_for_all. */
-    void __TBB_EXPORTED_METHOD destroy( task& t );
-#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
-    //! Define recommended static form via import from base class.
-    using task_base::destroy;
-#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
-
-    //------------------------------------------------------------------------
-    // Recycling of tasks
-    //------------------------------------------------------------------------
-
-    //! Change this to be a continuation of its former self.
-    /** The caller must guarantee that the task's refcount does not become zero until
-        after the method execute() returns. Typically, this is done by having
-        method execute() return a pointer to a child of the task. If the guarantee
-        cannot be made, use method recycle_as_safe_continuation instead.
-
-        Because of the hazard, this method may be deprecated in the future. */
-    void recycle_as_continuation() {
-        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
-        prefix().state = allocated;
-    }
-
-    //! Recommended to use, safe variant of recycle_as_continuation
-    /** For safety, it requires additional increment of ref_count.
-        With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */
-    void recycle_as_safe_continuation() {
-        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
-        prefix().state = recycle;
-    }
-
-    //! Change this to be a child of new_parent.
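The continuation-passing idiom that allocate_continuation() and the recycling methods above support is easiest to see in the canonical Fibonacci example from the TBB tutorial; a hedged sketch, with FibTask and FibContinuation as illustrative names:

    #include "tbb/task.h"

    class FibContinuation : public tbb::task {
    public:
        long* const sum;
        long x, y;
        FibContinuation(long* sum_) : sum(sum_) {}
        tbb::task* execute() { *sum = x + y; return NULL; }
    };

    class FibTask : public tbb::task {
        const long n; long* const sum;
    public:
        FibTask(long n_, long* sum_) : n(n_), sum(sum_) {}
        tbb::task* execute() {
            if (n < 2) { *sum = n; return NULL; }
            // The continuation adopts this task's parent ...
            FibContinuation& c = *new(allocate_continuation()) FibContinuation(sum);
            // ... and its ref_count waits for the two children.
            FibTask& a = *new(c.allocate_child()) FibTask(n - 2, &c.x);
            FibTask& b = *new(c.allocate_child()) FibTask(n - 1, &c.y);
            c.set_ref_count(2);
            spawn(b);
            return &a;        // scheduler bypass: run 'a' immediately
        }
    };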
- void recycle_as_child_of( task& new_parent ) { - internal::task_prefix& p = prefix(); - __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" ); - __TBB_ASSERT( p.parent==NULL, "parent must be null" ); - __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" ); - __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" ); - p.state = allocated; - p.parent = &new_parent; -#if __TBB_TASK_GROUP_CONTEXT - p.context = new_parent.prefix().context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - - //! Schedule this for reexecution after current execute() returns. - /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */ - void recycle_to_reexecute() { - __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" ); - prefix().state = reexecute; - } - -#if __TBB_RECYCLE_TO_ENQUEUE - //! Schedule this to enqueue after descendant tasks complete. - /** Save enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. */ - void recycle_to_enqueue() { - __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); - prefix().state = to_enqueue; - } -#endif /* __TBB_RECYCLE_TO_ENQUEUE */ - - //------------------------------------------------------------------------ - // Spawning and blocking - //------------------------------------------------------------------------ - - //! Set reference count - void set_ref_count( int count ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_set_ref_count(count); -#else - prefix().ref_count = count; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Atomically increment reference count and returns its old value. - /** Has acquire semantics */ - void increment_ref_count() { - __TBB_FetchAndIncrementWacquire( &prefix().ref_count ); - } - - //! Atomically decrement reference count and returns its new value. - /** Has release semantics. */ - int decrement_ref_count() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return int(internal_decrement_ref_count()); -#else - return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Define recommended static forms via import from base class. - using task_base::spawn; - - //! Similar to spawn followed by wait_for_all, but more efficient. - void spawn_and_wait_for_all( task& child ) { - prefix().owner->wait_for_all( *this, &child ); - } - - //! Similar to spawn followed by wait_for_all, but more efficient. - void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list ); - - //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. - static void spawn_root_and_wait( task& root ) { - root.prefix().owner->spawn_root_and_wait( root, root.prefix().next ); - } - - //! Spawn root tasks on list and wait for all of them to finish. - /** If there are more tasks than worker threads, the tasks are spawned in - order of front to back. */ - static void spawn_root_and_wait( task_list& root_list ); - - //! Wait for reference count to become one, and set reference count to zero. - /** Works on tasks while waiting. */ - void wait_for_all() { - prefix().owner->wait_for_all( *this, NULL ); - } - - //! Enqueue task for starvation-resistant execution. 
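For contrast with the continuation sketch above, the blocking style that set_ref_count(), spawn_and_wait_for_all() and spawn_root_and_wait() document looks roughly like this; again a hedged sketch, not code from this header:

    #include "tbb/task.h"

    class FibTaskBlocking : public tbb::task {
        const long n; long* const sum;
    public:
        FibTaskBlocking(long n_, long* sum_) : n(n_), sum(sum_) {}
        tbb::task* execute() {
            if (n < 2) { *sum = n; return NULL; }
            long x, y;
            FibTaskBlocking& a = *new(allocate_child()) FibTaskBlocking(n - 1, &x);
            FibTaskBlocking& b = *new(allocate_child()) FibTaskBlocking(n - 2, &y);
            set_ref_count(3);             // two children + one for the wait itself
            spawn(b);
            spawn_and_wait_for_all(a);    // executes other tasks while waiting
            *sum = x + y;
            return NULL;
        }
    };

    // Typical driver:
    //   long sum;
    //   FibTaskBlocking& root =
    //       *new(tbb::task::allocate_root()) FibTaskBlocking(20, &sum);
    //   tbb::task::spawn_root_and_wait(root);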
-#if __TBB_TASK_PRIORITY - /** The task will be enqueued on the normal priority level disregarding the - priority of its task group. - - The rationale of such semantics is that priority of an enqueued task is - statically fixed at the moment of its enqueuing, while task group priority - is dynamic. Thus automatic priority inheritance would be generally a subject - to the race, which may result in unexpected behavior. - - Use enqueue() overload with explicit priority value and task::group_priority() - method to implement such priority inheritance when it is really necessary. **/ -#endif /* __TBB_TASK_PRIORITY */ - static void enqueue( task& t ) { - t.prefix().owner->enqueue( t, NULL ); - } - -#if __TBB_TASK_PRIORITY - //! Enqueue task for starvation-resistant execution on the specified priority level. - static void enqueue( task& t, priority_t p ) { - __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" ); - t.prefix().owner->enqueue( t, (void*)p ); - } -#endif /* __TBB_TASK_PRIORITY */ - - //! The innermost task being executed or destroyed by the current thread at the moment. - static task& __TBB_EXPORTED_FUNC self(); - - //! task on whose behalf this task is working, or NULL if this is a root. - task* parent() const {return prefix().parent;} - - //! sets parent task pointer to specified value - void set_parent(task* p) { -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT(prefix().context == p->prefix().context, "The tasks must be in the same context"); -#endif - prefix().parent = p; - } - -#if __TBB_TASK_GROUP_CONTEXT - //! This method is deprecated and will be removed in the future. - /** Use method group() instead. **/ - task_group_context* context() {return prefix().context;} - - //! Pointer to the task group descriptor. - task_group_context* group () { return prefix().context; } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! True if task was stolen from the task pool of another thread. - bool is_stolen_task() const { - return (prefix().extra_state & 0x80)!=0; - } - - //------------------------------------------------------------------------ - // Debugging - //------------------------------------------------------------------------ - - //! Current execution state - state_type state() const {return state_type(prefix().state);} - - //! The internal reference count. - int ref_count() const { -#if TBB_USE_ASSERT - internal::reference_count ref_count_ = prefix().ref_count; - __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error"); -#endif - return int(prefix().ref_count); - } - - //! Obsolete, and only retained for the sake of backward compatibility. Always returns true. - bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const; - - //------------------------------------------------------------------------ - // Affinity - //------------------------------------------------------------------------ - - //! An id as used for specifying affinity. - /** Guaranteed to be integral type. Value of 0 means no affinity. */ - typedef internal::affinity_id affinity_id; - - //! Set affinity for this task. - void set_affinity( affinity_id id ) {prefix().affinity = id;} - - //! Current affinity of this task - affinity_id affinity() const {return prefix().affinity;} - - //! Invoked by scheduler to notify task that it ran on unexpected thread. - /** Invoked before method execute() runs, if task is stolen, or task has - affinity but will be executed on another thread. - - The default action does nothing. 
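A minimal, hedged sketch of the affinity replay pattern that set_affinity() and note_affinity() enable; the persistent slot variable is illustrative:

    #include "tbb/task.h"

    class AffineTask : public tbb::task {
        tbb::task::affinity_id& slot;   // remembered across invocations
    public:
        AffineTask(tbb::task::affinity_id& s) : slot(s) {
            if (slot) set_affinity(slot);           // ask to run where we ran before
        }
        /*override*/ void note_affinity(affinity_id id) {
            slot = id;                              // record where we actually ran
        }
        /*override*/ tbb::task* execute() { /* ... */ return NULL; }
    };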
 */
-    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
-
-#if __TBB_TASK_GROUP_CONTEXT
-    //! Moves this task from its current group into another one.
-    /** Argument ctx specifies the new group.
-
-        The primary purpose of this method is to associate unique task group context
-        with a task allocated for subsequent enqueuing. In contrast to spawned tasks
-        enqueued ones normally outlive the scope where they were created. This makes
-        traditional usage model where task group context are allocated locally on
-        the stack inapplicable. Dynamic allocation of context objects is performance
-        inefficient. Method change_group() allows to make task group context object
-        a member of the task class, and then associate it with its containing task
-        object in the latter's constructor. **/
-    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
-
-    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
-    /** \return false if cancellation has already been requested, true otherwise. **/
-    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
-
-    //! Returns true if the context has received cancellation request.
-    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
-#else
-    bool is_cancelled () const { return false; }
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-#if __TBB_TASK_PRIORITY
-    //! Changes priority of the task group this task belongs to.
-    void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
-
-    //! Retrieves current priority of the task group this task belongs to.
-    priority_t group_priority () const { return prefix().context->priority(); }
-
-#endif /* __TBB_TASK_PRIORITY */
-
-private:
-    friend class interface5::internal::task_base;
-    friend class task_list;
-    friend class internal::scheduler;
-    friend class internal::allocate_root_proxy;
-#if __TBB_TASK_GROUP_CONTEXT
-    friend class internal::allocate_root_with_context_proxy;
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-    friend class internal::allocate_continuation_proxy;
-    friend class internal::allocate_child_proxy;
-    friend class internal::allocate_additional_child_of_proxy;
-
-    //! Get reference to corresponding task_prefix.
-    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
-    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
-        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
-    }
-}; // class task
-
-//! task that does nothing. Useful for synchronization.
-/** @ingroup task_scheduling */
-class empty_task: public task {
-    /*override*/ task* execute() {
-        return NULL;
-    }
-};
-
-//! @cond INTERNAL
-namespace internal {
-    template<typename F>
-    class function_task : public task {
-        F my_func;
-        /*override*/ task* execute() {
-            my_func();
-            return NULL;
-        }
-    public:
-        function_task( const F& f ) : my_func(f) {}
-    };
-} // namespace internal
-//! @endcond
-
-//! A list of children.
-/** Used for method task::spawn_children
-    @ingroup task_scheduling */
-class task_list: internal::no_copy {
-private:
-    task* first;
-    task** next_ptr;
-    friend class task;
-    friend class interface5::internal::task_base;
-public:
-    //! Construct empty list
-    task_list() : first(NULL), next_ptr(&first) {}
-
-    //! Destroys the list, but does not destroy the task objects.
-    ~task_list() {}
-
-    //! True if list is empty; false otherwise.
-    bool empty() const {return !first;}
-
-    //! Push task onto back of list.
- void push_back( task& task ) { - task.prefix().next = NULL; - *next_ptr = &task; - next_ptr = &task.prefix().next; - } - - //! Pop the front task from the list. - task& pop_front() { - __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" ); - task* result = first; - first = result->prefix().next; - if( !first ) next_ptr = &first; - return *result; - } - - //! Clear the list - void clear() { - first=NULL; - next_ptr=&first; - } -}; - -inline void interface5::internal::task_base::spawn( task& t ) { - t.prefix().owner->spawn( t, t.prefix().next ); -} - -inline void interface5::internal::task_base::spawn( task_list& list ) { - if( task* t = list.first ) { - t->prefix().owner->spawn( *t, *list.next_ptr ); - list.clear(); - } -} - -inline void task::spawn_root_and_wait( task_list& root_list ) { - if( task* t = root_list.first ) { - t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr ); - root_list.clear(); - } -} - -} // namespace tbb - -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) { - return &tbb::internal::allocate_root_proxy::allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) { - tbb::internal::allocate_root_proxy::free( *static_cast(task) ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) { - p.free( *static_cast(task) ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) { - p.free( *static_cast(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) { - p.free( *static_cast(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) { - p.free( *static_cast(task) ); -} - -#endif /* __TBB_task_H */ diff --git a/inst/include/tbb/task_arena.h b/inst/include/tbb/task_arena.h deleted file mode 100644 index ee04c1248..000000000 --- a/inst/include/tbb/task_arena.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
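Before moving on to task_arena.h, a hedged sketch of how the task_list interface from the file that just ended was typically driven; PrintTask is an illustrative subclass, not from the header:

    #include "tbb/task.h"

    class PrintTask : public tbb::task {
        const int id;
    public:
        PrintTask(int id_) : id(id_) {}
        /*override*/ tbb::task* execute() { /* ... use id ... */ return NULL; }
    };

    void run_four() {
        tbb::task_list roots;
        for (int i = 0; i < 4; ++i)
            roots.push_back(*new(tbb::task::allocate_root()) PrintTask(i));
        tbb::task::spawn_root_and_wait(roots);  // spawns front to back, waits for all
    }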
diff --git a/inst/include/tbb/task_arena.h b/inst/include/tbb/task_arena.h
deleted file mode 100644
index ee04c1248..000000000
--- a/inst/include/tbb/task_arena.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_task_arena_H
-#define __TBB_task_arena_H
-
-#include "task.h"
-#include "tbb_exception.h"
-#if TBB_USE_THREADING_TOOLS
-#include "atomic.h" // for as_atomic
-#endif
-
-#if __TBB_TASK_ARENA
-
-namespace tbb {
-
-//! @cond INTERNAL
-namespace internal {
-    //! Internal to library. Should not be used by clients.
-    /** @ingroup task_scheduling */
-    class arena;
-    class task_scheduler_observer_v3;
-} // namespace internal
-//! @endcond
-
-namespace interface7 {
-//! @cond INTERNAL
-namespace internal {
-using namespace tbb::internal; //e.g. function_task from task.h
-
-class delegate_base : no_assign {
-public:
-    virtual void operator()() const = 0;
-    virtual ~delegate_base() {}
-};
-
-template<typename F>
-class delegated_function : public delegate_base {
-    F &my_func;
-    /*override*/ void operator()() const {
-        my_func();
-    }
-public:
-    delegated_function ( F& f ) : my_func(f) {}
-};
-
-class task_arena_base {
-protected:
-    //! NULL if not currently initialized.
-    internal::arena* my_arena;
-
-#if __TBB_TASK_GROUP_CONTEXT
-    //! default context of the arena
-    task_group_context *my_context;
-#endif
-
-    //! Concurrency level for deferred initialization
-    int my_max_concurrency;
-
-    //! Reserved master slots
-    unsigned my_master_slots;
-
-    //! Special settings
-    intptr_t my_version_and_traits;
-
-    enum {
-        default_flags = 0
-#if __TBB_TASK_GROUP_CONTEXT
-        | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16
-        , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly
-#endif
-    };
-
-    task_arena_base(int max_concurrency, unsigned reserved_for_masters)
-        : my_arena(0)
-#if __TBB_TASK_GROUP_CONTEXT
-        , my_context(0)
-#endif
-        , my_max_concurrency(max_concurrency)
-        , my_master_slots(reserved_for_masters)
-        , my_version_and_traits(default_flags)
-    {}
-
-    void __TBB_EXPORTED_METHOD internal_initialize( );
-    void __TBB_EXPORTED_METHOD internal_terminate( );
-    void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const;
-    void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const;
-    void __TBB_EXPORTED_METHOD internal_wait() const;
-    static int __TBB_EXPORTED_FUNC internal_current_slot();
-public:
-    //! Typedef for number of threads that is automatic.
-    static const int automatic = -1; // any value < 1 means 'automatic'
-
-};
-
-} // namespace internal
-//! @endcond
-
-/** 1-to-1 proxy representation class of scheduler's arena
- * Constructors set up settings only, real construction is deferred till the first method invocation
- * Destructor only removes one of the references to the inner arena representation.
- * Final destruction happens when all the references (and the work) are gone.
- */
-class task_arena : public internal::task_arena_base {
-    friend class tbb::internal::task_scheduler_observer_v3;
-    bool my_initialized;
-
-public:
-    //! Creates task_arena with certain concurrency limits
-    /** Sets up settings only, real construction is deferred till the first method invocation
-     *  @arg max_concurrency specifies total number of slots in arena where threads work
-     *  @arg reserved_for_masters specifies number of slots to be used by master threads only.
-     *       Value of 1 is default and reflects behavior of implicit arenas.
-     **/
-    task_arena(int max_concurrency = automatic, unsigned reserved_for_masters = 1)
-        : task_arena_base(max_concurrency, reserved_for_masters)
-        , my_initialized(false)
-    {}
-
-    //! Copies settings from another task_arena
-    task_arena(const task_arena &s) // copy settings but not the reference or instance
-        : task_arena_base(s.my_max_concurrency, s.my_master_slots)
-        , my_initialized(false)
-    {}
-
-    //! Forces allocation of the resources for the task_arena as specified in constructor arguments
-    inline void initialize() {
-        if( !my_initialized ) {
-            internal_initialize();
-#if TBB_USE_THREADING_TOOLS
-            // Threading tools respect lock prefix but report false-positive data-race via plain store
-            internal::as_atomic(my_initialized).fetch_and_store(true);
-#else
-            my_initialized = true;
-#endif //TBB_USE_THREADING_TOOLS
-        }
-    }
-
-    //! Overrides concurrency level and forces initialization of internal representation
-    inline void initialize(int max_concurrency, unsigned reserved_for_masters = 1) {
-        __TBB_ASSERT( !my_arena, "Impossible to modify settings of an already initialized task_arena");
-        if( !my_initialized ) {
-            my_max_concurrency = max_concurrency;
-            my_master_slots = reserved_for_masters;
-            initialize();
-        }
-    }
-
-    //! Removes the reference to the internal arena representation.
-    //! Not thread safe wrt concurrent invocations of other methods.
-    inline void terminate() {
-        if( my_initialized ) {
-            internal_terminate();
-            my_initialized = false;
-        }
-    }
-
-    //! Removes the reference to the internal arena representation, and destroys the external object.
-    //! Not thread safe wrt concurrent invocations of other methods.
-    ~task_arena() {
-        terminate();
-    }
-
-    //! Returns true if the arena is active (initialized); false otherwise.
-    //! The name was chosen to match a task_scheduler_init method with the same semantics.
-    bool is_active() const { return my_initialized; }
-
-    //! Enqueues a task into the arena to process a functor, and immediately returns.
-    //! Does not require the calling thread to join the arena
-    template<typename F>
-    void enqueue( const F& f ) {
-        initialize();
-#if __TBB_TASK_GROUP_CONTEXT
-        internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task<F>(f), 0 );
-#else
-        internal_enqueue( *new( task::allocate_root() ) internal::function_task<F>(f), 0 );
-#endif
-    }
-
-#if __TBB_TASK_PRIORITY
-    //! Enqueues a task with priority p into the arena to process a functor f, and immediately returns.
-    //! Does not require the calling thread to join the arena
-    template<typename F>
-    void enqueue( const F& f, priority_t p ) {
-        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
-        initialize();
-#if __TBB_TASK_GROUP_CONTEXT
-        internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task<F>(f), (intptr_t)p );
-#else
-        internal_enqueue( *new( task::allocate_root() ) internal::function_task<F>(f), (intptr_t)p );
-#endif
-    }
-#endif// __TBB_TASK_PRIORITY
-
-    //! Joins the arena and executes a functor, then returns
-    //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion
-    //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread
-    template<typename F>
-    void execute(F& f) {
-        initialize();
-        internal::delegated_function<F> d(f);
-        internal_execute( d );
-    }
-
-    //! Joins the arena and executes a functor, then returns
-    //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion
-    //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread
-    template<typename F>
-    void execute(const F& f) {
-        initialize();
-        internal::delegated_function<const F> d(f);
-        internal_execute( d );
-    }
-
-#if __TBB_EXTRA_DEBUG
-    //! Wait for all work in the arena to be completed
-    //! Even submitted by other application threads
-    //! Joins arena if/when possible (in the same way as execute())
-    void debug_wait_until_empty() {
-        initialize();
-        internal_wait();
-    }
-#endif //__TBB_EXTRA_DEBUG
-
-    //! Returns the index, aka slot number, of the calling thread in its current arena
-    inline static int current_thread_index() {
-        return internal_current_slot();
-    }
-};
-
-} // namespace interfaceX
-
-using interface7::task_arena;
-
-} // namespace tbb
-
-#endif /* __TBB_TASK_ARENA */
-
-#endif /* __TBB_task_arena_H */
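A hedged usage sketch for the task_arena preview API above (in this release it was gated behind the __TBB_TASK_ARENA macro); the function name is illustrative:

    #include "tbb/task_arena.h"
    #include "tbb/parallel_for.h"

    void arena_demo() {
        tbb::task_arena arena(4);          // at most 4 threads work in this arena
        arena.execute([] {                 // join the arena and run the functor
            tbb::parallel_for(0, 100, [](int) { /* ... */ });
        });
        arena.enqueue([] { /* fire-and-forget work */ });  // returns immediately
    }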
diff --git a/inst/include/tbb/task_group.h b/inst/include/tbb/task_group.h
deleted file mode 100644
index 4cbe7d55b..000000000
--- a/inst/include/tbb/task_group.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_task_group_H
-#define __TBB_task_group_H
-
-#include "task.h"
-#include "tbb_exception.h"
-
-#if __TBB_TASK_GROUP_CONTEXT
-
-namespace tbb {
-
-namespace internal {
    template<typename F> class task_handle_task;
-}
-
-class task_group;
-class structured_task_group;
-
-template<typename F>
-class task_handle : internal::no_assign {
-    template<typename _F> friend class internal::task_handle_task;
-
-    friend class task_group;
-    friend class structured_task_group;
-
-    static const intptr_t scheduled = 0x1;
-
-    F my_func;
-    intptr_t my_state;
-
-    void mark_scheduled () {
-        // The check here is intentionally lax to avoid the impact of interlocked operation
-        if ( my_state & scheduled )
-            internal::throw_exception( internal::eid_invalid_multiple_scheduling );
-        my_state |= scheduled;
-    }
-public:
-    task_handle( const F& f ) : my_func(f), my_state(0) {}
-
-    void operator() () const { my_func(); }
-};
-
-enum task_group_status {
-    not_complete,
-    complete,
-    canceled
-};
-
-namespace internal {
-
-template<typename F>
-class task_handle_task : public task {
-    task_handle<F>& my_handle;
-    /*override*/ task* execute() {
-        my_handle();
-        return NULL;
-    }
-public:
-    task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
-};
-
-class task_group_base : internal::no_copy {
-protected:
-    empty_task* my_root;
-    task_group_context my_context;
-
-    task& owner () { return *my_root; }
-
-    template<typename F>
-    task_group_status internal_run_and_wait( F& f ) {
-        __TBB_TRY {
-            if ( !my_context.is_group_execution_cancelled() )
-                f();
-        } __TBB_CATCH( ... ) {
-            my_context.register_pending_exception();
-        }
-        return wait();
-    }
-
-    template<typename F, typename Task>
-    void internal_run( F& f ) {
-        owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) );
-    }
-
-public:
-    task_group_base( uintptr_t traits = 0 )
-        : my_context(task_group_context::bound, task_group_context::default_traits | traits)
-    {
-        my_root = new( task::allocate_root(my_context) ) empty_task;
-        my_root->set_ref_count(1);
-    }
-
-    ~task_group_base() __TBB_NOEXCEPT(false) {
-        if( my_root->ref_count() > 1 ) {
-            bool stack_unwinding_in_progress = std::uncaught_exception();
-            // Always attempt to do proper cleanup to avoid inevitable memory corruption
-            // in case of missing wait (for the sake of better testability & debuggability)
-            if ( !is_canceling() )
-                cancel();
-            __TBB_TRY {
-                my_root->wait_for_all();
-            } __TBB_CATCH (...) {
-                task::destroy(*my_root);
-                __TBB_RETHROW();
-            }
-            task::destroy(*my_root);
-            if ( !stack_unwinding_in_progress )
-                internal::throw_exception( internal::eid_missing_wait );
-        }
-        else {
-            task::destroy(*my_root);
-        }
-    }
-
-    template<typename F>
-    void run( task_handle<F>& h ) {
-        internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
-    }
-
-    task_group_status wait() {
-        __TBB_TRY {
-            my_root->wait_for_all();
-        } __TBB_CATCH( ... ) {
-            my_context.reset();
-            __TBB_RETHROW();
-        }
-        if ( my_context.is_group_execution_cancelled() ) {
-            my_context.reset();
-            return canceled;
-        }
-        return complete;
-    }
-
-    bool is_canceling() {
-        return my_context.is_group_execution_cancelled();
-    }
-
-    void cancel() {
-        my_context.cancel_group_execution();
-    }
-}; // class task_group_base
-
-} // namespace internal
-
-class task_group : public internal::task_group_base {
-public:
-    task_group () : task_group_base( task_group_context::concurrent_wait ) {}
-
-#if __SUNPRO_CC
-    template<typename F>
-    void run( task_handle<F>& h ) {
-        internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
-    }
-#else
-    using task_group_base::run;
-#endif
-
-    template<typename F>
-    void run( const F& f ) {
-        internal_run< const F, internal::function_task<const F> >( f );
-    }
-
-    template<typename F>
-    task_group_status run_and_wait( const F& f ) {
-        return internal_run_and_wait<const F>( f );
-    }
-
-    template<typename F>
-    task_group_status run_and_wait( task_handle<F>& h ) {
-        h.mark_scheduled();
-        return internal_run_and_wait< task_handle<F> >( h );
-    }
-}; // class task_group
-
-class structured_task_group : public internal::task_group_base {
-public:
-    template<typename F>
-    task_group_status run_and_wait ( task_handle<F>& h ) {
-        h.mark_scheduled();
-        return internal_run_and_wait< task_handle<F> >( h );
-    }
-
-    task_group_status wait() {
-        task_group_status res = task_group_base::wait();
-        my_root->set_ref_count(1);
-        return res;
-    }
-}; // class structured_task_group
-
-inline
-bool is_current_task_group_canceling() {
-    return task::self().is_cancelled();
-}
-
-template<typename F>
-task_handle<F> make_task( const F& f ) {
-    return task_handle<F>( f );
-}
-
-} // namespace tbb
-
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-#endif /* __TBB_task_group_H */
diff --git a/inst/include/tbb/task_scheduler_init.h b/inst/include/tbb/task_scheduler_init.h
deleted file mode 100644
index b49bddb89..000000000
--- a/inst/include/tbb/task_scheduler_init.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_task_scheduler_init_H
-#define __TBB_task_scheduler_init_H
-
-#include "tbb_stddef.h"
-#include "limits.h"
-
-namespace tbb {
-
-typedef std::size_t stack_size_type;
-
-//! @cond INTERNAL
-namespace internal {
-    //! Internal to library. Should not be used by clients.
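A hedged usage sketch for the task_group class deleted above, using C++11 lambdas for brevity; the function name is illustrative:

    #include "tbb/task_group.h"

    void group_demo() {
        tbb::task_group g;
        g.run([] { /* work A, runs asynchronously */ });
        g.run([] { /* work B */ });
        if (g.wait() == tbb::canceled) {
            // someone called g.cancel() before the work finished
        }
    }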
- /** @ingroup task_scheduling */ - class scheduler; -} // namespace internal -//! @endcond - -//! Class delimiting the scope of task scheduler activity. -/** A thread can construct a task_scheduler_init object and keep it alive - while it uses TBB's tasking subsystem (including parallel algorithms). - - This class allows to customize properties of the TBB task pool to some extent. - For example it can limit concurrency level of parallel work initiated by the - given thread. It also can be used to specify stack size of the TBB worker threads, - though this setting is not effective if the thread pool has already been created. - - If a parallel construct is used without task_scheduler_init object previously - created, the scheduler will be initialized automatically with default settings, - and will persist until this thread exits. Default concurrency level is defined - as described in task_scheduler_init::initialize(). - @ingroup task_scheduling */ -class task_scheduler_init: internal::no_copy { - enum ExceptionPropagationMode { - propagation_mode_exact = 1u, - propagation_mode_captured = 2u, - propagation_mode_mask = propagation_mode_exact | propagation_mode_captured - }; -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - enum { - wait_workers_in_terminate_flag = 128u - }; -#endif - - /** NULL if not currently initialized. */ - internal::scheduler* my_scheduler; -public: - - //! Typedef for number of threads that is automatic. - static const int automatic = -1; - - //! Argument to initialize() or constructor that causes initialization to be deferred. - static const int deferred = -2; - - //! Ensure that scheduler exists for this thread - /** A value of -1 lets TBB decide on the number of threads, which is usually - maximal hardware concurrency for this process, that is the number of logical - CPUs on the machine (possibly limited by the processor affinity mask of this - process (Windows) or of this thread (Linux, FreeBSD). It is preferable option - for production code because it helps to avoid nasty surprises when several - TBB based components run side-by-side or in a nested fashion inside the same - process. - - The number_of_threads is ignored if any other task_scheduler_inits - currently exist. A thread may construct multiple task_scheduler_inits. - Doing so does no harm because the underlying scheduler is reference counted. */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic ); - - //! The overloaded method with stack size parameter - /** Overloading is necessary to preserve ABI compatibility */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size ); - - //! Inverse of method initialize. - void __TBB_EXPORTED_METHOD terminate(); - - //! Shorthand for default constructor followed by call to initialize(number_of_threads). -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0, bool wait_workers_in_terminate = false ) : my_scheduler(NULL) -#else - task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL) -#endif - { - // Two lowest order bits of the stack size argument may be taken to communicate - // default exception propagation mode of the client to be used when the - // client manually creates tasks in the master thread and does not use - // explicit task group context object. 
This is necessary because newer - // TBB binaries with exact propagation enabled by default may be used - // by older clients that expect tbb::captured_exception wrapper. - // All zeros mean old client - no preference. - __TBB_ASSERT( !(thread_stack_size & propagation_mode_mask), "Requested stack size is not aligned" ); -#if TBB_USE_EXCEPTIONS - thread_stack_size |= TBB_USE_CAPTURED_EXCEPTION ? propagation_mode_captured : propagation_mode_exact; -#endif /* TBB_USE_EXCEPTIONS */ -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - if (wait_workers_in_terminate) - my_scheduler = (internal::scheduler*)wait_workers_in_terminate_flag; -#endif - initialize( number_of_threads, thread_stack_size ); - } - - //! Destroy scheduler for this thread if thread has no other live task_scheduler_inits. - ~task_scheduler_init() { - if( my_scheduler ) - terminate(); - internal::poison_pointer( my_scheduler ); - } - //! Returns the number of threads TBB scheduler would create if initialized by default. - /** Result returned by this method does not depend on whether the scheduler - has already been initialized. - - Because tbb 2.0 does not support blocking tasks yet, you may use this method - to boost the number of threads in the tbb's internal pool, if your tasks are - doing I/O operations. The optimal number of additional threads depends on how - much time your tasks spend in the blocked state. - - Before TBB 3.0 U4 this method returned the number of logical CPU in the - system. Currently on Windows, Linux and FreeBSD it returns the number of - logical CPUs available to the current process in accordance with its affinity - mask. - - NOTE: The return value of this method never changes after its first invocation. - This means that changes in the process affinity mask that took place after - this method was first invoked will not affect the number of worker threads - in the TBB worker threads pool. */ - static int __TBB_EXPORTED_FUNC default_num_threads (); - - //! Returns true if scheduler is active (initialized); false otherwise - bool is_active() const { return my_scheduler != NULL; } -}; - -} // namespace tbb - -#endif /* __TBB_task_scheduler_init_H */ diff --git a/inst/include/tbb/task_scheduler_observer.h b/inst/include/tbb/task_scheduler_observer.h deleted file mode 100644 index 6c8ac4da0..000000000 --- a/inst/include/tbb/task_scheduler_observer.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
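A hedged sketch of the task_scheduler_init idiom documented in the file that just ended; the scope controls how long the explicit scheduler reference lives:

    #include "tbb/task_scheduler_init.h"

    void init_demo() {
        {
            tbb::task_scheduler_init init(2);   // at most 2 threads for this scope
            // ... parallel algorithms invoked here ...
        }                                       // destructor releases the reference

        // Deferred variant: decide the thread count later.
        tbb::task_scheduler_init lazy(tbb::task_scheduler_init::deferred);
        lazy.initialize(tbb::task_scheduler_init::automatic);
    }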
Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_task_scheduler_observer_H
-#define __TBB_task_scheduler_observer_H
-
-#include "atomic.h"
-#if __TBB_TASK_ARENA
-#include "task_arena.h"
-#endif //__TBB_TASK_ARENA
-
-#if __TBB_SCHEDULER_OBSERVER
-
-namespace tbb {
-namespace interface6 {
-class task_scheduler_observer;
-}
-namespace internal {
-
-class observer_proxy;
-class observer_list;
-
-class task_scheduler_observer_v3 {
-    friend class observer_proxy;
-    friend class observer_list;
-    friend class interface6::task_scheduler_observer;
-
-    //! Pointer to the proxy holding this observer.
-    /** Observers are proxied by the scheduler to maintain persistent lists of them. **/
-    observer_proxy* my_proxy;
-
-    //! Counter preventing the observer from being destroyed while in use by the scheduler.
-    /** Valid only when observation is on. **/
-    atomic<intptr_t> my_busy_count;
-
-public:
-    //! Enable or disable observation
-    /** For local observers the method can be used only when the current thread
-        has the task scheduler initialized or is attached to an arena.
-
-        Repeated calls with the same state are no-ops. **/
-    void __TBB_EXPORTED_METHOD observe( bool state=true );
-
-    //! Returns true if observation is enabled, false otherwise.
-    bool is_observing() const {return my_proxy!=NULL;}
-
-    //! Construct observer with observation disabled.
-    task_scheduler_observer_v3() : my_proxy(NULL) { my_busy_count.store(0); }
-
-    //! Entry notification
-    /** Invoked from inside observe(true) call and whenever a worker enters the arena
-        this observer is associated with. If a thread is already in the arena when
-        the observer is activated, the entry notification is called before it
-        executes the first stolen task.
-
-        Obsolete semantics. For global observers it is called by a thread before
-        the first steal since observation became enabled. **/
-    virtual void on_scheduler_entry( bool /*is_worker*/ ) {}
-
-    //! Exit notification
-    /** Invoked from inside observe(false) call and whenever a worker leaves the
-        arena this observer is associated with.
-
-        Obsolete semantics. For global observers it is called by a thread before
-        the first steal since observation became enabled. **/
-    virtual void on_scheduler_exit( bool /*is_worker*/ ) {}
-
-    //! Destructor automatically switches observation off if it is enabled.
-    virtual ~task_scheduler_observer_v3() { if(my_proxy) observe(false);}
-};
-
-} // namespace internal
-
-#if __TBB_ARENA_OBSERVER
-namespace interface6 {
-class task_scheduler_observer : public internal::task_scheduler_observer_v3 {
-    friend class internal::task_scheduler_observer_v3;
-    friend class internal::observer_proxy;
-    friend class internal::observer_list;
-
-    /** Negative numbers with the largest absolute value to minimize probability
-        of coincidence in case of a bug in busy count usage. **/
-    // TODO: take more high bits for version number
-    static const intptr_t v6_trait = (intptr_t)((~(uintptr_t)0 >> 1) + 1);
-
-    //! contains task_arena pointer or tag indicating local or global semantics of the observer
-    intptr_t my_context_tag;
-    enum { global_tag = 0, implicit_tag = 1 };
-
-public:
Construct local or global observer in inactive state (observation disabled). - /** For a local observer entry/exit notifications are invoked whenever a worker - thread joins/leaves the arena of the observer's owner thread. If a thread is - already in the arena when the observer is activated, the entry notification is - called before it executes the first stolen task. **/ - /** TODO: Obsolete. - Global observer semantics is obsolete as it violates master thread isolation - guarantees and is not composable. Thus the current default behavior of the - constructor is obsolete too and will be changed in one of the future versions - of the library. **/ - task_scheduler_observer( bool local = false ) { - my_context_tag = local? implicit_tag : global_tag; - } - -#if __TBB_TASK_ARENA - //! Construct local observer for a given arena in inactive state (observation disabled). - /** entry/exit notifications are invoked whenever a thread joins/leaves arena. - If a thread is already in the arena when the observer is activated, the entry notification - is called before it executes the first stolen task. **/ - task_scheduler_observer( task_arena & a) { - my_context_tag = (intptr_t)&a; - } -#endif //__TBB_TASK_ARENA - - /** Destructor protects instance of the observer from concurrent notification. - It is recommended to disable observation before destructor of a derived class starts, - otherwise it can lead to concurrent notification callback on partly destroyed object **/ - virtual ~task_scheduler_observer() { if(my_proxy) observe(false); } - - //! Enable or disable observation - /** Warning: concurrent invocations of this method are not safe. - Repeated calls with the same state are no-ops. **/ - void observe( bool state=true ) { - if( state && !my_proxy ) { - __TBB_ASSERT( !my_busy_count, "Inconsistent state of task_scheduler_observer instance"); - my_busy_count.store(v6_trait); - } - internal::task_scheduler_observer_v3::observe(state); - } - - //! Return commands for may_sleep() - enum { keep_awake = false, allow_sleep = true }; - - //! The callback can be invoked by a worker thread before it goes to sleep. - /** If it returns false ('keep_awake'), the thread will keep spinning and looking for work. - It will not be called for master threads. **/ - virtual bool may_sleep() { return allow_sleep; } -}; - -} //namespace interface6 -using interface6::task_scheduler_observer; -#else /*__TBB_ARENA_OBSERVER*/ -typedef tbb::internal::task_scheduler_observer_v3 task_scheduler_observer; -#endif /*__TBB_ARENA_OBSERVER*/ - -} // namespace tbb - -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#endif /* __TBB_task_scheduler_observer_H */ diff --git a/inst/include/tbb/tbb.h b/inst/include/tbb/tbb.h deleted file mode 100644 index 8a8a8dfa8..000000000 --- a/inst/include/tbb/tbb.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
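A hedged sketch of subclassing the task_scheduler_observer deleted above, counting threads as they enter and leave the scheduler; the class name is illustrative:

    #include "tbb/atomic.h"
    #include "tbb/task_scheduler_observer.h"

    class ThreadCounter : public tbb::task_scheduler_observer {
    public:
        tbb::atomic<int> active;
        ThreadCounter() { active = 0; observe(true); }   // start observing
        /*override*/ void on_scheduler_entry(bool /*is_worker*/) { ++active; }
        /*override*/ void on_scheduler_exit(bool /*is_worker*/)  { --active; }
        ~ThreadCounter() { observe(false); }             // stop before teardown
    };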
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_H -#define __TBB_tbb_H - -/** - This header bulk-includes declarations or definitions of all the functionality - provided by TBB (save for malloc dependent headers). - - If you use only a few TBB constructs, consider including specific headers only. - Any header listed below can be included independently of others. -**/ - -#if TBB_PREVIEW_AGGREGATOR -#include "aggregator.h" -#endif -#include "aligned_space.h" -#include "atomic.h" -#include "blocked_range.h" -#include "blocked_range2d.h" -#include "blocked_range3d.h" -#include "cache_aligned_allocator.h" -#include "combinable.h" -#include "concurrent_hash_map.h" -#if TBB_PREVIEW_CONCURRENT_LRU_CACHE -#include "concurrent_lru_cache.h" -#endif -#include "concurrent_priority_queue.h" -#include "concurrent_queue.h" -#include "concurrent_unordered_map.h" -#include "concurrent_unordered_set.h" -#include "concurrent_vector.h" -#include "critical_section.h" -#include "enumerable_thread_specific.h" -#include "flow_graph.h" -#include "mutex.h" -#include "null_mutex.h" -#include "null_rw_mutex.h" -#include "parallel_do.h" -#include "parallel_for.h" -#include "parallel_for_each.h" -#include "parallel_invoke.h" -#include "parallel_reduce.h" -#include "parallel_scan.h" -#include "parallel_sort.h" -#include "partitioner.h" -#include "pipeline.h" -#include "queuing_mutex.h" -#include "queuing_rw_mutex.h" -#include "reader_writer_lock.h" -#include "recursive_mutex.h" -#include "spin_mutex.h" -#include "spin_rw_mutex.h" -#include "task.h" -#include "task_arena.h" -#include "task_group.h" -#include "task_scheduler_init.h" -#include "task_scheduler_observer.h" -#include "tbb_allocator.h" -#include "tbb_exception.h" -#include "tbb_thread.h" -#include "tick_count.h" - -#endif /* __TBB_tbb_H */ diff --git a/inst/include/tbb/tbb_allocator.h b/inst/include/tbb/tbb_allocator.h deleted file mode 100644 index d9480f2ad..000000000 --- a/inst/include/tbb/tbb_allocator.h +++ /dev/null @@ -1,218 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
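[Editorial aside, not part of the diff: tbb.h, deleted above, was purely a convenience bulk-include; as its own comment notes, code that uses only a few TBB constructs can include the specific headers instead. A sketch of that pattern, with an illustrative functor:]

```cpp
// Including only what is used instead of the bulk tbb.h header (illustrative only).
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

struct Scale {
    double* x; double k;
    void operator()( const tbb::blocked_range<size_t>& r ) const {
        for (size_t i = r.begin(); i != r.end(); ++i)
            x[i] *= k;   // each chunk of the range is processed by one task
    }
};

void scale( double* x, size_t n, double k ) {
    Scale body = { x, k };
    tbb::parallel_for( tbb::blocked_range<size_t>(0, n), body );
}
```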
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_allocator_H -#define __TBB_tbb_allocator_H - -#include "tbb_stddef.h" -#include <cstring> -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include <utility> // std::forward -#endif - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <new> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! Deallocates memory using FreeHandler - /** The function uses scalable_free if scalable allocator is available and free if not*/ - void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ); - - //! Allocates memory using MallocHandler - /** The function uses scalable_malloc if scalable allocator is available and malloc if not*/ - void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ); - - //! Returns true if standard malloc/free are used to work with memory. - bool __TBB_EXPORTED_FUNC is_malloc_used_v3(); -} -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The class selects the best memory allocation mechanism available - from scalable_malloc and standard malloc. - The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template<typename T> -class tbb_allocator { -public: - typedef typename internal::allocator_type<T>::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template<typename U> struct rebind { - typedef tbb_allocator<U> other; - }; - - //! Specifies current allocator - enum malloc_type { - scalable, - standard - }; - - tbb_allocator() throw() {} - tbb_allocator( const tbb_allocator& ) throw() {} - template<typename U> tbb_allocator(const tbb_allocator<U>&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ = 0) { - return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) )); - } - - //! Free previously allocated block of memory. - void deallocate( pointer p, size_type ) { - internal::deallocate_via_handler_v3(p); - } - - //! Largest value for which method allocate might succeed.
- size_type max_size() const throw() { - size_type max = static_cast<size_type>(-1) / sizeof (value_type); - return (max > 0 ? max : 1); - } - - //! Copy-construct value at location pointed to by p. -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - template<typename U, typename... Args> - void construct(U *p, Args&&... args) - { ::new((void *)p) U(std::forward<Args>(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} -#endif - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} - - //! Returns current allocator - static malloc_type allocator_type() { - return internal::is_malloc_used_v3() ? standard : scalable; - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class tbb_allocator<void> { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template<typename U> struct rebind { - typedef tbb_allocator<U> other; - }; -}; - -template<typename T, typename U> -inline bool operator==( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return true;} - -template<typename T, typename U> -inline bool operator!=( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return false;} - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The class is an adapter over an actual allocator that fills the allocation - using memset function with template argument C as the value. - The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template <typename T, template<typename X> class Allocator = tbb_allocator> -class zero_allocator : public Allocator<T> -{ -public: - typedef Allocator<T> base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - typedef typename base_allocator_type::reference reference; - typedef typename base_allocator_type::const_reference const_reference; - typedef typename base_allocator_type::size_type size_type; - typedef typename base_allocator_type::difference_type difference_type; - template<typename U> struct rebind { - typedef zero_allocator<U, Allocator> other; - }; - - zero_allocator() throw() { } - zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { } - template<typename U> - zero_allocator(const zero_allocator<U> &a) throw() : base_allocator_type( Allocator<U>( a ) ) { } - - pointer allocate(const size_type n, const void *hint = 0 ) { - pointer ptr = base_allocator_type::allocate( n, hint ); - std::memset( ptr, 0, n * sizeof(value_type) ); - return ptr; - } -}; - -//!
Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<template<typename T> class Allocator> -class zero_allocator<void, Allocator> : public Allocator<void> { -public: - typedef Allocator<void> base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - template<typename U> struct rebind { - typedef zero_allocator<U, Allocator> other; - }; -}; - -template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2> -inline bool operator==( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) { - return static_cast< B1<T1> >(a) == static_cast< B2<T2> >(b); -} -template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2> -inline bool operator!=( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) { - return static_cast< B1<T1> >(a) != static_cast< B2<T2> >(b); -} - -} // namespace tbb - -#endif /* __TBB_tbb_allocator_H */ diff --git a/inst/include/tbb/tbb_config.h b/inst/include/tbb/tbb_config.h deleted file mode 100644 index ed6d83c00..000000000 --- a/inst/include/tbb/tbb_config.h +++ /dev/null @@ -1,639 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_config_H -#define __TBB_tbb_config_H - -/** This header is supposed to contain macro definitions and C style comments only.
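[Editorial aside, not part of the diff: typical use of the allocators from the tbb_allocator.h header deleted above. tbb_allocator<T> dispatches to scalable_malloc when the TBB malloc library is available and to plain malloc otherwise; zero_allocator<T> additionally zero-fills every allocation. A sketch; the typedef and function names are illustrative:]

```cpp
#include "tbb/tbb_allocator.h"
#include <vector>

// Standard containers parameterized by the TBB allocators (illustrative only).
typedef std::vector<int, tbb::tbb_allocator<int> >  pooled_vector;
typedef std::vector<int, tbb::zero_allocator<int> > zeroed_vector;

bool scalable_backend_in_use() {
    // Runtime query: which backend did tbb_allocator select?
    return tbb::tbb_allocator<int>::allocator_type()
        == tbb::tbb_allocator<int>::scalable;
}
```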
- The macros defined here are intended to control such aspects of TBB build as - - presence of compiler features - - compilation modes - - feature sets - - known compiler/platform issues -**/ - -/*Check which standard library we use on OS X.*/ -/*__TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed.*/ -#if !defined(__TBB_SYMBOL) && __APPLE__ - #include <cstddef> -#endif - -// note that when ICC is in use __TBB_GCC_VERSION might not closely match GCC version on the machine -#define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) - -#if __clang__ - /**according to clang documentation version can be vendor specific **/ - #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) -#endif - -/** Preprocessor symbols to determine HW architecture **/ - -#if _WIN32||_WIN64 -# if defined(_M_X64)||defined(__x86_64__) // the latter for MinGW support -# define __TBB_x86_64 1 -# elif defined(_M_IA64) -# define __TBB_ipf 1 -# elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support -# define __TBB_x86_32 1 -# else -# define __TBB_generic_arch 1 -# endif -#else /* Assume generic Unix */ -# if !__linux__ && !__APPLE__ -# define __TBB_generic_os 1 -# endif -# if __x86_64__ -# define __TBB_x86_64 1 -# elif __ia64__ -# define __TBB_ipf 1 -# elif __i386__||__i386 // __i386 is for Sun OS -# define __TBB_x86_32 1 -# else -# define __TBB_generic_arch 1 -# endif -#endif - -#if __MIC__ || __MIC2__ -#define __TBB_DEFINE_MIC 1 -#endif - -#define __TBB_TSX_AVAILABLE (__TBB_x86_32 || __TBB_x86_64) && !__TBB_DEFINE_MIC - -/** Presence of compiler features **/ - -#if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811 -/* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fix it. */ - #undef __INTEL_COMPILER - #define __INTEL_COMPILER 1210 -#endif - -#if __TBB_GCC_VERSION >= 40400 && !defined(__INTEL_COMPILER) - /** warning suppression pragmas available in GCC since 4.4 **/ - #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 -#endif - -/* Select particular features of C++11 based on compiler version. - ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher - set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode. - - Compilers that mimics other compilers (ICC, clang) must be processed before - compilers they mimic (GCC, MSVC). - - TODO: The following conditions should be extended when new compilers/runtimes - support added. - */ - -#if __INTEL_COMPILER - /** C++11 mode detection macros for Intel C++ compiler (enabled by -std=c++0x option): - __INTEL_CXX11_MODE__ for version >=13.0 - __STDC_HOSTED__ for version >=12.0 on Windows, - __GXX_EXPERIMENTAL_CXX0X__ for version >=12.0 on Linux and OS X. **/ - // On Windows, C++11 features supported by Visual Studio 2010 and higher are enabled by default - #ifndef __INTEL_CXX11_MODE__ - #define __INTEL_CXX11_MODE__ ((_MSC_VER && __STDC_HOSTED__) || __GXX_EXPERIMENTAL_CXX0X__) - // TODO: check if more conditions can be simplified with the above macro - #endif - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__INTEL_CXX11_MODE__ && __VARIADIC_TEMPLATES) - // Both r-value reference support in compiler and std::move/std::forward - // presence in C++ standard library is checked.
- #define __TBB_CPP11_RVALUE_REF_PRESENT ((__GXX_EXPERIMENTAL_CXX0X__ && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION) || _MSC_VER >= 1600) && __INTEL_COMPILER >= 1200) - #if _MSC_VER >= 1600 - #define __TBB_EXCEPTION_PTR_PRESENT ( __INTEL_COMPILER > 1300 \ - /*ICC 12.1 Upd 10 and 13 beta Upd 2 fixed exception_ptr linking issue*/ \ - || (__INTEL_COMPILER == 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ - || (__INTEL_COMPILER == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) ) - /** libstdc++ that comes with GCC 4.6 use C++11 features not supported by ICC 12.1. - * Because of that ICC 12.1 does not support C++11 mode with with gcc 4.6 (or higher), - * and therefore does not define __GXX_EXPERIMENTAL_CXX0X__ macro **/ - #elif __TBB_GCC_VERSION >= 40404 && __TBB_GCC_VERSION < 40600 - #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1200) - #elif __TBB_GCC_VERSION >= 40600 - #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1300) - #else - #define __TBB_EXCEPTION_PTR_PRESENT 0 - #endif - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)) - #define __TBB_STATIC_ASSERT_PRESENT (__INTEL_CXX11_MODE__ || _MSC_VER >= 1600) - #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300)) - /**Intel C++ compiler 14.0 crashes on using __has_include. When it fixed, condition will need to be updated. **/ - #if (__clang__ && __INTEL_COMPILER > 1400) - #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<initializer_list>)) - #define __TBB_INITIALIZER_LISTS_PRESENT 1 - #endif - #else - /** TODO: when MSVC2013 is supported by Intel C++ compiler, it will be enabled silently by compiler, so rule will need to be updated.**/ - #define __TBB_INITIALIZER_LISTS_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GCC_VERSION >= 40400 || _LIBCPP_VERSION) - #endif - - #define __TBB_CONSTEXPR_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200 - /** ICC seems to disable support of noexcept event in c++11 when compiling in compatibility mode for gcc <4.6 **/ - #define __TBB_NOEXCEPT_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1300 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION || _MSC_VER) - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1310 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION)) - #define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210) - #define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210) -#elif __clang__ -//TODO: these options need to be rechecked -/** on OS X* the only way to get C++11 is to use clang. For library features (e.g. exception_ptr) libc++ is also - * required.
So there is no need to check GCC version for clang**/ - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__has_feature(__cxx_variadic_templates__)) - #define __TBB_CPP11_RVALUE_REF_PRESENT (__has_feature(__cxx_rvalue_references__) && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION)) -/** TODO: extend exception_ptr related conditions to cover libstdc++ **/ - #define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION) - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION) - #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_static_assert__) - /**Clang (preprocessor) has problems with dealing with expression having __has_include in #ifs - * used inside C++ code. (At least version that comes with OS X 10.8 : Apple LLVM version 4.2 (clang-425.0.28) (based on LLVM 3.2svn)) **/ - #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>)) - #define __TBB_CPP11_TUPLE_PRESENT 1 - #endif - #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<initializer_list>)) - #define __TBB_INITIALIZER_LISTS_PRESENT 1 - #endif - #define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_constexpr__) - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_defaulted_functions__) && __has_feature(__cxx_deleted_functions__)) - /**For some unknown reason __has_feature(__cxx_noexcept) does not yield true for all cases. Compiler bug ? **/ - #define __TBB_NOEXCEPT_PRESENT (__cplusplus >= 201103L) - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (__has_feature(__cxx_range_for__) && _LIBCPP_VERSION) - #define __TBB_CPP11_AUTO_PRESENT __has_feature(__cxx_auto_type__) - #define __TBB_CPP11_DECLTYPE_PRESENT __has_feature(__cxx_decltype__) -#elif __GNUC__ - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X__ - #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CXX0X__ - /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIBCXX_ATOMIC_BUILTINS_4, which is a prerequisite - for exception_ptr but cannot be used in this file because it is defined in a header, not by the compiler. - If the compiler has no atomic intrinsics, the C++ library should not expect those as well.
**/ - #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) - #define __TBB_STATIC_ASSERT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) - #define __TBB_CPP11_TUPLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) - #define __TBB_INITIALIZER_LISTS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - /** gcc seems have to support constexpr from 4.4 but tests in (test_atomic) seeming reasonable fail to compile prior 4.6**/ - #define __TBB_CONSTEXPR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - #define __TBB_NOEXCEPT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) - #define __TBB_CPP11_AUTO_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - #define __TBB_CPP11_DECLTYPE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) -#elif _MSC_VER - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800) - #define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1600) - #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) - #define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600) - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700) - #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) - #define __TBB_INITIALIZER_LISTS_PRESENT (_MSC_VER >= 1800) - #define __TBB_CONSTEXPR_PRESENT 0 - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (_MSC_VER >= 1800) - #define __TBB_NOEXCEPT_PRESENT 0 /*for _MSC_VER == 1800*/ - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700) - #define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600) - #define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600) -#else - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 - #define __TBB_CPP11_RVALUE_REF_PRESENT 0 - #define __TBB_EXCEPTION_PTR_PRESENT 0 - #define __TBB_STATIC_ASSERT_PRESENT 0 - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT 0 - #define __TBB_CPP11_TUPLE_PRESENT 0 - #define __TBB_INITIALIZER_LISTS_PRESENT 0 - #define __TBB_CONSTEXPR_PRESENT 0 - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 - #define __TBB_NOEXCEPT_PRESENT 0 - #define __TBB_CPP11_STD_BEGIN_END_PRESENT 0 - #define __TBB_CPP11_AUTO_PRESENT 0 - #define __TBB_CPP11_DECLTYPE_PRESENT 0 -#endif - -// C++11 standard library features - -#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700) -#define __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300 || _MSC_VER >= 1600) -//TODO: Probably more accurate way is to analyze version of stdlibc++ via__GLIBCXX__ instead of __TBB_GCC_VERSION -#define __TBB_ALLOCATOR_TRAITS_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION || _MSC_VER >= 1700 || \ - __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 && !(__TBB_GCC_VERSION == 40700 && __TBB_DEFINE_MIC) \ - ) - -//TODO: not clear how exactly this macro affects exception_ptr - investigate -// On linux ICC fails to find existing std::exception_ptr in libstdc++ without this define -#if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 -#endif - -// Work around a bug in 
MinGW32 -#if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_BUILTINS_4) - #define _GLIBCXX_ATOMIC_BUILTINS_4 -#endif - -#if __GNUC__ || __SUNPRO_CC || __IBMCPP__ - /* ICC defines __GNUC__ and so is covered */ - #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1 -#elif _MSC_VER && (_MSC_VER >= 1300 || __INTEL_COMPILER) - #define __TBB_DECLSPEC_ALIGN_PRESENT 1 -#endif - -/* Actually ICC supports gcc __sync_* intrinsics starting 11.1, - * but 64 bit support for 32 bit target comes in later ones*/ -/* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */ -#if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200 - /** built-in atomics available in GCC since 4.1.2 **/ - #define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1 -#endif - -#if __INTEL_COMPILER >= 1200 - /** built-in C++11 style atomics available in ICC since 12.0 **/ - #define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1 -#endif - -#define __TBB_TSX_INTRINSICS_PRESENT ((__RTM__ || _MSC_VER>=1700 || __INTEL_COMPILER>=1300) && !__TBB_DEFINE_MIC && !__ANDROID__) - -/** User controlled TBB features & modes **/ - -#ifndef TBB_USE_DEBUG -#ifdef _DEBUG -#define TBB_USE_DEBUG _DEBUG -#else -#define TBB_USE_DEBUG 0 -#endif -#endif /* TBB_USE_DEBUG */ - -#ifndef TBB_USE_ASSERT -#define TBB_USE_ASSERT TBB_USE_DEBUG -#endif /* TBB_USE_ASSERT */ - -#ifndef TBB_USE_THREADING_TOOLS -#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG -#endif /* TBB_USE_THREADING_TOOLS */ - -#ifndef TBB_USE_PERFORMANCE_WARNINGS -#ifdef TBB_PERFORMANCE_WARNINGS -#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS -#else -#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG -#endif /* TBB_PEFORMANCE_WARNINGS */ -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - -#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX) - #if TBB_USE_EXCEPTIONS - #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. - #elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 0 - #endif -#elif !defined(TBB_USE_EXCEPTIONS) - #if __TBB_DEFINE_MIC - #define TBB_USE_EXCEPTIONS 0 - #else - #define TBB_USE_EXCEPTIONS 1 - #endif -#elif TBB_USE_EXCEPTIONS && __TBB_DEFINE_MIC - #error Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. -#endif - -#ifndef TBB_IMPLEMENT_CPP0X - /** By default, use C++11 classes if available **/ - #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ - #define TBB_IMPLEMENT_CPP0X 0 - #elif __clang__ && __cplusplus >= 201103L - //TODO: consider introducing separate macros for each file? - //prevent injection of corresponding tbb names into std:: namespace if native headers are present - #if __has_include(<thread>) || __has_include(<condition_variable>) - #define TBB_IMPLEMENT_CPP0X 0 - #else - #define TBB_IMPLEMENT_CPP0X 1 - #endif - #elif _MSC_VER>=1700 - #define TBB_IMPLEMENT_CPP0X 0 - #elif __STDCPP_THREADS__ - #define TBB_IMPLEMENT_CPP0X 0 - #else - #define TBB_IMPLEMENT_CPP0X 1 - #endif -#endif /* TBB_IMPLEMENT_CPP0X */ - -/* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as it is used as C++ const */ -#ifndef TBB_USE_CAPTURED_EXCEPTION - /** IA-64 architecture pre-built TBB binaries do not support exception_ptr.
**/ - #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__ia64__) - #define TBB_USE_CAPTURED_EXCEPTION 0 - #else - #define TBB_USE_CAPTURED_EXCEPTION 1 - #endif -#else /* defined TBB_USE_CAPTURED_EXCEPTION */ - #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT - #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception. - #endif -#endif /* defined TBB_USE_CAPTURED_EXCEPTION */ - -/** Check whether the request to use GCC atomics can be satisfied **/ -#if TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT - #error "GCC atomic built-ins are not supported." -#endif - -/** Internal TBB features & modes **/ - -/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/ -#ifndef __TBB_WEAK_SYMBOLS_PRESENT -#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) ) -#endif - -/** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load shared libraries at run time **/ -#ifndef __TBB_DYNAMIC_LOAD_ENABLED - #define __TBB_DYNAMIC_LOAD_ENABLED 1 -#endif - -/** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when - it's necessary to test internal functions not exported from TBB DLLs -**/ -#if (_WIN32||_WIN64) && (__TBB_SOURCE_DIRECTLY_INCLUDED || TBB_USE_PREVIEW_BINARY) - #define __TBB_NO_IMPLICIT_LINKAGE 1 - #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 -#endif - -#ifndef __TBB_COUNT_TASK_NODES - #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT -#endif - -#ifndef __TBB_TASK_GROUP_CONTEXT - #define __TBB_TASK_GROUP_CONTEXT 1 -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#ifndef __TBB_SCHEDULER_OBSERVER - #define __TBB_SCHEDULER_OBSERVER 1 -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#ifndef __TBB_FP_CONTEXT - #define __TBB_FP_CONTEXT __TBB_TASK_GROUP_CONTEXT -#endif /* __TBB_FP_CONTEXT */ - -#if __TBB_FP_CONTEXT && !__TBB_TASK_GROUP_CONTEXT - #error __TBB_FP_CONTEXT requires __TBB_TASK_GROUP_CONTEXT to be enabled -#endif - -#ifndef __TBB_TASK_ARENA - #define __TBB_TASK_ARENA 1 -#endif /* __TBB_TASK_ARENA */ -#if __TBB_TASK_ARENA - #define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official - #if !__TBB_SCHEDULER_OBSERVER - #error __TBB_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to be enabled - #endif -#endif /* __TBB_TASK_ARENA */ - -#ifndef __TBB_ARENA_OBSERVER - #define __TBB_ARENA_OBSERVER ((__TBB_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER) -#endif /* __TBB_ARENA_OBSERVER */ - -#ifndef __TBB_SLEEP_PERMISSION - #define __TBB_SLEEP_PERMISSION ((__TBB_CPF_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER) -#endif /* __TBB_SLEEP_PERMISSION */ - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */ - -#ifndef __TBB_ITT_STRUCTURE_API -#define __TBB_ITT_STRUCTURE_API ( !__TBB_DEFINE_MIC && (__TBB_CPF_BUILD || TBB_PREVIEW_FLOW_GRAPH_TRACE) ) -#endif - -#if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT - #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabled -#endif - -#ifndef __TBB_TASK_PRIORITY - #define __TBB_TASK_PRIORITY (!(__TBB_CPF_BUILD||TBB_USE_PREVIEW_BINARY)&&__TBB_TASK_GROUP_CONTEXT) // TODO: it will be enabled for CPF in the next versions -#endif /* __TBB_TASK_PRIORITY */ - -#if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT - #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enabled -#endif - -#if TBB_PREVIEW_WAITING_FOR_WORKERS 
|| __TBB_BUILD - #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1 -#endif - -#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && \ - (_WIN32 || _WIN64 || __APPLE__ || (__linux__ && !__ANDROID__)) - #define __TBB_SURVIVE_THREAD_SWITCH 1 -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -#ifndef __TBB_DEFAULT_PARTITIONER -#if TBB_DEPRECATED -/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ -#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner -#else -/** Default partitioner for parallel loop templates since TBB 2.2 */ -#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner -#endif /* TBB_DEPRECATED */ -#endif /* !defined(__TBB_DEFAULT_PARTITIONER */ - -#ifndef __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES -#define __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES 1 -#endif - -#ifdef _VARIADIC_MAX -#define __TBB_VARIADIC_MAX _VARIADIC_MAX -#else -#if _MSC_VER >= 1700 -#define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */ -#else -#define __TBB_VARIADIC_MAX 10 -#endif -#endif - -/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load - shared libraries at run time only from application container **/ -#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP - #define __TBB_WIN8UI_SUPPORT 1 -#else - #define __TBB_WIN8UI_SUPPORT 0 -#endif - -/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by - the bugs in compilers, standard or OS specific libraries. They should be - removed as soon as the corresponding bugs are fixed or the buggy OS/compiler - versions go out of the support list. -**/ - -#if __ANDROID__ && __TBB_GCC_VERSION <= 40403 && !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 - /** Necessary because on Android 8-byte CAS and F&A are not available for some processor architectures, - but no mandatory warning message appears from GCC 4.4.3. Instead, only a linkage error occurs when - these atomic operations are used (such as in unit test test_atomic.exe). **/ - #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1 -#elif __TBB_x86_32 && __TBB_GCC_VERSION == 40102 && ! __GNUC_RH_RELEASE__ - /** GCC 4.1.2 erroneously emit call to external function for 64 bit sync_ intrinsics. - However these functions are not defined anywhere. It seems that this problem was fixed later on - and RHEL got an updated version of gcc 4.1.2. **/ - #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1 -#endif - -#if __GNUC__ && __TBB_x86_64 && __INTEL_COMPILER == 1200 - #define __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN 1 -#endif - -#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012) - /** Necessary to avoid ICL error (or warning in non-strict mode): - "exception specification for implicitly declared virtual destructor is - incompatible with that of overridden one". **/ - #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 -#endif - -#if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) - /** VS2005 and earlier do not allow declaring template class as a friend - of classes defined in other namespaces. **/ - #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 -#endif - -//TODO: recheck for different clang versions -#if __GLIBC__==2 && __GLIBC_MINOR__==3 || (__APPLE__ && ( __INTEL_COMPILER==1200 && !TBB_USE_DEBUG)) - /** Macro controlling EH usages in TBB tests. - Some older versions of glibc crash when exception handling happens concurrently. 
**/ - #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 -#else - #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 0 -#endif - -#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 - /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/ - #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 -#endif - -#if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER)) - /** Bugs with access to nested classes declared in protected area */ - #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1 -#endif - -#if __MINGW32__ && __TBB_GCC_VERSION < 40200 - /** MinGW has a bug with stack alignment for routines invoked from MS RTLs. - Since GCC 4.2, the bug can be worked around via a special attribute. **/ - #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 -#else - #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0 -#endif - -#if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0 - /* GCC of this version may rashly ignore control dependencies */ - #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1 -#endif - -#if __FreeBSD__ - /** A bug in FreeBSD 8.0 results in kernel panic when there is contention - on a mutex created with this attribute. **/ - #define __TBB_PRIO_INHERIT_BROKEN 1 - - /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs - during (concurrent?) object construction by means of placement new operator. **/ - #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 -#endif /* __FreeBSD__ */ - -#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) - /** The Intel compiler for IA-32 (Linux|OS X) crashes or generates - incorrect code when __asm__ arguments have a cast to volatile. **/ - #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 -#endif - -#if !__INTEL_COMPILER && (_MSC_VER || __GNUC__==3 && __GNUC_MINOR__<=2) - /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __alignof(T) - when T has not yet been instantiated. **/ - #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1 -#endif - -#if __TBB_DEFINE_MIC - /** Main thread and user's thread have different default thread affinity masks. **/ - #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 -#endif - -#if __GXX_EXPERIMENTAL_CXX0X__ && !defined(__EXCEPTIONS) && \ - ((!__INTEL_COMPILER && !__clang__ && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<40600)) || \ - (__INTEL_COMPILER<=1400 && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<=40801))) -/* There is an issue for specific GCC toolchain when C++11 is enabled - and exceptions are disabled: - exceprion_ptr.h/nested_exception.h use throw unconditionally. - GCC can ignore 'throw' since 4.6; but with ICC the issue still exists. - */ - #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1 -#else - #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 0 -#endif - -#if __INTEL_COMPILER==1300 && __TBB_GCC_VERSION>=40700 && defined(__GXX_EXPERIMENTAL_CXX0X__) -/* Some C++11 features used inside libstdc++ are not supported by Intel compiler. 
- * Checking version of gcc instead of libstdc++ because - * - they are directly connected, - * - for now it is not possible to check version of any standard library in this file - */ - #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 1 -#else - #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 0 -#endif - -#if (__GNUC__==4 && __GNUC_MINOR__==4 ) && !defined(__INTEL_COMPILER) && !defined(__clang__) - /** excessive warnings related to strict aliasing rules in GCC 4.4 **/ - #define __TBB_GCC_STRICT_ALIASING_BROKEN 1 - /* topical remedy: #pragma GCC diagnostic ignored "-Wstrict-aliasing" */ - #if !__TBB_GCC_WARNING_SUPPRESSION_PRESENT - #error Warning suppression is not supported, while should. - #endif -#endif - -/*In a PIC mode some versions of GCC 4.1.2 generate incorrect inlined code for 8 byte __sync_val_compare_and_swap intrinsic */ -#if __TBB_GCC_VERSION == 40102 && __PIC__ && !defined(__INTEL_COMPILER) && !defined(__clang__) - #define __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 1 -#endif - -#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun || __ANDROID__) && (__INTEL_COMPILER || (__GNUC__==3 && __GNUC_MINOR__==3 ) || __SUNPRO_CC) - // Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack, - // even if the object specifies 8-byte alignment. On such platforms, the IA-32 implementation - // of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon - // whether the object is properly aligned or not. - #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1 -#else - #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0 -#endif - -#if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && __TBB_GCC_VERSION < 40700 && !defined(__INTEL_COMPILER) && !defined (__clang__) - #define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1 -#endif - -#if _MSC_VER && _MSC_VER <= 1800 && !__INTEL_COMPILER - // With MSVC, when an array is passed by const reference to a template function, - // constness from the function parameter may get propagated to the template parameter.
- #define __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN 1 -#endif - -// A compiler bug: a disabled copy constructor prevents use of the moving constructor -#define __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN (_MSC_VER && (__INTEL_COMPILER >= 1300 && __INTEL_COMPILER <= 1310) && !__INTEL_CXX11_MODE__) - -// MSVC 2013 and ICC 15 seems do not generate implicit move constructor for empty derived class while should -#define __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN (__TBB_CPP11_RVALUE_REF_PRESENT && \ - ( !__INTEL_COMPILER && _MSC_VER && _MSC_VER <=1800 || __INTEL_COMPILER && __INTEL_COMPILER <= 1500 )) - -/** End of __TBB_XXX_BROKEN macro section **/ - -#if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER) - // A macro to suppress erroneous or benign "unreachable code" MSVC warning (4702) - #define __TBB_MSVC_UNREACHABLE_CODE_IGNORED 1 -#endif - -#define __TBB_ATOMIC_CTORS (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && (!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN)) - -#define __TBB_ALLOCATOR_CONSTRUCT_VARIADIC (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT) - -#define __TBB_VARIADIC_PARALLEL_INVOKE (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT) -#endif /* __TBB_tbb_config_H */ diff --git a/inst/include/tbb/tbb_exception.h b/inst/include/tbb/tbb_exception.h deleted file mode 100644 index cfef55ef3..000000000 --- a/inst/include/tbb/tbb_exception.h +++ /dev/null @@ -1,379 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_exception_H -#define __TBB_exception_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <stdexcept> -#include <new> //required for bad_alloc definition, operators new -#include <string> // required to construct std exception classes - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//!
Exception for concurrent containers -class bad_last_alloc : public std::bad_alloc { -public: - /*override*/ const char* what() const throw(); -#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN - /*override*/ ~bad_last_alloc() throw() {} -#endif -}; - -//! Exception for PPL locks -class improper_lock : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for user-initiated abort -class user_abort : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for missing wait on structured_task_group -class missing_wait : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for repeated scheduling of the same task_handle -class invalid_multiple_scheduling : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -namespace internal { -//! Obsolete -void __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4(); - -enum exception_id { - eid_bad_alloc = 1, - eid_bad_last_alloc, - eid_nonpositive_step, - eid_out_of_range, - eid_segment_range_error, - eid_index_range_error, - eid_missing_wait, - eid_invalid_multiple_scheduling, - eid_improper_lock, - eid_possible_deadlock, - eid_operation_not_permitted, - eid_condvar_wait_failed, - eid_invalid_load_factor, - eid_reserved, // free slot for backward compatibility, can be reused. - eid_invalid_swap, - eid_reservation_length_error, - eid_invalid_key, - eid_user_abort, - eid_reserved1, -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - // This id is used only inside library and only for support of CPF functionality. - // So, if we drop the functionality, eid_reserved1 can be safely renamed and reused. - eid_blocking_sch_init = eid_reserved1, -#endif - eid_bad_tagged_msg_cast, - //! The last enumerator tracks the number of defined IDs. It must remain the last one. - /** When adding new IDs, place them immediately _before_ this comment (that is - _after_ all the existing IDs. NEVER insert new IDs between the existing ones. **/ - eid_max -}; - -//! Gathers all throw operators in one place. -/** Its purpose is to minimize code bloat that can be caused by throw operators - scattered in multiple places, especially in templates. **/ -void __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id ); - -//! Versionless convenience wrapper for throw_exception_v4() -inline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); } - -} // namespace internal -} // namespace tbb - -#if __TBB_TASK_GROUP_CONTEXT -#include "tbb_allocator.h" -#include <typeinfo> //for typeid - -namespace tbb { - -//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads. -/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted - by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in - the root thread. The root thread is the thread that has started the outermost algorithm - or root task sharing the same task_group_context with the guilty algorithm/task (the one - that threw the exception first). - - Note: when documentation mentions workers with respect to exception handling, - masters are implied as well, because they are completely equivalent in this context. - Consequently a root thread can be master or worker thread.
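[Editorial aside, not part of the diff: a sketch of how exceptions propagated by the TBB 4.x scheduler were caught in the root thread. On runtimes with std::exception_ptr the original exception type is re-thrown; on older runtimes it arrives as the tbb::captured_exception summary declared in this header. The lambda requires C++11; the function name is illustrative:]

```cpp
#include "tbb/parallel_for.h"
#include "tbb/tbb_exception.h"
#include <cstdio>
#include <stdexcept>

void run_guarded() {
    try {
        tbb::parallel_for( 0, 100, []( int i ) {
            if ( i == 42 ) throw std::runtime_error("boom");   // thrown on a worker
        });
    } catch ( std::runtime_error& e ) {
        std::printf("exact type re-thrown: %s\n", e.what());   // exception_ptr path
    } catch ( tbb::captured_exception& e ) {
        std::printf("%s: %s\n", e.name(), e.what());           // pre-C++11 fallback
    }
}
```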
- - NOTE: In case of nested algorithms or complex task hierarchies when the nested - levels share (explicitly or by means of implicit inheritance) the task group - context of the outermost level, the exception may be (re-)thrown multiple times - (ultimately - in each worker on each nesting level) before reaching the root - thread at the outermost level. IMPORTANT: if you intercept an exception derived - from this class on a nested level, you must re-throw it in the catch block by means - of the "throw;" operator. - - TBB provides two implementations of this interface: tbb::captured_exception and - template class tbb::movable_exception. See their declarations for more info. **/ -class tbb_exception : public std::exception -{ - /** No operator new is provided because the TBB usage model assumes dynamic - creation of the TBB exception objects only by means of applying move() - operation on an exception thrown out of TBB scheduler. **/ - void* operator new ( size_t ); - -public: -#if __clang__ - // At -O3 or even -O2 optimization level, Clang may fully throw away an empty destructor - // of tbb_exception from destructors of derived classes. As a result, it does not create - // vtable for tbb_exception, which is a required part of TBB binary interface. - // Making the destructor non-empty (with just a semicolon) prevents that optimization. - ~tbb_exception() throw() { /* keep the semicolon! */ ; } -#endif - - //! Creates and returns pointer to the deep copy of this exception object. - /** Move semantics is allowed. **/ - virtual tbb_exception* move () throw() = 0; - - //! Destroys objects created by the move() method. - /** Frees memory and calls destructor for this exception object. - Can and must be used only on objects created by the move method. **/ - virtual void destroy () throw() = 0; - - //! Throws this exception object. - /** Make sure that if you have several levels of derivation from this interface - you implement or override this method on the most derived level. The implementation - is as simple as "throw *this;". Failure to do this will result in exception - of a base class type being thrown. **/ - virtual void throw_self () = 0; - - //! Returns RTTI name of the originally intercepted exception - virtual const char* name() const throw() = 0; - - //! Returns the result of originally intercepted exception's what() method. - virtual const char* what() const throw() = 0; - - /** Operator delete is provided only to allow using existing smart pointers - with TBB exception objects obtained as the result of applying move() - operation on an exception thrown out of TBB scheduler. - - When overriding method move() make sure to override operator delete as well - if memory is allocated not by TBB's scalable allocator. **/ - void operator delete ( void* p ) { - internal::deallocate_via_handler_v3(p); - } -}; - -//! This class is used by TBB to propagate information about unhandled exceptions into the root thread. -/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel - algorithm ) if an unhandled exception was intercepted during the algorithm execution in one - of the workers. 
- \sa tbb::tbb_exception **/ -class captured_exception : public tbb_exception -{ -public: - captured_exception ( const captured_exception& src ) - : tbb_exception(src), my_dynamic(false) - { - set(src.my_exception_name, src.my_exception_info); - } - - captured_exception ( const char* name_, const char* info ) - : my_dynamic(false) - { - set(name_, info); - } - - __TBB_EXPORTED_METHOD ~captured_exception () throw(); - - captured_exception& operator= ( const captured_exception& src ) { - if ( this != &src ) { - clear(); - set(src.my_exception_name, src.my_exception_info); - } - return *this; - } - - /*override*/ - captured_exception* __TBB_EXPORTED_METHOD move () throw(); - - /*override*/ - void __TBB_EXPORTED_METHOD destroy () throw(); - - /*override*/ - void throw_self () { __TBB_THROW(*this); } - - /*override*/ - const char* __TBB_EXPORTED_METHOD name() const throw(); - - /*override*/ - const char* __TBB_EXPORTED_METHOD what() const throw(); - - void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw(); - void __TBB_EXPORTED_METHOD clear () throw(); - -private: - //! Used only by method clone(). - captured_exception() {} - - //! Functionally equivalent to {captured_exception e(name,info); return e.clone();} - static captured_exception* allocate ( const char* name, const char* info ); - - bool my_dynamic; - const char* my_exception_name; - const char* my_exception_info; -}; - -//! Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread -/** Code using TBB can instantiate this template with an arbitrary ExceptionData type - and throw this exception object. Such exceptions are intercepted by the TBB scheduler - and delivered to the root thread (). - \sa tbb::tbb_exception **/ -template<typename ExceptionData> -class movable_exception : public tbb_exception -{ - typedef movable_exception<ExceptionData> self_type; - -public: - movable_exception ( const ExceptionData& data_ ) - : my_exception_data(data_) - , my_dynamic(false) - , my_exception_name( -#if TBB_USE_EXCEPTIONS - typeid(self_type).name() -#else /* !TBB_USE_EXCEPTIONS */ - "movable_exception" -#endif /* !TBB_USE_EXCEPTIONS */ - ) - {} - - movable_exception ( const movable_exception& src ) throw () - : tbb_exception(src) - , my_exception_data(src.my_exception_data) - , my_dynamic(false) - , my_exception_name(src.my_exception_name) - {} - - ~movable_exception () throw() {} - - const movable_exception& operator= ( const movable_exception& src ) { - if ( this != &src ) { - my_exception_data = src.my_exception_data; - my_exception_name = src.my_exception_name; - } - return *this; - } - - ExceptionData& data () throw() { return my_exception_data; } - - const ExceptionData& data () const throw() { return my_exception_data; } - - /*override*/ const char* name () const throw() { return my_exception_name; } - - /*override*/ const char* what () const throw() { return "tbb::movable_exception"; } - - /*override*/ - movable_exception* move () throw() { - void* e = internal::allocate_via_handler_v3(sizeof(movable_exception)); - if ( e ) { - ::new (e) movable_exception(*this); - ((movable_exception*)e)->my_dynamic = true; - } - return (movable_exception*)e; - } - /*override*/ - void destroy () throw() { - __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" ); - if ( my_dynamic ) { - this->~movable_exception(); - internal::deallocate_via_handler_v3(this); - } - } - /*override*/ - void throw_self () { __TBB_THROW( *this ); } - -protected: - //!
User data - ExceptionData my_exception_data; - -private: - //! Flag specifying whether this object has been dynamically allocated (by the move method) - bool my_dynamic; - - //! RTTI name of this class - /** We rely on the fact that RTTI names are static string constants. **/ - const char* my_exception_name; -}; - -#if !TBB_USE_CAPTURED_EXCEPTION -namespace internal { - -//! Exception container that preserves the exact copy of the original exception -/** This class can be used only when the appropriate runtime support (mandated - by C++0x) is present **/ -class tbb_exception_ptr { - std::exception_ptr my_ptr; - -public: - static tbb_exception_ptr* allocate (); - static tbb_exception_ptr* allocate ( const tbb_exception& tag ); - //! This overload uses move semantics (i.e. it empties src) - static tbb_exception_ptr* allocate ( captured_exception& src ); - - //! Destroys this objects - /** Note that objects of this type can be created only by the allocate() method. **/ - void destroy () throw(); - - //! Throws the contained exception . - void throw_self () { std::rethrow_exception(my_ptr); } - -private: - tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {} - tbb_exception_ptr ( const captured_exception& src ) : - #if __TBB_MAKE_EXCEPTION_PTR_PRESENT - my_ptr(std::make_exception_ptr(src)) // the final function name in C++11 - #else - my_ptr(std::copy_exception(src)) // early C++0x drafts name - #endif - {} -}; // class tbb::internal::tbb_exception_ptr - -} // namespace internal -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -} // namespace tbb - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#endif /* __TBB_exception_H */ diff --git a/inst/include/tbb/tbb_machine.h b/inst/include/tbb/tbb_machine.h deleted file mode 100644 index 479806529..000000000 --- a/inst/include/tbb/tbb_machine.h +++ /dev/null @@ -1,967 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#define __TBB_machine_H - -/** This header provides basic platform abstraction layer by hooking up appropriate - architecture/OS/compiler specific headers from the /include/tbb/machine directory. 
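[Editorial aside, not part of the diff: the rough shape of a hypothetical architecture plug-in header of the kind tbb_machine.h describes next. Every name below other than the __TBB_* macros themselves is illustrative (GCC builtins assumed), and a real port lives under include/tbb/machine/; anything the port omits is requested from the generic layer through the __TBB_USE_GENERIC_* macros enumerated just below:]

```cpp
// Hypothetical minimal port sketch (illustrative only).
#include <sched.h>     // sched_yield
#include <stdint.h>

#define __TBB_WORDSIZE 8
#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
#define __TBB_Yield() sched_yield()
#define __TBB_full_memory_fence() __sync_synchronize()

static inline int32_t __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand ) {
    // Store 'value' if *ptr equals 'comparand'; either way, return the prior contents.
    return __sync_val_compare_and_swap( (volatile int32_t*)ptr, comparand, value );
}

// Let tbb_machine.h synthesize everything else from the primitives above.
#define __TBB_USE_GENERIC_PART_WORD_CAS          1
#define __TBB_USE_GENERIC_FETCH_ADD              1
#define __TBB_USE_GENERIC_FETCH_STORE            1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE     1
```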
- If a plug-in header does not implement all the required APIs, it must specify - the missing ones by setting one or more of the following macros: - - __TBB_USE_GENERIC_PART_WORD_CAS - __TBB_USE_GENERIC_PART_WORD_FETCH_ADD - __TBB_USE_GENERIC_PART_WORD_FETCH_STORE - __TBB_USE_GENERIC_FETCH_ADD - __TBB_USE_GENERIC_FETCH_STORE - __TBB_USE_GENERIC_DWORD_FETCH_ADD - __TBB_USE_GENERIC_DWORD_FETCH_STORE - __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE - __TBB_USE_GENERIC_FULL_FENCED_LOAD_STORE - __TBB_USE_GENERIC_RELAXED_LOAD_STORE - __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE - - In this case tbb_machine.h will add missing functionality based on a minimal set - of APIs that are required to be implemented by all plug-in headers as described - further. - Note that these generic implementations may be sub-optimal for a particular - architecture, and thus should be relied upon only after careful evaluation - or as the last resort. - - Additionally __TBB_64BIT_ATOMICS can be set to 0 on a 32-bit architecture to - indicate that the port is not going to support double word atomics. It may also - be set to 1 explicitly, though normally this is not necessary as tbb_machine.h - will set it automatically. - - __TBB_ENDIANNESS macro can be defined by the implementation as well. - It is used only if __TBB_USE_GENERIC_PART_WORD_CAS is set (or for testing), - and must specify the layout of aligned 16-bit and 32-bit data anywhere within a process - (while the details of unaligned 16-bit or 32-bit data or of 64-bit data are irrelevant). - The layout must be the same at all relevant memory locations within the current process; - in case of page-specific endianness, one endianness must be kept "out of sight". - Possible settings, reflecting hardware and possibly O.S. convention, are: - - __TBB_ENDIAN_BIG for big-endian data, - - __TBB_ENDIAN_LITTLE for little-endian data, - - __TBB_ENDIAN_DETECT for run-time detection iff exactly one of the above, - - __TBB_ENDIAN_UNSUPPORTED to prevent undefined behavior if none of the above. - - Prerequisites for each architecture port - ---------------------------------------- - The following functions and macros have no generic implementation. Therefore they must be - implemented in each machine architecture specific header either as a conventional - function or as a functional macro. - - __TBB_WORDSIZE - This is the size of machine word in bytes, i.e. for 32 bit systems it - should be defined to 4. - - __TBB_Yield() - Signals OS that the current thread is willing to relinquish the remainder - of its time quantum. - - __TBB_full_memory_fence() - Must prevent all memory operations from being reordered across it (both - by hardware and compiler). All such fences must be totally ordered (or - sequentially consistent). - - __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand ) - Must be provided if __TBB_USE_FENCED_ATOMICS is not set. - - __TBB_machine_cmpswp8( volatile void *ptr, int64_t value, int64_t comparand ) - Must be provided for 64-bit architectures if __TBB_USE_FENCED_ATOMICS is not set, - and for 32-bit architectures if __TBB_64BIT_ATOMICS is set - - __TBB_machine_<op><S><fence>(...), where - <op> = {cmpswp, fetchadd, fetchstore} - <S> = {1, 2, 4, 8} - <fence> = {full_fence, acquire, release, relaxed} - Must be provided if __TBB_USE_FENCED_ATOMICS is set.
-
-    __TBB_control_consistency_helper()
-        Bridges the memory-semantics gap between architectures providing only
-        implicit C++0x "consume" semantics (like Power Architecture) and those
-        also implicitly obeying control dependencies (like IA-64 architecture).
-        It must be used only in conditional code where the condition is itself
-        data-dependent, and will then make subsequent code behave as if the
-        original data dependency were acquired.
-        It needs only a compiler fence where implied by the architecture
-        either specifically (like IA-64 architecture) or because generally stronger
-        "acquire" semantics are enforced (like x86).
-        It is always valid, though potentially suboptimal, to replace
-        control with acquire on the load and then remove the helper.
-
-    __TBB_acquire_consistency_helper(), __TBB_release_consistency_helper()
-        Must be provided if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE is set.
-        Enforce acquire and release semantics in generic implementations of fenced
-        store and load operations. Depending on the particular architecture/compiler
-        combination they may be a hardware fence, a compiler fence, both or nothing.
- **/
-
-#include "tbb_stddef.h"
-
-namespace tbb {
-namespace internal { //< @cond INTERNAL
-
-////////////////////////////////////////////////////////////////////////////////
-// Overridable helpers declarations
-//
-// A machine/*.h file may choose to define these templates, otherwise it must
-// request default implementation by setting appropriate __TBB_USE_GENERIC_XXX macro(s).
-//
-template <typename T, size_t S>
-struct machine_load_store;
-
-template <typename T, size_t S>
-struct machine_load_store_relaxed;
-
-template <typename T, size_t S>
-struct machine_load_store_seq_cst;
-//
-// End of overridable helpers declarations
-////////////////////////////////////////////////////////////////////////////////
-
-template<size_t S> struct atomic_selector;
-
-template<> struct atomic_selector<1> {
-    typedef int8_t word;
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-template<> struct atomic_selector<2> {
-    typedef int16_t word;
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-template<> struct atomic_selector<4> {
-#if _MSC_VER && !_WIN64
-    // Work-around that avoids spurious /Wp64 warnings
-    typedef intptr_t word;
-#else
-    typedef int32_t word;
-#endif
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-template<> struct atomic_selector<8> {
-    typedef int64_t word;
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-}} //< namespaces internal @endcond, tbb
-
-#define __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(M)                                    \
-    inline void __TBB_machine_generic_store8##M(volatile void *ptr, int64_t value) {     \
-        for(;;) {                                                                        \
-            int64_t result = *(volatile int64_t *)ptr;                                   \
-            if( __TBB_machine_cmpswp8##M(ptr,value,result)==result ) break;              \
-        }                                                                                \
-    }                                                                                    \
-
-#define __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(M)                                     \
-    inline int64_t __TBB_machine_generic_load8##M(const volatile void *ptr) {            \
-        /* Comparand and new value may be anything, they only must be equal, and */      \
-        /* the value should have a low probability to be actually found in 'location'.*/ \
-        const int64_t anyvalue = 2305843009213693951LL;                                  \
-        return __TBB_machine_cmpswp8##M(const_cast<volatile void*>(ptr),anyvalue,anyvalue); \
-    }                                                                                    \
-
-// The set of allowed values for __TBB_ENDIANNESS (see above for details)
-#define __TBB_ENDIAN_UNSUPPORTED -1
-#define __TBB_ENDIAN_LITTLE 0
-#define __TBB_ENDIAN_BIG 1
-#define __TBB_ENDIAN_DETECT 2
-
-#if _WIN32||_WIN64
-
-#ifdef _MANAGED
-#pragma managed(push,
off) -#endif - - #if __MINGW64__ || __MINGW32__ - extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); - #define __TBB_Yield() SwitchToThread() - #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) - #include "machine/gcc_generic.h" - #elif __MINGW64__ - #include "machine/linux_intel64.h" - #elif __MINGW32__ - #include "machine/linux_ia32.h" - #endif - #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #elif defined(_M_IX86) && !defined(__TBB_WIN32_USE_CL_BUILTINS) - #include "machine/windows_ia32.h" - #elif defined(_M_X64) - #include "machine/windows_intel64.h" - #elif defined(_XBOX) - #include "machine/xbox360_ppc.h" - #elif defined(_M_ARM) || defined(__TBB_WIN32_USE_CL_BUILTINS) - #include "machine/msvc_armv7.h" - #endif - -#ifdef _MANAGED -#pragma managed(pop) -#endif - -#elif __TBB_DEFINE_MIC - - #include "machine/mic_common.h" - #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #else - #include "machine/linux_intel64.h" - #endif - -#elif __linux__ || __FreeBSD__ || __NetBSD__ - - #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) - #include "machine/gcc_generic.h" - #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #elif __i386__ - #include "machine/linux_ia32.h" - #elif __x86_64__ - #include "machine/linux_intel64.h" - #elif __ia64__ - #include "machine/linux_ia64.h" - #elif __powerpc__ - #include "machine/mac_ppc.h" - #elif __arm__ - #include "machine/gcc_armv7.h" - #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT - #include "machine/gcc_generic.h" - #endif - #include "machine/linux_common.h" - -#elif __APPLE__ - //TODO: TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix - #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #elif __i386__ - #include "machine/linux_ia32.h" - #elif __x86_64__ - #include "machine/linux_intel64.h" - #elif __POWERPC__ - #include "machine/mac_ppc.h" - #endif - #include "machine/macos_common.h" - -#elif _AIX - - #include "machine/ibm_aix51.h" - -#elif __sun || __SUNPRO_CC - - #define __asm__ asm - #define __volatile__ volatile - - #if __i386 || __i386__ - #include "machine/linux_ia32.h" - #elif __x86_64__ - #include "machine/linux_intel64.h" - #elif __sparc - #include "machine/sunos_sparc.h" - #endif - #include - - #define __TBB_Yield() sched_yield() - -#endif /* OS selection */ - -#ifndef __TBB_64BIT_ATOMICS - #define __TBB_64BIT_ATOMICS 1 -#endif - -//TODO: replace usage of these functions with usage of tbb::atomic, and then remove them -//TODO: map functions with W suffix to use cast to tbb::atomic and according op, i.e. 
as_atomic().op() -// Special atomic functions -#if __TBB_USE_FENCED_ATOMICS - #define __TBB_machine_cmpswp1 __TBB_machine_cmpswp1full_fence - #define __TBB_machine_cmpswp2 __TBB_machine_cmpswp2full_fence - #define __TBB_machine_cmpswp4 __TBB_machine_cmpswp4full_fence - #define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8full_fence - - #if __TBB_WORDSIZE==8 - #define __TBB_machine_fetchadd8 __TBB_machine_fetchadd8full_fence - #define __TBB_machine_fetchstore8 __TBB_machine_fetchstore8full_fence - #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd8release(P,V) - #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd8acquire(P,1) - #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd8release(P,(-1)) - #else - #define __TBB_machine_fetchadd4 __TBB_machine_fetchadd4full_fence - #define __TBB_machine_fetchstore4 __TBB_machine_fetchstore4full_fence - #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd4release(P,V) - #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd4acquire(P,1) - #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd4release(P,(-1)) - #endif /* __TBB_WORDSIZE==4 */ -#else /* !__TBB_USE_FENCED_ATOMICS */ - #define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) - #define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) - #define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1)) -#endif /* !__TBB_USE_FENCED_ATOMICS */ - -#if __TBB_WORDSIZE==4 - #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) - #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V) - #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V) -#elif __TBB_WORDSIZE==8 - #if __TBB_USE_GENERIC_DWORD_LOAD_STORE || __TBB_USE_GENERIC_DWORD_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_STORE - #error These macros should only be used on 32-bit platforms. - #endif - - #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) - #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) - #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) -#else /* __TBB_WORDSIZE != 8 */ - #error Unsupported machine word size. -#endif /* __TBB_WORDSIZE */ - -#ifndef __TBB_Pause - inline void __TBB_Pause(int32_t) { - __TBB_Yield(); - } -#endif - -namespace tbb { - -//! Sequentially consistent full memory fence. -inline void atomic_fence () { __TBB_full_memory_fence(); } - -namespace internal { //< @cond INTERNAL - -//! Class that implements exponential backoff. -/** See implementation of spin_wait_while_eq for an example. */ -class atomic_backoff : no_copy { - //! Time delay, in units of "pause" instructions. - /** Should be equal to approximately the number of "pause" instructions - that take the same time as an context switch. */ - static const int32_t LOOPS_BEFORE_YIELD = 16; - int32_t count; -public: - // In many cases, an object of this type is initialized eagerly on hot path, - // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ } - // For this reason, the construction cost must be very small! - atomic_backoff() : count(1) {} - // This constructor pauses immediately; do not use on hot paths! - atomic_backoff( bool ) : count(1) { pause(); } - - //! Pause for a while. - void pause() { - if( count<=LOOPS_BEFORE_YIELD ) { - __TBB_Pause(count); - // Pause twice as long the next time. - count*=2; - } else { - // Pause is so long that we might as well yield CPU to scheduler. - __TBB_Yield(); - } - } - - // pause for a few times and then return false immediately. 
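-    // (A caller that prefers blocking over unbounded spinning can loop on
-    //  bounded_pause() and park the thread once it returns false, i.e. once
-    //  the exponential backoff has grown past LOOPS_BEFORE_YIELD.)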
-    bool bounded_pause() {
-        if( count<=LOOPS_BEFORE_YIELD ) {
-            __TBB_Pause(count);
-            // Pause twice as long the next time.
-            count*=2;
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    void reset() {
-        count = 1;
-    }
-};
-
-//! Spin WHILE the value of the variable is equal to a given value
-/** T and U should be comparable types. */
-template<typename T, typename U>
-void spin_wait_while_eq( const volatile T& location, U value ) {
-    atomic_backoff backoff;
-    while( location==value ) backoff.pause();
-}
-
-//! Spin UNTIL the value of the variable is equal to a given value
-/** T and U should be comparable types. */
-template<typename T, typename U>
-void spin_wait_until_eq( const volatile T& location, const U value ) {
-    atomic_backoff backoff;
-    while( location!=value ) backoff.pause();
-}
-
-template<typename predicate_type>
-void spin_wait_while(predicate_type condition){
-    atomic_backoff backoff;
-    while( condition() ) backoff.pause();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Generic compare-and-swap applied to only a part of a machine word.
-//
-#ifndef __TBB_ENDIANNESS
-#define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT
-#endif
-
-#if __TBB_USE_GENERIC_PART_WORD_CAS && __TBB_ENDIANNESS==__TBB_ENDIAN_UNSUPPORTED
-#error Generic implementation of part-word CAS may not be used with __TBB_ENDIAN_UNSUPPORTED
-#endif
-
-#if __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED
-//
-// This function is the only use of __TBB_ENDIANNESS.
-// The following restrictions/limitations apply for this operation:
-//  - T must be an integer type of at most 4 bytes for the casts and calculations to work
-//  - T must also be less than 4 bytes to avoid compiler warnings when computing mask
-//      (and for the operation to be useful at all, so no workaround is applied)
-//  - the architecture must consistently use either little-endian or big-endian (same for all locations)
-//
-// TODO: static_assert for the type requirements stated above
-template<typename T>
-inline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value, const T comparand ) {
-    struct endianness{ static bool is_big_endian(){
-        #if __TBB_ENDIANNESS==__TBB_ENDIAN_DETECT
-            const uint32_t probe = 0x03020100;
-            return (((const char*)(&probe))[0]==0x03);
-        #elif __TBB_ENDIANNESS==__TBB_ENDIAN_BIG || __TBB_ENDIANNESS==__TBB_ENDIAN_LITTLE
-            return __TBB_ENDIANNESS==__TBB_ENDIAN_BIG;
-        #else
-            #error Unexpected value of __TBB_ENDIANNESS
-        #endif
-    }};
-
-    const uint32_t byte_offset            = (uint32_t) ((uintptr_t)ptr & 0x3);
-    volatile uint32_t * const aligned_ptr = (uint32_t*)((uintptr_t)ptr - byte_offset );
-
-    // location of T within uint32_t for a C++ shift operation
-    const uint32_t bits_to_shift     = 8*(endianness::is_big_endian() ? (4 - sizeof(T) - (byte_offset)) : byte_offset);
-    const uint32_t mask              = (((uint32_t)1<<(sizeof(T)*8)) - 1 )<<bits_to_shift;
-    // for signed T, any sign extension bits in cast value/comparand are immaterial
-    const uint32_t shifted_comparand = ((uint32_t)comparand << bits_to_shift)&mask;
-    const uint32_t shifted_value     = ((uint32_t)value     << bits_to_shift)&mask;
-
-    for( atomic_backoff b;;b.pause() ) {
-        const uint32_t surroundings  = *aligned_ptr & ~mask; // may have changed during the pause
-        const uint32_t big_comparand = surroundings | shifted_comparand;
-        const uint32_t big_value     = surroundings | shifted_value;
-        // __TBB_machine_cmpswp4 presumed to have full fence.
-        // Cast shuts up /Wp64 warning
-        const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligned_ptr, big_value, big_comparand );
-        if( big_result == big_comparand                    // CAS succeeded
-          || ((big_result ^ big_comparand) & mask) != 0 )  // CAS failed and the bits of interest have changed
-        {
-            return T((big_result & mask) >> bits_to_shift);
-        }
-        else continue; // CAS failed but the bits of interest were not changed
-    }
-}
-#endif // __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED
-////////////////////////////////////////////////////////////////////////////////
-
-template<size_t S, typename T>
-inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand );
-
-template<>
-inline int8_t __TBB_CompareAndSwapGeneric <1,int8_t> (volatile void *ptr, int8_t value, int8_t comparand ) {
-#if __TBB_USE_GENERIC_PART_WORD_CAS
-    return __TBB_MaskedCompareAndSwap<int8_t>((volatile int8_t *)ptr,value,comparand);
-#else
-    return __TBB_machine_cmpswp1(ptr,value,comparand);
-#endif
-}
-
-template<>
-inline int16_t __TBB_CompareAndSwapGeneric <2,int16_t> (volatile void *ptr, int16_t value, int16_t comparand ) {
-#if __TBB_USE_GENERIC_PART_WORD_CAS
-    return __TBB_MaskedCompareAndSwap<int16_t>((volatile int16_t *)ptr,value,comparand);
-#else
-    return __TBB_machine_cmpswp2(ptr,value,comparand);
-#endif
-}
-
-template<>
-inline int32_t __TBB_CompareAndSwapGeneric <4,int32_t> (volatile void *ptr, int32_t value, int32_t comparand ) {
-    // Cast shuts up /Wp64 warning
-    return (int32_t)__TBB_machine_cmpswp4(ptr,value,comparand);
-}
-
-#if __TBB_64BIT_ATOMICS
-template<>
-inline int64_t __TBB_CompareAndSwapGeneric <8,int64_t> (volatile void *ptr, int64_t value, int64_t comparand ) {
-    return __TBB_machine_cmpswp8(ptr,value,comparand);
-}
-#endif
-
-template<size_t S, typename T>
-inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) {
-    T result;
-    for( atomic_backoff b;;b.pause() ) {
-        result = *reinterpret_cast<volatile T *>(ptr);
-        // __TBB_CompareAndSwapGeneric presumed to have full fence.
-        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, result+addend, result )==result )
-            break;
-    }
-    return result;
-}
-
-template<size_t S, typename T>
-inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) {
-    T result;
-    for( atomic_backoff b;;b.pause() ) {
-        result = *reinterpret_cast<volatile T *>(ptr);
-        // __TBB_CompareAndSwapGeneric presumed to have full fence.
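-        // (Same CAS-loop emulation as __TBB_FetchAndAddGeneric above: snapshot the
-        //  current value, attempt to publish the new one, and retry with exponential
-        //  backoff until the compare-and-swap confirms the snapshot.)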
- if( __TBB_CompareAndSwapGeneric ( ptr, value, result )==result ) - break; - } - return result; -} - -#if __TBB_USE_GENERIC_PART_WORD_CAS -#define __TBB_machine_cmpswp1 tbb::internal::__TBB_CompareAndSwapGeneric<1,int8_t> -#define __TBB_machine_cmpswp2 tbb::internal::__TBB_CompareAndSwapGeneric<2,int16_t> -#endif - -#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_PART_WORD_FETCH_ADD -#define __TBB_machine_fetchadd1 tbb::internal::__TBB_FetchAndAddGeneric<1,int8_t> -#define __TBB_machine_fetchadd2 tbb::internal::__TBB_FetchAndAddGeneric<2,int16_t> -#endif - -#if __TBB_USE_GENERIC_FETCH_ADD -#define __TBB_machine_fetchadd4 tbb::internal::__TBB_FetchAndAddGeneric<4,int32_t> -#endif - -#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_ADD -#define __TBB_machine_fetchadd8 tbb::internal::__TBB_FetchAndAddGeneric<8,int64_t> -#endif - -#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_PART_WORD_FETCH_STORE -#define __TBB_machine_fetchstore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,int8_t> -#define __TBB_machine_fetchstore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,int16_t> -#endif - -#if __TBB_USE_GENERIC_FETCH_STORE -#define __TBB_machine_fetchstore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,int32_t> -#endif - -#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_DWORD_FETCH_STORE -#define __TBB_machine_fetchstore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,int64_t> -#endif - -#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE -#define __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(S) \ - atomic_selector::word atomic_selector::fetch_store ( volatile void* location, word value ) { \ - return __TBB_machine_fetchstore##S( location, value ); \ - } - -__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(1) -__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(2) -__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(4) -__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(8) - -#undef __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE -#endif /* __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */ - -#if __TBB_USE_GENERIC_DWORD_LOAD_STORE -/*TODO: find a more elegant way to handle function names difference*/ -#if ! __TBB_USE_FENCED_ATOMICS - /* This name forwarding is needed for generic implementation of - * load8/store8 defined below (via macro) to pick the right CAS function*/ - #define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8 -#endif -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence) -__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence) - -#if ! __TBB_USE_FENCED_ATOMICS - #undef __TBB_machine_cmpswp8full_fence -#endif - -#define __TBB_machine_store8 tbb::internal::__TBB_machine_generic_store8full_fence -#define __TBB_machine_load8 tbb::internal::__TBB_machine_generic_load8full_fence -#endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */ - -#if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE -/** Fenced operations use volatile qualifier to prevent compiler from optimizing - them out, and on architectures with weak memory ordering to induce compiler - to generate code with appropriate acquire/release semantics. - On architectures like IA32, Intel64 (and likely Sparc TSO) volatile has - no effect on code gen, and consistency helpers serve as a compiler fence (the - latter being true for IA64/gcc as well to fix a bug in some gcc versions). 
- This code assumes that the generated instructions will operate atomically, - which typically requires a type that can be moved in a single instruction, - cooperation from the compiler for effective use of such an instruction, - and appropriate alignment of the data. **/ -template -struct machine_load_store { - static T load_with_acquire ( const volatile T& location ) { - T to_return = location; - __TBB_acquire_consistency_helper(); - return to_return; - } - static void store_with_release ( volatile T &location, T value ) { - __TBB_release_consistency_helper(); - location = value; - } -}; - -//in general, plain load and store of 32bit compiler is not atomic for 64bit types -#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS -template -struct machine_load_store { - static T load_with_acquire ( const volatile T& location ) { - return (T)__TBB_machine_load8( (const volatile void*)&location ); - } - static void store_with_release ( volatile T& location, T value ) { - __TBB_machine_store8( (volatile void*)&location, (int64_t)value ); - } -}; -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ -#endif /* __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE */ - -#if __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE -template -struct machine_load_store_seq_cst { - static T load ( const volatile T& location ) { - __TBB_full_memory_fence(); - return machine_load_store::load_with_acquire( location ); - } -#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE - static void store ( volatile T &location, T value ) { - atomic_selector::fetch_store( (volatile void*)&location, (typename atomic_selector::word)value ); - } -#else /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */ - static void store ( volatile T &location, T value ) { - machine_load_store::store_with_release( location, value ); - __TBB_full_memory_fence(); - } -#endif /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */ -}; - -#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS -/** The implementation does not use functions __TBB_machine_load8/store8 as they - are not required to be sequentially consistent. **/ -template -struct machine_load_store_seq_cst { - static T load ( const volatile T& location ) { - // Comparand and new value may be anything, they only must be equal, and - // the value should have a low probability to be actually found in 'location'. - const int64_t anyvalue = 2305843009213693951LL; - return __TBB_machine_cmpswp8( (volatile void*)const_cast(&location), anyvalue, anyvalue ); - } - static void store ( volatile T &location, T value ) { - int64_t result = (volatile int64_t&)location; - while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result ) - result = (volatile int64_t&)location; - } -}; -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ -#endif /*__TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE */ - -#if __TBB_USE_GENERIC_RELAXED_LOAD_STORE -// Relaxed operations add volatile qualifier to prevent compiler from optimizing them out. -/** Volatile should not incur any additional cost on IA32, Intel64, and Sparc TSO - architectures. However on architectures with weak memory ordering compiler may - generate code with acquire/release semantics for operations on volatile data. 
**/ -template -struct machine_load_store_relaxed { - static inline T load ( const volatile T& location ) { - return location; - } - static inline void store ( volatile T& location, T value ) { - location = value; - } -}; - -#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS -template -struct machine_load_store_relaxed { - static inline T load ( const volatile T& location ) { - return (T)__TBB_machine_load8( (const volatile void*)&location ); - } - static inline void store ( volatile T& location, T value ) { - __TBB_machine_store8( (volatile void*)&location, (int64_t)value ); - } -}; -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ -#endif /* __TBB_USE_GENERIC_RELAXED_LOAD_STORE */ - -#undef __TBB_WORDSIZE //this macro is forbidden to use outside of atomic machinery - -template -inline T __TBB_load_with_acquire(const volatile T &location) { - return machine_load_store::load_with_acquire( location ); -} -template -inline void __TBB_store_with_release(volatile T& location, V value) { - machine_load_store::store_with_release( location, T(value) ); -} -//! Overload that exists solely to avoid /Wp64 warnings. -inline void __TBB_store_with_release(volatile size_t& location, size_t value) { - machine_load_store::store_with_release( location, value ); -} - -template -inline T __TBB_load_full_fence(const volatile T &location) { - return machine_load_store_seq_cst::load( location ); -} -template -inline void __TBB_store_full_fence(volatile T& location, V value) { - machine_load_store_seq_cst::store( location, T(value) ); -} -//! Overload that exists solely to avoid /Wp64 warnings. -inline void __TBB_store_full_fence(volatile size_t& location, size_t value) { - machine_load_store_seq_cst::store( location, value ); -} - -template -inline T __TBB_load_relaxed (const volatile T& location) { - return machine_load_store_relaxed::load( const_cast(location) ); -} -template -inline void __TBB_store_relaxed ( volatile T& location, V value ) { - machine_load_store_relaxed::store( const_cast(location), T(value) ); -} -//! Overload that exists solely to avoid /Wp64 warnings. -inline void __TBB_store_relaxed ( volatile size_t& location, size_t value ) { - machine_load_store_relaxed::store( const_cast(location), value ); -} - -// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as -// strict as type T. The type should have a trivial default constructor and destructor, so that -// arrays of that type can be declared without initializers. -// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands -// to a type bigger than T. -// The default definition here works on machines where integers are naturally aligned and the -// strictest alignment is 64. 
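For comparison, on C++11 and newer compilers the same guarantee can be expressed portably with alignas/alignof; the attribute-based machinery below predates that. A minimal sketch (illustrative only; aligned_slot is a hypothetical name, not part of TBB):

#include <cstddef>

// Raw storage whose alignment is at least as strict as T's -- the property
// that __TBB_TypeWithAlignmentAtLeastAsStrict(T) provides for pre-C++11 compilers.
template <typename T>
struct aligned_slot {
    alignas(alignof(T)) unsigned char bytes[sizeof(T)];
};

static_assert(alignof(aligned_slot<long double>) >= alignof(long double),
              "storage must be at least as strictly aligned as T");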
-#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict
-
-#if __TBB_ATTRIBUTE_ALIGNED_PRESENT
-
-#define __TBB_DefineTypeWithAlignment(PowerOf2)       \
-struct __TBB_machine_type_with_alignment_##PowerOf2 { \
-    uint32_t member[PowerOf2/sizeof(uint32_t)];       \
-} __attribute__((aligned(PowerOf2)));
-#define __TBB_alignof(T) __alignof__(T)
-
-#elif __TBB_DECLSPEC_ALIGN_PRESENT
-
-#define __TBB_DefineTypeWithAlignment(PowerOf2)       \
-__declspec(align(PowerOf2))                           \
-struct __TBB_machine_type_with_alignment_##PowerOf2 { \
-    uint32_t member[PowerOf2/sizeof(uint32_t)];       \
-};
-#define __TBB_alignof(T) __alignof(T)
-
-#else /* A compiler with unknown syntax for data alignment */
-#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T)
-#endif
-
-/* Now declare types aligned to useful powers of two */
-// TODO: Is __TBB_DefineTypeWithAlignment(8) needed on 32 bit platforms?
-__TBB_DefineTypeWithAlignment(16)
-__TBB_DefineTypeWithAlignment(32)
-__TBB_DefineTypeWithAlignment(64)
-
-typedef __TBB_machine_type_with_alignment_64 __TBB_machine_type_with_strictest_alignment;
-
-// Primary template is a declaration of incomplete type so that it fails with unknown alignments
-template<size_t N> struct type_with_alignment;
-
-// Specializations for allowed alignments
-template<> struct type_with_alignment<1> { char member; };
-template<> struct type_with_alignment<2> { uint16_t member; };
-template<> struct type_with_alignment<4> { uint32_t member; };
-template<> struct type_with_alignment<8> { uint64_t member; };
-template<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; };
-template<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; };
-template<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; };
-
-#if __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN
-//! Work around for bug in GNU 3.2 and MSVC compilers.
-/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated.
-    The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */
-template<size_t Size, typename T>
-struct work_around_alignment_bug {
-    static const size_t alignment = __TBB_alignof(T);
-};
-#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<tbb::internal::work_around_alignment_bug<sizeof(T),T>::alignment>
-#else
-#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__TBB_alignof(T)>
-#endif /* __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN */
-
-#endif /* __TBB_TypeWithAlignmentAtLeastAsStrict */
-
-// Template class here is to avoid instantiation of the static data for modules that don't use it
-template<typename T>
-struct reverse {
-    static const T byte_table[256];
-};
-// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed
-// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.
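As the comment above notes, the table values can also be computed on the fly; a minimal sketch of that slower path (illustrative only; reverse_byte_slow is a hypothetical name, not part of TBB):

inline unsigned char reverse_byte_slow( unsigned char b ) {
    unsigned char r = 0;
    for( int i = 0; i < 8; ++i ) {   // move each bit into its mirrored position
        r = (unsigned char)((r << 1) | (b & 1));
        b = (unsigned char)(b >> 1);
    }
    return r;                        // e.g. reverse_byte_slow(0x01) == 0x80
}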
-template -const T reverse::byte_table[256] = { - 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, - 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, - 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, - 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, - 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, - 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, - 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, - 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, - 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, - 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, - 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, - 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, - 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, - 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, - 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, - 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF -}; - -} // namespace internal @endcond -} // namespace tbb - -// Preserving access to legacy APIs -using tbb::internal::__TBB_load_with_acquire; -using tbb::internal::__TBB_store_with_release; - -// Mapping historically used names to the ones expected by atomic_load_store_traits -#define __TBB_load_acquire __TBB_load_with_acquire -#define __TBB_store_release __TBB_store_with_release - -#ifndef __TBB_Log2 -inline intptr_t __TBB_Log2( uintptr_t x ) { - if( x==0 ) return -1; - intptr_t result = 0; - -#if !defined(_M_ARM) - uintptr_t tmp; - if( sizeof(x)>4 && (tmp = ((uint64_t)x)>>32) ) { x=tmp; result += 32; } -#endif - if( uintptr_t tmp = x>>16 ) { x=tmp; result += 16; } - if( uintptr_t tmp = x>>8 ) { x=tmp; result += 8; } - if( uintptr_t tmp = x>>4 ) { x=tmp; result += 4; } - if( uintptr_t tmp = x>>2 ) { x=tmp; result += 2; } - - return (x&2)? 
result+1: result; -} -#endif - -#ifndef __TBB_AtomicOR -inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) { - for( tbb::internal::atomic_backoff b;;b.pause() ) { - uintptr_t tmp = *(volatile uintptr_t *)operand; - uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp); - if( result==tmp ) break; - } -} -#endif - -#ifndef __TBB_AtomicAND -inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) { - for( tbb::internal::atomic_backoff b;;b.pause() ) { - uintptr_t tmp = *(volatile uintptr_t *)operand; - uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp); - if( result==tmp ) break; - } -} -#endif - -#if __TBB_PREFETCHING -#ifndef __TBB_cl_prefetch -#error This platform does not define cache management primitives required for __TBB_PREFETCHING -#endif - -#ifndef __TBB_cl_evict -#define __TBB_cl_evict(p) -#endif -#endif - -#ifndef __TBB_Flag -typedef unsigned char __TBB_Flag; -#endif -typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; - -#ifndef __TBB_TryLockByte -inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) { - return __TBB_machine_cmpswp1(&flag,1,0)==0; -} -#endif - -#ifndef __TBB_LockByte -inline __TBB_Flag __TBB_LockByte( __TBB_atomic_flag& flag ) { - tbb::internal::atomic_backoff backoff; - while( !__TBB_TryLockByte(flag) ) backoff.pause(); - return 0; -} -#endif - -#ifndef __TBB_UnlockByte -#define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0) -#endif - -// lock primitives with TSX -#if ( __TBB_x86_32 || __TBB_x86_64 ) /* only on ia32/intel64 */ -inline void __TBB_TryLockByteElidedCancel() { __TBB_machine_try_lock_elided_cancel(); } - -inline bool __TBB_TryLockByteElided( __TBB_atomic_flag& flag ) { - bool res = __TBB_machine_try_lock_elided( &flag )!=0; - // to avoid the "lemming" effect, we need to abort the transaction - // if __TBB_machine_try_lock_elided returns false (i.e., someone else - // has acquired the mutex non-speculatively). - if( !res ) __TBB_TryLockByteElidedCancel(); - return res; -} - -inline void __TBB_LockByteElided( __TBB_atomic_flag& flag ) -{ - for(;;) { - tbb::internal::spin_wait_while_eq( flag, 1 ); - if( __TBB_machine_try_lock_elided( &flag ) ) - return; - // Another thread acquired the lock "for real". - // To avoid the "lemming" effect, we abort the transaction. - __TBB_TryLockByteElidedCancel(); - } -} - -inline void __TBB_UnlockByteElided( __TBB_atomic_flag& flag ) { - __TBB_machine_unlock_elided( &flag ); -} -#endif - -#ifndef __TBB_ReverseByte -inline unsigned char __TBB_ReverseByte(unsigned char src) { - return tbb::internal::reverse::byte_table[src]; -} -#endif - -template -T __TBB_ReverseBits(T src) { - T dst; - unsigned char *original = (unsigned char *) &src; - unsigned char *reversed = (unsigned char *) &dst; - - for( int i = sizeof(T)-1; i >= 0; i-- ) - reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] ); - - return dst; -} - -#endif /* __TBB_machine_H */ diff --git a/inst/include/tbb/tbb_profiling.h b/inst/include/tbb/tbb_profiling.h deleted file mode 100644 index 5a0830f75..000000000 --- a/inst/include/tbb/tbb_profiling.h +++ /dev/null @@ -1,271 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_profiling_H -#define __TBB_profiling_H - -namespace tbb { - namespace internal { - - // - // This is not under __TBB_ITT_STRUCTURE_API because these values are used directly in flow_graph.h. - // - - // include list of index names - #define TBB_STRING_RESOURCE(index_name,str) index_name, - enum string_index { - #include "internal/_tbb_strings.h" - NUM_STRINGS - }; - #undef TBB_STRING_RESOURCE - - enum itt_relation - { - __itt_relation_is_unknown = 0, - __itt_relation_is_dependent_on, /**< "A is dependent on B" means that A cannot start until B completes */ - __itt_relation_is_sibling_of, /**< "A is sibling of B" means that A and B were created as a group */ - __itt_relation_is_parent_of, /**< "A is parent of B" means that A created B */ - __itt_relation_is_continuation_of, /**< "A is continuation of B" means that A assumes the dependencies of B */ - __itt_relation_is_child_of, /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */ - __itt_relation_is_continued_by, /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */ - __itt_relation_is_predecessor_to /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */ - }; - - } -} - -// Check if the tools support is enabled -#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS - -#if _WIN32||_WIN64 -#include /* mbstowcs_s */ -#endif -#include "tbb_stddef.h" - -namespace tbb { - namespace internal { - -#if _WIN32||_WIN64 - void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name ); - inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) { -#if _MSC_VER>=1400 - size_t len; - mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE ); - return len; // mbstowcs_s counts null terminator -#else - size_t len = mbstowcs( wcs, mbs, bufsize ); - if(wcs && len!=size_t(-1) ) - wcs[len - inline void itt_store_word_with_release(tbb::atomic& dst, U src) { -#if TBB_USE_THREADING_TOOLS - // This assertion should be replaced with static_assert - __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized."); - itt_store_pointer_with_release_v3(&dst, (void *)uintptr_t(src)); -#else - dst = src; -#endif // TBB_USE_THREADING_TOOLS - } - - template - inline T itt_load_word_with_acquire(const tbb::atomic& src) { -#if TBB_USE_THREADING_TOOLS - // This assertion should be replaced with static_assert - __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be 
word-sized."); -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4311) -#endif - T result = (T)itt_load_pointer_with_acquire_v3(&src); -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif - return result; -#else - return src; -#endif // TBB_USE_THREADING_TOOLS - } - - template - inline void itt_store_word_with_release(T& dst, T src) { -#if TBB_USE_THREADING_TOOLS - // This assertion should be replaced with static_assert - __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized."); - itt_store_pointer_with_release_v3(&dst, (void *)src); -#else - __TBB_store_with_release(dst, src); -#endif // TBB_USE_THREADING_TOOLS - } - - template - inline T itt_load_word_with_acquire(const T& src) { -#if TBB_USE_THREADING_TOOLS - // This assertion should be replaced with static_assert - __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized"); - return (T)itt_load_pointer_with_acquire_v3(&src); -#else - return __TBB_load_with_acquire(src); -#endif // TBB_USE_THREADING_TOOLS - } - - template - inline void itt_hide_store_word(T& dst, T src) { -#if TBB_USE_THREADING_TOOLS - //TODO: This assertion should be replaced with static_assert - __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized"); - itt_store_pointer_with_release_v3(&dst, (void *)src); -#else - dst = src; -#endif - } - - //TODO: rename to itt_hide_load_word_relaxed - template - inline T itt_hide_load_word(const T& src) { -#if TBB_USE_THREADING_TOOLS - //TODO: This assertion should be replaced with static_assert - __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized."); - return (T)itt_load_pointer_v3(&src); -#else - return src; -#endif - } - -#if TBB_USE_THREADING_TOOLS - inline void call_itt_notify(notify_type t, void *ptr) { - call_itt_notify_v5((int)t, ptr); - } - -#else - inline void call_itt_notify(notify_type /*t*/, void * /*ptr*/) {} - -#endif // TBB_USE_THREADING_TOOLS - -#if __TBB_ITT_STRUCTURE_API - inline void itt_make_task_group( itt_domain_enum domain, void *group, unsigned long long group_extra, - void *parent, unsigned long long parent_extra, string_index name_index ) { - itt_make_task_group_v7( domain, group, group_extra, parent, parent_extra, name_index ); - } - - inline void itt_metadata_str_add( itt_domain_enum domain, void *addr, unsigned long long addr_extra, - string_index key, const char *value ) { - itt_metadata_str_add_v7( domain, addr, addr_extra, key, value ); - } - - inline void itt_relation_add( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, - itt_relation relation, void *addr1, unsigned long long addr1_extra ) { - itt_relation_add_v7( domain, addr0, addr0_extra, relation, addr1, addr1_extra ); - } - - inline void itt_task_begin( itt_domain_enum domain, void *task, unsigned long long task_extra, - void *parent, unsigned long long parent_extra, string_index name_index ) { - itt_task_begin_v7( domain, task, task_extra, parent, parent_extra, name_index ); - } - - inline void itt_task_end( itt_domain_enum domain ) { - itt_task_end_v7( domain ); - } -#endif // __TBB_ITT_STRUCTURE_API - - } // namespace internal -} // namespace tbb - -#endif /* __TBB_profiling_H */ diff --git a/inst/include/tbb/tbb_stddef.h b/inst/include/tbb/tbb_stddef.h deleted file mode 100644 index bab11ac5a..000000000 --- a/inst/include/tbb/tbb_stddef.h +++ /dev/null @@ -1,505 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. 
All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_stddef_H -#define __TBB_tbb_stddef_H - -// Marketing-driven product version -#define TBB_VERSION_MAJOR 4 -#define TBB_VERSION_MINOR 3 - -// Engineering-focused interface version -#define TBB_INTERFACE_VERSION 8000 -#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 - -// The oldest major interface version still supported -// To be used in SONAME, manifests, etc. -#define TBB_COMPATIBLE_INTERFACE_VERSION 2 - -#define __TBB_STRING_AUX(x) #x -#define __TBB_STRING(x) __TBB_STRING_AUX(x) - -// We do not need defines below for resource processing on windows -#if !defined RC_INVOKED - -// Define groups for Doxygen documentation -/** - * @defgroup algorithms Algorithms - * @defgroup containers Containers - * @defgroup memory_allocation Memory Allocation - * @defgroup synchronization Synchronization - * @defgroup timing Timing - * @defgroup task_scheduling Task Scheduling - */ - -// Simple text that is displayed on the main page of Doxygen documentation. -/** - * \mainpage Main Page - * - * Click the tabs above for information about the - * - Modules (groups of functionality) implemented by the library - * - Classes provided by the library - * - Files constituting the library. - * . - * Please note that significant part of TBB functionality is implemented in the form of - * template functions, descriptions of which are not accessible on the Classes - * tab. Use Modules or Namespace/Namespace Members - * tabs to find them. - * - * Additional pieces of information can be found here - * - \subpage concepts - * . - */ - -/** \page concepts TBB concepts - - A concept is a set of requirements to a type, which are necessary and sufficient - for the type to model a particular behavior or a set of behaviors. Some concepts - are specific to a particular algorithm (e.g. algorithm body), while other ones - are common to several algorithms (e.g. range concept). - - All TBB algorithms make use of different classes implementing various concepts. - Implementation classes are supplied by the user as type arguments of template - parameters and/or as objects passed as function call arguments. The library - provides predefined implementations of some concepts (e.g. 
several kinds of - \ref range_req "ranges"), while other ones must always be implemented by the user. - - TBB defines a set of minimal requirements each concept must conform to. Here is - the list of different concepts hyperlinked to the corresponding requirements specifications: - - \subpage range_req - - \subpage parallel_do_body_req - - \subpage parallel_for_body_req - - \subpage parallel_reduce_body_req - - \subpage parallel_scan_body_req - - \subpage parallel_sort_iter_req -**/ - -// tbb_config.h should be included the first since it contains macro definitions used in other headers -#include "tbb_config.h" - -#if _MSC_VER >=1400 - #define __TBB_EXPORTED_FUNC __cdecl - #define __TBB_EXPORTED_METHOD __thiscall -#else - #define __TBB_EXPORTED_FUNC - #define __TBB_EXPORTED_METHOD -#endif - -#if __INTEL_COMPILER || _MSC_VER -#define __TBB_NOINLINE(decl) __declspec(noinline) decl -#elif __GNUC__ -#define __TBB_NOINLINE(decl) decl __attribute__ ((noinline)) -#else -#define __TBB_NOINLINE(decl) decl -#endif - -#if __TBB_NOEXCEPT_PRESENT -#define __TBB_NOEXCEPT(expression) noexcept(expression) -#else -#define __TBB_NOEXCEPT(expression) -#endif - -#include /* Need size_t and ptrdiff_t */ - -#if _MSC_VER - #define __TBB_tbb_windef_H - #include "internal/_tbb_windef.h" - #undef __TBB_tbb_windef_H -#endif -#if !defined(_MSC_VER) || _MSC_VER>=1600 - #include -#endif - -//! Type for an assertion handler -typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment ); - -#if TBB_USE_ASSERT - - #define __TBB_ASSERT_NS(predicate,message,ns) ((predicate)?((void)0) : ns::assertion_failure(__FILE__,__LINE__,#predicate,message)) - //! Assert that x is true. - /** If x is false, print assertion failure message. - If the comment argument is not NULL, it is printed as part of the failure message. - The comment argument has no other effect. */ -#if __TBBMALLOC_BUILD -namespace rml { namespace internal { - #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,rml::internal) -#else -namespace tbb { - #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,tbb) -#endif - - #define __TBB_ASSERT_EX __TBB_ASSERT - - //! Set assertion handler and return previous value of it. - assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ); - - //! Process an assertion failure. - /** Normally called from __TBB_ASSERT macro. - If assertion handler is null, print message for assertion failure and abort. - Otherwise call the assertion handler. */ - void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ); - -#if __TBBMALLOC_BUILD -}} // namespace rml::internal -#else -} // namespace tbb -#endif -#else /* !TBB_USE_ASSERT */ - - //! No-op version of __TBB_ASSERT. - #define __TBB_ASSERT(predicate,comment) ((void)0) - //! "Extended" version is useful to suppress warnings if a variable is only used with an assert - #define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate))) - -#endif /* !TBB_USE_ASSERT */ - -//! The namespace tbb contains all components of the library. 
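With TBB_USE_ASSERT defined, the set_assertion_handler() declared above lets a client redirect assertion failures into its own diagnostics. A minimal sketch (illustrative only; log_and_abort is a hypothetical name, not part of TBB):

#include <cstdio>
#include <cstdlib>

// Matches assertion_handler_type: (filename, line, expression, comment).
void log_and_abort( const char* filename, int line,
                    const char* expression, const char* comment ) {
    std::fprintf( stderr, "%s:%d: assertion %s failed (%s)\n",
                  filename, line, expression, comment ? comment : "no comment" );
    std::abort();
}
// Installed (returning the previous handler) via:
//     assertion_handler_type previous = tbb::set_assertion_handler( log_and_abort );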
-namespace tbb { - -#if _MSC_VER && _MSC_VER<1600 - namespace internal { - typedef __int8 int8_t; - typedef __int16 int16_t; - typedef __int32 int32_t; - typedef __int64 int64_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - typedef unsigned __int64 uint64_t; - } // namespace internal -#else /* Posix */ - namespace internal { - using ::int8_t; - using ::int16_t; - using ::int32_t; - using ::int64_t; - using ::uint8_t; - using ::uint16_t; - using ::uint32_t; - using ::uint64_t; - } // namespace internal -#endif /* Posix */ - - using std::size_t; - using std::ptrdiff_t; - -//! The function returns the interface version of the TBB shared library being used. -/** - * The version it returns is determined at runtime, not at compile/link time. - * So it can be different than the value of TBB_INTERFACE_VERSION obtained at compile time. - */ -extern "C" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version(); - -//! Dummy type that distinguishes splitting constructor from copy constructor. -/** - * See description of parallel_for and parallel_reduce for example usages. - * @ingroup algorithms - */ -class split { -}; - -//! Type enables transmission of splitting proportion from partitioners to range objects -/** - * In order to make use of such facility Range objects must implement - * splitting constructor with this type passed and initialize static - * constant boolean field 'is_divisible_in_proportion' with the value - * of 'true' - */ -class proportional_split { -public: - proportional_split(size_t _left = 1, size_t _right = 1) : my_left(_left), my_right(_right) { } - proportional_split(split) : my_left(1), my_right(1) { } - - size_t left() const { return my_left; } - size_t right() const { return my_right; } - - void set_proportion(size_t _left, size_t _right) { - my_left = _left; - my_right = _right; - } - - // used when range does not support proportional split - operator split() const { return split(); } -private: - size_t my_left, my_right; -}; - -/** - * @cond INTERNAL - * @brief Identifiers declared inside namespace internal should never be used directly by client code. - */ -namespace internal { - -//! Compile-time constant that is upper bound on cache line/sector size. -/** It should be used only in situations where having a compile-time upper - bound is more useful than a run-time exact answer. - @ingroup memory_allocation */ -const size_t NFS_MaxLineSize = 128; - -/** Label for data that may be accessed from different threads, and that may eventually become wrapped - in a formal atomic type. - - Note that no problems have yet been observed relating to the definition currently being empty, - even if at least "volatile" would seem to be in order to avoid data sometimes temporarily hiding - in a register (although "volatile" as a "poor man's atomic" lacks several other features of a proper - atomic, some of which are now provided instead through specialized functions). - - Note that usage is intentionally compatible with a definition as qualifier "volatile", - both as a way to have the compiler help enforce use of the label and to quickly rule out - one potential issue. - - Note however that, with some architecture/compiler combinations, e.g. on IA-64 architecture, "volatile" - also has non-portable memory semantics that are needlessly expensive for "relaxed" operations. 
- - Note that this must only be applied to data that will not change bit patterns when cast to/from - an integral type of the same length; tbb::atomic must be used instead for, e.g., floating-point types. - - TODO: apply wherever relevant **/ -#define __TBB_atomic // intentionally empty, see above - -template -struct padded_base : T { - char pad[S - R]; -}; -template struct padded_base : T {}; - -//! Pads type T to fill out to a multiple of cache line size. -template -struct padded : padded_base {}; - -//! Extended variant of the standard offsetof macro -/** The standard offsetof macro is not sufficient for TBB as it can be used for - POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/ -#define __TBB_offsetof(class_name, member_name) \ - ((ptrdiff_t)&(reinterpret_cast(0x1000)->member_name) - 0x1000) - -//! Returns address of the object containing a member with the given name and address -#define __TBB_get_object_ref(class_name, member_name, member_addr) \ - (*reinterpret_cast((char*)member_addr - __TBB_offsetof(class_name, member_name))) - -//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info -void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info ); - -#if TBB_USE_EXCEPTIONS - #define __TBB_TRY try - #define __TBB_CATCH(e) catch(e) - #define __TBB_THROW(e) throw e - #define __TBB_RETHROW() throw -#else /* !TBB_USE_EXCEPTIONS */ - inline bool __TBB_false() { return false; } - #define __TBB_TRY - #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() ) - #define __TBB_THROW(e) ((void)0) - #define __TBB_RETHROW() ((void)0) -#endif /* !TBB_USE_EXCEPTIONS */ - -//! Report a runtime warning. -void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... ); - -#if TBB_USE_ASSERT -static void* const poisoned_ptr = reinterpret_cast(-1); - -//! Set p to invalid pointer value. -// Also works for regular (non-__TBB_atomic) pointers. -template -inline void poison_pointer( T* __TBB_atomic & p ) { p = reinterpret_cast(poisoned_ptr); } - -/** Expected to be used in assertions only, thus no empty form is defined. **/ -template -inline bool is_poisoned( T* p ) { return p == reinterpret_cast(poisoned_ptr); } -#else -template -inline void poison_pointer( T* __TBB_atomic & ) {/*do nothing*/} -#endif /* !TBB_USE_ASSERT */ - -//! Cast between unrelated pointer types. -/** This method should be used sparingly as a last resort for dealing with - situations that inherently break strict ISO C++ aliasing rules. */ -// T is a pointer type because it will be explicitly provided by the programmer as a template argument; -// U is a referent type to enable the compiler to check that "ptr" is a pointer, deducing U in the process. -template -inline T punned_cast( U* ptr ) { - uintptr_t x = reinterpret_cast(ptr); - return reinterpret_cast(x); -} - -//! Base class for types that should not be assigned. -class no_assign { - // Deny assignment - void operator=( const no_assign& ); -public: -#if __GNUC__ - //! Explicitly define default construction, because otherwise gcc issues gratuitous warning. - no_assign() {} -#endif /* __GNUC__ */ -}; - -//! Base class for types that should not be copied or assigned. -class no_copy: no_assign { - //! Deny copy construction - no_copy( const no_copy& ); -public: - //! 
Allow default construction - no_copy() {} -}; - -#if TBB_DEPRECATED_MUTEX_COPYING -class mutex_copy_deprecated_and_disabled {}; -#else -// By default various implementations of mutexes are not copy constructible -// and not copy assignable. -class mutex_copy_deprecated_and_disabled : no_copy {}; -#endif - -//! A function to check if passed in pointer is aligned on a specific border -template -inline bool is_aligned(T* pointer, uintptr_t alignment) { - return 0==((uintptr_t)pointer & (alignment-1)); -} - -//! A function to check if passed integer is a power of 2 -template -inline bool is_power_of_two(integer_type arg) { - return arg && (0 == (arg & (arg - 1))); -} - -//! A function to compute arg modulo divisor where divisor is a power of 2. -template -inline argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor) { - // Divisor is assumed to be a power of two (which is valid for current uses). - __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" ); - return (arg & (divisor - 1)); -} - - -//! A function to determine if "arg is a multiplication of a number and a power of 2". -// i.e. for strictly positive i and j, with j a power of 2, -// determines whether i==j< -inline bool is_power_of_two_factor(argument_integer_type arg, divisor_integer_type divisor) { - // Divisor is assumed to be a power of two (which is valid for current uses). - __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" ); - return 0 == (arg & (arg - divisor)); -} - -//! Utility template function to prevent "unused" warnings by various compilers. -template -void suppress_unused_warning( const T& ) {} - -// Struct to be used as a version tag for inline functions. -/** Version tag can be necessary to prevent loader on Linux from using the wrong - symbol in debug builds (when inline functions are compiled as out-of-line). **/ -struct version_tag_v3 {}; - -typedef version_tag_v3 version_tag; - -} // internal -} // tbb - -// Following is a set of classes and functions typically used in compile-time "metaprogramming". -// TODO: move all that to a separate header - -#if __TBB_ALLOCATOR_TRAITS_PRESENT -#include //for allocator_traits -#endif - -#if __TBB_CPP11_RVALUE_REF_PRESENT || _LIBCPP_VERSION -#include // for std::move -#endif - -namespace tbb { -namespace internal { - -//! Class for determining type of std::allocator::value_type. -template -struct allocator_type { - typedef T value_type; -}; - -#if _MSC_VER -//! Microsoft std::allocator has non-standard extension that strips const from a type. -template -struct allocator_type { - typedef T value_type; -}; -#endif - -// Ad-hoc implementation of true_type & false_type -// Intended strictly for internal use! For public APIs (traits etc), use C++11 analogues. -template -struct bool_constant { - static /*constexpr*/ const bool value = v; -}; -typedef bool_constant true_type; -typedef bool_constant false_type; - -#if __TBB_ALLOCATOR_TRAITS_PRESENT -using std::allocator_traits; -#else -template -struct allocator_traits{ - typedef tbb::internal::false_type propagate_on_container_move_assignment; -}; -#endif - -//! A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size. -template -struct select_size_t_constant { - //Explicit cast is needed to avoid compiler warnings about possible truncation. - //The value of the right size, which is selected by ?:, is anyway not truncated or promoted. 
- static const size_t value = (size_t)((sizeof(size_t)==sizeof(u)) ? u : ull); -}; - -#if __TBB_CPP11_RVALUE_REF_PRESENT -using std::move; -#elif defined(_LIBCPP_NAMESPACE) -// libc++ defines "pre-C++11 move" similarly to our; use it to avoid name conflicts in some cases. -using std::_LIBCPP_NAMESPACE::move; -#else -template -T& move( T& x ) { return x; } -#endif - -template -struct STATIC_ASSERTION_FAILED; - -template <> -struct STATIC_ASSERTION_FAILED { enum {value=1};}; - -template<> -struct STATIC_ASSERTION_FAILED; //intentionally left undefined to cause compile time error - -//! @endcond -}} // namespace tbb::internal - -#if __TBB_STATIC_ASSERT_PRESENT -#define __TBB_STATIC_ASSERT(condition,msg) static_assert(condition,msg) -#else -//please note condition is intentionally inverted to get a bit more understandable error msg -#define __TBB_STATIC_ASSERT_IMPL1(condition,msg,line) \ - enum {static_assert_on_line_##line = tbb::internal::STATIC_ASSERTION_FAILED::value} - -#define __TBB_STATIC_ASSERT_IMPL(condition,msg,line) __TBB_STATIC_ASSERT_IMPL1(condition,msg,line) -//! Verify at compile time that passed in condition is hold -#define __TBB_STATIC_ASSERT(condition,msg) __TBB_STATIC_ASSERT_IMPL(condition,msg,__LINE__) -#endif - -#endif /* RC_INVOKED */ -#endif /* __TBB_tbb_stddef_H */ diff --git a/inst/include/tbb/tbb_thread.h b/inst/include/tbb/tbb_thread.h deleted file mode 100644 index 00ec24709..000000000 --- a/inst/include/tbb/tbb_thread.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_tbb_thread_H -#define __TBB_tbb_thread_H - -#include "tbb_stddef.h" -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI -#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* ) -#if __TBB_WIN8UI_SUPPORT -typedef size_t thread_id_type; -#else // __TBB_WIN8UI_SUPPORT -typedef DWORD thread_id_type; -#endif // __TBB_WIN8UI_SUPPORT -#else -#define __TBB_NATIVE_THREAD_ROUTINE void* -#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* ) -#include -#endif // _WIN32||_WIN64 - -#include "tick_count.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -namespace internal { - class tbb_thread_v3; -} - -inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true); - -namespace internal { - - //! Allocate a closure - void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size ); - //! Free a closure allocated by allocate_closure_v3 - void __TBB_EXPORTED_FUNC free_closure_v3( void* ); - - struct thread_closure_base { - void* operator new( size_t size ) {return allocate_closure_v3(size);} - void operator delete( void* ptr ) {free_closure_v3(ptr);} - }; - - template struct thread_closure_0: thread_closure_base { - F function; - - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_0 *self = static_cast(c); - self->function(); - delete self; - return 0; - } - thread_closure_0( const F& f ) : function(f) {} - }; - //! Structure used to pass user function with 1 argument to thread. - template struct thread_closure_1: thread_closure_base { - F function; - X arg1; - //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_1 *self = static_cast(c); - self->function(self->arg1); - delete self; - return 0; - } - thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {} - }; - template struct thread_closure_2: thread_closure_base { - F function; - X arg1; - Y arg2; - //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_2 *self = static_cast(c); - self->function(self->arg1, self->arg2); - delete self; - return 0; - } - thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {} - }; - - //! Versioned thread class. - class tbb_thread_v3 { -#if __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN - // Workaround for a compiler bug: declaring the copy constructor as public - // enables use of the moving constructor. - // The definition is not provided in order to prohibit copying. - public: -#endif - tbb_thread_v3(const tbb_thread_v3&); // = delete; // Deny access - public: -#if _WIN32||_WIN64 - typedef HANDLE native_handle_type; -#else - typedef pthread_t native_handle_type; -#endif // _WIN32||_WIN64 - - class id; - //! Constructs a thread object that does not represent a thread of execution. - tbb_thread_v3() __TBB_NOEXCEPT(true) : my_handle(0) -#if _WIN32||_WIN64 - , my_thread_id(0) -#endif // _WIN32||_WIN64 - {} - - //! 
Constructs an object and executes f() in a new thread - template explicit tbb_thread_v3(F f) { - typedef internal::thread_closure_0 closure_type; - internal_start(closure_type::start_routine, new closure_type(f)); - } - //! Constructs an object and executes f(x) in a new thread - template tbb_thread_v3(F f, X x) { - typedef internal::thread_closure_1 closure_type; - internal_start(closure_type::start_routine, new closure_type(f,x)); - } - //! Constructs an object and executes f(x,y) in a new thread - template tbb_thread_v3(F f, X x, Y y) { - typedef internal::thread_closure_2 closure_type; - internal_start(closure_type::start_routine, new closure_type(f,x,y)); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - tbb_thread_v3(tbb_thread_v3&& x) __TBB_NOEXCEPT(true) - : my_handle(x.my_handle) -#if _WIN32||_WIN64 - , my_thread_id(x.my_thread_id) -#endif - { - x.internal_wipe(); - } - tbb_thread_v3& operator=(tbb_thread_v3&& x) __TBB_NOEXCEPT(true) { - internal_move(x); - return *this; - } - private: - tbb_thread_v3& operator=(const tbb_thread_v3& x); // = delete; - public: -#else // __TBB_CPP11_RVALUE_REF_PRESENT - tbb_thread_v3& operator=(tbb_thread_v3& x) { - internal_move(x); - return *this; - } -#endif // __TBB_CPP11_RVALUE_REF_PRESENT - - void swap( tbb_thread_v3& t ) __TBB_NOEXCEPT(true) {tbb::swap( *this, t );} - bool joinable() const __TBB_NOEXCEPT(true) {return my_handle!=0; } - //! The completion of the thread represented by *this happens before join() returns. - void __TBB_EXPORTED_METHOD join(); - //! When detach() returns, *this no longer represents the possibly continuing thread of execution. - void __TBB_EXPORTED_METHOD detach(); - ~tbb_thread_v3() {if( joinable() ) detach();} - inline id get_id() const __TBB_NOEXCEPT(true); - native_handle_type native_handle() { return my_handle; } - - //! The number of hardware thread contexts. - /** Before TBB 3.0 U4 this methods returned the number of logical CPU in - the system. Currently on Windows, Linux and FreeBSD it returns the - number of logical CPUs available to the current process in accordance - with its affinity mask. - - NOTE: The return value of this method never changes after its first - invocation. This means that changes in the process affinity mask that - took place after this method was first invoked will not affect the - number of worker threads in the TBB worker threads pool. **/ - static unsigned __TBB_EXPORTED_FUNC hardware_concurrency() __TBB_NOEXCEPT(true); - private: - native_handle_type my_handle; -#if _WIN32||_WIN64 - thread_id_type my_thread_id; -#endif // _WIN32||_WIN64 - - void internal_wipe() __TBB_NOEXCEPT(true) { - my_handle = 0; -#if _WIN32||_WIN64 - my_thread_id = 0; -#endif - } - void internal_move(tbb_thread_v3& x) __TBB_NOEXCEPT(true) { - if (joinable()) detach(); - my_handle = x.my_handle; -#if _WIN32||_WIN64 - my_thread_id = x.my_thread_id; -#endif // _WIN32||_WIN64 - x.internal_wipe(); - } - - /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread. 
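The thread_closure_* templates above exist because the native entry points (pthread_create() and _beginthreadex()) accept only a plain void* (*)(void*)-style routine: a heap-allocated closure carries the user's callable and its arguments across that C boundary and frees itself once the body has run. A minimal POSIX-only sketch of the same pattern, independent of TBB:

```cpp
#include <pthread.h>
#include <cstdio>

// Heap-allocated closure: owns the callable and its argument, and deletes
// itself after the thread body runs (as thread_closure_1 does above).
template <class F, class X>
struct closure {
    F function;
    X arg;
    closure(F f, X x) : function(f), arg(x) {}

    // Trampoline with the exact signature pthread_create() expects.
    static void* start_routine(void* p) {
        closure* self = static_cast<closure*>(p);
        self->function(self->arg);
        delete self;
        return nullptr;
    }
};

static void greet(int n) { std::printf("hello from thread, n=%d\n", n); }

int main() {
    pthread_t handle;
    auto* c = new closure<void(*)(int), int>(greet, 42);
    pthread_create(&handle, nullptr, &closure<void(*)(int), int>::start_routine, c);
    pthread_join(handle, nullptr);
    return 0;
}
```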
*/ - void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), - void* closure ); - friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true); - }; - - class tbb_thread_v3::id { -#if _WIN32||_WIN64 - thread_id_type my_id; - id( thread_id_type id_ ) : my_id(id_) {} -#else - pthread_t my_id; - id( pthread_t id_ ) : my_id(id_) {} -#endif // _WIN32||_WIN64 - friend class tbb_thread_v3; - public: - id() __TBB_NOEXCEPT(true) : my_id(0) {} - - friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); - friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); - friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); - friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); - friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); - friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); - - template - friend std::basic_ostream& - operator<< (std::basic_ostream &out, - tbb_thread_v3::id id) - { - out << id.my_id; - return out; - } - friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); - }; // tbb_thread_v3::id - - tbb_thread_v3::id tbb_thread_v3::get_id() const __TBB_NOEXCEPT(true) { -#if _WIN32||_WIN64 - return id(my_thread_id); -#else - return id(my_handle); -#endif // _WIN32||_WIN64 - } - void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); - void __TBB_EXPORTED_FUNC thread_yield_v3(); - void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i); - - inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) - { - return x.my_id == y.my_id; - } - inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) - { - return x.my_id != y.my_id; - } - inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) - { - return x.my_id < y.my_id; - } - inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) - { - return x.my_id <= y.my_id; - } - inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) - { - return x.my_id > y.my_id; - } - inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) - { - return x.my_id >= y.my_id; - } - -} // namespace internal; - -//! Users reference thread class by name tbb_thread -typedef internal::tbb_thread_v3 tbb_thread; - -using internal::operator==; -using internal::operator!=; -using internal::operator<; -using internal::operator>; -using internal::operator<=; -using internal::operator>=; - -inline void move( tbb_thread& t1, tbb_thread& t2 ) { - internal::move_v3(t1, t2); -} - -inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true) { - tbb::tbb_thread::native_handle_type h = t1.my_handle; - t1.my_handle = t2.my_handle; - t2.my_handle = h; -#if _WIN32||_WIN64 - thread_id_type i = t1.my_thread_id; - t1.my_thread_id = t2.my_thread_id; - t2.my_thread_id = i; -#endif /* _WIN32||_WIN64 */ -} - -namespace this_tbb_thread { - inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); } - //! Offers the operating system the opportunity to schedule another thread. - inline void yield() { internal::thread_yield_v3(); } - //! 
The current thread blocks at least until the time specified. - inline void sleep(const tick_count::interval_t &i) { - internal::thread_sleep_v3(i); - } -} // namespace this_tbb_thread - -} // namespace tbb - -#endif /* __TBB_tbb_thread_H */ diff --git a/inst/include/tbb/tbbmalloc_proxy.h b/inst/include/tbb/tbbmalloc_proxy.h deleted file mode 100644 index dde395411..000000000 --- a/inst/include/tbb/tbbmalloc_proxy.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* -Replacing the standard memory allocation routines in Microsoft* C/C++ RTL -(malloc/free, global new/delete, etc.) with the TBB memory allocator. - -Include the following header to a source of any binary which is loaded during -application startup - -#include "tbb/tbbmalloc_proxy.h" - -or add following parameters to the linker options for the binary which is -loaded during application startup. It can be either exe-file or dll. - -For win32 -tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy" -win64 -tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy" -*/ - -#ifndef __TBB_tbbmalloc_proxy_H -#define __TBB_tbbmalloc_proxy_H - -#if _MSC_VER - -#ifdef _DEBUG - #pragma comment(lib, "tbbmalloc_proxy_debug.lib") -#else - #pragma comment(lib, "tbbmalloc_proxy.lib") -#endif - -#if defined(_WIN64) - #pragma comment(linker, "/include:__TBB_malloc_proxy") -#else - #pragma comment(linker, "/include:___TBB_malloc_proxy") -#endif - -#else -/* Primarily to support MinGW */ - -extern "C" void __TBB_malloc_proxy(); -struct __TBB_malloc_proxy_caller { - __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); } -} volatile __TBB_malloc_proxy_helper_object; - -#endif // _MSC_VER - -#endif //__TBB_tbbmalloc_proxy_H diff --git a/inst/include/tbb/tick_count.h b/inst/include/tbb/tick_count.h deleted file mode 100644 index b5520f9b0..000000000 --- a/inst/include/tbb/tick_count.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tick_count_H -#define __TBB_tick_count_H - -#include "tbb_stddef.h" - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#elif __linux__ -#include -#else /* generic Unix */ -#include -#endif /* (choice of OS) */ - -namespace tbb { - -//! Absolute timestamp -/** @ingroup timing */ -class tick_count { -public: - //! Relative time interval. - class interval_t { - long long value; - explicit interval_t( long long value_ ) : value(value_) {} - public: - //! Construct a time interval representing zero time duration - interval_t() : value(0) {}; - - //! Construct a time interval representing sec seconds time duration - explicit interval_t( double sec ); - - //! Return the length of a time interval in seconds - double seconds() const; - - friend class tbb::tick_count; - - //! Extract the intervals from the tick_counts and subtract them. - friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); - - //! Add two intervals. - friend interval_t operator+( const interval_t& i, const interval_t& j ) { - return interval_t(i.value+j.value); - } - - //! Subtract two intervals. - friend interval_t operator-( const interval_t& i, const interval_t& j ) { - return interval_t(i.value-j.value); - } - - //! Accumulation operator - interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;} - - //! Subtraction operator - interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;} - private: - static long long ticks_per_second(){ -#if _WIN32||_WIN64 - LARGE_INTEGER qpfreq; - int rval = QueryPerformanceFrequency(&qpfreq); - __TBB_ASSERT_EX(rval, "QueryPerformanceFrequency returned zero"); - return static_cast(qpfreq.QuadPart); -#elif __linux__ - return static_cast(1E9); -#else /* generic Unix */ - return static_cast(1E6); -#endif /* (choice of OS) */ - } - }; - - //! Construct an absolute timestamp initialized to zero. - tick_count() : my_count(0) {}; - - //! Return current time. - static tick_count now(); - - //! Subtract two timestamps to get the time interval between - friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); - - //! Return the resolution of the clock in seconds per tick. 
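The tick_count class removed here is a thin wrapper over the platform timers (QueryPerformanceCounter on Windows, clock_gettime() on Linux, gettimeofday() elsewhere). Since C++11 the same stopwatch can be written portably with std::chrono; a minimal sketch of the equivalent usage pattern:

```cpp
#include <chrono>
#include <cstdio>

int main() {
    // steady_clock is monotonic; note the deleted code actually used
    // CLOCK_REALTIME on Linux, which can jump if the wall clock is adjusted.
    using clock = std::chrono::steady_clock;

    clock::time_point t0 = clock::now();
    volatile double sink = 0.0;
    for (int i = 0; i < 1000000; ++i)
        sink = sink + i;                 // some work to time
    clock::time_point t1 = clock::now();

    // Analogue of interval_t::seconds(): the difference of two timestamps,
    // converted to seconds as a double.
    double seconds = std::chrono::duration<double>(t1 - t0).count();
    std::printf("elapsed: %f seconds\n", seconds);
    return 0;
}
```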
- static double resolution() { return 1.0 / interval_t::ticks_per_second(); } - -private: - long long my_count; -}; - -inline tick_count tick_count::now() { - tick_count result; -#if _WIN32||_WIN64 - LARGE_INTEGER qpcnt; - int rval = QueryPerformanceCounter(&qpcnt); - __TBB_ASSERT_EX(rval, "QueryPerformanceCounter failed"); - result.my_count = qpcnt.QuadPart; -#elif __linux__ - struct timespec ts; - int status = clock_gettime( CLOCK_REALTIME, &ts ); - __TBB_ASSERT_EX( status==0, "CLOCK_REALTIME not supported" ); - result.my_count = static_cast(1000000000UL)*static_cast(ts.tv_sec) + static_cast(ts.tv_nsec); -#else /* generic Unix */ - struct timeval tv; - int status = gettimeofday(&tv, NULL); - __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); - result.my_count = static_cast(1000000)*static_cast(tv.tv_sec) + static_cast(tv.tv_usec); -#endif /*(choice of OS) */ - return result; -} - -inline tick_count::interval_t::interval_t( double sec ) { - value = static_cast(sec*interval_t::ticks_per_second()); -} - -inline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) { - return tick_count::interval_t( t1.my_count-t0.my_count ); -} - -inline double tick_count::interval_t::seconds() const { - return value*tick_count::resolution(); -} - -} // namespace tbb - -#endif /* __TBB_tick_count_H */ diff --git a/inst/include/tthread/tinythread.h b/inst/include/tthread/tinythread.h index b6d04466d..290dba434 100644 --- a/inst/include/tthread/tinythread.h +++ b/inst/include/tthread/tinythread.h @@ -81,6 +81,7 @@ freely, subject to the following restrictions: #include #include #include + #include #endif // Generic includes @@ -129,8 +130,8 @@ freely, subject to the following restrictions: /// destructors) to be declared with the @c thread_local keyword, most pre-C++11 /// compilers only allow for trivial types (e.g. @c int). So, to guarantee /// portable code, only use trivial types for thread local storage. -/// @note This directive is currently not supported on Mac OS X (it will give -/// a compiler error), since compile-time TLS is not supported in the Mac OS X +/// @note This directive is currently not supported on macOS (it will give +/// a compiler error), since compile-time TLS is not supported in the macOS /// executable format. Also, some older versions of MinGW (before GCC 4.x) do /// not support this directive. /// @hideinitializer @@ -491,7 +492,7 @@ class thread { /// Default constructor. /// Construct a @c thread object without an associated thread of execution /// (i.e. non-joinable). - thread() : mHandle(0), mNotAThread(true) + thread() : mHandle(0), mJoinable(false) #if defined(_TTHREAD_WIN32_) , mWin32ThreadID(0) #endif @@ -553,7 +554,7 @@ class thread { private: native_handle_type mHandle; ///< Thread handle. mutable mutex mDataMutex; ///< Serializer for access to the thread private data. - bool mNotAThread; ///< True if this object is not a thread of execution. + bool mJoinable; ///< Is the thread joinable? #if defined(_TTHREAD_WIN32_) unsigned int mWin32ThreadID; ///< Unique thread ID (filled out by _beginthreadex). #endif @@ -870,7 +871,12 @@ inline void * thread::wrapper_function(void * aArg) // The thread is no longer executing lock_guard guard(ti->mThread->mDataMutex); - ti->mThread->mNotAThread = true; + + // On POSIX, we allow the thread to be joined even after execution has finished. + // This is necessary to ensure that thread-local memory can be reclaimed.
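The mNotAThread-to-mJoinable rename in this file aligns tinythread with std::thread semantics: a thread may be joined at most once (the pthread_join man-page excerpt quoted below calls a second join undefined behavior), and a joinable thread must be joined or detached before its destructor runs. A compact std::thread illustration of those rules:

```cpp
#include <thread>
#include <cassert>

int main() {
    std::thread t([] { /* work */ });
    assert(t.joinable());   // has an associated thread of execution

    t.join();               // the first (and only legal) join
    assert(!t.joinable());  // a joined thread is no longer joinable

    std::thread u([] { /* work */ });
    u.detach();             // alternatively, relinquish ownership
    assert(!u.joinable());  // a detached thread is not joinable either

    return 0;  // destroying a still-joinable std::thread calls std::terminate()
}
```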
+#if defined(_TTHREAD_WIN32_) + ti->mThread->mJoinable = false; +#endif // The thread is responsible for freeing the startup information delete ti; @@ -890,8 +896,8 @@ inline thread::thread(void (*aFunction)(void *), void * aArg) ti->mArg = aArg; ti->mThread = this; - // The thread is now alive - mNotAThread = false; + // Mark thread as joinable + mJoinable = true; // Create the thread #if defined(_TTHREAD_WIN32_) @@ -904,9 +910,10 @@ inline thread::thread(void (*aFunction)(void *), void * aArg) // Did we fail to create the thread? if(!mHandle) { - mNotAThread = true; + mJoinable = false; delete ti; } + } inline thread::~thread() @@ -925,28 +932,37 @@ inline void thread::join() #elif defined(_TTHREAD_POSIX_) pthread_join(mHandle, NULL); #endif + + // https://linux.die.net/man/3/pthread_join states: + // + // Joining with a thread that has previously been joined results in undefined behavior. + // + // We just allow a thread to be joined once. + mJoinable = false; } } inline bool thread::joinable() const { mDataMutex.lock(); - bool result = !mNotAThread; + bool result = mJoinable; mDataMutex.unlock(); return result; } inline void thread::detach() { + // TODO: Attempting to detach a non-joinable thread should throw. + // https://en.cppreference.com/w/cpp/thread/thread/detach mDataMutex.lock(); - if(!mNotAThread) + if(mJoinable) { #if defined(_TTHREAD_WIN32_) CloseHandle(mHandle); #elif defined(_TTHREAD_POSIX_) pthread_detach(mHandle); #endif - mNotAThread = true; + mJoinable = false; } mDataMutex.unlock(); } diff --git a/inst/rstudio/templates/project/RcppParallel.package.skeleton.dcf b/inst/rstudio/templates/project/RcppParallel.package.skeleton.dcf new file mode 100644 index 000000000..bed2a038e --- /dev/null +++ b/inst/rstudio/templates/project/RcppParallel.package.skeleton.dcf @@ -0,0 +1,10 @@ +Binding: RcppParallel.package.skeleton +Title: R Package using RcppParallel +Subtitle: Create a new R Package using RcppParallel +Caption: Create R Package using RcppParallel +OpenFiles: src/vector-sum.cpp + +Parameter: example_code +Widget: CheckboxInput +Label: Include an example C++ file using RcppParallel +Default: On diff --git a/inst/skeleton/vector-sum.Rd b/inst/skeleton/vector-sum.Rd new file mode 100644 index 000000000..9ef95e38b --- /dev/null +++ b/inst/skeleton/vector-sum.Rd @@ -0,0 +1,20 @@ +\name{parallelVectorSum} +\alias{parallelVectorSum} +\docType{package} +\title{ +Simple function using RcppParallel +} +\description{ +Simple function using RcppParallel +} +\usage{ +parallelVectorSum(x) +} +\arguments{ +\item{x}{A numeric vector.} +} +\examples{ +\dontrun{ +parallelVectorSum(1:20) +} +} diff --git a/inst/skeleton/vector-sum.cpp b/inst/skeleton/vector-sum.cpp new file mode 100644 index 000000000..57b541659 --- /dev/null +++ b/inst/skeleton/vector-sum.cpp @@ -0,0 +1,55 @@ +/** + * + * This file contains example code showcasing how RcppParallel + * can be used. In this file, we define and export a function called + * 'parallelVectorSum()', which computes the sum of a numeric vector + * in parallel. + * + * Please see https://rcppcore.github.io/RcppParallel/ for more + * details on how to use RcppParallel in an R package, and the + * Rcpp gallery at http://gallery.rcpp.org/ for more examples. 
+ * + */ + +// [[Rcpp::depends(RcppParallel)]] +#include +#include + +using namespace Rcpp; +using namespace RcppParallel; + +struct Sum : public Worker +{ + // source vector + const RVector input; + + // accumulated value + double value; + + // constructors + Sum(const NumericVector input) : input(input), value(0) {} + Sum(const Sum& sum, Split) : input(sum.input), value(0) {} + + // accumulate just the element of the range I've been asked to + void operator()(std::size_t begin, std::size_t end) { + value += std::accumulate(input.begin() + begin, input.begin() + end, 0.0); + } + + // join my value with that of another Sum + void join(const Sum& rhs) { + value += rhs.value; + } +}; + +// [[Rcpp::export]] +double parallelVectorSum(NumericVector x) { + + // declare the SumBody instance + Sum sum(x); + + // call parallel_reduce to start the work + parallelReduce(0, x.length(), sum); + + // return the computed sum + return sum.value; +} diff --git a/tests/testthat/cpp/distance.cpp b/inst/tests/cpp/distance.cpp similarity index 100% rename from tests/testthat/cpp/distance.cpp rename to inst/tests/cpp/distance.cpp diff --git a/tests/testthat/cpp/innerproduct.cpp b/inst/tests/cpp/innerproduct.cpp similarity index 76% rename from tests/testthat/cpp/innerproduct.cpp rename to inst/tests/cpp/innerproduct.cpp index 7a1957205..fee3e41ce 100644 --- a/tests/testthat/cpp/innerproduct.cpp +++ b/inst/tests/cpp/innerproduct.cpp @@ -19,43 +19,43 @@ double innerProduct(NumericVector x, NumericVector y) { using namespace RcppParallel; struct InnerProduct : public Worker -{ +{ // source vectors const RVector x; const RVector y; - + // product that I have accumulated double product; - + // constructors - InnerProduct(const NumericVector x, const NumericVector y) + InnerProduct(const NumericVector x, const NumericVector y) : x(x), y(y), product(0) {} - InnerProduct(const InnerProduct& innerProduct, Split) + InnerProduct(const InnerProduct& innerProduct, Split) : x(innerProduct.x), y(innerProduct.y), product(0) {} - + // process just the elements of the range I have been asked to void operator()(std::size_t begin, std::size_t end) { - product += std::inner_product(x.begin() + begin, - x.begin() + end, - y.begin() + begin, + product += std::inner_product(x.begin() + begin, + x.begin() + end, + y.begin() + begin, 0.0); } - + // join my value with that of another InnerProduct - void join(const InnerProduct& rhs) { - product += rhs.product; + void join(const InnerProduct& rhs) { + product += rhs.product; } }; // [[Rcpp::export]] double parallelInnerProduct(NumericVector x, NumericVector y) { - + // declare the InnerProduct instance that takes a pointer to the vector data InnerProduct innerProduct(x, y); - + // call paralleReduce to start the work parallelReduce(0, x.length(), innerProduct); - + // return the computed product return innerProduct.product; } diff --git a/tests/testthat/cpp/sum.cpp b/inst/tests/cpp/sum.cpp similarity index 87% rename from tests/testthat/cpp/sum.cpp rename to inst/tests/cpp/sum.cpp index aec4895f9..db47c699e 100644 --- a/tests/testthat/cpp/sum.cpp +++ b/inst/tests/cpp/sum.cpp @@ -3,7 +3,7 @@ * @author JJ Allaire * @license GPL (>= 2) */ - + #include #include @@ -12,37 +12,37 @@ using namespace RcppParallel; using namespace Rcpp; struct Sum : public Worker -{ +{ // source vector const RVector input; - + // accumulated value double value; - + // constructors Sum(const NumericVector input) : input(input), value(0) {} Sum(const Sum& sum, Split) : input(sum.input), value(0) {} - + // accumulate 
just the element of the range I have been asked to void operator()(std::size_t begin, std::size_t end) { value += std::accumulate(input.begin() + begin, input.begin() + end, 0.0); } - + // join my value with that of another Sum - void join(const Sum& rhs) { - value += rhs.value; - } + void join(const Sum& rhs) { + value += rhs.value; + } }; // [[Rcpp::export]] double parallelVectorSum(NumericVector x) { - - // declare the SumBody instance + + // declare the SumBody instance Sum sum(x); - + // call parallel_reduce to start the work parallelReduce(0, x.length(), sum); - + // return the computed sum return sum.value; } diff --git a/tests/testthat/cpp/transform.cpp b/inst/tests/cpp/transform.cpp similarity index 90% rename from tests/testthat/cpp/transform.cpp rename to inst/tests/cpp/transform.cpp index 4a9b2d4a7..2e0955827 100644 --- a/tests/testthat/cpp/transform.cpp +++ b/inst/tests/cpp/transform.cpp @@ -10,6 +10,10 @@ using namespace Rcpp; #include #include +double squareRoot(double x) { + return ::sqrt(x); +} + // [[Rcpp::export]] NumericMatrix matrixSqrt(NumericMatrix orig) { @@ -17,7 +21,7 @@ NumericMatrix matrixSqrt(NumericMatrix orig) { NumericMatrix mat(orig.nrow(), orig.ncol()); // transform it - std::transform(orig.begin(), orig.end(), mat.begin(), ::sqrt); + std::transform(orig.begin(), orig.end(), mat.begin(), squareRoot); // return the new matrix return mat; @@ -43,7 +47,7 @@ struct SquareRoot : public Worker std::transform(input.begin() + begin, input.begin() + end, output.begin() + begin, - ::sqrt); + squareRoot); } }; diff --git a/inst/tests/cpp/truefalse_macros.cpp b/inst/tests/cpp/truefalse_macros.cpp new file mode 100644 index 000000000..412c88399 --- /dev/null +++ b/inst/tests/cpp/truefalse_macros.cpp @@ -0,0 +1,31 @@ +/** + * @title Test for TRUE and FALSE macros + * @author Travers Ching + * @license GPL (>= 2) + */ + +// TRUE and FALSE macros that may come with system headers on some systems +// But conflict with R.h (R_ext/Boolean.h) +// TRUE and FALSE macros should be undef in RcppParallel.h + +#include +#include + +// [[Rcpp::depends(RcppParallel)]] + +#ifndef TRUE +static_assert(true, "Macro TRUE does not exist"); +#else +static_assert(false, "Macro TRUE exists"); +#endif + +#ifndef FALSE +static_assert(true, "Macro FALSE does not exist"); +#else +static_assert(false, "Macro FALSE exists"); +#endif + +// [[Rcpp::export]] +int hush_no_export_warning() { + return 1; +} \ No newline at end of file diff --git a/inst/tests/runit.distance.R b/inst/tests/runit.distance.R new file mode 100644 index 000000000..f85a58020 --- /dev/null +++ b/inst/tests/runit.distance.R @@ -0,0 +1,18 @@ + +library(Rcpp) +library(RUnit) + +sourceCpp(system.file("tests/cpp/distance.cpp", package = "RcppParallel")) + +test.distance <- function() { + + n <- 1000 + m <- matrix(runif(n*10), ncol = 10) + m <- m/rowSums(m) + + checkEquals( + rcpp_js_distance(m), + rcpp_parallel_js_distance(m) + ) + +} diff --git a/inst/tests/runit.innerproduct.R b/inst/tests/runit.innerproduct.R new file mode 100644 index 000000000..2f9260cb2 --- /dev/null +++ b/inst/tests/runit.innerproduct.R @@ -0,0 +1,17 @@ + +library(Rcpp) +library(RUnit) + +sourceCpp(system.file("tests/cpp/innerproduct.cpp", package = "RcppParallel")) + +test.innerproduct <- function() { + + x <- runif(1000000) + y <- runif(1000000) + + checkEquals( + innerProduct(x, y), + parallelInnerProduct(x, y) + ) + +} diff --git a/inst/tests/runit.sum.R b/inst/tests/runit.sum.R new file mode 100644 index 000000000..4792b1cd7 --- /dev/null +++ 
b/inst/tests/runit.sum.R @@ -0,0 +1,15 @@ + +library(Rcpp) +library(RUnit) + +sourceCpp(system.file("tests/cpp/sum.cpp", package = "RcppParallel")) + +test.sum <- function() { + + v <- as.numeric(c(1:10000000)) + + checkEquals( + vectorSum(v), + parallelVectorSum(v) + ) +} diff --git a/inst/tests/runit.transform.R b/inst/tests/runit.transform.R new file mode 100644 index 000000000..d056dbdb4 --- /dev/null +++ b/inst/tests/runit.transform.R @@ -0,0 +1,15 @@ + +library(Rcpp) +library(RUnit) + +sourceCpp(system.file("tests/cpp/transform.cpp", package = "RcppParallel")) + +test.transform <- function() { + + m <- matrix(as.numeric(c(1:1000000)), nrow = 1000, ncol = 1000) + + checkEquals( + matrixSqrt(m), + parallelMatrixSqrt(m) + ) +} diff --git a/inst/tests/runit.truefalse_macros.R b/inst/tests/runit.truefalse_macros.R new file mode 100644 index 000000000..8e403e364 --- /dev/null +++ b/inst/tests/runit.truefalse_macros.R @@ -0,0 +1,6 @@ + +library(Rcpp) +library(RUnit) + +sourceCpp(system.file("tests/cpp/truefalse_macros.cpp", package = "RcppParallel")) + diff --git a/man/RcppParallel-package.Rd b/man/RcppParallel-package.Rd index 221d1ad73..76121353f 100644 --- a/man/RcppParallel-package.Rd +++ b/man/RcppParallel-package.Rd @@ -1,16 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/RcppParallel-package.R \docType{package} \name{RcppParallel-package} -\alias{RcppParallel} \alias{RcppParallel-package} +\alias{RcppParallel} \title{Parallel programming tools for Rcpp} \description{ - High level functions for doing parallel programming with Rcpp. - For example, the parallelFor function can be used to convert the work of - a standard serial "for" loop into a parallel one and the parallelReduce - function can be used for accumulating aggregate or other values. +High level functions for doing parallel programming with Rcpp. For example, +the \code{parallelFor()} function can be used to convert the work of a +standard serial "for" loop into a parallel one, and the \code{parallelReduce()} +function can be used for accumulating aggregate or other values. } -\author{ - JJ Allaire \email{jj@rstudio.com} +\details{ +The high level interface enables safe and robust parallel programming +without direct manipulation of operating system threads. On Windows, macOS, +and Linux systems the underlying implementation is based on Intel TBB +(Threading Building Blocks). On other platforms, a less-performant fallback +implementation based on the TinyThread library is used. + +For additional documentation, see the package website at: + +\url{https://rcppcore.github.io/RcppParallel/} } \keyword{package} - +\keyword{parallel} diff --git a/man/RcppParallel.package.skeleton.Rd b/man/RcppParallel.package.skeleton.Rd new file mode 100644 index 000000000..ad58178e6 --- /dev/null +++ b/man/RcppParallel.package.skeleton.Rd @@ -0,0 +1,67 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/skeleton.R +\name{RcppParallel.package.skeleton} +\alias{RcppParallel.package.skeleton} +\title{Create a skeleton for a new package depending on RcppParallel} +\usage{ +RcppParallel.package.skeleton(name = "anRpackage", example_code = TRUE, ...) 
+} +\arguments{ +\item{name}{The name of your R package.} + +\item{example_code}{If \code{TRUE}, example C++ code using RcppParallel is +added to the package.} + +\item{...}{Optional arguments passed to \link[Rcpp]{Rcpp.package.skeleton}.} +} +\value{ +Nothing; called for its side effects. +} +\description{ +\code{RcppParallel.package.skeleton} automates the creation of a new source +package that intends to use features of RcppParallel. +} +\details{ +It is based on the \link[utils]{package.skeleton} function, which it executes +first. + +In addition to \link[Rcpp]{Rcpp.package.skeleton}: + +The \samp{DESCRIPTION} file gains an Imports line requesting that the +package depends on RcppParallel and a LinkingTo line so that the package +finds RcppParallel header files. + +The \samp{NAMESPACE} gains a \code{useDynLib} directive as well as an +\code{importFrom(RcppParallel, RcppParallelLibs)} directive to ensure that +RcppParallel is loaded and available. + +The \samp{src} directory is created if it does not exist, and a +\samp{Makevars} file is added setting the environment variable +\samp{PKG_LIBS} to accommodate the necessary flags to link with the +RcppParallel library. + +If the \code{example_code} argument is set to \code{TRUE}, an example file, +\samp{vector-sum.cpp}, is created in the \samp{src} directory. +\code{Rcpp::compileAttributes()} is then called to generate +\code{src/RcppExports.cpp} and \code{R/RcppExports.R}. These files are given +as an example and should eventually be removed from the generated package. +} +\examples{ + +\dontrun{ +# simple package +RcppParallel.package.skeleton("foobar") +} + +} +\references{ +Read the \emph{Writing R Extensions} manual for more details. + +Once you have created a \emph{source} package you need to install it: see +the \emph{R Installation and Administration} manual, \code{\link{INSTALL}} +and \code{\link{install.packages}}. +} +\seealso{ +\link[utils]{package.skeleton} +} +\keyword{programming} diff --git a/man/flags.Rd b/man/flags.Rd new file mode 100644 index 000000000..6401d4264 --- /dev/null +++ b/man/flags.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/flags.R +\name{flags} +\alias{flags} +\alias{RcppParallelLibs} +\alias{LdFlags} +\alias{CxxFlags} +\title{Compilation flags for RcppParallel} +\usage{ +CxxFlags() + +LdFlags() + +RcppParallelLibs() +} +\value{ +Returns \code{NULL}, invisibly. These functions are called for +their side effects (writing the associated flags to stdout). +} +\description{ +Output the compiler or linker flags required to build against RcppParallel. +} +\details{ +These functions are typically called from \code{Makevars} as follows:\preformatted{PKG_LIBS += $(shell "$\{R_HOME\}/bin/Rscript" -e "RcppParallel::LdFlags()") +} + +On Windows, the flags ensure that the package links with the built-in TBB +library. On Linux and macOS, the output is empty, because TBB is loaded +dynamically when \code{RcppParallel} is loaded. + +\R packages using RcppParallel should also add the following to their +\code{NAMESPACE} file:\preformatted{importFrom(RcppParallel, RcppParallelLibs) +} + +This is necessary to ensure that \pkg{RcppParallel} (and so, TBB) is loaded +and available.
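From an interactive session you can preview what the $(shell ...) fragment above would capture. Since these helpers print to stdout and return NULL invisibly, wrap them in capture.output(); the output is platform-dependent and empty on Linux and macOS, per the details above:

```r
library(RcppParallel)

# CxxFlags()/LdFlags() write to stdout, so capture what Makevars would see:
cxx <- capture.output(CxxFlags())
ld  <- capture.output(LdFlags())

cat("compiler flags:", cxx, "\n")
cat("linker flags:", ld, "\n")
```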
+} diff --git a/man/setThreadOptions.Rd b/man/setThreadOptions.Rd index a95558e3b..5e68a3c35 100644 --- a/man/setThreadOptions.Rd +++ b/man/setThreadOptions.Rd @@ -1,48 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/options.R \name{setThreadOptions} \alias{setThreadOptions} \alias{defaultNumThreads} -\title{ -Thread options for RcppParallel -} -\description{ -Set thread options (number of threads to use for task scheduling and stack -size per-thread) for RcppParallel. -} +\title{Thread options for RcppParallel} \usage{ -setThreadOptions(numThreads = "auto", - stackSize = "auto") +setThreadOptions(numThreads = "auto", stackSize = "auto") + defaultNumThreads() } \arguments{ - \item{numThreads}{ - Number of threads to use for task scheduling (call - \code{defaultNumThreads} to determine the the default - value used for "auto"). - } - \item{stackSize}{ - Stack size (in bytes) to use for worker threads. The - default used for "auto" is 2MB on 32-bit systems and - 4MB on 64-bit systems (note that this parameter has - no effect on Windows). - } -} -\details{ - RcppParallel is automatically initialized with the default number - of threads and thread stack size when it loads. You can call - \code{setThreadOptions} at any time to change the defaults. -} +\item{numThreads}{Number of threads to use for task scheduling. Call \code{defaultNumThreads()} +to determine the default value used for "auto".} +\item{stackSize}{Stack size (in bytes) to use for worker threads. The +default used for "auto" is 2MB on 32-bit systems and 4MB on 64-bit systems +(note that this parameter has no effect on Windows).} +} \value{ - The \code{defaultNumThreads} returns the default number of threads - that are used by RcppParallel if another value isn't specified using - \code{setThreadOptions}. +\code{defaultNumThreads()} returns the default number of threads used by +RcppParallel, if another value isn't specified either via +\code{setThreadOptions()} or explicitly in calls to \code{parallelFor()} and +\code{parallelReduce()}. } +\description{ +Set thread options (number of threads to use for task scheduling and stack +size per-thread) for RcppParallel. +} +\details{ +RcppParallel is automatically initialized with the default number of threads +and thread stack size when it loads. You can call \code{setThreadOptions()} at +any time to change the defaults. +The \code{parallelFor()} and \code{parallelReduce()} functions also accept \code{numThreads} as +an argument, if you'd like to control the number of threads made available +for a particular parallel function call. Note that +this value is advisory, and TBB may choose a smaller number of threads +if the number of requested threads cannot be honored on the system. +} \examples{ + \dontrun{ +library(RcppParallel) setThreadOptions(numThreads = 4) - defaultNumThreads() } -} +} diff --git a/man/tbbLibraryPath.Rd b/man/tbbLibraryPath.Rd new file mode 100644 index 000000000..3989c45e1 --- /dev/null +++ b/man/tbbLibraryPath.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tbb.R +\name{tbbLibraryPath} +\alias{tbbLibraryPath} +\title{Get the Path to a TBB Library} +\usage{ +tbbLibraryPath(name = NULL) +} +\arguments{ +\item{name}{The name of the TBB library to be resolved. Normally, this is one of +\code{tbb}, \code{tbbmalloc}, or \code{tbbmalloc_proxy}.
When \code{NULL}, the library +path containing the TBB libraries is returned instead.} +} +\description{ +Retrieve the path to a TBB library. This can be useful for \R packages +using RcppParallel that wish to use, or re-use, the version of TBB that +RcppParallel has been configured to use. +} diff --git a/patches/windows_arm64.diff b/patches/windows_arm64.diff new file mode 100644 index 000000000..514f62756 --- /dev/null +++ b/patches/windows_arm64.diff @@ -0,0 +1,47 @@ +diff --git a/src/tbb/build/Makefile.tbb b/src/tbb/build/Makefile.tbb +index 8d155f80..c58f4fb1 100644 +--- a/src/tbb/build/Makefile.tbb ++++ b/src/tbb/build/Makefile.tbb +@@ -91,7 +91,11 @@ ifneq (,$(TBB.DEF)) + tbb.def: $(TBB.DEF) $(TBB.LST) + $(CPLUS) $(PREPROC_ONLY) $< $(CPLUS_FLAGS) $(INCLUDES) > $@ + +-LIB_LINK_FLAGS += $(EXPORT_KEY)tbb.def ++# LLVM on Windows doesn't need --version-script export ++# https://reviews.llvm.org/D63743 ++ifeq (, $(WINARM64_CLANG)) ++ LIB_LINK_FLAGS += $(EXPORT_KEY)tbb.def ++endif + $(TBB.DLL): tbb.def + endif + +diff --git a/src/tbb/build/Makefile.tbbmalloc b/src/tbb/build/Makefile.tbbmalloc +index 421e95c5..e7c38fa4 100644 +--- a/src/tbb/build/Makefile.tbbmalloc ++++ b/src/tbb/build/Makefile.tbbmalloc +@@ -74,7 +74,11 @@ ifneq (,$(MALLOC.DEF)) + tbbmalloc.def: $(MALLOC.DEF) + $(CPLUS) $(PREPROC_ONLY) $< $(M_CPLUS_FLAGS) $(WARNING_SUPPRESS) $(INCLUDES) > $@ + +-MALLOC_LINK_FLAGS += $(EXPORT_KEY)tbbmalloc.def ++# LLVM on Windows doesn't need --version-script export ++# https://reviews.llvm.org/D63743 ++ifeq (, $(WINARM64_CLANG)) ++ MALLOC_LINK_FLAGS += $(EXPORT_KEY)tbbmalloc.def ++endif + $(MALLOC.DLL): tbbmalloc.def + endif + +diff --git a/src/tbb/src/tbbmalloc/TypeDefinitions.h b/src/tbb/src/tbbmalloc/TypeDefinitions.h +index 3178442e..fd4b7956 100644 +--- a/src/tbb/src/tbbmalloc/TypeDefinitions.h ++++ b/src/tbb/src/tbbmalloc/TypeDefinitions.h +@@ -25,7 +25,7 @@ + # define __ARCH_ipf 1 + # elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support + # define __ARCH_x86_32 1 +-# elif defined(_M_ARM) ++# elif defined(_M_ARM) || defined(__aarch64__) + # define __ARCH_other 1 + # else + # error Unknown processor architecture for Windows diff --git a/src/.gitignore b/src/.gitignore index 22034c461..a4117a2cd 100644 --- a/src/.gitignore +++ b/src/.gitignore @@ -1,3 +1,6 @@ *.o *.so *.dll + +Makevars +Makevars.win diff --git a/src/Makevars b/src/Makevars deleted file mode 100644 index 1bf58543a..000000000 --- a/src/Makevars +++ /dev/null @@ -1,65 +0,0 @@ - -CXX_STD = CXX11 - -PKG_CPPFLAGS += -I../inst/include/ - -ifeq ($(OS), Windows_NT) - USE_TBB=Windows - TBB_COPY_PATTERN=tbb.dll -else - -UNAME := $(shell uname) -TBB_COPY_PATTERN=libtbb.* - -ifeq ($(UNAME), Darwin) - USE_TBB=Mac -endif -ifeq ($(UNAME), Linux) - USE_TBB=Linux -endif -# Note: regular MinGW not supported - -endif - -ifdef USE_TBB - -# Tell options.cpp that TBB support is turned on -PKG_CPPFLAGS += -DRCPP_PARALLEL_USE_TBB=1 - -MAKE_ARGS := CXXFLAGS=-DTBB_NO_LEGACY=1 tbb_release tbb_build_prefix=lib - -# What I really want is startswith but this doesn't appear to be available -ifneq (,$(findstring clang,$(CC))) - MAKE_ARGS += compiler=clang -endif - -ifeq ($(USE_TBB), Windows) - # rtools: turn on hacks to compensate for make and shell differences rtools<=>MinGW - # compiler: overwrite default (which is cl = MS compiler) - MAKE_ARGS += rtools=true compiler=gcc - ifeq ("$(WIN)", "64") - # TBB defaults to ia32 - MAKE_ARGS += arch=intel64 - endif - - # Linker needs access to the tbb dll; otherwise you get errors such 
as: - # "undefined reference to `tbb::task_scheduler_init::terminate()'" - PKG_LIBS += -L../inst/lib -ltbb -endif - -.PHONY: all tbb - -# Order is important in Windows' case. See PKG_LIBS above -all: tbb $(SHLIB) - -tbb: - mkdir -p ../inst/lib - ( cd tbb/src; make $(MAKE_ARGS) ) - cp tbb/build/lib_release/$(TBB_COPY_PATTERN) ../inst/lib - -clean: - (cd tbb/src; make clean) - -endif - - diff --git a/src/Makevars.in b/src/Makevars.in new file mode 100644 index 000000000..7158b7322 --- /dev/null +++ b/src/Makevars.in @@ -0,0 +1,40 @@ + +# This needs to expand to something the shell will accept as '$ORIGIN', +# including a literal '$' (no variable expansion) +ORIGIN = \$$ORIGIN + +CMAKE = @CMAKE@ +R = @R@ + +TBB_LIB = @TBB_LIB@ +TBB_INC = @TBB_INC@ +TBB_NAME = @TBB_NAME@ +TBB_MALLOC_NAME = @TBB_MALLOC_NAME@ + +PKG_CPPFLAGS = @PKG_CPPFLAGS@ +PKG_CXXFLAGS = @PKG_CXXFLAGS@ + +PKG_LIBS = @PKG_LIBS@ @PKG_LIBS_EXTRA@ + +all: tbb $(SHLIB) + +# TBB needs to be built before our C++ sources are built, so that +# headers are copied and available from the expected locations. +$(OBJECTS): tbb + +# NOTE: TBB libraries are installed via install.libs.R. +# However, we need to copy headers here so that they are visible during compilation. +tbb: tbb-clean + @TBB_LIB="$(TBB_LIB)" TBB_INC="$(TBB_INC)" \ + TBB_NAME="$(TBB_NAME)" TBB_MALLOC_NAME="$(TBB_MALLOC_NAME)" \ + CC="$(CC)" CFLAGS="$(CFLAGS)" CPPFLAGS="$(CPPFLAGS)" \ + CXX="$(CXX)" CXXFLAGS="$(CXXFLAGS)" LDFLAGS="$(LDFLAGS)" \ + CMAKE="$(CMAKE)" "@R@" -s -f install.libs.R --args build + +# NOTE: we do not want to clean ../inst/lib or ../inst/libs here, +# as we may be writing to those locations in multiarch builds +tbb-clean: + @rm -rf ../inst/include/tbb + @rm -rf ../inst/include/oneapi + @rm -rf ../inst/include/tbb_local + @rm -rf ../inst/include/serial diff --git a/src/init.cpp b/src/init.cpp new file mode 100644 index 000000000..b95aebed1 --- /dev/null +++ b/src/init.cpp @@ -0,0 +1,19 @@ + +#include +#include +#include // for NULL +#include + +/* .Call calls */ +extern "C" SEXP defaultNumThreads(); + +static const R_CallMethodDef CallEntries[] = { + {"defaultNumThreads", (DL_FUNC) &defaultNumThreads, 0}, + {NULL, NULL, 0} +}; + +extern "C" void R_init_RcppParallel(DllInfo *dll) +{ + R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); + R_useDynamicSymbols(dll, FALSE); +} diff --git a/src/install.libs.R b/src/install.libs.R new file mode 100644 index 000000000..4dcbac274 --- /dev/null +++ b/src/install.libs.R @@ -0,0 +1,273 @@ + +# !diagnostics suppress=R_PACKAGE_DIR,SHLIB_EXT,R_ARCH +.install.libs <- function(tbbLib) { + + # copy default library + files <- Sys.glob(paste0("*", SHLIB_EXT)) + dest <- file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH)) + dir.create(dest, recursive = TRUE, showWarnings = FALSE) + file.copy(files, dest, overwrite = TRUE) + + # copy symbols if available + if (file.exists("symbols.rds")) + file.copy("symbols.rds", dest, overwrite = TRUE) + + # also copy to package 'libs' folder, for devtools + libsDest <- paste0("../libs", R_ARCH) + dir.create(libsDest, recursive = TRUE, showWarnings = FALSE) + file.copy(files, libsDest, overwrite = TRUE) + + # copy tbb (NOTE: do not use inst/ folder as R will resolve symlinks, + # behavior which we do _not_ want here!) 
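As a sketch of where these copy steps put things (hypothetical paths; R CMD INSTALL supplies R_PACKAGE_DIR, SHLIB_EXT, and R_ARCH to an install.libs.R script, and tbbDest is assigned just below):

```r
# Hypothetical stand-ins for the variables R CMD INSTALL provides:
R_PACKAGE_DIR <- file.path(tempdir(), "RcppParallel")
R_ARCH <- "/x64"

# Destinations mirroring the copy steps in .install.libs() above:
dest    <- file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH)) # package shared library + symbols.rds
tbbDest <- file.path(R_PACKAGE_DIR, paste0("lib", R_ARCH))  # TBB runtime (note: 'lib', not 'libs')
dir.create(dest, recursive = TRUE, showWarnings = FALSE)
dir.create(tbbDest, recursive = TRUE, showWarnings = FALSE)
print(c(dest, tbbDest))
```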
+ tbbDest <- file.path(R_PACKAGE_DIR, paste0("lib", R_ARCH)) + dir.create(tbbDest, recursive = TRUE, showWarnings = FALSE) + + # note: on Linux, TBB gets compiled with extensions like + # '.so.2', so be ready to handle those + shlibPattern <- switch( + Sys.info()[["sysname"]], + Windows = "^tbb.*\\.dll$", + Darwin = "^libtbb.*\\.dylib$", + "^libtbb.*\\.so.*$" + ) + # WASM only supports static libraries + if (R.version$os == "emscripten") { + shlibPattern <- "^libtbb.*\\.a$" + } + + if (!nzchar(tbbLib)) { + + # using bundled TBB + tbbLibs <- list.files( + path = "tbb/build/lib_release", + pattern = shlibPattern, + full.names = TRUE + ) + + for (tbbLib in tbbLibs) { + system2("cp", c("-P", shQuote(tbbLib), shQuote(tbbDest))) + } + + } else { + + # using system tbb + tbbLibs <- list.files( + path = tbbLib, + pattern = shlibPattern, + full.names = TRUE + ) + + # don't copy symlinks + tbbLibs <- tbbLibs[!nzchar(Sys.readlink(tbbLibs))] + + # copy / link the libraries + useSymlinks <- Sys.getenv("TBB_USE_SYMLINKS", unset = .Platform$OS.type != "windows") + if (useSymlinks) { + file.symlink(tbbLibs, tbbDest) + } else { + for (tbbLib in tbbLibs) { + system2("cp", c("-P", shQuote(tbbLib), shQuote(tbbDest))) + } + } + + } + + # on Windows, we create a stub library that links to us so that + # older binaries (like rstan) can still load + if (.Platform$OS.type == "windows") { + tbbDll <- file.path(tbbDest, "tbb.dll") + if (!file.exists(tbbDll)) { + writeLines("** creating tbb stub library") + status <- system("R CMD SHLIB -o tbb-compat/tbb.dll tbb-compat/tbb-compat.cpp") + if (status != 0) + stop("error building tbb stub library") + file.copy("tbb-compat/tbb.dll", file.path(tbbDest, "tbb.dll")) + } + } + +} + +useTbbPreamble <- function(tbbInc) { + dir.create("../inst/include", recursive = TRUE, showWarnings = FALSE) + for (suffix in c("oneapi", "serial", "tbb")) { + tbbPath <- file.path(tbbInc, suffix) + if (file.exists(tbbPath)) { + file.copy(tbbPath, "../inst/include", recursive = TRUE) + } + } +} + +useSystemTbb <- function(tbbLib, tbbInc) { + useTbbPreamble(tbbInc) +} + +useBundledTbb <- function() { + + useTbbPreamble("tbb/include") + dir.create("tbb/build-tbb", showWarnings = FALSE) + + cmake <- Sys.getenv("CMAKE", unset = "cmake") + buildType <- Sys.getenv("CMAKE_BUILD_TYPE", unset = "Release") + verbose <- Sys.getenv("VERBOSE", unset = "0") + + splitCompilerVar("CC", "CFLAGS") + splitCompilerVar("CXX", "CXXFLAGS") + + prependFlags("CPPFLAGS", "CFLAGS") + prependFlags("CPPFLAGS", "CXXFLAGS") + + cmakeFlags <- c( + forwardEnvVar("CC", "CMAKE_C_COMPILER"), + forwardEnvVar("CXX", "CMAKE_CXX_COMPILER"), + forwardEnvVar("CFLAGS", "CMAKE_C_FLAGS"), + forwardEnvVar("CXXFLAGS", "CMAKE_CXX_FLAGS"), + forwardEnvVar("CMAKE_BUILD_TYPE", "CMAKE_BUILD_TYPE"), + "-DTBB_TEST=0", + "-DTBB_EXAMPLES=0", + "-DTBB_STRICT=0", + ".." 
+ ) + + if (R.version$os == "emscripten") { + cmakeFlags <- c( + "-DEMSCRIPTEN=1", + "-DTBBMALLOC_BUILD=0", + "-DBUILD_SHARED_LIBS=0", + cmakeFlags + ) + } + + writeLines("*** configuring tbb") + owd <- setwd("tbb/build-tbb") + output <- system2(cmake, shQuote(cmakeFlags), stdout = TRUE, stderr = TRUE) + status <- attr(output, "status") + if (is.numeric(status) && status != 0L) { + writeLines(output) + stop("error configuring tbb (status code ", status, ")") + } else if (!identical(verbose, "0")) { + writeLines(output) + } + setwd(owd) + + if (!identical(verbose, "0")) { + writeLines("*** dumping CMakeCache.txt") + writeLines(readLines("tbb/build-tbb/CMakeCache.txt")) + } + + writeLines("*** building tbb") + owd <- setwd("tbb/build-tbb") + output <- system2(cmake, c("--build", ".", "--config", "release"), stdout = TRUE, stderr = TRUE) + status <- attr(output, "status") + if (is.numeric(status) && status != 0L) { + writeLines(output) + stop("error building tbb (status code ", status, ")") + } else if (!identical(verbose, "0")) { + writeLines(output) + } + setwd(owd) + + shlibPattern <- switch( + Sys.info()[["sysname"]], + Windows = "^tbb.*\\.dll$", + Darwin = "^libtbb.*\\.dylib$", + "^libtbb.*\\.so.*$" + ) + + # WASM only supports static libraries + if (R.version$os == "emscripten") { + shlibPattern <- "^libtbb.*\\.a$" + } + + tbbFiles <- list.files( + file.path(getwd(), "tbb/build-tbb"), + pattern = shlibPattern, + recursive = TRUE, + full.names = TRUE + ) + + dir.create("tbb/build/lib_release", recursive = TRUE, showWarnings = FALSE) + file.copy(tbbFiles, "tbb/build/lib_release", overwrite = TRUE) + unlink("tbb/build-tbb", recursive = TRUE) + writeLines("*** finished building tbb") + +} + +getenv <- function(key, unset = "") { + Sys.getenv(key, unset = unset) +} + + +setenv <- function(key, value) { + args <- list(paste(value, collapse = " ")) + names(args) <- key + do.call(Sys.setenv, args) +} + + +# CMake doesn't support flags specified directly as part of the compiler +# definition, so manually split it here. +splitCompilerVar <- function(compilerVar, flagsVar) { + + compiler <- Sys.getenv(compilerVar, unset = NA) + if (is.na(compiler)) + return(FALSE) + + tokens <- scan(text = compiler, what = character(), quiet = TRUE) + if (length(tokens) < 2L) + return(FALSE) + + setenv(compilerVar, tokens[[1L]]) + + oldFlags <- Sys.getenv(flagsVar) + newFlags <- c(tokens[-1L], oldFlags) + setenv(flagsVar, newFlags[nzchar(newFlags)]) + + TRUE + +} + + +# Given an environment variable like 'CC', forward that to CMake +# via the corresponding CMAKE_C_COMPILER flag. 
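A usage sketch for this helper, whose definition follows immediately below (the values are hypothetical):

```r
# Assuming forwardEnvVar() as defined below is in scope: with CC set in the
# environment, the helper emits a -D define suitable for the cmakeFlags vector.
Sys.setenv(CC = "clang")
forwardEnvVar("CC", "CMAKE_C_COMPILER")
#> [1] "-DCMAKE_C_COMPILER=clang"

# When the environment variable is unset, the if() falls through and the
# helper returns NULL invisibly, contributing nothing to cmakeFlags.
Sys.unsetenv("CC")
forwardEnvVar("CC", "CMAKE_C_COMPILER")
```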
+forwardEnvVar <- function(envVar, cmakeVar) { + envVal <- Sys.getenv(envVar, unset = NA) + if (!is.na(envVal)) { + sprintf("-D%s=%s", cmakeVar, envVal) + } +} + +prependFlags <- function(prependFlags, toFlags) { + + prependVal <- Sys.getenv(prependFlags, unset = NA) + if (is.na(prependVal)) + return(FALSE) + + oldVal <- Sys.getenv(toFlags, unset = NA) + if (is.na(oldVal)) { + setenv(toFlags, prependVal) + } else { + setenv(toFlags, paste(prependVal, oldVal)) + } + + TRUE + +} + +# Main ---- + +tbbLib <- Sys.getenv("TBB_LIB") +tbbInc <- Sys.getenv("TBB_INC") + +args <- commandArgs(trailingOnly = TRUE) +if (identical(args, "build")) { + if (nzchar(tbbLib) && nzchar(tbbInc)) { + useSystemTbb(tbbLib, tbbInc) + } else if (.Platform$OS.type == "windows") { + writeLines("** building RcppParallel without tbb backend") + } else { + useBundledTbb() + } +} else { + source("../R/tbb-autodetected.R") + .install.libs(tbbLib) +} diff --git a/src/options.cpp b/src/options.cpp index 5796d917f..4f9de2e2c 100644 --- a/src/options.cpp +++ b/src/options.cpp @@ -8,40 +8,13 @@ #include #include -#include - -extern "C" SEXP setThreadOptions(SEXP numThreadsSEXP, SEXP stackSizeSEXP) { - - static tbb::task_scheduler_init* s_pTaskScheduler = NULL; - - int numThreads = Rf_asInteger(numThreadsSEXP); - - int stackSize = Rf_asInteger(stackSizeSEXP); - - try - { - if (!s_pTaskScheduler) { - s_pTaskScheduler = new tbb::task_scheduler_init(numThreads, stackSize); - } else { - s_pTaskScheduler->terminate(); - s_pTaskScheduler->initialize(numThreads, stackSize); - } - } - catch(const std::exception& e) - { - Rf_error(("Error loading TBB: " + std::string(e.what())).c_str()); - } - catch(...) - { - Rf_error("Error loading TBB: (Unknown error)"); - } - - return R_NilValue; -} - extern "C" SEXP defaultNumThreads() { SEXP threadsSEXP = Rf_allocVector(INTSXP, 1); +#ifdef __TBB_task_arena_H + INTEGER(threadsSEXP)[0] = tbb::this_task_arena::max_concurrency(); +#else INTEGER(threadsSEXP)[0] = tbb::task_scheduler_init::default_num_threads(); +#endif return threadsSEXP; } diff --git a/src/tbb-compat/tbb-compat.cpp b/src/tbb-compat/tbb-compat.cpp new file mode 100644 index 000000000..cbde70b4d --- /dev/null +++ b/src/tbb-compat/tbb-compat.cpp @@ -0,0 +1,143 @@ + +#include + +#include "../tbb/include/oneapi/tbb/detail/_namespace_injection.h" +#include "../tbb/include/oneapi/tbb/task_arena.h" + +#include "../tbb/src/tbb/observer_proxy.h" +#include "../tbb/src/tbb/main.h" +#include "../tbb/src/tbb/thread_data.h" + +#ifdef _WIN32 +# define DLL_EXPORT __declspec(dllexport) +#else +# define DLL_EXPORT +#endif + +namespace tbb { + +namespace interface6 { +class task_scheduler_observer; +} + +namespace internal { + +class task_scheduler_observer_v3 { + friend class tbb::detail::r1::observer_proxy; + friend class tbb::detail::r1::observer_list; + friend class interface6::task_scheduler_observer; + + //! Pointer to the proxy holding this observer. + /** Observers are proxied by the scheduler to maintain persistent lists of them. **/ + tbb::detail::r1::observer_proxy* my_proxy; + + //! Counter preventing the observer from being destroyed while in use by the scheduler. + /** Valid only when observation is on. **/ + std::atomic my_busy_count; + +public: + //! Enable or disable observation + /** For local observers the method can be used only when the current thread + has the task scheduler initialized or is attached to an arena. + Repeated calls with the same state are no-ops. 
**/ + void DLL_EXPORT __TBB_EXPORTED_METHOD observe( bool state=true ); + + //! Returns true if observation is enabled, false otherwise. + bool is_observing() const {return my_proxy!=NULL;} + + //! Construct observer with observation disabled. + task_scheduler_observer_v3() : my_proxy(NULL) { my_busy_count.store(0); } + + //! Entry notification + /** Invoked from inside observe(true) call and whenever a worker enters the arena + this observer is associated with. If a thread is already in the arena when + the observer is activated, the entry notification is called before it + executes the first stolen task. + Obsolete semantics. For global observers it is called by a thread before + the first steal since observation became enabled. **/ + virtual void on_scheduler_entry( bool /*is_worker*/ ) {} + + //! Exit notification + /** Invoked from inside observe(false) call and whenever a worker leaves the + arena this observer is associated with. + Obsolete semantics. For global observers it is called by a thread before + the first steal since observation became enabled. **/ + virtual void on_scheduler_exit( bool /*is_worker*/ ) {} + + //! Destructor automatically switches observation off if it is enabled. + virtual ~task_scheduler_observer_v3() { if(my_proxy) observe(false);} +}; + +} // namespace internal + +namespace interface6 { + +class task_scheduler_observer : public internal::task_scheduler_observer_v3 { + friend class internal::task_scheduler_observer_v3; + friend class tbb::detail::r1::observer_proxy; + friend class tbb::detail::r1::observer_list; + + /** Negative numbers with the largest absolute value to minimize probability + of coincidence in case of a bug in busy count usage. **/ + // TODO: take more high bits for version number + static const intptr_t v6_trait = (intptr_t)((~(uintptr_t)0 >> 1) + 1); + + //! contains task_arena pointer or tag indicating local or global semantics of the observer + intptr_t my_context_tag; + enum { global_tag = 0, implicit_tag = 1 }; + +public: + //! Construct local or global observer in inactive state (observation disabled). + /** For a local observer entry/exit notifications are invoked whenever a worker + thread joins/leaves the arena of the observer's owner thread. If a thread is + already in the arena when the observer is activated, the entry notification is + called before it executes the first stolen task. **/ + /** TODO: Obsolete. + Global observer semantics is obsolete as it violates master thread isolation + guarantees and is not composable. Thus the current default behavior of the + constructor is obsolete too and will be changed in one of the future versions + of the library. **/ + explicit task_scheduler_observer( bool local = false ) { + my_context_tag = local? implicit_tag : global_tag; + } + + //! Construct local observer for a given arena in inactive state (observation disabled). + /** entry/exit notifications are invoked whenever a thread joins/leaves arena. + If a thread is already in the arena when the observer is activated, the entry notification + is called before it executes the first stolen task. **/ + explicit task_scheduler_observer( task_arena & a) { + my_context_tag = (intptr_t)&a; + } + + /** Destructor protects instance of the observer from concurrent notification. + It is recommended to disable observation before destructor of a derived class starts, + otherwise it can lead to concurrent notification callback on partly destroyed object **/ + virtual ~task_scheduler_observer() { if(my_proxy) observe(false); } + + //! 
+   /** Warning: concurrent invocations of this method are not safe.
+       Repeated calls with the same state are no-ops. **/
+   void observe( bool state=true ) {
+      if( state && !my_proxy ) {
+         __TBB_ASSERT( !my_busy_count, "Inconsistent state of task_scheduler_observer instance");
+         my_busy_count.store(v6_trait);
+      }
+      internal::task_scheduler_observer_v3::observe(state);
+   }
+};
+
+} // namespace interface6
+
+} // namespace tbb
+
+namespace tbb {
+namespace internal {
+
+DLL_EXPORT
+void __TBB_EXPORTED_FUNC task_scheduler_observer_v3::observe( bool enable ) {
+   auto* tso = (tbb::detail::d1::task_scheduler_observer*) (this);
+   tbb::detail::r1::observe(*tso, enable);
+}
+
+} // namespace internal
+} // namespace tbb
diff --git a/src/tbb.cpp b/src/tbb.cpp
new file mode 100644
index 000000000..008e4ad4a
--- /dev/null
+++ b/src/tbb.cpp
@@ -0,0 +1,245 @@
+
+#if RCPP_PARALLEL_USE_TBB
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+namespace RcppParallel {
+
+tbb::global_control* s_globalControl = nullptr;
+
+// TBB Tools ----
+
+struct TBBWorker
+{
+   explicit TBBWorker(Worker& worker) : worker_(worker) {}
+
+   void operator()(const tbb::blocked_range<std::size_t>& r) const {
+      worker_(r.begin(), r.end());
+   }
+
+private:
+   Worker& worker_;
+};
+
+ThreadStackSizeControl::ThreadStackSizeControl()
+{
+   int stackSize = resolveValue("RCPP_PARALLEL_STACK_SIZE", 0, 0);
+   if (stackSize > 0)
+   {
+      s_globalControl = new tbb::global_control(
+         tbb::global_control::thread_stack_size,
+         stackSize
+      );
+   }
+}
+
+ThreadStackSizeControl::~ThreadStackSizeControl()
+{
+   if (s_globalControl != nullptr)
+   {
+      delete s_globalControl;
+      s_globalControl = nullptr;
+   }
+}
+
+
+// TBB Parallel For ----
+
+class TBBParallelForExecutor
+{
+public:
+
+   TBBParallelForExecutor(Worker& worker,
+                          std::size_t begin,
+                          std::size_t end,
+                          std::size_t grainSize)
+      : worker_(worker),
+        begin_(begin),
+        end_(end),
+        grainSize_(grainSize)
+   {
+   }
+
+   void operator()() const
+   {
+      TBBWorker tbbWorker(worker_);
+      tbb::parallel_for(
+         tbb::blocked_range<std::size_t>(begin_, end_, grainSize_),
+         tbbWorker
+      );
+   }
+
+private:
+   Worker& worker_;
+   std::size_t begin_;
+   std::size_t end_;
+   std::size_t grainSize_;
+};
+
+class TBBArenaParallelForExecutor
+{
+public:
+
+   TBBArenaParallelForExecutor(tbb::task_group& group,
+                               Worker& worker,
+                               std::size_t begin,
+                               std::size_t end,
+                               std::size_t grainSize)
+      : group_(group),
+        worker_(worker),
+        begin_(begin),
+        end_(end),
+        grainSize_(grainSize)
+   {
+   }
+
+   void operator()() const
+   {
+      TBBParallelForExecutor executor(worker_, begin_, end_, grainSize_);
+      group_.run_and_wait(executor);
+   }
+
+private:
+
+   tbb::task_group& group_;
+   Worker& worker_;
+   std::size_t begin_;
+   std::size_t end_;
+   std::size_t grainSize_;
+};
+
+void tbbParallelFor(std::size_t begin,
+                    std::size_t end,
+                    Worker& worker,
+                    std::size_t grainSize,
+                    int numThreads)
+{
+   ThreadStackSizeControl control;
+
+   tbb::task_group group;
+   TBBArenaParallelForExecutor executor(group, worker, begin, end, grainSize);
+
+   tbb::task_arena arena(numThreads);
+   arena.execute(executor);
+}
+
+
+// TBB Parallel Reduce ----
+
+struct TBBReducer
+{
+   explicit TBBReducer(ReducerWrapper& reducer)
+      : pSplitReducer_(NULL), reducer_(reducer)
+   {
+   }
+
+   TBBReducer(TBBReducer& tbbReducer, tbb::split)
+      : pSplitReducer_(new ReducerWrapper(tbbReducer.reducer_, RcppParallel::Split())),
+        reducer_(*pSplitReducer_)
+   {
+   }
+
+   virtual ~TBBReducer() { delete pSplitReducer_; }
+
+   void operator()(const tbb::blocked_range<std::size_t>& r)
+   {
+      reducer_(r.begin(), r.end());
+   }
+
+   void join(const TBBReducer& tbbReducer)
+   {
+      reducer_.join(tbbReducer.reducer_);
+   }
+
+private:
+   ReducerWrapper* pSplitReducer_;
+   ReducerWrapper& reducer_;
+};
+
+class TBBParallelReduceExecutor
+{
+public:
+
+   TBBParallelReduceExecutor(ReducerWrapper& reducer,
+                             std::size_t begin,
+                             std::size_t end,
+                             std::size_t grainSize)
+      : reducer_(reducer),
+        begin_(begin),
+        end_(end),
+        grainSize_(grainSize)
+   {
+   }
+
+   void operator()() const
+   {
+      TBBReducer tbbReducer(reducer_);
+      tbb::parallel_reduce(
+         tbb::blocked_range<std::size_t>(begin_, end_, grainSize_),
+         tbbReducer
+      );
+   }
+
+private:
+   ReducerWrapper& reducer_;
+   std::size_t begin_;
+   std::size_t end_;
+   std::size_t grainSize_;
+};
+
+class TBBArenaParallelReduceExecutor
+{
+public:
+
+   TBBArenaParallelReduceExecutor(tbb::task_group& group,
+                                  ReducerWrapper& reducer,
+                                  std::size_t begin,
+                                  std::size_t end,
+                                  std::size_t grainSize)
+      : group_(group),
+        reducer_(reducer),
+        begin_(begin),
+        end_(end),
+        grainSize_(grainSize)
+   {
+   }
+
+   void operator()() const
+   {
+      TBBParallelReduceExecutor executor(reducer_, begin_, end_, grainSize_);
+      group_.run_and_wait(executor);
+   }
+
+private:
+
+   tbb::task_group& group_;
+   ReducerWrapper& reducer_;
+   std::size_t begin_;
+   std::size_t end_;
+   std::size_t grainSize_;
+};
+
+void tbbParallelReduceImpl(std::size_t begin,
+                           std::size_t end,
+                           ReducerWrapper& reducer,
+                           std::size_t grainSize,
+                           int numThreads)
+{
+   ThreadStackSizeControl control;
+
+   tbb::task_group group;
+   TBBArenaParallelReduceExecutor executor(group, reducer, begin, end, grainSize);
+
+   tbb::task_arena arena(numThreads);
+   arena.execute(executor);
+}
+
+} // end namespace RcppParallel
+
+#endif /* RCPP_PARALLEL_USE_TBB */
diff --git a/src/tbb/.gitignore b/src/tbb/.gitignore
new file mode 100644
index 000000000..a4d040468
--- /dev/null
+++ b/src/tbb/.gitignore
@@ -0,0 +1,62 @@
+# -------- C++ --------
+# Prerequisites
+*.d
+
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.so.*
+*.dylib
+*.dll
+
+# Fortran module files
+*.mod
+*.smod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+
+# -------- CMake --------
+CMakeCache.txt
+CMakeFiles
+CMakeScripts
+Testing
+Makefile
+cmake_install.cmake
+install_manifest.txt
+compile_commands.json
+CTestTestfile.cmake
+build/*
+
+# -------- Python --------
+__pycache__/
+*.py[cod]
+*$py.class
+
+# -------- IDE --------
+.vscode/*
+.vs/*
+out/*
+CMakeSettings.json
+
+# -------- CTags --------
+.tags
+.ctags
+
diff --git a/src/tbb/CHANGES b/src/tbb/CHANGES
deleted file mode 100644
index befbc943b..000000000
--- a/src/tbb/CHANGES
+++ /dev/null
@@ -1,1759 +0,0 @@
-------------------------------------------------------------------------
-The list of most significant changes made over time in
-Intel(R) Threading Building Blocks (Intel(R) TBB).
-------------------------------------------------------------------------
-
-Intel TBB 4.3
-TBB_INTERFACE_VERSION == 8000
-
-Changes (w.r.t. Intel TBB 4.2 Update 5):
-
-- The following features are now fully supported: flow::indexer_node,
-  task_arena, speculative_spin_rw_mutex.
-- Compatibility with C++11 standard improved for tbb/compat/thread
-  and tbb::mutex.
-- C++11 move constructors have been added to concurrent_queue and
-  concurrent_bounded_queue.
-- C++11 move constructors and assignment operators have been added to - concurrent_vector, concurrent_hash_map, concurrent_priority_queue, - concurrent_unordered_{set,multiset,map,multimap}. -- C++11 move aware emplace/push/pop methods have been added to - concurrent_vector, concurrent_queue, concurrent_bounded_queue, - concurrent_priority_queue. -- Methods to insert a C++11 initializer list have been added: - concurrent_vector::grow_by(), concurrent_hash_map::insert(), - concurrent_unordered_{set,multiset,map,multimap}::insert(). -- Testing for compatibility of containers with some C++11 standard - library types has been added. -- Dynamic replacement of standard memory allocation routines has been - added for OS X*. -- Microsoft* Visual Studio* projects for Intel TBB examples updated - to VS 2010. -- For open-source packages, debugging information (line numbers) in - precompiled binaries now matches the source code. -- Debug information was added to release builds for OS X*, Solaris*, - FreeBSD* operating systems and MinGW*. -- Various improvements in documentation, debug diagnostics and examples. - -Preview Features: - -- Additional actions on reset of graphs, and extraction of individual - nodes from a graph (TBB_PREVIEW_FLOW_GRAPH_FEATURES). -- Support for an arbitrary number of arguments in parallel_invoke - (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE). - -Changes affecting backward compatibility: - -- For compatibility with C++11 standard, copy and move constructors and - assignment operators are disabled for all mutex classes. To allow - the old behavior, use TBB_DEPRECATED_MUTEX_COPYING macro. -- flow::sequencer_node rejects messages with repeating sequence numbers. -- Changed internal interface between tbbmalloc and tbbmalloc_proxy. -- Following deprecated functionality has been removed: - old debugging macros TBB_DO_ASSERT & TBB_DO_THREADING_TOOLS; - no-op depth-related methods in class task; - tbb::deprecated::concurrent_queue; - deprecated variants of concurrent_vector methods. -- register_successor() and remove_successor() are deprecated as methods - to add and remove edges in flow::graph; use make_edge() and - remove_edge() instead. - -Bugs fixed: - -- Fixed incorrect scalable_msize() implementation for aligned objects. -- Flow graph buffering nodes now destroy their copy of forwarded items. -- Multiple fixes in task_arena implementation, including for: - inconsistent task scheduler state inside executed functions; - incorrect floating-point settings and exception propagation; - possible stalls in concurrent invocations of execute(). -- Fixed floating-point settings propagation when the same instance of - task_group_context is used in different arenas. -- Fixed compilation error in pipeline.h with Intel Compiler on OS X*. -- Added missed headers for individual components to tbb.h. - -Open-source contributions integrated: - -- Range interface addition to parallel_do, parallel_for_each and - parallel_sort by Stephan Dollberg. -- Variadic template implementation of parallel_invoke - by Kizza George Mbidde (see Preview Features). -- Improvement in Seismic example for MacBook Pro* with Retina* display - by Raf Schietekat. - ------------------------------------------------------------------------- -Intel TBB 4.2 Update 5 -TBB_INTERFACE_VERSION == 7005 - -Changes (w.r.t. Intel TBB 4.2 Update 4): - -- The second template argument of class aligned_space now is set - to 1 by default. 
- -Preview Features: - -- Better support for exception safety, task priorities and floating - point settings in class task_arena. -- task_arena::current_slot() has been renamed to - task_arena::current_thread_index(). - -Bugs fixed: - -- Task priority change possibly ignored by a worker thread entering - a nested parallel construct. -- Memory leaks inside the task scheduler when running on - Intel(R) Xeon Phi(tm) coprocessor. - -Open-source contributions integrated: - -- Improved detection of X Window support for Intel TBB examples - and other feedback by Raf Schietekat. - ------------------------------------------------------------------------- -Intel TBB 4.2 Update 4 -TBB_INTERFACE_VERSION == 7004 - -Changes (w.r.t. Intel TBB 4.2 Update 3): - -- Added possibility to specify floating-point settings at invocation - of most parallel algorithms (including flow::graph) via - task_group_context. -- Added dynamic replacement of malloc_usable_size() under - Linux*/Android* and dlmalloc_usable_size() under Android*. -- Added new methods to concurrent_vector: - grow_by() that appends a sequence between two given iterators; - grow_to_at_least() that initializes new elements with a given value. -- Improved affinity_partitioner for better performance on balanced - workloads. -- Improvements in the task scheduler, including better scalability - when threads search for a task arena, and better diagnostics. -- Improved allocation performance for workloads that do intensive - allocation/releasing of same-size objects larger than ~8KB from - multiple threads. -- Exception support is enabled by default for 32-bit MinGW compilers. -- The tachyon example for Android* can be built for all targets - supported by the installed NDK. -- Added Windows Store* version of the tachyon example. -- GettingStarted/sub_string_finder example ported to offload execution - on Windows* for Intel(R) Many Integrated Core Architecture. - -Preview Features: - -- Removed task_scheduler_observer::on_scheduler_leaving() callback. -- Added task_scheduler_observer::may_sleep() callback. -- The CPF or_node has been renamed indexer_node. The input to - indexer_node is now a list of types. The output of indexer_node is - a tagged_msg type composed of a tag and a value. For indexer_node, - the tag is a size_t. - -Bugs fixed: - -- Fixed data races in preview extensions of task_scheduler_observer. -- Added noexcept(false) for destructor of task_group_base to avoid - crash on cancelation of structured task group in C++11. - -Open-source contributions integrated: - -- Improved concurrency detection for BG/Q, and other improvements - by Raf Schietekat. -- Fix for crashes in enumerable_thread_specific in case if a contained - object is too big to be constructed on the stack by Adrien Guinet. - ------------------------------------------------------------------------- -Intel TBB 4.2 Update 3 -TBB_INTERFACE_VERSION == 7003 - -Changes (w.r.t. Intel TBB 4.2 Update 2): - -- Added support for Microsoft* Visual Studio* 2013. -- Improved Microsoft* PPL-compatible form of parallel_for for better - support of auto-vectorization. -- Added a new example for cancellation and reset in the flow graph: - Kohonen self-organizing map (examples/graph/som). -- Various improvements in source code, tests, and makefiles. - -Bugs fixed: - -- Added dynamic replacement of _aligned_msize() previously missed. -- Fixed task_group::run_and_wait() to throw invalid_multiple_scheduling - exception if the specified task handle is already scheduled. 
- -Open-source contributions integrated: - -- A fix for ARM* processors by Steve Capper. -- Improvements in std::swap calls by Robert Maynard. - ------------------------------------------------------------------------- -Intel TBB 4.2 Update 2 -TBB_INTERFACE_VERSION == 7002 - -Changes (w.r.t. Intel TBB 4.2 Update 1): - -- Enable C++11 features for Microsoft* Visual Studio* 2013 Preview. -- Added a test for compatibility of TBB containers with C++11 - range-based for loop. - -Changes affecting backward compatibility: - -- Internal layout changed for class tbb::flow::limiter_node. - -Preview Features: - -- Added speculative_spin_rw_mutex, a read-write lock class which uses - Intel(R) Transactional Synchronization Extensions. - -Bugs fixed: - -- When building for Intel(R) Xeon Phi(tm) coprocessor, TBB programs - no longer require explicit linking with librt and libpthread. - -Open-source contributions integrated: - -- Fixes for ARM* processors by Steve Capper, Leif Lindholm - and Steven Noonan. -- Support for Clang on Linux by Raf Schietekat. -- Typo correction in scheduler.cpp by Julien Schueller. - ------------------------------------------------------------------------- -Intel TBB 4.2 Update 1 -TBB_INTERFACE_VERSION == 7001 - -Changes (w.r.t. Intel TBB 4.2): - -- Added project files for Microsoft* Visual Studio* 2010. -- Initial support of Microsoft* Visual Studio* 2013 Preview. -- Enable C++11 features available in Intel(R) C++ Compiler 14.0. -- scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, ) can be - used to urge releasing memory from tbbmalloc internal buffers when - the given limit is exceeded. - -Preview Features: - -- Class task_arena no longer requires linking with a preview library, - though still remains a community preview feature. -- The method task_arena::wait_until_empty() is removed. -- The method task_arena::current_slot() now returns -1 if - the task scheduler is not initialized in the thread. - -Changes affecting backward compatibility: - -- Because of changes in internal layout of graph nodes, the namespace - interface number of flow::graph has been incremented from 6 to 7. - -Bugs fixed: - -- Fixed a race in lazy initialization of task_arena. -- Fixed flow::graph::reset() to prevent situations where tasks would be - spawned in the process of resetting the graph to its initial state. -- Fixed decrement bug in limiter_node. -- Fixed a race in arc deletion in the flow graph. - -Open-source contributions integrated: - -- Improved support for IBM* Blue Gene* by Raf Schietekat. - ------------------------------------------------------------------------- -Intel TBB 4.2 -TBB_INTERFACE_VERSION == 7000 - -Changes (w.r.t. Intel TBB 4.1 Update 4): - -- Added speculative_spin_mutex, which uses Intel(R) Transactional - Synchronization Extensions when they are supported by hardware. -- Binary files linked with libc++ (the C++ standard library in Clang) - were added on OS X*. -- For OS X* exact exception propagation is supported with Clang; - it requires use of libc++ and corresponding Intel TBB binaries. -- Support for C++11 initializer lists in constructor and assigment - has been added to concurrent_hash_map, concurrent_unordered_set, - concurrent_unordered_multiset, concurrent_unordered_map, - concurrent_unordered_multimap. -- The memory allocator may now clean its per-thread memory caches - when it cannot get more memory. -- Added the scalable_allocation_command() function for on-demand - cleaning of internal memory caches. 
-- Reduced the time overhead for freeing memory objects smaller than ~8K. -- Simplified linking with the debug library for applications that use - Intel TBB in code offloaded to Intel(R) Xeon Phi(tm) coprocessors. - See an example in - examples/GettingStarted/sub_string_finder/Makefile. -- Various improvements in source code, scripts and makefiles. - -Changes affecting backward compatibility: - -- tbb::flow::graph has been modified to spawn its tasks; - the old behaviour (task enqueuing) is deprecated. This change may - impact applications that expected a flow graph to make progress - without calling wait_for_all(), which is no longer guaranteed. See - the documentation for more details. -- Changed the return values of the scalable_allocation_mode() function. - -Bugs fixed: - -- Fixed a leak of parallel_reduce body objects when execution is - cancelled or an exception is thrown, as suggested by Darcy Harrison. -- Fixed a race in the task scheduler which can lower the effective - priority despite the existence of higher priority tasks. -- On Linux an error during destruction of the internal thread local - storage no longer results in an exception. - -Open-source contributions integrated: - -- Fixed task_group_context state propagation to unrelated context trees - by Raf Schietekat. - ------------------------------------------------------------------------- -Intel TBB 4.1 Update 4 -TBB_INTERFACE_VERSION == 6105 - -Changes (w.r.t. Intel TBB 4.1 Update 3): - -- Use /volatile:iso option with VS 2012 to disable extended - semantics for volatile variables. -- Various improvements in affinity_partitioner, scheduler, - tests, examples, makefiles. -- Concurrent_priority_queue class now supports initialization/assignment - via C++11 initializer list feature (std::initializer_list). - -Bugs fixed: - -- Fixed more possible stalls in concurrent invocations of - task_arena::execute(), especially waiting for enqueued tasks. -- Fixed requested number of workers for task_arena(P,0). -- Fixed interoperability with Intel(R) VTune(TM) Amplifier XE in - case of using task_arena::enqueue() from a terminating thread. - -Open-source contributions integrated: - -- Type fixes, cleanups, and code beautification by Raf Schietekat. -- Improvements in atomic operations for big endian platforms - by Raf Schietekat. - ------------------------------------------------------------------------- -Intel TBB 4.1 Update 3 -TBB_INTERFACE_VERSION == 6103 - -Changes (w.r.t. Intel TBB 4.1 Update 2): - -- Binary files for Android* applications were added to the Linux* OS - package. -- Binary files for Windows Store* applications were added to the - Windows* OS package. -- Exact exception propagation (exception_ptr) support on Linux OS is - now turned on by default for GCC 4.4 and higher. -- Stopped implicit use of large memory pages by tbbmalloc (Linux-only). - Now use of large pages must be explicitly enabled with - scalable_allocation_mode() function or TBB_MALLOC_USE_HUGE_PAGES - environment variable. - -Community Preview Features: - -- Extended class task_arena constructor and method initialize() to - allow some concurrency to be reserved strictly for application - threads. -- New methods terminate() and is_active() were added to class - task_arena. - -Bugs fixed: - -- Fixed initialization of hashing helper constant in the hash - containers. -- Fixed possible stalls in concurrent invocations of - task_arena::execute() when no worker thread is available to make - progress. 
-- Fixed incorrect calculation of hardware concurrency in the presence - of inactive processor groups, particularly on systems running - Windows* 8 and Windows* Server 2012. - -Open-source contributions integrated: - -- The fix for the GUI examples on OS X* systems by Raf Schietekat. -- Moved some power-of-2 calculations to functions to improve readability - by Raf Schietekat. -- C++11/Clang support improvements by arcata. -- ARM* platform isolation layer by Steve Capper, Leif Lindholm, Leo Lara - (ARM). - ------------------------------------------------------------------------- -Intel TBB 4.1 Update 2 -TBB_INTERFACE_VERSION == 6102 - -Changes (w.r.t. Intel TBB 4.1 Update 1): - -- Objects up to 128 MB are now cached by the tbbmalloc. Previously - the threshold was 8MB. Objects larger than 128 MB are still - processed by direct OS calls. -- concurrent_unordered_multiset and concurrent_unordered_multimap - have been added, based on Microsoft* PPL prototype. -- Ability to value-initialize a tbb::atomic variable on construction - in C++11, with const expressions properly supported. - -Community Preview Features: - -- Added a possibility to wait until all worker threads terminate. - This is necessary before calling fork() from an application. - -Bugs fixed: - -- Fixed data race in tbbmalloc that might lead to memory leaks - for large object allocations. -- Fixed task_arena::enqueue() to use task_group_context of target arena. -- Improved implementation of 64 bit atomics on ia32. - ------------------------------------------------------------------------- -Intel TBB 4.1 Update 1 -TBB_INTERFACE_VERSION == 6101 - -Changes (w.r.t. Intel TBB 4.1): - -- concurrent_vector class now supports initialization/assignment - via C++11 initializer list feature (std::initializer_list) -- Added implementation of the platform isolation layer based on - Intel compiler atomic built-ins; it is supposed to work on - any platform supported by compiler version 12.1 and newer. -- Using GetNativeSystemInfo() instead of GetSystemInfo() to support - more than 32 processors for 32-bit applications under WOW64. -- The following form of parallel_for: - parallel_for(first, last, [step,] f[, context]) now accepts an - optional partitioner parameter after the function f. - -Backward-incompatible API changes: - -- The library no longer injects tuple in to namespace std. - In previous releases, tuple was injected into namespace std by - flow_graph.h when std::tuple was not available. In this release, - flow_graph.h now uses tbb::flow::tuple. On platforms where - std::tuple is available, tbb::flow::tuple is typedef'ed to - std::tuple. On all other platforms, tbb::flow::tuple provides - a subset of the functionality defined by std::tuple. Users of - flow_graph.h may need to change their uses of std::tuple to - tbb::flow::tuple to ensure compatibility with non-C++11 compliant - compilers. - -Bugs fixed: - -- Fixed local observer to be able to override propagated CPU state and - to provide correct value of task_arena::current_slot() in callbacks. - ------------------------------------------------------------------------- -Intel TBB 4.1 -TBB_INTERFACE_VERSION == 6100 - -Changes (w.r.t. Intel TBB 4.0 Update 5): - -- _WIN32_WINNT must be set to 0x0501 or greater in order to use TBB - on Microsoft* Windows*. -- parallel_deterministic_reduce template function is fully supported. -- TBB headers can be used with C++0x/C++11 mode (-std=c++0x) of GCC - and Intel(R) Compiler. 
-- C++11 std::make_exception_ptr is used where available, instead of - std::copy_exception from earlier C++0x implementations. -- Improvements in the TBB allocator to reduce extra memory consumption. -- Partial refactoring of the task scheduler data structures. -- TBB examples allow more flexible specification of the thread number, - including arithmetic and geometric progression. - -Bugs fixed: - -- On Linux & OS X*, pre-built TBB binaries do not yet support exact - exception propagation via C++11 exception_ptr. To prevent run time - errors, by default TBB headers disable exact exception propagation - even if the C++ implementation provides exception_ptr. - -Community Preview Features: - -- Added: class task_arena, for work submission by multiple application - threads with thread-independent control of concurrency level. -- Added: task_scheduler_observer can be created as local to a master - thread, to observe threads that work on behalf of that master. - Local observers may have new on_scheduler_leaving() callback. - ------------------------------------------------------------------------- -Intel TBB 4.0 Update 5 -TBB_INTERFACE_VERSION == 6005 - -Changes (w.r.t. Intel TBB 4.0 Update 4): - -- Parallel pipeline optimization (directly storing small objects in the - interstage data buffers) limited to trivially-copyable types for - C++11 and a short list of types for earlier compilers. -- _VARIADIC_MAX switch is honored for TBB tuple implementation - and flow::graph nodes based on tuple. -- Support of Cocoa framework was added to the GUI examples on OS X* - systems. - -Bugs fixed: - -- Fixed a tv_nsec overflow bug in condition_variable::wait_for. -- Fixed execution order of enqueued tasks with different priorities. -- Fixed a bug with task priority changes causing lack of progress - for fire-and-forget tasks when TBB was initialized to use 1 thread. -- Fixed duplicate symbol problem when linking multiple compilation - units that include flow_graph.h on VC 10. - ------------------------------------------------------------------------- -Intel TBB 4.0 Update 4 -TBB_INTERFACE_VERSION == 6004 - -Changes (w.r.t. Intel TBB 4.0 Update 3): - -- The TBB memory allocator transparently supports large pages on Linux. -- A new flow_graph example, logic_sim, was added. -- Support for DirectX* 9 was added to GUI examples. - -Community Preview Features: - -- Added: aggregator, a new concurrency control mechanism. - -Bugs fixed: - -- The abort operation on concurrent_bounded_queue now leaves the queue - in a reusable state. If a bad_alloc or bad_last_alloc exception is - thrown while the queue is recovering from an abort, that exception - will be reported instead of user_abort on the thread on which it - occurred, and the queue will not be reusable. -- Steal limiting heuristic fixed to avoid premature stealing disabling - when large amount of __thread data is allocated on thread stack. -- Fixed a low-probability leak of arenas in the task scheduler. -- In STL-compatible allocator classes, the method construct() was fixed - to comply with C++11 requirements. -- Fixed a bug that prevented creation of fixed-size memory pools - smaller than 2M. -- Significantly reduced the amount of warnings from various compilers. - -Open-source contributions integrated: - -- Multiple improvements by Raf Schietekat. -- Basic support for Clang on OS X* by Blas Rodriguez Somoza. -- Fixes for warnings and corner-case bugs by Blas Rodriguez Somoza - and Edward Lam. 
- ------------------------------------------------------------------------- -Intel TBB 4.0 Update 3 -TBB_INTERFACE_VERSION == 6003 - -Changes (w.r.t. Intel TBB 4.0 Update 2): - -- Modifications to the low-level API for memory pools: - added support for aligned allocations; - pool policies reworked to allow backward-compatible extensions; - added a policy to not return memory space till destruction; - pool_reset() does not return memory space anymore. -- Class tbb::flow::graph_iterator added to iterate over all nodes - registered with a graph instance. -- multioutput_function_node has been renamed multifunction_node. - multifunction_node and split_node are now fully-supported features. -- For the tagged join node, the policy for try_put of an item with - already existing tag has been defined: the item will be rejected. -- Matching the behavior on Windows, on other platforms the optional - shared libraries (libtbbmalloc, libirml) now are also searched - only in the directory where libtbb is located. -- The platform isolation layer based on GCC built-ins is extended. - -Backward-incompatible API changes: - -- a graph reference parameter is now required to be passed to the - constructors of the following flow graph nodes: overwrite_node, - write_once_node, broadcast_node, and the CPF or_node. -- the following tbb::flow node methods and typedefs have been renamed: - Old New - join_node and or_node: - inputs() -> input_ports() - input_ports_tuple_type -> input_ports_type - multifunction_node and split_node: - ports_type -> output_ports_type - -Bugs fixed: - -- Not all logical processors were utilized on systems with more than - 64 cores split by Windows into several processor groups. - ------------------------------------------------------------------------- -Intel TBB 4.0 Update 2 commercial-aligned release -TBB_INTERFACE_VERSION == 6002 - -Changes (w.r.t. Intel TBB 4.0 Update 1 commercial-aligned release): - -- concurrent_bounded_queue now has an abort() operation that releases - threads involved in pending push or pop operations. The released - threads will receive a tbb::user_abort exception. -- Added Community Preview Feature: concurrent_lru_cache container, - a concurrent implementation of LRU (least-recently-used) cache. - -Bugs fixed: - -- fixed a race condition in the TBB scalable allocator. -- concurrent_queue counter wraparound bug was fixed, which occurred when - the number of push and pop operations exceeded ~>4 billion on IA32. -- fixed races in the TBB scheduler that could put workers asleep too - early, especially in presense of affinitized tasks. - ------------------------------------------------------------------------- -Intel TBB 4.0 Update 1 commercial-aligned release -TBB_INTERFACE_VERSION == 6000 (forgotten to increment) - -Changes (w.r.t. Intel TBB 4.0 commercial-aligned release): - -- Memory leaks fixed in binpack example. -- Improvements and fixes in the TBB allocator. - ------------------------------------------------------------------------- -Intel TBB 4.0 commercial-aligned release -TBB_INTERFACE_VERSION == 6000 - -Changes (w.r.t. Intel TBB 3.0 Update 8 commercial-aligned release): - -- concurrent_priority_queue is now a fully supported feature. - Capacity control methods were removed. -- Flow graph is now a fully supported feature. -- A new memory backend has been implemented in the TBB allocator. - It can reuse freed memory for both small and large objects, and - returns unused memory blocks to the OS more actively. 
-- Improved partitioning algorithms for parallel_for and parallel_reduce - to better handle load imbalance. -- The convex_hull example has been refactored for reproducible - performance results. -- The major interface version has changed from 5 to 6. - Deprecated interfaces might be removed in future releases. - -Community Preview Features: - -- Added: serial subset, i.e. sequential implementations of TBB generic - algorithms (currently, only provided for parallel_for). -- Preview of new flow graph nodes: - or_node (accepts multiple inputs, forwards each input separately - to all successors), - split_node (accepts tuples, and forwards each element of a tuple - to a corresponding successor), and - multioutput_function_node (accepts one input, and passes the input - and a tuple of output ports to the function body to support outputs - to multiple successors). -- Added: memory pools for more control on memory source, grouping, - and collective deallocation. - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 8 commercial-aligned release -TBB_INTERFACE_VERSION == 5008 - -Changes (w.r.t. Intel TBB 3.0 Update 7 commercial-aligned release): - -- Task priorities become an official feature of TBB, - not community preview as before. -- Atomics API extended, and implementation refactored. -- Added task::set_parent() method. -- Added concurrent_unordered_set container. - -Open-source contributions integrated: - -- PowerPC support by Raf Schietekat. -- Fix of potential task pool overrun and other improvements - in the task scheduler by Raf Schietekat. -- Fix in parallel_for_each to work with std::set in Visual* C++ 2010. - -Community Preview Features: - -- Graph community preview feature was renamed to flow graph. - Multiple improvements in the implementation. - Binpack example was added for the feature. -- A number of improvements to concurrent_priority_queue. - Shortpath example was added for the feature. -- TBB runtime loaded functionality was added (Windows*-only). - It allows to specify which versions of TBB should be used, - as well as to set directories for the library search. -- parallel_deterministic_reduce template function was added. - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 7 commercial-aligned release -TBB_INTERFACE_VERSION == 5006 (forgotten to increment) - -Changes (w.r.t. Intel TBB 3.0 Update 6 commercial-aligned release): - -- Added implementation of the platform isolation layer based on - GCC atomic built-ins; it is supposed to work on any platform - where GCC has these built-ins. - -Community Preview Features: - -- Graph's dining_philosophers example added. -- A number of improvements to graph and concurrent_priority_queue. - - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 6 commercial-aligned release -TBB_INTERFACE_VERSION == 5006 - -Changes (w.r.t. Intel TBB 3.0 Update 5 commercial-aligned release): - -- Added Community Preview feature: task and task group priority, and - Fractal example demonstrating it. -- parallel_pipeline optimized for data items of small and large sizes. -- Graph's join_node is now parametrized with a tuple of up to 10 types. -- Improved performance of concurrent_priority_queue. - -Open-source contributions integrated: - -- Initial NetBSD support by Aleksej Saushev. 
- -Bugs fixed: - -- Failure to enable interoperability with Intel(R) Cilk(tm) Plus runtime - library, and a crash caused by invoking the interoperability layer - after one of the libraries was unloaded. -- Data race that could result in concurrent_unordered_map structure - corruption after call to clear() method. -- Stack corruption caused by PIC version of 64-bit CAS compiled by Intel - compiler on Linux. -- Inconsistency of exception propagation mode possible when application - built with Microsoft* Visual Studio* 2008 or earlier uses TBB built - with Microsoft* Visual Studio* 2010. -- Affinitizing master thread to a subset of available CPUs after TBB - scheduler was initialized tied all worker threads to the same CPUs. -- Method is_stolen_task() always returned 'false' for affinitized tasks. -- write_once_node and overwrite_node did not immediately send buffered - items to successors - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 5 commercial-aligned release -TBB_INTERFACE_VERSION == 5005 - -Changes (w.r.t. Intel TBB 3.0 Update 4 commercial-aligned release): - -- Added Community Preview feature: graph. -- Added automatic propagation of master thread FPU settings to - TBB worker threads. -- Added a public function to perform a sequentially consistent full - memory fence: tbb::atomic_fence() in tbb/atomic.h. - -Bugs fixed: - -- Data race that could result in scheduler data structures corruption - when using fire-and-forget tasks. -- Potential referencing of destroyed concurrent_hash_map element after - using erase(accessor&A) method with A acquired as const_accessor. -- Fixed a correctness bug in the convex hull example. - -Open-source contributions integrated: - -- Patch for calls to internal::atomic_do_once() by Andrey Semashev. - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 4 commercial-aligned release -TBB_INTERFACE_VERSION == 5004 - -Changes (w.r.t. Intel TBB 3.0 Update 3 commercial-aligned release): - -- Added Community Preview feature: concurrent_priority_queue. -- Fixed library loading to avoid possibility for remote code execution, - see http://www.microsoft.com/technet/security/advisory/2269637.mspx. -- Added support of more than 64 cores for appropriate Microsoft* - Windows* versions. For more details, see - http://msdn.microsoft.com/en-us/library/dd405503.aspx. -- Default number of worker threads is adjusted in accordance with - process affinity mask. - -Bugs fixed: - -- Calls of scalable_* functions from inside the allocator library - caused issues if the functions were overridden by another module. -- A crash occurred if methods run() and wait() were called concurrently - for an empty tbb::task_group (1736). -- The tachyon example exhibited build problems associated with - bug 554339 on Microsoft* Visual Studio* 2010. Project files were - modified as a partial workaround to overcome the problem. See - http://connect.microsoft.com/VisualStudio/feedback/details/554339. - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 3 commercial-aligned release -TBB_INTERFACE_VERSION == 5003 - -Changes (w.r.t. Intel TBB 3.0 Update 2 commercial-aligned release): - -- cache_aligned_allocator class reworked to use scalable_aligned_malloc. -- Improved performance of count() and equal_range() methods - in concurrent_unordered_map. -- Improved implementation of 64-bit atomic loads and stores on 32-bit - platforms, including compilation with VC 7.1. 
-- Added implementation of atomic operations on top of OSAtomic API - provided by OS X*. -- Removed gratuitous try/catch blocks surrounding thread function calls - in tbb_thread. -- Xcode* projects were added for sudoku and game_of_life examples. -- Xcode* projects were updated to work without TBB framework. - -Bugs fixed: - -- Fixed a data race in task scheduler destruction that on rare occasion - could result in memory corruption. -- Fixed idle spinning in thread bound filters in tbb::pipeline (1670). - -Open-source contributions integrated: - -- MinGW-64 basic support by brsomoza (partially). -- Patch for atomic.h by Andrey Semashev. -- Support for AIX & GCC on PowerPC by Giannis Papadopoulos. -- Various improvements by Raf Schietekat. - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 2 commercial-aligned release -TBB_INTERFACE_VERSION == 5002 - -Changes (w.r.t. Intel TBB 3.0 Update 1 commercial-aligned release): - -- Destructor of tbb::task_group class throws missing_wait exception - if there are tasks running when it is invoked. -- Interoperability layer with Intel Cilk Plus runtime library added - to protect TBB TLS in case of nested usage with Intel Cilk Plus. -- Compilation fix for dependent template names in concurrent_queue. -- Memory allocator code refactored to ease development and maintenance. - -Bugs fixed: - -- Improved interoperability with other Intel software tools on Linux in - case of dynamic replacement of memory allocator (1700) -- Fixed install issues that prevented installation on - Mac OS* X 10.6.4 (1711). - ------------------------------------------------------------------------- -Intel TBB 3.0 Update 1 commercial-aligned release -TBB_INTERFACE_VERSION == 5000 (forgotten to increment) - -Changes (w.r.t. Intel TBB 3.0 commercial-aligned release): - -- Decreased memory fragmentation by allocations bigger than 8K. -- Lazily allocate worker threads, to avoid creating unnecessary stacks. - -Bugs fixed: - -- TBB allocator used much more memory than malloc (1703) - see above. -- Deadlocks happened in some specific initialization scenarios - of the TBB allocator (1701, 1704). -- Regression in enumerable_thread_specific: excessive requirements - for object constructors. -- A bug in construction of parallel_pipeline filters when body instance - was a temporary object. -- Incorrect usage of memory fences on PowerPC and XBOX360 platforms. -- A subtle issue in task group context binding that could result - in cancelation signal being missed by nested task groups. -- Incorrect construction of concurrent_unordered_map if specified - number of buckets is not power of two. -- Broken count() and equal_range() of concurrent_unordered_map. -- Return type of postfix form of operator++ for hash map's iterators. - ------------------------------------------------------------------------- -Intel TBB 3.0 commercial-aligned release -TBB_INTERFACE_VERSION == 5000 - -Changes (w.r.t. Intel TBB 2.2 Update 3 commercial-aligned release): - -- All open-source-release changes down to TBB 2.2 U3 below - were incorporated into this release. - ------------------------------------------------------------------------- -20100406 open-source release - -Changes (w.r.t. 20100310 open-source release): - -- Added support for Microsoft* Visual Studio* 2010, including binaries. -- Added a PDF file with recommended Design Patterns for TBB. 
-- Added parallel_pipeline function and companion classes and functions - that provide a strongly typed lambda-friendly pipeline interface. -- Reworked enumerable_thread_specific to use a custom implementation of - hash map that is more efficient for ETS usage models. -- Added example for class task_group; see examples/task_group/sudoku. -- Removed two examples, as they were long outdated and superceded: - pipeline/text_filter (use pipeline/square); - parallel_while/parallel_preorder (use parallel_do/parallel_preorder). -- PDF documentation updated. -- Other fixes and changes in code, tests, and examples. - -Bugs fixed: - -- Eliminated build errors with MinGW32. -- Fixed post-build step and other issues in VS projects for examples. -- Fixed discrepancy between scalable_realloc and scalable_msize that - caused crashes with malloc replacement on Windows. - ------------------------------------------------------------------------- -20100310 open-source release - -Changes (w.r.t. Intel TBB 2.2 Update 3 commercial-aligned release): - -- Version macros changed in anticipation of a future release. -- Directory structure aligned with Intel(R) C++ Compiler; - now TBB binaries reside in //[bin|lib] - (in TBB 2.x, it was [bin|lib]//). -- Visual Studio projects changed for examples: instead of separate set - of files for each VS version, now there is single 'msvs' directory - that contains workspaces for MS C++ compiler (_cl.sln) and - Intel C++ compiler (_icl.sln). Works with VS 2005 and above. -- The name versioning scheme for backward compatibility was improved; - now compatibility-breaking changes are done in a separate namespace. -- Added concurrent_unordered_map implementation based on a prototype - developed in Microsoft for a future version of PPL. -- Added PPL-compatible writer-preference RW lock (reader_writer_lock). -- Added TBB_IMPLEMENT_CPP0X macro to control injection of C++0x names - implemented in TBB into namespace std. -- Added almost-C++0x-compatible std::condition_variable, plus a bunch - of other C++0x classes required by condition_variable. -- With TBB_IMPLEMENT_CPP0X, tbb_thread can be also used as std::thread. -- task.cpp was split into several translation units to structure - TBB scheduler sources layout. Static data layout and library - initialization logic were also updated. -- TBB scheduler reworked to prevent master threads from stealing - work belonging to other masters. -- Class task was extended with enqueue() method, and slightly changed - semantics of methods spawn() and destroy(). For exact semantics, - refer to TBB Reference manual. -- task_group_context now allows for destruction by non-owner threads. -- Added TBB_USE_EXCEPTIONS macro to control use of exceptions in TBB - headers. It turns off (i.e. sets to 0) automatically if specified - compiler options disable exception handling. -- TBB is enabled to run on top of Microsoft's Concurrency Runtime - on Windows* 7 (via our worker dispatcher known as RML). -- Removed old unused busy-waiting code in concurrent_queue. -- Described the advanced build & test options in src/index.html. -- Warning level for GCC raised with -Wextra and a few other options. -- Multiple fixes and improvements in code, tests, examples, and docs. - -Open-source contributions integrated: - -- Xbox support by Roman Lut (Deep Shadows), though further changes are - required to make it working; e.g. post-2.1 entry points are missing. 
-- "Eventcount" by Dmitry Vyukov evolved into concurrent_monitor, - an internal class used in the implementation of concurrent_queue. - ------------------------------------------------------------------------- -Intel TBB 2.2 Update 3 commercial-aligned release -TBB_INTERFACE_VERSION == 4003 - -Changes (w.r.t. Intel TBB 2.2 Update 2 commercial-aligned release): - -- PDF documentation updated. - -Bugs fixed: - -- concurrent_hash_map compatibility issue exposed on Linux in case - two versions of the container were used by different modules. -- enforce 16 byte stack alignment for consistence with GCC; required - to work correctly with 128-bit variables processed by SSE. -- construct() methods of allocator classes now use global operator new. - ------------------------------------------------------------------------- -Intel TBB 2.2 Update 2 commercial-aligned release -TBB_INTERFACE_VERSION == 4002 - -Changes (w.r.t. Intel TBB 2.2 Update 1 commercial-aligned release): - -- parallel_invoke and parallel_for_each now take function objects - by const reference, not by value. -- Building TBB with /MT is supported, to avoid dependency on particular - versions of Visual C++* runtime DLLs. TBB DLLs built with /MT - are located in vc_mt directory. -- Class critical_section introduced. -- Improvements in exception support: new exception classes introduced, - all exceptions are thrown via an out-of-line internal method. -- Improvements and fixes in the TBB allocator and malloc replacement, - including robust memory identification, and more reliable dynamic - function substitution on Windows*. -- Method swap() added to class tbb_thread. -- Methods rehash() and bucket_count() added to concurrent_hash_map. -- Added support for Visual Studio* 2010 Beta2. No special binaries - provided, but CRT-independent DLLs (vc_mt) should work. -- Other fixes and improvements in code, tests, examples, and docs. - -Open-source contributions integrated: - -- The fix to build 32-bit TBB on Mac OS* X 10.6. -- GCC-based port for SPARC Solaris by Michailo Matijkiw, with use of - earlier work by Raf Schietekat. - -Bugs fixed: - -- 159 - TBB build for PowerPC* running Mac OS* X. -- 160 - IBM* Java segfault if used with TBB allocator. -- crash in concurrent_queue (1616). - ------------------------------------------------------------------------- -Intel TBB 2.2 Update 1 commercial-aligned release -TBB_INTERFACE_VERSION == 4001 - -Changes (w.r.t. Intel TBB 2.2 commercial-aligned release): - -- Incorporates all changes from open-source releases below. -- Documentation was updated. -- TBB scheduler auto-initialization now covers all possible use cases. -- concurrent_queue: made argument types of sizeof used in paddings - consistent with those actually used. -- Memory allocator was improved: supported corner case of user's malloc - calling scalable_malloc (non-Windows), corrected processing of - memory allocation requests during tbb memory allocator startup - (Linux). -- Windows malloc replacement has got better support for static objects. -- In pipeline setups that do not allow actual parallelism, execution - by a single thread is guaranteed, idle spinning eliminated, and - performance improved. -- RML refactoring and clean-up. -- New constructor for concurrent_hash_map allows reserving space for - a number of items. -- Operator delete() added to the TBB exception classes. -- Lambda support was improved in parallel_reduce. -- gcc 4.3 warnings were fixed for concurrent_queue. 
-- Fixed possible initialization deadlock in modules using TBB entities - during construction of global static objects. -- Copy constructor in concurrent_hash_map was fixed. -- Fixed a couple of rare crashes in the scheduler possible before - in very specific use cases. -- Fixed a rare crash in the TBB allocator running out of memory. -- New tests were implemented, including test_lambda.cpp that checks - support for lambda expressions. -- A few other small changes in code, tests, and documentation. - ------------------------------------------------------------------------- -20090809 open-source release - -Changes (w.r.t. Intel TBB 2.2 commercial-aligned release): - -- Fixed known exception safety issues in concurrent_vector. -- Better concurrency of simultaneous grow requests in concurrent_vector. -- TBB allocator further improves performance of large object allocation. -- Problem with source of text relocations was fixed on Linux -- Fixed bugs related to malloc replacement under Windows -- A few other small changes in code and documentation. - ------------------------------------------------------------------------- -Intel TBB 2.2 commercial-aligned release -TBB_INTERFACE_VERSION == 4000 - -Changes (w.r.t. Intel TBB 2.1 U4 commercial-aligned release): - -- Incorporates all changes from open-source releases below. -- Architecture folders renamed from em64t to intel64 and from itanium - to ia64. -- Major Interface version changed from 3 to 4. Deprecated interfaces - might be removed in future releases. -- Parallel algorithms that use partitioners have switched to use - the auto_partitioner by default. -- Improved memory allocator performance for allocations bigger than 8K. -- Added new thread-bound filters functionality for pipeline. -- New implementation of concurrent_hash_map that improves performance - significantly. -- A few other small changes in code and documentation. - ------------------------------------------------------------------------- -20090511 open-source release - -Changes (w.r.t. previous open-source release): - -- Basic support for MinGW32 development kit. -- Added tbb::zero_allocator class that initializes memory with zeros. - It can be used as an adaptor to any STL-compatible allocator class. -- Added tbb::parallel_for_each template function as alias to parallel_do. -- Added more overloads for tbb::parallel_for. -- Added support for exact exception propagation (can only be used with - compilers that support C++0x std::exception_ptr). -- tbb::atomic template class can be used with enumerations. -- mutex, recursive_mutex, spin_mutex, spin_rw_mutex classes extended - with explicit lock/unlock methods. -- Fixed size() and grow_to_at_least() methods of tbb::concurrent_vector - to provide space allocation guarantees. More methods added for - compatibility with std::vector, including some from C++0x. -- Preview of a lambda-friendly interface for low-level use of tasks. -- scalable_msize function added to the scalable allocator (Windows only). -- Rationalized internal auxiliary functions for spin-waiting and backoff. -- Several tests undergo decent refactoring. - -Changes affecting backward compatibility: - -- Improvements in concurrent_queue, including limited API changes. - The previous version is deprecated; its functionality is accessible - via methods of the new tbb::concurrent_bounded_queue class. -- grow* and push_back methods of concurrent_vector changed to return - iterators; old semantics is deprecated. 
- ------------------------------------------------------------------------- -Intel TBB 2.1 Update 4 commercial-aligned release -TBB_INTERFACE_VERSION == 3016 - -Changes (w.r.t. Intel TBB 2.1 U3 commercial-aligned release): - -- Added tests for aligned memory allocations and malloc replacement. -- Several improvements for better bundling with Intel(R) C++ Compiler. -- A few other small changes in code and documentaion. - -Bugs fixed: - -- 150 - request to build TBB examples with debug info in release mode. -- backward compatibility issue with concurrent_queue on Windows. -- dependency on VS 2005 SP1 runtime libraries removed. -- compilation of GUI examples under Xcode* 3.1 (1577). -- On Windows, TBB allocator classes can be instantiated with const types - for compatibility with MS implementation of STL containers (1566). - ------------------------------------------------------------------------- -20090313 open-source release - -Changes (w.r.t. 20081109 open-source release): - -- Includes all changes introduced in TBB 2.1 Update 2 & Update 3 - commercial-aligned releases (see below for details). -- Added tbb::parallel_invoke template function. It runs up to 10 - user-defined functions in parallel and waits for them to complete. -- Added a special library providing ability to replace the standard - memory allocation routines in Microsoft* C/C++ RTL (malloc/free, - global new/delete, etc.) with the TBB memory allocator. - Usage details are described in include/tbb/tbbmalloc_proxy.h file. -- Task scheduler switched to use new implementation of its core - functionality (deque based task pool, new structure of arena slots). -- Preview of Microsoft* Visual Studio* 2005 project files for - building the library is available in build/vsproject folder. -- Added tests for aligned memory allocations and malloc replacement. -- Added parallel_for/game_of_life.net example (for Windows only) - showing TBB usage in a .NET application. -- A number of other fixes and improvements to code, tests, makefiles, - examples and documents. - -Bugs fixed: - -- The same list as in TBB 2.1 Update 4 right above. - ------------------------------------------------------------------------- -Intel TBB 2.1 Update 3 commercial-aligned release -TBB_INTERFACE_VERSION == 3015 - -Changes (w.r.t. Intel TBB 2.1 U2 commercial-aligned release): - -- Added support for aligned allocations to the TBB memory allocator. -- Added a special library to use with LD_PRELOAD on Linux* in order to - replace the standard memory allocation routines in C/C++ with the - TBB memory allocator. -- Added null_mutex and null_rw_mutex: no-op classes interface-compliant - to other TBB mutexes. -- Improved performance of parallel_sort, to close most of the serial gap - with std::sort, and beat it on 2 and more cores. -- A few other small changes. - -Bugs fixed: - -- the problem where parallel_for hanged after exception throw - if affinity_partitioner was used (1556). -- get rid of VS warnings about mbstowcs deprecation (1560), - as well as some other warnings. -- operator== for concurrent_vector::iterator fixed to work correctly - with different vector instances. - ------------------------------------------------------------------------- -Intel TBB 2.1 Update 2 commercial-aligned release -TBB_INTERFACE_VERSION == 3014 - -Changes (w.r.t. 
Intel TBB 2.1 U1 commercial-aligned release): - -- Incorporates all open-source-release changes down to TBB 2.1 U1, - except for: - - 20081019 addition of enumerable_thread_specific; -- Warning level for Microsoft* Visual C++* compiler raised to /W4 /Wp64; - warnings found on this level were cleaned or suppressed. -- Added TBB_runtime_interface_version API function. -- Added new example: pipeline/square. -- Added exception handling and cancellation support - for parallel_do and pipeline. -- Added copy constructor and [begin,end) constructor to concurrent_queue. -- Added some support for beta version of Intel(R) Parallel Amplifier. -- Added scripts to set environment for cross-compilation of 32-bit - applications on 64-bit Linux with Intel(R) C++ Compiler. -- Fixed semantics of concurrent_vector::clear() to not deallocate - internal arrays. Fixed compact() to perform such deallocation later. -- Fixed the issue with atomic when T is incomplete type. -- Improved support for PowerPC* Macintosh*, including the fix - for a bug in masked compare-and-swap reported by a customer. -- As usual, a number of other improvements everywhere. - ------------------------------------------------------------------------- -20081109 open-source release - -Changes (w.r.t. previous open-source release): - -- Added new serial out of order filter for tbb::pipeline. -- Fixed the issue with atomic::operator= reported at the forum. -- Fixed the issue with using tbb::task::self() in task destructor - reported at the forum. -- A number of other improvements to code, tests, makefiles, examples - and documents. - -Open-source contributions integrated: -- Changes in the memory allocator were partially integrated. - ------------------------------------------------------------------------- -20081019 open-source release - -Changes (w.r.t. previous open-source release): - -- Introduced enumerable_thread_specific. This new class provides a - wrapper around native thread local storage as well as iterators and - ranges for accessing the thread local copies (1533). -- Improved support for Intel(R) Threading Analysis Tools - on Intel(R) 64 architecture. -- Dependency from Microsoft* CRT was integrated to the libraries using - manifests, to avoid issues if called from code that uses different - version of Visual C++* runtime than the library. -- Introduced new defines TBB_USE_ASSERT, TBB_USE_DEBUG, - TBB_USE_PERFORMANCE_WARNINGS, TBB_USE_THREADING_TOOLS. -- A number of other improvements to code, tests, makefiles, examples - and documents. - -Open-source contributions integrated: - -- linker optimization: /incremental:no . - ------------------------------------------------------------------------- -20080925 open-source release - -Changes (w.r.t. previous open-source release): - -- Same fix for a memory leak in the memory allocator as in TBB 2.1 U1. -- Improved support for lambda functions. -- Fixed more concurrent_queue issues reported at the forum. -- A number of other improvements to code, tests, makefiles, examples - and documents. - ------------------------------------------------------------------------- -Intel TBB 2.1 Update 1 commercial-aligned release -TBB_INTERFACE_VERSION == 3013 - -Changes (w.r.t. Intel TBB 2.1 commercial-aligned release): - -- Fixed small memory leak in the memory allocator. 
-- Incorporates all open-source-release changes since TBB 2.1, - except for: - - 20080825 changes for parallel_do; - ------------------------------------------------------------------------- -20080825 open-source release - -Changes (w.r.t. previous open-source release): - -- Added exception handling and cancellation support for parallel_do. -- Added default HashCompare template argument for concurrent_hash_map. -- Fixed concurrent_queue.clear() issues due to incorrect assumption - about clear() being private method. -- Added the possibility to use TBB in applications that change - default calling conventions (Windows* only). -- Many improvements to code, tests, examples, makefiles and documents. - -Bugs fixed: - -- 120, 130 - memset declaration missed in concurrent_hash_map.h - ------------------------------------------------------------------------- -20080724 open-source release - -Changes (w.r.t. previous open-source release): - -- Inline assembly for atomic operations improved for gcc 4.3 -- A few more improvements to the code. - ------------------------------------------------------------------------- -20080709 open-source release - -Changes (w.r.t. previous open-source release): - -- operator=() was added to the tbb_thread class according to - the current working draft for std::thread. -- Recognizing SPARC* in makefiles for Linux* and Sun Solaris*. - -Bugs fixed: - -- 127 - concurrent_hash_map::range fixed to split correctly. - -Open-source contributions integrated: - -- fix_set_midpoint.diff by jyasskin -- SPARC* support in makefiles by Raf Schietekat - ------------------------------------------------------------------------- -20080622 open-source release - -Changes (w.r.t. previous open-source release): - -- Fixed a hang that rarely happened on Linux - during deinitialization of the TBB scheduler. -- Improved support for Intel(R) Thread Checker. -- A few more improvements to the code. - ------------------------------------------------------------------------- -Intel TBB 2.1 commercial-aligned release -TBB_INTERFACE_VERSION == 3011 - -Changes (w.r.t. Intel TBB 2.0 U3 commercial-aligned release): - -- All open-source-release changes down to, and including, TBB 2.0 below, - were incorporated into this release. - ------------------------------------------------------------------------- -20080605 open-source release - -Changes (w.r.t. previous open-source release): - -- Explicit control of exported symbols by version scripts added on Linux. -- Interfaces polished for exception handling & algorithm cancellation. -- Cache behavior improvements in the scalable allocator. -- Improvements in text_filter, polygon_overlay, and other examples. -- A lot of other stability improvements in code, tests, and makefiles. -- First release where binary packages include headers/docs/examples, so - binary packages are now self-sufficient for using TBB. - -Open-source contributions integrated: - -- atomics patch (partially). -- tick_count warning patch. - -Bugs fixed: - -- 118 - fix for boost compatibility. -- 123 - fix for tbb_machine.h. - ------------------------------------------------------------------------- -20080512 open-source release - -Changes (w.r.t. previous open-source release): - -- Fixed a problem with backward binary compatibility - of debug Linux builds. -- Sun* Studio* support added. -- soname support added on Linux via linker script. To restore backward - binary compatibility, *.so -> *.so.2 softlinks should be created. 
-- concurrent_hash_map improvements - added few new forms of insert() - method and fixed precondition and guarantees of erase() methods. - Added runtime warning reporting about bad hash function used for - the container. Various improvements for performance and concurrency. -- Cancellation mechanism reworked so that it does not hurt scalability. -- Algorithm parallel_do reworked. Requirement for Body::argument_type - definition removed, and work item argument type can be arbitrarily - cv-qualified. -- polygon_overlay example added. -- A few more improvements to code, tests, examples and Makefiles. - -Open-source contributions integrated: - -- Soname support patch for Bugzilla #112. - -Bugs fixed: - -- 112 - fix for soname support. - ------------------------------------------------------------------------- -Intel TBB 2.0 U3 commercial-aligned release (package 017, April 20, 2008) - -Corresponds to commercial 019 (for Linux*, 020; for Mac OS* X, 018) -packages. - -Changes (w.r.t. Intel TBB 2.0 U2 commercial-aligned release): - -- Does not contain open-source-release changes below; this release is - only a minor update of TBB 2.0 U2. -- Removed spin-waiting in pipeline and concurrent_queue. -- A few more small bug fixes from open-source releases below. - ------------------------------------------------------------------------- -20080408 open-source release - -Changes (w.r.t. previous open-source release): - -- count_strings example reworked: new word generator implemented, hash - function replaced, and tbb_allocator is used with std::string class. -- Static methods of spin_rw_mutex were replaced by normal member - functions, and the class name was versioned. -- tacheon example was renamed to tachyon. -- Improved support for Intel(R) Thread Checker. -- A few more minor improvements. - -Open-source contributions integrated: - -- Two sets of Sun patches for IA Solaris support. - ------------------------------------------------------------------------- -20080402 open-source release - -Changes (w.r.t. previous open-source release): - -- Exception handling and cancellation support for tasks and algorithms - fully enabled. -- Exception safety guaranties defined and fixed for all concurrent - containers. -- User-defined memory allocator support added to all concurrent - containers. -- Performance improvement of concurrent_hash_map, spin_rw_mutex. -- Critical fix for a rare race condition during scheduler - initialization/de-initialization. -- New methods added for concurrent containers to be closer to STL, - as well as automatic filters removal from pipeline - and __TBB_AtomicAND function. -- The volatile keyword dropped from where it is not really needed. -- A few more minor improvements. - ------------------------------------------------------------------------- -20080319 open-source release - -Changes (w.r.t. previous open-source release): - -- Support for gcc version 4.3 was added. -- tbb_thread class, near compatible with std::thread expected in C++0x, - was added. - -Bugs fixed: - -- 116 - fix for compilation issues with gcc version 4.2.1. -- 120 - fix for compilation issues with gcc version 4.3. - ------------------------------------------------------------------------- -20080311 open-source release - -Changes (w.r.t. previous open-source release): - -- An enumerator added for pipeline filter types (serial vs. parallel). -- New task_scheduler_observer class introduced, to observe when - threads start and finish interacting with the TBB task scheduler. 
-- task_scheduler_init reverted to not use internal versioned class; - binary compatibility guaranteed with stable releases only. -- Various improvements to code, tests, examples and Makefiles. - ------------------------------------------------------------------------- -20080304 open-source release - -Changes (w.r.t. previous open-source release): - -- Task-to-thread affinity support, previously kept under a macro, - now fully legalized. -- Work-in-progress on cache_aligned_allocator improvements. -- Pipeline really supports parallel input stage; it's no more serialized. -- Various improvements to code, tests, examples and Makefiles. - -Bugs fixed: - -- 119 - fix for scalable_malloc sometimes failing to return a big block. -- TR575 - fixed a deadlock occurring on Windows in startup/shutdown - under some conditions. - ------------------------------------------------------------------------- -20080226 open-source release - -Changes (w.r.t. previous open-source release): - -- Introduced tbb_allocator to select between standard allocator and - tbb::scalable_allocator when available. -- Removed spin-waiting in pipeline and concurrent_queue. -- Improved performance of concurrent_hash_map by using tbb_allocator. -- Improved support for Intel(R) Thread Checker. -- Various improvements to code, tests, examples and Makefiles. - ------------------------------------------------------------------------- -Intel TBB 2.0 U2 commercial-aligned release (package 017, February 14, 2008) - -Corresponds to commercial 017 (for Linux*, 018; for Mac OS* X, 016) -packages. - -Changes (w.r.t. Intel TBB 2.0 U1 commercial-aligned release): - -- Does not contain open-source-release changes below; this release is - only a minor update of TBB 2.0 U1. -- Add support for Microsoft* Visual Studio* 2008, including binary - libraries and VS2008 projects for examples. -- Use SwitchToThread() not Sleep() to yield threads on Windows*. -- Enhancements to Doxygen-readable comments in source code. -- A few more small bug fixes from open-source releases below. - -Bugs fixed: - -- TR569 - Memory leak in concurrent_queue. - ------------------------------------------------------------------------- -20080207 open-source release - -Changes (w.r.t. previous open-source release): - -- Improvements and minor fixes in VS2008 projects for examples. -- Improvements in code for gating worker threads that wait for work, - previously consolidated under #if IMPROVED_GATING, now legalized. -- Cosmetic changes in code, examples, tests. - -Bugs fixed: - -- 113 - Iterators and ranges should be convertible to their const - counterparts. -- TR569 - Memory leak in concurrent_queue. - ------------------------------------------------------------------------- -20080122 open-source release - -Changes (w.r.t. previous open-source release): - -- Updated examples/parallel_for/seismic to improve the visuals and to - use the affinity_partitioner (20071127 and forward) for better - performance. -- Minor improvements to unittests and performance tests. - ------------------------------------------------------------------------- -20080115 open-source release - -Changes (w.r.t. previous open-source release): - -- Cleanup, simplifications and enhancements to the Makefiles for - building the libraries (see build/index.html for high-level - changes) and the examples. -- Use SwitchToThread() not Sleep() to yield threads on Windows*. -- Engineering work-in-progress on exception safety/support. -- Engineering work-in-progress on affinity_partitioner for - parallel_reduce. 
-- Engineering work-in-progress on improved gating for worker threads - (idle workers now block in the OS instead of spinning). -- Enhancements to Doxygen-readable comments in source code. - -Bugs fixed: - -- 102 - Support for parallel build with gmake -j -- 114 - /Wp64 build warning on Windows*. - ------------------------------------------------------------------------- -20071218 open-source release - -Changes (w.r.t. previous open-source release): - -- Full support for Microsoft* Visual Studio* 2008 in open-source. - Binaries for vc9/ will be available in future stable releases. -- New recursive_mutex class. -- Full support for 32-bit PowerMac including export files for builds. -- Improvements to parallel_do. - ------------------------------------------------------------------------- -20071206 open-source release - -Changes (w.r.t. previous open-source release): - -- Support for Microsoft* Visual Studio* 2008 in building libraries - from source as well as in vc9/ projects for examples. -- Small fixes to the affinity_partitioner first introduced in 20071127. -- Small fixes to the thread-stack size hook first introduced in 20071127. -- Engineering work in progress on concurrent_vector. -- Engineering work in progress on exception behavior. -- Unittest improvements. - ------------------------------------------------------------------------- -20071127 open-source release - -Changes (w.r.t. previous open-source release): - -- Task-to-thread affinity support (affinity partitioner) first appears. -- More work on concurrent_vector. -- New parallel_do algorithm (function-style version of parallel while) - and parallel_do/parallel_preorder example. -- New task_scheduler_init() hooks for getting default_num_threads() and - for setting thread stack size. -- Support for weak memory consistency models in the code base. -- Futex usage in the task scheduler (Linux). -- Started adding 32-bit PowerMac support. -- Intel(R) 9.1 compilers are now the base supported Intel(R) compiler - version. -- TBB libraries added to link line automatically on Microsoft Windows* - systems via #pragma comment linker directives. - -Open-source contributions integrated: - -- FreeBSD platform support patches. -- AIX weak memory model patch. - -Bugs fixed: - -- 108 - Removed broken affinity.h reference. -- 101 - Does not build on Debian Lenny (replaced arch with uname -m). - ------------------------------------------------------------------------- -20071030 open-source release - -Changes (w.r.t. previous open-source release): - -- More work on concurrent_vector. -- Better support for building with -Wall -Werror (or not) as desired. -- A few fixes to eliminate extraneous warnings. -- Begin introduction of versioning hooks so that the internal/API - version is tracked via TBB_INTERFACE_VERSION. The newest binary - libraries should always work with previously-compiled code when- - ever possible. -- Engineering work in progress on using futex inside the mutexes (Linux). -- Engineering work in progress on exception behavior. -- Engineering work in progress on a new parallel_do algorithm. -- Unittest improvements. - ------------------------------------------------------------------------- -20070927 open-source release - -Changes (w.r.t. Intel TBB 2.0 U1 commercial-aligned release): - -- Minor update to TBB 2.0 U1 below. -- Begin introduction of new concurrent_vector interfaces not released - with TBB 2.0 U1. 
- ------------------------------------------------------------------------- -Intel TBB 2.0 U1 commercial-aligned release (package 014, October 1, 2007) - -Corresponds to commercial 014 (for Linux*, 016) packages. - -Changes (w.r.t. Intel TBB 2.0 commercial-aligned release): - -- All open-source-release changes down to, and including, TBB 2.0 - below, were incorporated into this release. -- Made a number of changes to the officially supported OS list: - Added Linux* OSs: - Asianux* 3, Debian* 4.0, Fedora Core* 6, Fedora* 7, - Turbo Linux* 11, Ubuntu* 7.04; - Dropped Linux* OSs: - Asianux* 2, Fedora Core* 4, Haansoft* Linux 2006 Server, - Mandriva/Mandrake* 10.1, Miracle Linux* 4.0, - Red Flag* DC Server 5.0; - Only Mac OS* X 10.4.9 (and forward) and Xcode* tool suite 2.4.1 (and - forward) are now supported. -- Commercial installers on Linux* fixed to recommend the correct - binaries to use in more cases, with less unnecessary warnings. -- Changes to eliminate spurious build warnings. - -Open-source contributions integrated: - -- Two small header guard macro patches; it also fixed bug #94. -- New blocked_range3d class. - -Bugs fixed: - -- 93 - Removed misleading comments in task.h. -- 94 - See above. - ------------------------------------------------------------------------- -20070815 open-source release - -Changes: - -- Changes to eliminate spurious build warnings. -- Engineering work in progress on concurrent_vector allocator behavior. -- Added hooks to use the Intel(R) compiler code coverage tools. - -Open-source contributions integrated: - -- Mac OS* X build warning patch. - -Bugs fixed: - -- 88 - Fixed TBB compilation errors if both VS2005 and Windows SDK are - installed. - ------------------------------------------------------------------------- -20070719 open-source release - -Changes: - -- Minor update to TBB 2.0 commercial-aligned release below. -- Changes to eliminate spurious build warnings. - ------------------------------------------------------------------------- -Intel TBB 2.0 commercial-aligned release (package 010, July 19, 2007) - -Corresponds to commercial 010 (for Linux*, 012) packages. - -- TBB open-source debut release. - ------------------------------------------------------------------------- -Intel TBB 1.1 commercial release (April 10, 2007) - -Changes (w.r.t. Intel TBB 1.0 commercial release): - -- auto_partitioner which offered an automatic alternative to specifying - a grain size parameter to estimate the best granularity for tasks. -- The release was added to the Intel(R) C++ Compiler 10.0 Pro. - ------------------------------------------------------------------------- -Intel TBB 1.0 Update 2 commercial release - -Changes (w.r.t. Intel TBB 1.0 Update 1 commercial release): - -- Mac OS* X 64-bit support added. -- Source packages for commercial releases introduced. - ------------------------------------------------------------------------- -Intel TBB 1.0 Update 1 commercial-aligned release - -Changes (w.r.t. Intel TBB 1.0 commercial release): - -- Fix for critical package issue on Mac OS* X. - ------------------------------------------------------------------------- -Intel TBB 1.0 commercial release (August 29, 2006) - -Changes (w.r.t. Intel TBB 1.0 beta commercial release): - -- New namespace (and compatibility headers for old namespace). - Namespaces are tbb and tbb::internal and all classes are in the - underscore_style not the WindowsStyle. -- New class: scalable_allocator (and cache_aligned_allocator using that - if it exists). 
-- Added parallel_for/tacheon example. -- Removed C-style casts from headers for better C++ compliance. -- Bug fixes. -- Documentation improvements. -- Improved performance of the concurrent_hash_map class. -- Upgraded parallel_sort() to support STL-style random-access iterators - instead of just pointers. -- The Windows vs7_1 directories renamed to vs7.1 in examples. -- New class: spin version of reader-writer lock. -- Added push_back() interface to concurrent_vector(). - ------------------------------------------------------------------------- -Intel TBB 1.0 beta commercial release - -Initial release. - -Features / APIs: - -- Concurrent containers: ConcurrentHashTable, ConcurrentVector, - ConcurrentQueue. -- Parallel algorithms: ParallelFor, ParallelReduce, ParallelScan, - ParallelWhile, Pipeline, ParallelSort. -- Support: AlignedSpace, BlockedRange (i.e., 1D), BlockedRange2D -- Task scheduler with multi-master support. -- Atomics: read, write, fetch-and-store, fetch-and-add, compare-and-swap. -- Locks: spin, reader-writer, queuing, OS-wrapper. -- Memory allocation: STL-style memory allocator that avoids false - sharing. -- Timers. - -Tools Support: -- Intel(R) Thread Checker 3.0. -- Intel(R) Thread Profiler 3.0. - -Documentation: -- First Use Documents: README.txt, INSTALL.txt, Release_Notes.txt, - Doc_Index.html, Getting_Started.pdf, Tutorial.pdf, Reference.pdf. -- Class hierarchy HTML pages (Doxygen). -- Tree of index.html pages for navigating the installed package, esp. - for the examples. - -Examples: -- One for each of these TBB features: ConcurrentHashTable, ParallelFor, - ParallelReduce, ParallelWhile, Pipeline, Task. -- Live copies of examples from Getting_Started.pdf. -- TestAll example that exercises every class and header in the package - (i.e., a "liveness test"). -- Compilers: see Release_Notes.txt. -- APIs: OpenMP, WinThreads, Pthreads. - -Packaging: -- Package for Windows installs IA-32 and EM64T bits. -- Package for Linux installs IA-32, EM64T and IPF bits. -- Package for Mac OS* X installs IA-32 bits. -- All packages support Intel(R) software setup assistant (ISSA) and - install-time FLEXlm license checking. -- ISSA support allows license file to be specified directly in case of - no Internet connection or problems with IRC or serial #s. -- Linux installer allows root or non-root, RPM or non-RPM installs. -- FLEXlm license servers (for those who need floating/counted licenses) - are provided separately on Intel(R) Premier. - ------------------------------------------------------------------------- -Intel and Cilk are registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. - -* Other names and brands may be claimed as the property of others. diff --git a/src/tbb/CMakeLists.txt b/src/tbb/CMakeLists.txt new file mode 100644 index 000000000..811a3a554 --- /dev/null +++ b/src/tbb/CMakeLists.txt @@ -0,0 +1,348 @@ +# Copyright (c) 2020-2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
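+
+# Top-level build script for the bundled oneTBB sources. In outline, it parses
+# the TBB version numbers from include/oneapi/tbb/version.h, configures the
+# C++ standard and per-configuration output directories, and then adds the
+# tbb, tbbmalloc, and tbbbind targets together with the optional test,
+# example, and install rules.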
+
+cmake_minimum_required(VERSION 3.5)
+
+# Enable CMake policies
+
+if (POLICY CMP0068)
+    # RPATH settings do not affect install_name on macOS since CMake 3.9
+    cmake_policy(SET CMP0068 NEW)
+endif()
+
+if (POLICY CMP0091)
+    # The NEW behavior for this policy is to not place MSVC runtime library flags in the default
+    # CMAKE_<LANG>_FLAGS_<CONFIG> cache entries and use CMAKE_MSVC_RUNTIME_LIBRARY abstraction instead.
+    cmake_policy(SET CMP0091 NEW)
+elseif (DEFINED CMAKE_MSVC_RUNTIME_LIBRARY)
+    message(FATAL_ERROR "CMAKE_MSVC_RUNTIME_LIBRARY was defined while policy CMP0091 is not available. Use CMake 3.15 or newer.")
+endif()
+
+if (TBB_WINDOWS_DRIVER AND (NOT ("${CMAKE_MSVC_RUNTIME_LIBRARY}" STREQUAL MultiThreaded OR "${CMAKE_MSVC_RUNTIME_LIBRARY}" STREQUAL MultiThreadedDebug)))
+    message(FATAL_ERROR "Enabled TBB_WINDOWS_DRIVER requires CMAKE_MSVC_RUNTIME_LIBRARY to be set to MultiThreaded or MultiThreadedDebug.")
+endif()
+
+# Enable support of minimum supported macOS version flag
+if (APPLE)
+    if (NOT CMAKE_CXX_OSX_DEPLOYMENT_TARGET_FLAG)
+        set(CMAKE_CXX_OSX_DEPLOYMENT_TARGET_FLAG "-mmacosx-version-min=" CACHE STRING "Minimum macOS version flag")
+    endif()
+    if (NOT CMAKE_C_OSX_DEPLOYMENT_TARGET_FLAG)
+        set(CMAKE_C_OSX_DEPLOYMENT_TARGET_FLAG "-mmacosx-version-min=" CACHE STRING "Minimum macOS version flag")
+    endif()
+endif()
+
+file(READ include/oneapi/tbb/version.h _tbb_version_info)
+string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" _tbb_ver_major "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" _tbb_ver_minor "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define TBB_VERSION_PATCH ([0-9]+).*" "\\1" _tbb_ver_patch "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" TBB_INTERFACE_VERSION "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define __TBB_BINARY_VERSION ([0-9]+).*" "\\1" TBB_BINARY_VERSION "${_tbb_version_info}")
+string(REGEX REPLACE "..(..)." "\\1" TBB_BINARY_MINOR_VERSION "${TBB_INTERFACE_VERSION}")
+set(TBBMALLOC_BINARY_VERSION 2)
+set(TBBBIND_BINARY_VERSION 3)
+
+project(TBB VERSION ${_tbb_ver_major}.${_tbb_ver_minor}.${_tbb_ver_patch} LANGUAGES CXX)
+unset(_tbb_ver_major)
+unset(_tbb_ver_minor)
+
+include(CheckCXXCompilerFlag)
+include(GNUInstallDirs)
+include(CMakeDependentOption)
+
+# ---------------------------------------------------------------------------------------------------------
+# Handle C++ standard version.
+if (NOT MSVC) # no need to cover MSVC as it uses C++14 by default.
+    if (NOT CMAKE_CXX_STANDARD)
+        set(CMAKE_CXX_STANDARD 11)
+    endif()
+
+    if (CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION) # if standard option was detected by CMake
+        set(CMAKE_CXX_STANDARD_REQUIRED ON)
+    else() # if standard option wasn't detected by CMake (e.g. for Intel Compiler with CMake 3.1)
+        # TBB_CXX_STD_FLAG should be added to targets via target_compile_options
+        set(TBB_CXX_STD_FLAG -std=c++${CMAKE_CXX_STANDARD})
+
+        check_cxx_compiler_flag(${TBB_CXX_STD_FLAG} c++${CMAKE_CXX_STANDARD})
+        if (NOT c++${CMAKE_CXX_STANDARD})
+            message(FATAL_ERROR "C++${CMAKE_CXX_STANDARD} (${TBB_CXX_STD_FLAG}) support is required")
+        endif()
+        unset(c++${CMAKE_CXX_STANDARD})
+    endif()
+endif()
+
+set(CMAKE_CXX_EXTENSIONS OFF) # use -std=c++... instead of -std=gnu++...
+# ---------------------------------------------------------------------------------------------------------
+
+# Detect architecture (bitness).
+if (CMAKE_SIZEOF_VOID_P EQUAL 4) + set(TBB_ARCH 32) +else() + set(TBB_ARCH 64) +endif() + +option(TBB_TEST "Enable testing" ON) +option(TBB_EXAMPLES "Enable examples" OFF) +option(TBB_STRICT "Treat compiler warnings as errors" ON) +option(TBB_WINDOWS_DRIVER "Build as Universal Windows Driver (UWD)" OFF) +option(TBB_NO_APPCONTAINER "Apply /APPCONTAINER:NO (for testing binaries for Windows Store)" OFF) +option(TBB4PY_BUILD "Enable tbb4py build" OFF) +option(TBB_BUILD "Enable tbb build" ON) +option(TBBMALLOC_BUILD "Enable tbbmalloc build" ON) +cmake_dependent_option(TBBMALLOC_PROXY_BUILD "Enable tbbmalloc_proxy build" ON "TBBMALLOC_BUILD" OFF) +option(TBB_CPF "Enable preview features of the library" OFF) +option(TBB_FIND_PACKAGE "Enable search for external oneTBB using find_package instead of build from sources" OFF) +option(TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH "Disable HWLOC automatic search by pkg-config tool" ${CMAKE_CROSSCOMPILING}) +option(TBB_ENABLE_IPO "Enable Interprocedural Optimization (IPO) during the compilation" ON) +option(TBB_FUZZ_TESTING "Enable fuzz testing" OFF) +option(TBB_INSTALL "Enable installation" ON) +if(LINUX) +option(TBB_LINUX_SEPARATE_DBG "Enable separation of the debug symbols during the build" OFF) +endif() +if(APPLE) +option(TBB_BUILD_APPLE_FRAMEWORKS "Build as Apple Frameworks" OFF) +endif() + +if (NOT DEFINED BUILD_SHARED_LIBS) + set(BUILD_SHARED_LIBS ON) +endif() + +if (NOT BUILD_SHARED_LIBS) + if(NOT DEFINED CMAKE_POSITION_INDEPENDENT_CODE) + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() + message(WARNING "You are building oneTBB as a static library. This is highly discouraged and such configuration is not supported. Consider building a dynamic library to avoid unforeseen issues.") +endif() + +if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) + set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "Build type" FORCE) + message(STATUS "CMAKE_BUILD_TYPE is not specified. 
Using default: ${CMAKE_BUILD_TYPE}")
+    # Possible values of build type for cmake-gui
+    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
+endif()
+
+if (CMAKE_BUILD_TYPE)
+    string(TOLOWER ${CMAKE_BUILD_TYPE} _tbb_build_type)
+    if (_tbb_build_type STREQUAL "debug")
+        set(TBB_ENABLE_IPO OFF)
+    endif()
+    unset(_tbb_build_type)
+endif()
+
+# -------------------------------------------------------------------
+# Files and folders naming
+set(CMAKE_DEBUG_POSTFIX _debug)
+
+if (NOT DEFINED TBB_OUTPUT_DIR_BASE)
+    if (MSVC)
+        if (NOT DEFINED CMAKE_MSVC_RUNTIME_LIBRARY OR CMAKE_MSVC_RUNTIME_LIBRARY MATCHES DLL)
+            set(_tbb_msvc_runtime _md)
+        else()
+            set(_tbb_msvc_runtime _mt)
+        endif()
+
+        if (WINDOWS_STORE)
+            if (TBB_NO_APPCONTAINER)
+                set(_tbb_win_store _wsnoappcont)
+            else()
+                set(_tbb_win_store _ws)
+            endif()
+        elseif(TBB_WINDOWS_DRIVER)
+            set(_tbb_win_store _wd)
+        endif()
+    endif()
+
+    string(REGEX MATCH "^([0-9]+\.[0-9]+|[0-9]+)" _tbb_compiler_version_short ${CMAKE_CXX_COMPILER_VERSION})
+    string(TOLOWER ${CMAKE_CXX_COMPILER_ID}_${_tbb_compiler_version_short}_cxx${CMAKE_CXX_STANDARD}_${TBB_ARCH}${_tbb_msvc_runtime}${_tbb_win_store} TBB_OUTPUT_DIR_BASE)
+    unset(_tbb_msvc_runtime)
+    unset(_tbb_win_store)
+    unset(_tbb_compiler_version_short)
+endif()
+
+foreach(output_type LIBRARY ARCHIVE PDB RUNTIME)
+    if (CMAKE_BUILD_TYPE)
+        string(TOLOWER ${CMAKE_BUILD_TYPE} _tbb_build_type_lower)
+        set(CMAKE_${output_type}_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${TBB_OUTPUT_DIR_BASE}_${_tbb_build_type_lower})
+        unset(_tbb_build_type_lower)
+    endif()
+
+    if (CMAKE_CONFIGURATION_TYPES)
+        foreach(suffix ${CMAKE_CONFIGURATION_TYPES})
+            string(TOUPPER ${suffix} _tbb_suffix_upper)
+            string(TOLOWER ${suffix} _tbb_suffix_lower)
+            set(CMAKE_${output_type}_OUTPUT_DIRECTORY_${_tbb_suffix_upper} ${CMAKE_BINARY_DIR}/${TBB_OUTPUT_DIR_BASE}_${_tbb_suffix_lower})
+        endforeach()
+        unset(_tbb_suffix_lower)
+        unset(_tbb_suffix_upper)
+    endif()
+endforeach()
+
+if (CMAKE_CONFIGURATION_TYPES)
+    # We can't use generator expressions in a cmake variable name.
+    set(TBB_TEST_WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/${TBB_OUTPUT_DIR_BASE}_$<LOWER_CASE:$<CONFIG>>)
+else()
+    set(TBB_TEST_WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
+endif()
+
+# -------------------------------------------------------------------
+
+# -------------------------------------------------------------------
+# Common dependencies
+# force -pthread during compilation for Emscripten
+if (EMSCRIPTEN AND NOT EMSCRIPTEN_WITHOUT_PTHREAD)
+    set(THREADS_HAVE_PTHREAD_ARG TRUE)
+endif()
+
+set(THREADS_PREFER_PTHREAD_FLAG TRUE)
+find_package(Threads REQUIRED)
+# -------------------------------------------------------------------
+
+file(GLOB FILES_WITH_EXTRA_TARGETS ${CMAKE_CURRENT_SOURCE_DIR}/cmake/*.cmake)
+foreach(FILE_WITH_EXTRA_TARGETS ${FILES_WITH_EXTRA_TARGETS})
+    include(${FILE_WITH_EXTRA_TARGETS})
+endforeach()
+
+# - Enabling LTO on Android causes the NDK bug.
+#   NDK throws the warning: "argument unused during compilation: '-Wa,--noexecstack'"
+# - For some reason GCC does not instrument code with Thread Sanitizer when lto is enabled and C linker is used.
+if (TBB_ENABLE_IPO AND BUILD_SHARED_LIBS AND NOT ANDROID_PLATFORM AND NOT TBB_SANITIZE MATCHES "thread") + if (NOT CMAKE_VERSION VERSION_LESS 3.9) + cmake_policy(SET CMP0069 NEW) + include(CheckIPOSupported) + check_ipo_supported(RESULT TBB_IPO_PROPERTY) + else() + set(TBB_IPO_FLAGS TRUE) + endif() + if (TBB_IPO_PROPERTY OR TBB_IPO_FLAGS) + message(STATUS "IPO enabled") + endif() +endif() + +set(TBB_COMPILER_SETTINGS_FILE ${CMAKE_CURRENT_SOURCE_DIR}/cmake/compilers/${CMAKE_CXX_COMPILER_ID}.cmake) +if (EXISTS ${TBB_COMPILER_SETTINGS_FILE}) + include(${TBB_COMPILER_SETTINGS_FILE}) +else() + message(WARNING "TBB compiler settings not found ${TBB_COMPILER_SETTINGS_FILE}") +endif() + +if (TBB_FIND_PACKAGE AND TBB_DIR) + # Allow specifying external TBB to test with. + # Do not add main targets and installation instructions in that case. + message(STATUS "Using external TBB for testing") + find_package(TBB REQUIRED) +else() + if (TBB_BUILD) + add_subdirectory(src/tbb) + endif() + if (TBBMALLOC_BUILD) + add_subdirectory(src/tbbmalloc) + if(TBBMALLOC_PROXY_BUILD AND NOT "${MSVC_CXX_ARCHITECTURE_ID}" MATCHES "ARM64") + add_subdirectory(src/tbbmalloc_proxy) + endif() + endif() + if (NOT BUILD_SHARED_LIBS) + message(STATUS "TBBBind build targets are disabled due to unsupported environment") + else() + add_subdirectory(src/tbbbind) + endif() + if (TBB_INSTALL) + # ------------------------------------------------------------------- + # Installation instructions + include(CMakePackageConfigHelpers) + + install(DIRECTORY include/ + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + COMPONENT devel) + + install(EXPORT ${PROJECT_NAME}Targets + NAMESPACE TBB:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} + COMPONENT devel) + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake + "include(\${CMAKE_CURRENT_LIST_DIR}/${PROJECT_NAME}Targets.cmake)\n") + if (NOT BUILD_SHARED_LIBS) + file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake + "include(CMakeFindDependencyMacro)\nfind_dependency(Threads)\n") + endif() + + write_basic_package_version_file("${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" + COMPATIBILITY AnyNewerVersion) + + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" + "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} + COMPONENT devel) + + install(FILES "README.md" + DESTINATION ${CMAKE_INSTALL_DOCDIR} + COMPONENT devel) + # ------------------------------------------------------------------- + endif() +endif() + +if (TBB_TEST) + enable_testing() + add_subdirectory(test) +endif() + +if (TBB_EXAMPLES) + add_subdirectory(examples) +endif() + +if (TBB_BENCH) + if (NOT EXISTS ${CMAKE_CURRENT_LIST_DIR}/benchmark) + message(FATAL_ERROR "Benchmarks are not supported yet") + endif() + + enable_testing() + add_subdirectory(benchmark) +endif() + +if (ANDROID_PLATFORM) + if ("${ANDROID_STL}" STREQUAL "c++_shared") + if (${ANDROID_NDK_MAJOR} GREATER_EQUAL "25") + if(ANDROID_ABI STREQUAL "arm64-v8a") + set(ANDROID_TOOLCHAIN_NAME "aarch64-linux-android") + elseif(ANDROID_ABI STREQUAL "x86_64") + set(ANDROID_TOOLCHAIN_NAME "x86_64-linux-android") + elseif(ANDROID_ABI STREQUAL "armeabi-v7a") + set(ANDROID_TOOLCHAIN_NAME "arm-linux-androideabi") + elseif(ANDROID_ABI STREQUAL "x86") + set(ANDROID_TOOLCHAIN_NAME "i686-linux-android") + endif() + + configure_file( + "${ANDROID_TOOLCHAIN_ROOT}/sysroot/usr/lib/${ANDROID_TOOLCHAIN_NAME}/libc++_shared.so" + 
"${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libc++_shared.so" + COPYONLY) + else() + configure_file( + "${ANDROID_NDK}/sources/cxx-stl/llvm-libc++/libs/${ANDROID_ABI}/libc++_shared.so" + "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libc++_shared.so" + COPYONLY) + endif() + endif() + # This custom target may be implemented without separate CMake script, but it requires + # ADB(Android Debug Bridge) executable file availability, so to incapsulate this requirement + # only for corresponding custom target, it was implemented by this way. + add_custom_target(device_environment_cleanup COMMAND ${CMAKE_COMMAND} + -P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/android/device_environment_cleanup.cmake) +endif() + +if (TBB4PY_BUILD) + add_subdirectory(python) +endif() + +# Keep it the last instruction. +add_subdirectory(cmake/post_install) diff --git a/src/tbb/CODEOWNERS b/src/tbb/CODEOWNERS new file mode 100644 index 000000000..78105ac7e --- /dev/null +++ b/src/tbb/CODEOWNERS @@ -0,0 +1,27 @@ +# Where component owners are known, add them here. + +/oneTBB/src/tbb/ @pavelkumbrasev +/oneTBB/src/tbb/ @dnmokhov +/oneTBB/src/tbb/ @JhaShweta1 +/oneTBB/src/tbb/ @sarathnandu +/oneTBB/include/oneapi/tbb/parallel_* @pavelkumbrasev +/oneTBB/include/oneapi/tbb/concurrent_* @kboyarinov +/oneTBB/include/oneapi/tbb/flow_graph* @kboyarinov +/oneTBB/include/oneapi/tbb/flow_graph* @aleksei-fedotov +/oneTBB/include/oneapi/tbb/detail/_flow_graph* @kboyarinov +/oneTBB/include/oneapi/tbb/detail/_flow_graph* @aleksei-fedotov +/oneTBB/include/oneapi/tbb/detail/_concurrent* @kboyarinov +/oneTBB/src/doc @aepanchi +/oneTBB/src/tbbbind/ @isaevil +/oneTBB/src/tbbmalloc/ @lplewa +/oneTBB/src/tbbmalloc_proxy/ @lplewa +/oneTBB/cmake/ @isaevil +/oneTBB/*CMakeLists.txt @isaevil +/oneTBB/python/ @sarathnandu +/oneTBB/python/ @isaevil + +# Bazel build related files. +/oneTBB/.bazelversion @Vertexwahn +/oneTBB/Bazel.md @Vertexwahn +/oneTBB/BUILD.bazel @Vertexwahn +/oneTBB/MODULE.bazel @Vertexwahn diff --git a/src/tbb/CODE_OF_CONDUCT.md b/src/tbb/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c16970739 --- /dev/null +++ b/src/tbb/CODE_OF_CONDUCT.md @@ -0,0 +1,134 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +oneTBBCodeOfConduct At intel DOT com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations + diff --git a/src/tbb/CONTRIBUTING.md b/src/tbb/CONTRIBUTING.md new file mode 100644 index 000000000..b2b6a968c --- /dev/null +++ b/src/tbb/CONTRIBUTING.md @@ -0,0 +1,45 @@ + + +# How to Contribute +As an open source project, we welcome community contributions to oneAPI Threading Building Blocks (oneTBB). This document explains how to participate in project conversations, log bugs and enhancement requests, and submit code patches to the project. + +## Licensing + +Licensing is very important to open source projects. It helps ensure the software continues to be available under the terms that the author desired. The oneTBB project uses the [Apache 2.0 License](https://github.com/oneapi-src/oneTBB/blob/master/LICENSE.txt), a permissive open source license that allows you to freely use, modify, and distribute your own products that include Apache 2.0 licensed software. By contributing to the oneTBB project, you agree to the license and copyright terms therein and release your own contributions under these terms. + +Some imported or reused components within oneTBB use other licenses, as described in [third-party-programs.txt](https://github.com/oneapi-src/oneTBB/blob/master/third-party-programs.txt). By carefully reviewing potential contributions, we can ensure that the community can develop products with oneTBB without concerns over patent or copyright issues. + +## Prerequisites + +As a contributor, you’ll want to be familiar with the oneTBB project and the repository layout. You should also know how to use it as explained in the [oneTBB documentation](https://oneapi-src.github.io/oneTBB/) and how to set up your build development environment to configure, build, and test oneTBB as explained in the [oneTBB Build System Description](cmake/README.md). + +## Pull Requests + +You can find all [open oneTBB pull requests](https://github.com/oneapi-src/oneTBB/pulls) on GitHub. 
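+
+As a sketch, a typical workflow for preparing a pull request might look like the
+following (the fork URL and branch name are illustrative, not project requirements):
+
+```sh
+# Clone your fork of the repository (replace <your-username>).
+git clone https://github.com/<your-username>/oneTBB.git
+cd oneTBB
+# Create a topic branch for your change.
+git checkout -b my-topic-branch
+# ...edit, build, and run the tests as described above...
+git commit -am "Describe your change"
+git push origin my-topic-branch
+# Then open a pull request against the master branch on GitHub.
+```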
+ +### Before contributing changes directly to the oneTBB repository + +* Make sure you can build the product and run all the tests with your patch. +* For a larger feature, provide a relevant test. +* Document your code. The oneTBB project uses reStructuredText for documentation. +* Update the copyright year in the first line of the changing file(s). + For example, if you commit your changes in 2022: + * the copyright year should be `2005-2022` for existing files + * the copyright year should be `2022` for new files +* Submit a pull request into the master branch. You can submit changes with a pull request (preferred) or by sending an email patch. + +Continuous Integration (CI) testing is enabled for the repository. Your pull request must pass all checks before it can be merged. We will review your contribution and may provide feedback to guide you if any additional fixes or modifications are necessary. When reviewed and accepted, your pull request will be merged into our GitHub repository. diff --git a/src/tbb/COPYING b/src/tbb/COPYING deleted file mode 100644 index 5af6ed874..000000000 --- a/src/tbb/COPYING +++ /dev/null @@ -1,353 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. 
If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. 
(Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. 
Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. 
The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- <signature of Ty Coon>, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
----------------- END OF Gnu General Public License ----------------
-
-The source code of Threading Building Blocks is distributed under version 2
-of the GNU General Public License, with the so-called "runtime exception,"
-as follows (or see any header or implementation file):
-
- As a special exception, you may use this file as part of a free software
- library without restriction. Specifically, if other files instantiate
- templates or use macros or inline functions from this file, or you compile
- this file and link it with other files to produce an executable, this
- file does not by itself cause the resulting executable to be covered by
- the GNU General Public License. This exception does not however
- invalidate any other reasons why the executable file might be covered by
- the GNU General Public License.
diff --git a/src/tbb/INSTALL.md b/src/tbb/INSTALL.md
new file mode 100644
index 000000000..0ac95f875
--- /dev/null
+++ b/src/tbb/INSTALL.md
@@ -0,0 +1,135 @@
+# Installation from Sources
+
+
+## Prerequisites
+
+ - Make sure you have installed CMake version 3.1 (or newer) on your system. oneTBB uses the CMake build system.
+ - Configure and build oneTBB. To work with build configurations, see [Build System Description](cmake/README.md).
+
+
+## Configure oneTBB
+
+At the command prompt, type:
+```
+cmake <options> <source-dir>
+```
+
+You may want to use some additional options for configuration:
+
+| Option | Purpose | Description |
+| ------ |------ | ------ |
+| `-G <generator>` | Specify project generator | For more information, run `cmake --help`. |
+|`-DCMAKE_BUILD_TYPE=Debug` | Specify for Debug build | Not applicable for multi-configuration generators such as the Visual Studio generator. |
+
+
+## Build oneTBB
+
+To build the system, run:
+```
+cmake --build .
+```
+
+Some useful build options:
+- `--target <target>` - specific target, "all" is default.
+- `--config <configuration>` - build configuration, applicable only for multi-config generators such as the Visual Studio generator.
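+
+For example, with a multi-config generator you might build only the library target in a chosen configuration (a sketch; the generator string and the ``tbb`` target name are assumptions about your setup):
+```
+cmake -G "Visual Studio 17 2022" ..
+cmake --build . --target tbb --config Debug
+```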
+
+
+## Install and Pack oneTBB
+
+---
+**NOTE**
+
+Be careful about the installation prefix: it defaults to `/usr/local` on UNIX* and `c:/Program Files/${PROJECT_NAME}` on Windows* OS.
+You can define a custom `CMAKE_INSTALL_PREFIX` during configuration:
+
+```
+cmake -DCMAKE_INSTALL_PREFIX=/my/install/prefix ..
+```
+
+---
+
+Installation can also be done using:
+
+```
+cmake --install <build-dir>
+```
+
+Alternatively, the special ``install`` target can be used for installation, e.g. ``make install``.
+
+You can install specific components for a partial installation.
+
+The following install components are supported:
+- `runtime` - oneTBB runtime package (core shared libraries and `.dll` files on Windows* OS).
+- `devel` - oneTBB development package (header files, CMake integration files, library symbolic links, and `.lib` files on Windows* OS).
+- `tbb4py` - [oneTBB Module for Python](https://github.com/oneapi-src/oneTBB/blob/master/python/README.md).
+
+If you want to install specific components after configuration and build, run:
+
+```bash
+cmake -DCOMPONENT=<component> [-DBUILD_TYPE=<build type>] -P cmake_install.cmake
+```
+
+Simple packaging using CPack is supported.
+The following commands allow you to create a simple portable package that includes header files, libraries, and integration files for CMake:
+
+```bash
+cmake ..
+cpack
+```
+
+## Installation from vcpkg
+
+You can download and install oneTBB using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+```sh
+ git clone https://github.com/Microsoft/vcpkg.git
+ cd vcpkg
+ ./bootstrap-vcpkg.sh  # .\bootstrap-vcpkg.bat (for Windows)
+ ./vcpkg integrate install
+ ./vcpkg install tbb
+```
+
+The oneTBB port in vcpkg is kept up to date by Microsoft* team members and community contributors. If the version is out of date, create an issue or pull request on the [vcpkg repository](https://github.com/Microsoft/vcpkg).
+
+## Example of Installation
+
+### Single-configuration generators
+
+The following example demonstrates how to install oneTBB with single-configuration generators (e.g. GNU Make or Ninja).
+```bash
+# Do our experiments in /tmp
+cd /tmp
+# Clone oneTBB repository
+git clone https://github.com/oneapi-src/oneTBB.git
+cd oneTBB
+# Create binary directory for out-of-source build
+mkdir build && cd build
+# Configure: customize CMAKE_INSTALL_PREFIX and disable TBB_TEST to avoid building tests
+cmake -DCMAKE_INSTALL_PREFIX=/tmp/my_installed_onetbb -DTBB_TEST=OFF ..
+# Build
+cmake --build .
+# Install
+cmake --install .
+# Well done! Your installed oneTBB is in /tmp/my_installed_onetbb
+```
+
+### Multi-configuration generators
+
+The following example demonstrates how to install oneTBB with multi-configuration generators such as Visual Studio*.
+
+Choose the configuration during the build and install steps:
+```batch
+REM Do our experiments in %TMP%
+cd %TMP%
+REM Clone oneTBB repository
+git clone https://github.com/oneapi-src/oneTBB.git
+cd oneTBB
+REM Create binary directory for out-of-source build
+mkdir build && cd build
+REM Configure: customize CMAKE_INSTALL_PREFIX and disable TBB_TEST to avoid building tests
+cmake -DCMAKE_INSTALL_PREFIX=%TMP%\my_installed_onetbb -DTBB_TEST=OFF ..
+REM Build "release with debug information" configuration
+cmake --build . --config relwithdebinfo
+REM Install "release with debug information" configuration
+cmake --install . --config relwithdebinfo
+REM Well done!
Your installed oneTBB is in %TMP%\my_installed_onetbb +``` diff --git a/src/tbb/LICENSE.txt b/src/tbb/LICENSE.txt new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/src/tbb/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/tbb/Makefile b/src/tbb/Makefile deleted file mode 100644 index dbdd3a20b..000000000 --- a/src/tbb/Makefile +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -tbb_root?=. 
-include $(tbb_root)/build/common.inc -.PHONY: default all tbb tbbmalloc tbbproxy test examples - -#workaround for non-depend targets tbb and tbbmalloc which both depend on version_string.ver -#According to documentation, recursively invoked make commands can process their targets in parallel -.NOTPARALLEL: tbb tbbmalloc tbbproxy - -default: tbb tbbmalloc $(if $(use_proxy),tbbproxy) - -all: tbb tbbmalloc tbbproxy test examples - -tbb: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbb cfg=debug - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbb cfg=release - -tbbmalloc: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc - -tbbproxy: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=debug tbbproxy - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=release tbbproxy - -test: tbb tbbmalloc $(if $(use_proxy),tbbproxy) - -$(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_test - -$(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.test cfg=debug - -$(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_test - -$(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.test cfg=release - -rml: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.rml cfg=debug - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.rml cfg=release - - -examples: tbb tbbmalloc - $(MAKE) -C examples -r -f Makefile tbb_root=.. release test - -.PHONY: clean clean_examples mkdir info - -clean: clean_examples - $(shell $(RM) $(work_dir)_release$(SLASH)*.* >$(NUL) 2>$(NUL)) - $(shell $(RD) $(work_dir)_release >$(NUL) 2>$(NUL)) - $(shell $(RM) $(work_dir)_debug$(SLASH)*.* >$(NUL) 2>$(NUL)) - $(shell $(RD) $(work_dir)_debug >$(NUL) 2>$(NUL)) - @echo clean done - -clean_examples: - $(shell $(MAKE) -s -i -r -C examples -f Makefile tbb_root=.. clean >$(NUL) 2>$(NUL)) - -mkdir: - $(shell $(MD) "$(work_dir)_release" >$(NUL) 2>$(NUL)) - $(shell $(MD) "$(work_dir)_debug" >$(NUL) 2>$(NUL)) - @echo Created $(work_dir)_release and ..._debug directories - -info: - @echo OS: $(tbb_os) - @echo arch=$(arch) - @echo compiler=$(compiler) - @echo runtime=$(runtime) - @echo tbb_build_prefix=$(tbb_build_prefix) - diff --git a/src/tbb/README b/src/tbb/README deleted file mode 100644 index fcc87af0c..000000000 --- a/src/tbb/README +++ /dev/null @@ -1,11 +0,0 @@ -Intel(R) Threading Building Blocks - README - -See index.html for directions and documentation. - -If source is present (./Makefile and src/ directories), -type 'gmake' in this directory to build and test. - -See examples/index.html for runnable examples and directions. - -See http://threadingbuildingblocks.org for full documentation -and software information. 
diff --git a/src/tbb/README.md b/src/tbb/README.md
new file mode 100644
index 000000000..2e7c2e81b
--- /dev/null
+++ b/src/tbb/README.md
@@ -0,0 +1,70 @@
+# oneAPI Threading Building Blocks (oneTBB)
+[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE.txt) [![oneTBB CI](https://github.com/oneapi-src/oneTBB/actions/workflows/ci.yml/badge.svg)](https://github.com/oneapi-src/oneTBB/actions/workflows/ci.yml?query=branch%3Amaster)
+[![Join the community on GitHub Discussions](https://badgen.net/badge/join%20the%20discussion/on%20github/blue?icon=github)](https://github.com/oneapi-src/oneTBB/discussions)
+[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9125/badge)](https://www.bestpractices.dev/projects/9125)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/oneapi-src/oneTBB/badge)](https://securityscorecards.dev/viewer/?uri=github.com/oneapi-src/oneTBB)
+
+oneTBB is a flexible C++ library that simplifies the work of adding parallelism
+to complex applications, even if you are not a threading expert.
+
+The library lets you easily write parallel programs that take full advantage of multi-core performance. Such programs are portable,
+composable, and scale in a future-proof way. oneTBB provides you with functions, interfaces, and classes to parallelize and scale your code.
+All you have to do is use the templates, as the sketch after the list below shows.
+
+The library differs from typical threading packages in the following ways:
+* oneTBB enables you to specify logical parallelism instead of threads.
+* oneTBB targets threading for performance.
+* oneTBB is compatible with other threading packages.
+* oneTBB emphasizes scalable, data parallel programming.
+* oneTBB relies on generic programming.
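+
+A minimal usage sketch (the two-bound `parallel_for(first, last, body)` overload used here is part of the public oneTBB API; the data and loop body are purely illustrative):
+
+```cpp
+#include <oneapi/tbb/parallel_for.h>
+
+#include <cstddef>
+#include <vector>
+
+int main() {
+    std::vector<float> data(1'000'000, 1.0f);
+    // oneTBB partitions the index range and schedules the chunks onto
+    // worker threads; no explicit thread management is needed.
+    oneapi::tbb::parallel_for(std::size_t(0), data.size(), [&](std::size_t i) {
+        data[i] *= 2.0f;
+    });
+    return 0;
+}
+```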
+
+Refer to oneTBB [examples](examples) and [samples](https://github.com/oneapi-src/oneAPI-samples/tree/master/Libraries/oneTBB) to see how you can use the library.
+
+oneTBB is part of the [UXL Foundation](http://www.uxlfoundation.org) and is an implementation of the [oneAPI specification](https://oneapi.io).
+
+> **_NOTE:_** Threading Building Blocks (TBB) is now called oneAPI Threading Building Blocks (oneTBB) to highlight that the tool is part of the oneAPI ecosystem.
+
+## Release Information
+
+See [Release Notes](RELEASE_NOTES.md) and [System Requirements](SYSTEM_REQUIREMENTS.md).
+
+## Documentation
+* [oneTBB Specification](https://spec.oneapi.com/versions/latest/elements/oneTBB/source/nested-index.html)
+* [oneTBB Developer Guide and Reference](https://oneapi-src.github.io/oneTBB)
+* [Migrating from TBB to oneTBB](https://oneapi-src.github.io/oneTBB/main/tbb_userguide/Migration_Guide.html)
+* [README for the CMake build system](cmake/README.md)
+* [oneTBB Testing Approach](https://oneapi-src.github.io/oneTBB/main/intro/testing_approach.html)
+* [Basic support for the Bazel build system](Bazel.md)
+* [oneTBB Discussions](https://github.com/oneapi-src/oneTBB/discussions)
+* [WASM Support](WASM_Support.md)
+
+## Installation
+See [Installation from Sources](INSTALL.md) to learn how to install oneTBB.
+
+## Governance
+
+The oneTBB project is governed by the UXL Foundation.
+You can get involved in this project in the following ways:
+* Join the [Open Source and Specification Working Group](https://github.com/uxlfoundation/foundation/tree/main?tab=readme-ov-file#working-groups) meetings.
+* Join the mailing lists for the [UXL Foundation](https://lists.uxlfoundation.org/g/main/subgroups) to receive the meeting schedule and the latest updates.
+* Contribute to the oneTBB project or the oneTBB specification. Read [CONTRIBUTING](./CONTRIBUTING.md) for more information.
+
+## Support
+See our [documentation](./SUPPORT.md) to learn how to request help.
+
+## How to Contribute
+We welcome community contributions, so check our [Contributing Guidelines](CONTRIBUTING.md)
+to learn more.
+
+Use GitHub Issues for feature requests, bug reports, and minor inquiries. For broader questions and development-related discussions, use GitHub Discussions.
+
+## License
+oneAPI Threading Building Blocks is licensed under [Apache License, Version 2.0](LICENSE.txt).
+By its terms, contributions submitted to the project are also made under that license.
+
+## Engineering team contacts
+* [Email us.](mailto:inteltbbdevelopers@intel.com)
+
+------------------------------------------------------------------------
+\* All names and brands may be claimed as the property of others.
diff --git a/src/tbb/RELEASE_NOTES.md b/src/tbb/RELEASE_NOTES.md
new file mode 100644
index 000000000..c9b8e9713
--- /dev/null
+++ b/src/tbb/RELEASE_NOTES.md
@@ -0,0 +1,42 @@
+
+
+# Release Notes
+This document describes the changes in oneTBB since the previous release.
+
+## Table of Contents
+- [Known Limitations](#known-limitations)
+- [Fixed Issues](#fixed-issues)
+
+## :rotating_light: Known Limitations
+- The ``oneapi::tbb::info`` namespace interfaces might unexpectedly change the process affinity mask on Windows* OS systems (see https://github.com/open-mpi/hwloc/issues/366 for details) when using a hwloc version lower than 2.5.
+- Using a hwloc version other than 1.11, 2.0, or 2.5 may cause undefined behavior on Windows* OS. See https://github.com/open-mpi/hwloc/issues/477 for details.
+- The NUMA topology may be detected incorrectly on Windows* OS machines where the number of NUMA node threads exceeds the size of one processor group.
+- On Windows* OS on ARM64*, when compiling an application using oneTBB with the Microsoft* Compiler, the compiler issues warning C4324 that a structure was padded due to the alignment specifier. Consider suppressing the warning by passing /wd4324 on the compiler command line.
+- The C++ exception handling mechanism on Windows* OS on ARM64* might corrupt memory if an exception is thrown from any oneTBB parallel algorithm (see the Windows* OS on ARM64* compiler issue: https://developercommunity.visualstudio.com/t/ARM64-incorrect-stack-unwinding-for-alig/1544293).
+- When CPU resource coordination is enabled, tasks from a lower-priority ``task_arena`` might be executed before tasks from a higher-priority ``task_arena``.
+
+> **_NOTE:_** To see known limitations that impact all versions of oneTBB, refer to [oneTBB Documentation](https://oneapi-src.github.io/oneTBB/main/intro/limitations.html).
+
+
+## :hammer: Fixed Issues
+- Fixed ``parallel_for_each`` algorithm behavior for iterators defining the ``iterator_concept`` trait instead of ``iterator_category`` (see the sketch below).
+- Fixed the redefinition issue for ``std::min`` and ``std::max`` on Windows* OS ([GitHub* #832](https://github.com/oneapi-src/oneTBB/issues/832)).
+- Fixed the incorrect binary search order in ``TBBConfig.cmake``.
+- Enabled the oneTBB library search using the pkg-config tool in Conda packages.
+
+## :octocat: Open-source Contributions Integrated
+- Fixed a compiler warning for a missing virtual destructor. Contributed by Elias Engelbert Plank (https://github.com/oneapi-src/oneTBB/pull/1215).
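+
+A minimal sketch of the ``parallel_for_each`` item above (``CountingIterator`` is a hypothetical illustration, not the reproducer from the fix):
+
+```cpp
+#include <oneapi/tbb/parallel_for_each.h>
+
+#include <atomic>
+#include <cstddef>
+#include <iterator>
+
+// Like many C++20 ranges iterators, this one defines iterator_concept
+// but no iterator_category.
+struct CountingIterator {
+    using value_type       = int;
+    using difference_type  = std::ptrdiff_t;
+    using iterator_concept = std::forward_iterator_tag;
+
+    int v = 0;
+
+    int operator*() const { return v; }
+    CountingIterator& operator++() { ++v; return *this; }
+    CountingIterator operator++(int) { auto t = *this; ++v; return t; }
+    bool operator==(const CountingIterator& o) const { return v == o.v; }
+    bool operator!=(const CountingIterator& o) const { return v != o.v; }
+};
+
+int main() {
+    std::atomic<int> sum{0};
+    // With the fix, parallel_for_each also dispatches correctly for
+    // iterators that advertise only iterator_concept.
+    oneapi::tbb::parallel_for_each(CountingIterator{0}, CountingIterator{100},
+                                   [&sum](int i) { sum += i; });
+    return sum == 4950 ? 0 : 1;
+}
+```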
diff --git a/src/tbb/SECURITY.md b/src/tbb/SECURITY.md
new file mode 100644
index 000000000..4926041fc
--- /dev/null
+++ b/src/tbb/SECURITY.md
@@ -0,0 +1,66 @@
+# Security Policy
+As an open-source project, we understand the importance of and responsibility
+for security. This Security Policy outlines our guidelines and procedures to
+ensure the highest level of security and trust for oneTBB users.
+
+## Supported Versions
+Security vulnerabilities are fixed in the [latest version][1]
+and delivered as a patch release. We do not guarantee that security fixes will
+be back-ported to older oneTBB versions.
+
+## Report a Vulnerability
+We are very grateful to the security researchers and users who report
+security vulnerabilities. We investigate every report thoroughly.
+We strongly encourage you to report security vulnerabilities to us privately,
+before disclosing them on public forums or opening a public GitHub* issue.
+
+Report a vulnerability to us in one of two ways:
+* Open a draft **[GitHub* Security Advisory][2]**
+* Send an e-mail to: **security@uxlfoundation.org**.
+Along with the report, provide the following information:
+ * A descriptive title.
+ * Your name and affiliation (if any).
+ * A description of the technical details of the vulnerability.
+ * A minimal example of the vulnerability so we can reproduce your findings.
+ * An explanation of who can exploit this vulnerability, and what they gain
+ by doing so.
+ * Whether this vulnerability is public or known to third parties. If it is,
+ provide details.
+
+### When Should I Report a Vulnerability?
+* You think you discovered a potential security vulnerability in oneTBB.
+* You are unsure how the potential vulnerability affects oneTBB.
+* You think you discovered a vulnerability in another project or third-party
+component on which oneTBB depends. If the issue is not fixed in the third-party
+component, try reporting it there first.
+
+### When Should I NOT Report a Vulnerability?
+* You got an automated scan hit and are unable to provide details.
+* You need help using oneTBB securely.
+* You need help applying security-related updates.
+* Your issue is not security-related.
+
+## Security Reports Review Process
+We aim to respond quickly to your inquiry and coordinate a fix and
+disclosure with you. All confirmed security vulnerabilities will be addressed
+according to severity level and impact on oneTBB. Normally, security issues
+are fixed in the next planned release.
+
+## Disclosure Policy
+We will publish security advisories using the
+[**GitHub Security Advisories feature**][3]
+to keep our community well-informed, and will credit you for your findings
+unless you prefer to stay anonymous. We request that you refrain from
+exploiting the vulnerability or making it public before the official disclosure.
+
+We will disclose vulnerabilities and bugs as soon as possible once
+a mitigation is implemented and available.
+
+## Feedback on This Policy
+If you have any suggestions on how this Policy could be improved, submit
+an issue or a pull request to this repository. **Do not** report
+potential vulnerabilities or security flaws via a pull request.
+
+[1]: https://github.com/oneapi-src/oneTBB/releases/latest
+[2]: https://github.com/oneapi-src/oneTBB/security/advisories/new
+[3]: https://github.com/oneapi-src/oneTBB/security/advisories
diff --git a/src/tbb/SUPPORT.md b/src/tbb/SUPPORT.md
new file mode 100644
index 000000000..47bb60a53
--- /dev/null
+++ b/src/tbb/SUPPORT.md
@@ -0,0 +1,35 @@
+
+
+# oneTBB Support
+
+We are committed to providing support and assistance to help you make the most out of oneTBB.
+Use the following methods if you face any challenges.
+
+## Issues
+
+If you have a problem, check out the [GitHub Issues](https://github.com/oneapi-src/oneTBB/issues) to see if the issue you want to address has already been reported.
+You may find users who have encountered the same bug or have similar ideas for changes or updates.
+
+You can use issues to report a problem, make a feature request, or add comments on an existing issue.
+
+## Discussions
+
+Visit the [GitHub Discussions](https://github.com/oneapi-src/oneTBB/discussions) to engage with the community, ask questions, or help others.
+
+## Email
+
+Reach out to us privately via [email](mailto:inteltbbdevelopers@intel.com).
\ No newline at end of file
diff --git a/src/tbb/SYSTEM_REQUIREMENTS.md b/src/tbb/SYSTEM_REQUIREMENTS.md
new file mode 100644
index 000000000..7f9d81616
--- /dev/null
+++ b/src/tbb/SYSTEM_REQUIREMENTS.md
@@ -0,0 +1,86 @@
+
+
+# System Requirements
+This document provides details about the hardware, operating system, and software prerequisites for oneAPI Threading Building Blocks (oneTBB).
+
+## Table of Contents
+- [Supported Hardware](#supported-hardware)
+- [Software](#software)
+  - [Supported Operating Systems](#supported-operating-systems)
+  - [Community-Supported Platforms](#community-supported-platforms)
+  - [Supported Compilers](#supported-compilers)
+- [Limitations](#limitations)
+
+
+## Supported Hardware
+- Intel(R) Celeron(R) processor family
+- Intel(R) Core* processor family
+- Intel(R) Xeon(R) processor family
+- Intel(R) Atom* processor family
+- Non-Intel(R) processors compatible with the processors listed above
+
+
+## Software
+
+### Supported Operating Systems
+- Systems with Microsoft* Windows* operating systems:
+  - Microsoft* Windows* 10
+  - Microsoft* Windows* 11
+  - Microsoft* Windows* Server 2019
+  - Microsoft* Windows* Server 2022
+- Systems with Linux* operating systems:
+  - Oracle Linux* 8
+  - Amazon* Linux 2, 2022
+  - Debian* 9, 10, 11
+  - Fedora* 36, 37, 38
+  - Rocky* Linux* 8, 9
+  - Red Hat* Enterprise Linux* 8, 9
+  - SuSE* Linux* Enterprise Server 15
+  - Ubuntu* 20.04, 22.04
+- Systems with macOS* operating systems:
+  - macOS* 12.x, 13.x
+- Systems with Android* operating systems:
+  - Android* 9
+
+### Community-Supported Platforms
+- MinGW*
+- FreeBSD*
+- Microsoft* Windows* on ARM*/ARM64*
+- macOS* on ARM64*
+
+### Supported Compilers
+- Intel* oneAPI DPC++/C++ Compiler
+- Intel(R) C++ Compiler Classic 2021.1 - 2021.9
+- Microsoft* Visual C++ 14.2 (Microsoft* Visual Studio* 2019, Windows* OS only)
+- Microsoft* Visual C++ 14.3 (Microsoft* Visual Studio* 2022, Windows* OS only)
+- For each supported Linux* operating system, the standard gcc version provided with that operating system is supported:
+  - GNU Compilers (gcc) 8.x - 12.x
+  - GNU C Library (glibc) version 2.28 - 2.36
+  - Clang* 6.0.0 - 13.0.0
+
+## Limitations
+There are some cases where we cannot provide support for your platform. These include:
+
+1. The platform is out of official support (it has reached end of life).
When you use an unsupported platform, you can face a security risk that can be difficult to resolve.
+2. We do not have the infrastructure to test a platform. Therefore, we cannot guarantee that oneTBB works correctly on that platform.
+3. Supporting the platform would require changes to more code than just platform-specific macros.
+4. The platform is incompatible with oneTBB. Some platforms may have limitations that prevent oneTBB from working correctly. We cannot provide support in these cases as the issue is beyond our control.
+5. The platform is modified or customized. If you made significant updates to your platform, it might be hard for us to find the root cause of the issue. Therefore, we may not be able to provide support, as the modification could affect oneTBB functionality.
+
+
+We understand that these limitations can be frustrating. Thus, we suggest creating a branch specifically for the unsupported platform, allowing other users to contribute to or use your implementation.
+
diff --git a/src/tbb/WASM_Support.md b/src/tbb/WASM_Support.md
new file mode 100644
index 000000000..6306620d7
--- /dev/null
+++ b/src/tbb/WASM_Support.md
@@ -0,0 +1,81 @@
+
+
+# WASM Support
+
+oneTBB offers robust support for ``WASM`` (see the ``Limitations`` section below).
+
+``WASM`` stands for WebAssembly, a low-level binary format for executing code in web browsers.
+It is designed to be a portable target for compilers and efficient to parse and execute.
+
+Using oneTBB with WASM, you can take full advantage of parallelism and concurrency while working on web-based applications, interactive websites, and a variety of other WASM-compatible platforms.
+
+oneTBB offers WASM support through integration with [Emscripten*](https://emscripten.org/docs/introducing_emscripten/index.html), a powerful toolchain for compiling C and C++ code into WASM-compatible runtimes.
+
+## Build
+
+**Prerequisites:** Download and install Emscripten*. See the [instructions](https://emscripten.org/docs/getting_started/downloads.html).
+
+To build the system, run:
+
+```
+mkdir build && cd build
+emcmake cmake .. -DCMAKE_CXX_COMPILER=em++ -DCMAKE_C_COMPILER=emcc -DTBB_STRICT=OFF -DCMAKE_CXX_FLAGS=-Wno-unused-command-line-argument -DTBB_DISABLE_HWLOC_AUTOMATIC_SEARCH=ON -DBUILD_SHARED_LIBS=ON -DTBB_EXAMPLES=ON -DTBB_TEST=ON
+```
+To compile oneTBB without ``pthreads``, set the flag ``-DEMSCRIPTEN_WITHOUT_PTHREAD=true`` in the command above. By default, oneTBB uses ``pthreads``.
+```
+cmake --build .
+cmake --install .
+```
+Where:
+
+* ``emcmake`` - a tool that sets up the environment for Emscripten*.
+* ``-DCMAKE_CXX_COMPILER=em++`` - specifies the C++ compiler as the Emscripten* C++ compiler.
+* ``-DCMAKE_C_COMPILER=emcc`` - specifies the C compiler as the Emscripten* C compiler.
+
+
+> **_NOTE:_** See the [CMake documentation](https://github.com/oneapi-src/oneTBB/blob/master/cmake/README.md) to learn about other options.
+
+
+## Run Test
+
+To run tests, use:
+
+```
+ctest
+```
+
+# Limitations
+
+You can successfully build your application with oneTBB using WASM, but you may not achieve optimal performance immediately. This is due to a limitation with nested Web Workers: a Web Worker cannot schedule another worker without help from a browser thread. This can lead to unexpected performance outcomes, such as the application running serially.
+Find more information in the [issue](https://github.com/emscripten-core/emscripten/discussions/21963) in the Emscripten repository.
+To work around this issue, try one of the following:
+1. 
**Recommended Solution: Use the ``-sPROXY_TO_PTHREAD`` Flag**. +This flag splits the initial thread into a browser thread and a main thread (proxied by a Web Worker), effectively resolving the issue as the browser thread is always present in the event loop and can participate in Web Workers scheduling. Refer to the [Emscripten documentation](https://emscripten.org/docs/porting/pthreads.html) for more details about ``-sPROXY_TO_PTHREAD`` since using this flag may require refactoring the code. +2. **Alternative Solution: Warm Up the oneTBB Thread Pool** +Initialize the oneTBB thread pool before making the first call to oneTBB. This approach forces the browser thread to participate in Web Workers scheduling. +```cpp + int num_threads = tbb::this_task_arena::max_concurrency(); + std::atomic barrier{num_threads}; + tbb::parallel_for(0, num_threads, [&barrier] (int) { + barrier--; + while (barrier > 0) { + // Send browser thread to event loop + std::this_thread::yield(); + } + }, tbb::static_partitioner{}); +``` +> **_NOTE:_** Be aware that it might cause delays on the browser side. diff --git a/src/tbb/build/.gitignore b/src/tbb/build/.gitignore deleted file mode 100644 index 53dc09fc1..000000000 --- a/src/tbb/build/.gitignore +++ /dev/null @@ -1 +0,0 @@ -lib_*/ diff --git a/src/tbb/build/AIX.gcc.inc b/src/tbb/build/AIX.gcc.inc deleted file mode 100644 index 65d99821a..000000000 --- a/src/tbb/build/AIX.gcc.inc +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -DYLIB_KEY = -shared -LIBDL = -ldl - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -LIBS = -lpthread -ldl -C_FLAGS = $(CPLUS_FLAGS) -x c - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -DUSE_PTHREAD -pthread -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -pthread -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (powerpc,$(arch)) - CPLUS_FLAGS += -maix64 -Wl,-G - LIB_LINK_FLAGS += -maix64 -Wl,-b64 -Wl,-brtl -Wl,-G -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ - -ASSEMBLY_SOURCE=ibm_aix51 -ifeq (powerpc,$(arch)) - TBB_ASM.OBJ = atomic_support.o -endif - -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/AIX.inc b/src/tbb/build/AIX.inc deleted file mode 100644 index 536fc1ef2..000000000 --- a/src/tbb/build/AIX.inc +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -ifndef arch - arch:=$(shell uname -p) - export arch -endif - -ifndef runtime - gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_aix.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LIBPATH - export LIBPATH := .:$(LIBPATH) -else - export LIBPATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so - -TBB.LST = -TBB.DEF = -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -LINK_MALLOC.LIB = $(MALLOC.LIB) - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) diff --git a/src/tbb/build/FreeBSD.gcc.inc b/src/tbb/build/FreeBSD.gcc.inc deleted file mode 100644 index 64b7b1bab..000000000 --- a/src/tbb/build/FreeBSD.gcc.inc +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -DYLIB_KEY = -shared - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -LIBS = -lpthread -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -g -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must on IA-64 architecture, even for regular (not shared) executables - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM=as - TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o - MALLOC_ASM.OBJ = atomic_support.o lock_byte.o pause.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/FreeBSD.inc b/src/tbb/build/FreeBSD.inc deleted file mode 100644 index be5030db8..000000000 --- a/src/tbb/build/FreeBSD.inc +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -ifndef arch - ifeq ($(shell uname -m),i386) - export arch:=ia32 - endif - ifeq ($(shell uname -m),ia64) - export arch:=ia64 - endif - ifeq ($(shell uname -m),amd64) - export arch:=intel64 - endif -endif - -ifndef runtime - gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -LIBEXT=so - -TBB.LST = -TBB.DEF = -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -LINK_MALLOC.LIB = $(MALLOC.LIB) - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) diff --git a/src/tbb/build/Makefile.rml b/src/tbb/build/Makefile.rml deleted file mode 100644 index 6a76fd27b..000000000 --- a/src/tbb/build/Makefile.rml +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -tbb_root ?= $(TBBROOT) -BUILDING_PHASE=1 -TEST_RESOURCE = $(RML.RES) -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,_$(cfg)) - -ifeq (android,$(target)) -$(error "RML is not supported on Android") -endif - -# default target -default_rml: rml rml_test - -RML_ROOT ?= $(tbb_root)/src/rml -RML_SERVER_ROOT = $(RML_ROOT)/server - -VPATH = $(tbb_root)/src/tbb $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) -VPATH += $(RML_ROOT)/server $(RML_ROOT)/client $(RML_ROOT)/test $(tbb_root)/src/test - -include $(tbb_root)/build/common_rules.inc - -#-------------------------------------------------------------------------- -# Define rules for making the RML server shared library and client objects. -#-------------------------------------------------------------------------- - -# Object files that make up RML server -RML_SERVER.OBJ = rml_server.$(OBJ) - -# Object files that RML clients need -RML_TBB_CLIENT.OBJ ?= rml_tbb.$(OBJ) dynamic_link_rml.$(OBJ) -RML_OMP_CLIENT.OBJ ?= rml_omp.$(OBJ) omp_dynamic_link.$(OBJ) - -RML.OBJ = $(RML_SERVER.OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) -ifeq (windows,$(tbb_os)) -RML_ASM.OBJ = $(if $(findstring intel64,$(arch)),$(TBB_ASM.OBJ)) -endif -ifeq (linux,$(tbb_os)) -RML_ASM.OBJ = $(if $(findstring ia64,$(arch)),$(TBB_ASM.OBJ)) -endif - -RML_TBB_DEP= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ) concurrent_vector_rml.$(OBJ) semaphore_rml.$(OBJ) tbb_misc_rml.$(OBJ) tbb_misc_ex_rml.$(OBJ) -TBB_DEP_NON_RML_TEST?= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ) $(RML_ASM.OBJ) tbb_misc_rml.$(OBJ) tbb_misc_ex_rml.$(OBJ) -ifeq ($(cfg),debug) -RML_TBB_DEP+= spin_mutex_rml.$(OBJ) -TBB_DEP_RML_TEST?= $(RML_ASM.OBJ) tbb_misc_rml.$(OBJ) -else -TBB_DEP_RML_TEST?= $(RML_ASM.OBJ) -endif -LIBS += $(LIBDL) - -INCLUDES += $(INCLUDE_KEY)$(RML_ROOT)/include $(INCLUDE_KEY). 
-T_INCLUDES = $(INCLUDES) $(INCLUDE_KEY)$(tbb_root)/src/test $(INCLUDE_KEY)$(RML_SERVER_ROOT) - -ifeq ($(rml_wcrm),1) -CPLUS_FLAGS+=/DRML_USE_WCRM -endif - -# Suppress superfluous warnings for RML compilation -R_CPLUS_FLAGS = $(subst DO_ITT_NOTIFY,DO_ITT_NOTIFY=0,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS) \ - $(DEFINE_KEY)TBB_USE_THREADING_TOOLS=0 $(DEFINE_KEY)__TBB_RML_STATIC=1 $(DEFINE_KEY)__TBB_NO_IMPLICIT_LINKAGE=1 - -%.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(R_CPLUS_FLAGS) $(PIC_KEY) $(INCLUDES) $< - -ifeq (linux,$(tbb_os)) -omp_dynamic_link.$(OBJ): CPLUS_FLAGS+=-fno-exceptions -endif - -tbb_misc_rml.$(OBJ): version_string.ver - -RML_TEST.OBJ = test_job_automaton.$(OBJ) test_thread_monitor.$(OBJ) test_rml_tbb.$(OBJ) test_rml_omp.$(OBJ) test_rml_mixed.$(OBJ) - -$(RML_TBB_DEP): %_rml.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(R_CPLUS_FLAGS) $(PIC_KEY) $(INCLUDES) $< - -$(RML_TEST.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(R_CPLUS_FLAGS) $(PIC_KEY) $(T_INCLUDES) $< - -ifneq (,$(RML.DEF)) -rml.def: $(RML.DEF) - $(CPLUS) $(PREPROC_ONLY) $< $(CPLUS_FLAGS) $(INCLUDES) > $@ - -LIB_LINK_FLAGS += $(EXPORT_KEY)rml.def -$(RML.DLL): rml.def -endif - -$(RML.DLL): BUILDING_LIBRARY = $(RML.DLL) -$(RML.DLL): $(RML_TBB_DEP) $(RML_SERVER.OBJ) $(RML.RES) $(RML_NO_VERSION.DLL) $(RML_ASM.OBJ) - $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(RML.DLL) $(RML_SERVER.OBJ) $(RML_TBB_DEP) $(RML_ASM.OBJ) $(RML.RES) $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS) - -ifneq (,$(RML_NO_VERSION.DLL)) -$(RML_NO_VERSION.DLL): - echo "INPUT ($(RML.DLL))" > $(RML_NO_VERSION.DLL) -endif - -rml: $(RML.DLL) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) - -#------------------------------------------------------ -# End of rules for making the RML server shared library -#------------------------------------------------------ - -#------------------------------------------------------ -# Define rules for making the RML unit tests -#------------------------------------------------------ - -add_debug=$(basename $(1))_debug$(suffix $(1)) -cross_suffix=$(if $(crosstest),$(if $(DEBUG_SUFFIX),$(subst _debug,,$(1)),$(call add_debug,$(1))),$(1)) - -RML_TESTS = test_job_automaton.$(TEST_EXT) test_thread_monitor.$(TEST_EXT) -RML_CUSTOM_TESTS = test_rml_tbb.$(TEST_EXT) test_rml_omp.$(TEST_EXT) test_rml_mixed.$(TEST_EXT) test_rml_omp_c_linkage.$(TEST_EXT) - -test_rml_tbb.$(TEST_EXT): test_rml_tbb.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(TBB_DEP_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_tbb.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(TBB_DEP_RML_TEST) $(LIBS) $(LINK_FLAGS) - -test_rml_omp.$(TEST_EXT): test_rml_omp.$(OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_NON_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_omp.$(OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_NON_RML_TEST) $(LIBS) $(LINK_FLAGS) - -test_rml_mixed.$(TEST_EXT): test_rml_mixed.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_mixed.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_RML_TEST) $(LIBS) $(LINK_FLAGS) - -rml_omp_stub.$(OBJ): rml_omp_stub.cpp - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(WARNING_SUPPRESS) $(T_INCLUDES) $(PIC_KEY) $< - -test_rml_omp_c_linkage.$(TEST_EXT): test_rml_omp_c_linkage.$(OBJ) rml_omp_stub.$(OBJ) omp_dynamic_link.$(OBJ) - $(CONLY) $(C_FLAGS) $(OUTPUT_KEY)$@ test_rml_omp_c_linkage.$(OBJ) rml_omp_stub.$(OBJ) omp_dynamic_link.$(OBJ) $(LIBS) $(LINK_FLAGS) - -$(RML_TESTS): %.$(TEST_EXT): %.$(OBJ) $(TBB_DEP_NON_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< 
$(TBB_DEP_NON_RML_TEST) $(LIBS) $(LINK_FLAGS)
-
-### run_cmd is usually empty
-rml_test: $(call cross_suffix,$(RML.DLL)) $(TEST_PREREQUISITE) $(RML_TESTS) $(RML_CUSTOM_TESTS)
-	$(run_cmd) ./test_job_automaton.$(TEST_EXT) $(args)
-	$(run_cmd) ./test_thread_monitor.$(TEST_EXT) $(args)
-	$(run_cmd) ./test_rml_tbb.$(TEST_EXT) $(args)
-	$(run_cmd) ./test_rml_omp.$(TEST_EXT) $(args)
-	$(run_cmd) ./test_rml_mixed.$(TEST_EXT) $(args)
-	$(run_cmd) ./test_rml_omp_c_linkage.$(TEST_EXT) $(args)
-
-#------------------------------------------------------
-# End of rules for making the RML unit tests
-#------------------------------------------------------
-
-# Include automatically generated dependences
--include *.d
diff --git a/src/tbb/build/Makefile.tbb b/src/tbb/build/Makefile.tbb
deleted file mode 100644
index 2bb0d719a..000000000
--- a/src/tbb/build/Makefile.tbb
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-#
-# This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-# you can redistribute it and/or modify it under the terms of the GNU General Public License
-# version 2 as published by the Free Software Foundation. Threading Building Blocks is
-# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details. You should have received a copy of
-# the GNU General Public License along with Threading Building Blocks; if not, write to the
-# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-# As a special exception, you may use this file as part of a free software library without
-# restriction. Specifically, if other files instantiate templates or use macros or inline
-# functions from this file, or you compile this file and link it with other files to produce
-# an executable, this file does not by itself cause the resulting executable to be covered
-# by the GNU General Public License. This exception does not however invalidate any other
-# reasons why the executable file might be covered by the GNU General Public License.
-
-#------------------------------------------------------------------------------
-# Define rules for making the TBB shared library.
-#------------------------------------------------------------------------------ - -tbb_root ?= "$(TBBROOT)" -BUILDING_PHASE=1 -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,_$(cfg)) - -#------------------------------------------------------------ -# Define static pattern rules dealing with .cpp source files -#------------------------------------------------------------ -$(warning CONFIG: cfg=$(cfg) arch=$(arch) compiler=$(compiler) target=$(target) runtime=$(runtime)) - -default_tbb: $(TBB.DLL) -.PHONY: default_tbb tbbvars clean -.PRECIOUS: %.$(OBJ) - -VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/old $(tbb_root)/src/rml/client - -CPLUS_FLAGS += $(PIC_KEY) $(DEFINE_KEY)__TBB_BUILD=1 - -# suppress warnings for build of itt_notify by GCC3 -ifneq (,$(findstring gcc_cc3., $(compiler)_$(runtime))) -KNOWN_WARNINGS += itt_notify.$(OBJ) -endif - -# Object files (that were compiled from C++ code) that gmake up TBB -TBB_CPLUS.OBJ = concurrent_hash_map.$(OBJ) \ - concurrent_queue.$(OBJ) \ - concurrent_vector.$(OBJ) \ - dynamic_link.$(OBJ) \ - itt_notify.$(OBJ) \ - cache_aligned_allocator.$(OBJ) \ - pipeline.$(OBJ) \ - queuing_mutex.$(OBJ) \ - queuing_rw_mutex.$(OBJ) \ - reader_writer_lock.$(OBJ) \ - spin_rw_mutex.$(OBJ) \ - x86_rtm_rw_mutex.$(OBJ) \ - spin_mutex.$(OBJ) \ - critical_section.$(OBJ) \ - mutex.$(OBJ) \ - recursive_mutex.$(OBJ) \ - condition_variable.$(OBJ) \ - tbb_thread.$(OBJ) \ - concurrent_monitor.$(OBJ) \ - semaphore.$(OBJ) \ - private_server.$(OBJ) \ - rml_tbb.$(OBJ) \ - tbb_misc.$(OBJ) \ - tbb_misc_ex.$(OBJ) \ - task.$(OBJ) \ - task_group_context.$(OBJ) \ - governor.$(OBJ) \ - market.$(OBJ) \ - arena.$(OBJ) \ - scheduler.$(OBJ) \ - observer_proxy.$(OBJ) \ - tbb_statistics.$(OBJ) \ - tbb_main.$(OBJ) - -# OLD/Legacy object files for backward binary compatibility -ifeq (,$(findstring $(DEFINE_KEY)TBB_NO_LEGACY,$(CXXFLAGS))) -TBB_CPLUS_OLD.OBJ = \ - concurrent_vector_v2.$(OBJ) \ - concurrent_queue_v2.$(OBJ) \ - spin_rw_mutex_v2.$(OBJ) \ - task_v2.$(OBJ) -endif - -# Object files that gmake up TBB (TBB_ASM.OBJ is platform-specific) -TBB.OBJ = $(TBB_CPLUS.OBJ) $(TBB_CPLUS_OLD.OBJ) $(TBB_ASM.OBJ) - -# Suppress superfluous warnings for TBB compilation -WARNING_KEY += $(WARNING_SUPPRESS) - -include $(tbb_root)/build/common_rules.inc - -ifneq (,$(TBB.DEF)) -tbb.def: $(TBB.DEF) $(TBB.LST) - $(CPLUS) $(PREPROC_ONLY) $< $(CPLUS_FLAGS) $(INCLUDES) > $@ - -LIB_LINK_FLAGS += $(EXPORT_KEY)tbb.def -$(TBB.DLL): tbb.def -endif - -tbbvars.sh: - $(MAKE_TBBVARS) - -$(TBB.DLL): BUILDING_LIBRARY = $(TBB.DLL) -$(TBB.DLL): $(TBB.OBJ) $(TBB.RES) tbbvars.sh $(TBB_NO_VERSION.DLL) - $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(TBB.DLL) $(TBB.OBJ) $(TBB.RES) $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS) - -ifneq (,$(TBB_NO_VERSION.DLL)) -$(TBB_NO_VERSION.DLL): - echo "INPUT ($(TBB.DLL))" > $(TBB_NO_VERSION.DLL) -endif - -#clean: -# $(RM) *.$(OBJ) *.$(DLL) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d core core.*[0-9][0-9] *.ver - -# Include automatically generated dependences --include *.d diff --git a/src/tbb/build/Makefile.tbbmalloc b/src/tbb/build/Makefile.tbbmalloc deleted file mode 100644 index 86dd761d5..000000000 --- a/src/tbb/build/Makefile.tbbmalloc +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# default target -default_malloc: malloc malloc_test - -tbb_root ?= $(TBBROOT) -BUILDING_PHASE=1 -TEST_RESOURCE = $(MALLOC.RES) -TESTFILE=tbbmalloc -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,$(call cross_cfg,_$(cfg))) - -MALLOC_ROOT ?= $(tbb_root)/src/tbbmalloc -MALLOC_SOURCE_ROOT ?= $(MALLOC_ROOT) - -VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/test -VPATH += $(MALLOC_ROOT) $(MALLOC_SOURCE_ROOT) - -CPLUS_FLAGS += $(if $(crosstest),$(DEFINE_KEY)__TBBMALLOC_NO_IMPLICIT_LINKAGE=1) - -TEST_SUFFIXES=proxy -TEST_PREREQUISITE+=$(MALLOC.LIB) -LINK_FILES+=$(LINK_MALLOC.LIB) -include $(tbb_root)/build/common_rules.inc - -ORIG_CPLUS_FLAGS:=$(CPLUS_FLAGS) -ORIG_INCLUDES:=$(INCLUDES) -ORIG_LINK_MALLOC.LIB:=$(LINK_MALLOC.LIB) - -#------------------------------------------------------ -# Define rules for making the TBBMalloc shared library. -#------------------------------------------------------ - -# Object files that make up TBBMalloc -MALLOC_CPLUS.OBJ = backend.$(OBJ) large_objects.$(OBJ) backref.$(OBJ) tbbmalloc.$(OBJ) -MALLOC.OBJ := $(MALLOC_CPLUS.OBJ) $(MALLOC_ASM.OBJ) itt_notify_malloc.$(OBJ) frontend.$(OBJ) -PROXY.OBJ := proxy.$(OBJ) tbb_function_replacement.$(OBJ) -M_CPLUS_FLAGS := $(subst $(WARNING_KEY),,$(M_CPLUS_FLAGS)) $(DEFINE_KEY)__TBBMALLOC_BUILD=1 -M_INCLUDES := $(INCLUDES) $(INCLUDE_KEY)$(MALLOC_ROOT) $(INCLUDE_KEY)$(MALLOC_SOURCE_ROOT) - -# Suppress superfluous warnings for TBBmalloc compilation -$(MALLOC.OBJ): M_CPLUS_FLAGS += $(WARNING_SUPPRESS) - -frontend.$(OBJ): frontend.cpp version_string.ver - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(M_INCLUDES) $(INCLUDE_KEY). 
$< - -$(PROXY.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(PIC_KEY) $(DEFINE_KEY)__TBBMALLOC_BUILD=1 $(M_INCLUDES) $< - -$(MALLOC_CPLUS.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(M_INCLUDES) $< - -itt_notify_malloc.$(OBJ): itt_notify.cpp - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(OUTPUTOBJ_KEY)$@ $(INCLUDES) $< - -MALLOC_LINK_FLAGS = $(LIB_LINK_FLAGS) -PROXY_LINK_FLAGS = $(LIB_LINK_FLAGS) - -ifneq (,$(MALLOC.DEF)) -tbbmalloc.def: $(MALLOC.DEF) - $(CPLUS) $(PREPROC_ONLY) $< $(M_CPLUS_FLAGS) $(INCLUDES) > $@ - -MALLOC_LINK_FLAGS += $(EXPORT_KEY)tbbmalloc.def -$(MALLOC.DLL): tbbmalloc.def -endif - -$(MALLOC.DLL): BUILDING_LIBRARY = $(MALLOC.DLL) -$(MALLOC.DLL): $(MALLOC.OBJ) $(MALLOC.RES) $(MALLOC_NO_VERSION.DLL) - $(subst $(CPLUS),$(CONLY),$(LIB_LINK_CMD)) $(LIB_OUTPUT_KEY)$(MALLOC.DLL) $(MALLOC.OBJ) $(MALLOC.RES) $(LIB_LINK_LIBS) $(MALLOC_LINK_FLAGS) - -ifneq (,$(MALLOCPROXY.DEF)) -tbbmallocproxy.def: $(MALLOCPROXY.DEF) - $(CPLUS) $(PREPROC_ONLY) $< $(CPLUS_FLAGS) $(INCLUDES) > $@ - -PROXY_LINK_FLAGS += $(EXPORT_KEY)tbbmallocproxy.def -$(MALLOCPROXY.DLL): tbbmallocproxy.def -endif - -ifneq (,$(MALLOCPROXY.DLL)) -$(MALLOCPROXY.DLL): BUILDING_LIBRARY = $(MALLOCPROXY.DLL) -$(MALLOCPROXY.DLL): $(PROXY.OBJ) $(MALLOCPROXY_NO_VERSION.DLL) $(MALLOC.DLL) $(MALLOC.RES) - $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(MALLOCPROXY.DLL) $(PROXY.OBJ) $(MALLOC.RES) $(LIB_LINK_LIBS) $(LINK_MALLOC.LIB) $(PROXY_LINK_FLAGS) -endif - -ifneq (,$(MALLOC_NO_VERSION.DLL)) -$(MALLOC_NO_VERSION.DLL): - echo "INPUT ($(MALLOC.DLL))" > $(MALLOC_NO_VERSION.DLL) -endif - -ifneq (,$(MALLOCPROXY_NO_VERSION.DLL)) -$(MALLOCPROXY_NO_VERSION.DLL): - echo "INPUT ($(MALLOCPROXY.DLL))" > $(MALLOCPROXY_NO_VERSION.DLL) -endif - -malloc: $(MALLOC.DLL) $(MALLOCPROXY.DLL) - -malloc_dll: $(MALLOC.DLL) - -malloc_proxy_dll: $(MALLOCPROXY.DLL) - -.PHONY: malloc malloc_dll malloc_proxy_dll - -#------------------------------------------------------ -# End of rules for making the TBBMalloc shared library -#------------------------------------------------------ - -#------------------------------------------------------ -# Define rules for making the TBBMalloc unit tests -#------------------------------------------------------ - -# --------- The list of TBBMalloc unit tests ---------- -MALLOC_TESTS = test_ScalableAllocator.$(TEST_EXT) \ - test_ScalableAllocator_STL.$(TEST_EXT) \ - test_malloc_compliance.$(TEST_EXT) \ - test_malloc_regression.$(TEST_EXT) \ - test_malloc_init_shutdown.$(TEST_EXT) \ - test_malloc_pools.$(TEST_EXT) \ - test_malloc_pure_c.$(TEST_EXT) \ - test_malloc_whitebox.$(TEST_EXT) \ - test_malloc_used_by_lib.$(TEST_EXT) \ - test_malloc_lib_unload.$(TEST_EXT) -ifneq (,$(MALLOCPROXY.DLL)) -MALLOC_TESTS += test_malloc_overload.$(TEST_EXT) \ - test_malloc_overload_proxy.$(TEST_EXT) \ - test_malloc_atexit.$(TEST_EXT) -endif -# ----------------------------------------------------- -# ------------ Set test specific variables ------------ -ifeq (windows.gcc,$(tbb_os).$(compiler)) -test_malloc_overload.$(TEST_EXT): LIBS += $(MALLOCPROXY.LIB) -endif - -MALLOC_M_CPLUS_TESTS = test_malloc_whitebox.$(TEST_EXT) test_malloc_lib_unload.$(TEST_EXT) \ - test_malloc_used_by_lib.$(TEST_EXT) -MALLOC_NO_LIB_TESTS = test_malloc_whitebox.$(TEST_EXT) test_malloc_lib_unload.$(TEST_EXT) \ - test_malloc_used_by_lib.$(TEST_EXT) test_malloc_overload.$(TEST_EXT) -MALLOC_LINK_PROXY_TESTS = test_malloc_overload_proxy.$(TEST_EXT) -MALLOC_ADD_DLL_TESTS = test_malloc_lib_unload.$(TEST_EXT) 
test_malloc_used_by_lib.$(TEST_EXT) \
-                 test_malloc_atexit.$(TEST_EXT)
-
-$(MALLOC_M_CPLUS_TESTS): CPLUS_FLAGS=$(M_CPLUS_FLAGS)
-$(MALLOC_M_CPLUS_TESTS): INCLUDES=$(M_INCLUDES)
-$(MALLOC_NO_LIB_TESTS): LINK_MALLOC.LIB=
-$(MALLOC_NO_LIB_TESTS): LINK_FLAGS+=$(LIBDL)
-$(MALLOC_LINK_PROXY_TESTS): LINK_MALLOC.LIB=$(LINK_MALLOCPROXY.LIB)
-$(MALLOC_ADD_DLL_TESTS): %.$(TEST_EXT): %_dll.$(DLL)
-$(MALLOC_ADD_DLL_TESTS): TEST_LIBS+=$(@:.$(TEST_EXT)=_dll.$(LIBEXT))
-
-test_malloc_over%.$(TEST_EXT): CPLUS_FLAGS=$(subst /MT,/MD,$(M_CPLUS_FLAGS))
-test_malloc_over%.$(TEST_EXT): INCLUDES=$(M_INCLUDES)
-test_malloc_overload_proxy.$(TEST_EXT): LINK_FLAGS+=$(LIBDL)
-
-test_malloc_atexit_dll.$(DLL): CPLUS_FLAGS=$(subst /MT,/MD,$(M_CPLUS_FLAGS))
-test_malloc_atexit.$(TEST_EXT): CPLUS_FLAGS=$(subst /MT,/MD,$(M_CPLUS_FLAGS))
-test_malloc_atexit.$(TEST_EXT): LINK_FLAGS+=$(LIBDL)
-# On Ubuntu 11.10 the linker is invoked with --as-needed, so the dependency on
-# libtbbmalloc_proxy is not recorded, and malloc overloading via linking with
-# -ltbbmalloc_proxy does not work. Work around this with --no-as-needed.
-ifeq (linux.gcc,$(tbb_os).$(compiler))
-test_malloc_atexit.$(TEST_EXT): MALLOCPROXY.LIB := -Wl,--no-as-needed $(MALLOCPROXY.LIB)
-endif
-# The test isn't added to MALLOC_LINK_PROXY_TESTS because it needs both the
-# tbbmalloc and proxy libraries. On platforms other than Android it would be
-# enough to modify LINK_MALLOC.LIB for the TEST_EXT target only, but under
-# Android the DLL and TEST_EXT builds can be requested independently, so
-# setting LINK_MALLOC.LIB only in the TEST_EXT build rule cannot also affect
-# the DLL build.
-test_malloc_atexit.$(TEST_EXT): LINK_MALLOC.LIB := $(LINK_MALLOC.LIB) $(LINK_MALLOCPROXY.LIB)
-test_malloc_atexit_dll.$(DLL): LINK_MALLOC.LIB := $(LINK_MALLOC.LIB) $(LINK_MALLOCPROXY.LIB)
-
-test_malloc_whitebox.$(TEST_EXT): $(MALLOC_ASM.OBJ) version_string.ver
-test_malloc_whitebox.$(TEST_EXT): INCLUDES+=$(INCLUDE_KEY).
-test_malloc_whitebox.$(TEST_EXT): LINK_FILES+=$(MALLOC_ASM.OBJ) - -# Some _dll targets need to restore variables since they are changed by parent -# target-specific rule of its .exe targets -test_malloc_lib_unload_dll.$(DLL): CPLUS_FLAGS=$(ORIG_CPLUS_FLAGS) -test_malloc_lib_unload_dll.$(DLL): INCLUDES=$(ORIG_INCLUDES) - -test_malloc_used_by_lib_dll.$(DLL): CPLUS_FLAGS=$(subst /MT,/LD,$(M_CPLUS_FLAGS)) -test_malloc_used_by_lib_dll.$(DLL): LINK_FILES+=$(ORIG_LINK_MALLOC.LIB) -test_malloc_used_by_lib_dll.$(DLL): LIBDL= -# ----------------------------------------------------- -# ---- The list of TBBMalloc test running commands ---- -# run_cmd is usually empty -malloc_test: $(MALLOC.DLL) malloc_test_no_depends - -malloc_test_no_depends: $(TEST_PREREQUISITE) $(MALLOC_TESTS) - $(run_cmd) ./test_malloc_pools.$(TEST_EXT) $(args) 1:4 -ifneq (,$(MALLOCPROXY.DLL)) - $(run_cmd) ./test_malloc_atexit.$(TEST_EXT) $(args) - $(run_cmd) $(TEST_LAUNCHER) -l $(MALLOCPROXY.DLL) ./test_malloc_overload.$(TEST_EXT) $(args) - $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_overload_proxy.$(TEST_EXT) $(args) -endif - $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_lib_unload.$(TEST_EXT) $(args) - $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_used_by_lib.$(TEST_EXT) - $(run_cmd) ./test_malloc_whitebox.$(TEST_EXT) $(args) 1:4 - $(run_cmd) $(TEST_LAUNCHER) -u ./test_malloc_compliance.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_ScalableAllocator.$(TEST_EXT) $(args) - $(run_cmd) ./test_ScalableAllocator_STL.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_regression.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_init_shutdown.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_pure_c.$(TEST_EXT) $(args) -# ----------------------------------------------------- - -#------------------------------------------------------ -# End of rules for making the TBBMalloc unit tests -#------------------------------------------------------ - -# Include automatically generated dependences --include *.d diff --git a/src/tbb/build/Makefile.tbbproxy b/src/tbb/build/Makefile.tbbproxy deleted file mode 100644 index 9d09a845e..000000000 --- a/src/tbb/build/Makefile.tbbproxy +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
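
The `add_debug` and `cross_suffix` helpers, which the RML makefile earlier in this diff and the tbbproxy makefile below both define, are ordinary GNU make `$(call)` functions. A standalone sketch of what `add_debug` computes (the `demo` target and file names are illustrative assumptions, not part of the deleted files):

# Splices "_debug" between a file's stem and its extension.
add_debug = $(basename $(1))_debug$(suffix $(1))

demo:
	@echo '$(call add_debug,libtbb.so)'
	@echo '$(call add_debug,test_rml_tbb.exe)'

# $(basename libtbb.so) is "libtbb" and $(suffix libtbb.so) is ".so",
# so `make demo` prints "libtbb_debug.so" and "test_rml_tbb_debug.exe".
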
-
-# default target
-default_tbbproxy: tbbproxy tbbproxy_test
-
-tbb_root ?= $(TBBROOT)
-BUILDING_PHASE=1
-include $(tbb_root)/build/common.inc
-DEBUG_SUFFIX=$(findstring _debug,_$(cfg))
-
-PROXY_ROOT ?= $(tbb_root)/src/tbbproxy
-PROXY_SOURCE_ROOT ?= $(PROXY_ROOT)
-
-VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/test
-VPATH += $(PROXY_ROOT) $(PROXY_SOURCE_ROOT)
-
-CPLUS_FLAGS += $(DEFINE_KEY)__TBB_DLL_NAME=$(TBB.DLL)
-CPLUS_FLAGS += $(DEFINE_KEY)__TBB_LST=$(TBB.LST)
-CPLUS_FLAGS += $(foreach dir,$(VPATH),$(INCLUDE_KEY)$(dir))
-CPLUS_FLAGS += $(PIC_KEY)
-
-include $(tbb_root)/build/common_rules.inc
-
-#------------------------------------------------------
-# Define rules for making the TBB Proxy static library.
-#------------------------------------------------------
-
-# Object files that make up TBB Proxy
-PROXY_CPLUS.OBJ = tbbproxy.$(OBJ)
-PROXY_ASM.OBJ = tbbproxy-asm.$(OBJ)
-PROXY.OBJ := $(PROXY_CPLUS.OBJ) $(PROXY_ASM.OBJ)
-
-# Not using intrinsics prevents an undesired dependence on ICL libraries (e.g. libirc).
-# Not using default libs prevents link issues caused by different CRT versions in tbbproxy and in an app.
-$(PROXY.OBJ): CPLUS_FLAGS += $(DEFINE_KEY)ARCH_$(arch) $(DEFINE_KEY)OS_$(tbb_os) $(NOINTRINSIC_KEY) $(NODEFAULTLIB_KEY)
-
-$(PROXY_CPLUS.OBJ): CPLUS_FLAGS+=$(if $(filter windows.%cl,$(tbb_os).$(compiler)),/Fdtbbproxy$(DEBUG_SUFFIX).pdb)
-$(PROXY_CPLUS.OBJ): %.$(OBJ): %.cpp
-	$(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(INCLUDES) $<
-
-$(PROXY.LIB): $(PROXY.OBJ)
-	$(AR) $(AR_FLAGS) $(AR_OUTPUT_KEY)$@ $^
-
-.PRECIOUS : %.$(ASMEXT)
-tbbproxy-asm.$(ASMEXT) : tbbproxy-$(tbb_os).$(ASMEXT) $(TBB.LST) $(TBB-OBJECTS.LST)
-	$(CPLUS) $(PREPROC_ONLY) $< $(INCLUDES) $(CPLUS_FLAGS) $(DEFINE_KEY)__TBB_BUILD=1 > $@
-
-.PHONY: tbbproxy
-ifeq (windows,$(tbb_os))
-tbbproxy: $(PROXY.LIB)
-else
-tbbproxy:
-endif
-
-#------------------------------------------------------
-# End of rules for making the TBB Proxy static library
-#------------------------------------------------------
-
-#------------------------------------------------------
-# Define rules for making the TBB Proxy unit tests
-#------------------------------------------------------
-
-add_debug=$(basename $(1))_debug$(suffix $(1))
-cross_suffix=$(if $(crosstest),$(if $(DEBUG_SUFFIX),$(subst _debug,,$(1)),$(call add_debug,$(1))),$(1))
-
-PROXY_LIB = $(call cross_suffix,$(PROXY.LIB))
-PROXY_TESTS_SRCS = test_runtime_loader.cpp
-PROXY_TESTS_OBJS = $(PROXY_TESTS_SRCS:.cpp=.$(OBJ))
-PROXY_TESTS_EXES = $(PROXY_TESTS_OBJS:.$(OBJ)=.$(TEST_EXT))
-
-# Run rules.
-.PHONY: tbbproxy_test
-ifeq (windows,$(tbb_os))
-tbbproxy_test: $(call cross_suffix,$(PROXY.LIB)) $(TEST_PREREQUISITE) $(PROXY_TESTS_EXES)
-	$(run_cmd) ./test_runtime_loader.$(TEST_EXT) $(args)
-else
-tbbproxy_test:
-endif
-
-# Link rules.
-$(PROXY_TESTS_EXES): %.$(TEST_EXT): %.$(OBJ) $(PROXY_LIB)
-	$(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(PROXY_LIB) $(LIBS) $(LIBDL) $(LINK_FLAGS)
-
-# Compilation rules.
-$(PROXY_TESTS_OBJS): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $(OUTPUT_KEY)$@ $< - -#------------------------------------------------------ -# End of rules for making the TBB Proxy unit tests -#------------------------------------------------------ - -# Include automatically generated dependences --include *.d diff --git a/src/tbb/build/Makefile.test b/src/tbb/build/Makefile.test deleted file mode 100644 index adb691adc..000000000 --- a/src/tbb/build/Makefile.test +++ /dev/null @@ -1,379 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define rules for making the TBB tests. 
-#------------------------------------------------------------------------------ -.PHONY: default test_tbb_plain test_tbb_openmp test_tbb_cilk test_tbb_old clean - -default: test_tbb_plain test_tbb_openmp test_tbb_cilk test_tbb_old - -tbb_root ?= $(TBBROOT) -BUILDING_PHASE=1 -TEST_RESOURCE = $(TBB.RES) -TESTFILE=test -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,$(call cross_cfg,_$(cfg))) - -#------------------------------------------------------------ -# Define static pattern rules dealing with .cpp source files -#------------------------------------------------------------ - -VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/rml/client $(tbb_root)/src/old $(tbb_root)/src/test $(tbb_root)/src/perf -CPLUS_FLAGS += $(if $(crosstest),$(DEFINE_KEY)__TBB_NO_IMPLICIT_LINKAGE=1) $(if $(LINK_TBB.LIB),$(DEFINE_KEY)TEST_USES_TBB=1) - -TEST_PREREQUISITE+=$(TBB.LIB) -LINK_FILES+=$(LINK_TBB.LIB) - -ifdef use_proxy - USE_PROXY_FLAG = $(DEFINE_KEY)HARNESS_USE_RUNTIME_LOADER - CPLUS_FLAGS += $(USE_PROXY_FLAG) - LINK_TBB.LIB = $(PROXY.LIB) - LIBS += $(LIBDL) -endif - -TEST_SUFFIXES=secondary compiler_builtins pic -include $(tbb_root)/build/common_rules.inc - -# Rules for the tests, which use TBB in a dynamically loadable library -test_model_plugin.$(TEST_EXT): LINK_TBB.LIB = -test_model_plugin.$(TEST_EXT): CPLUS_FLAGS := $(CPLUS_FLAGS:$(USE_PROXY_FLAG)=) -test_model_plugin.$(TEST_EXT): LIBS += $(LIBDL) -test_model_plugin.$(TEST_EXT): test_model_plugin_dll.$(DLL) - -# tbb_misc.$(OBJ) has to be specified here (instead of harness_inject_scheduler.h) because it carries dependency on version_string.ver -SCHEDULER_DEPENDENCIES = $(TBB_ASM.OBJ) tbb_misc.$(OBJ) - -# These executables don't depend on the TBB library, but include core .cpp files directly -SCHEDULER_DIRECTLY_INCLUDED = test_task_leaks.$(TEST_EXT) \ - test_task_assertions.$(TEST_EXT) \ - test_fast_random.$(TEST_EXT) - -# Necessary to locate version_string.ver referenced from directly included tbb_misc.cpp -INCLUDES += $(INCLUDE_KEY). 
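
The rules that follow lean heavily on GNU make target-specific variables: for example, `LINK_TBB.LIB` is cleared for the `$(SCHEDULER_DIRECTLY_INCLUDED)` white-box tests, which compile scheduler sources directly and so must not also link the library (to avoid ODR violations). A standalone sketch of the pattern, with hypothetical target names (not part of the deleted file):

# Global default used by most tests.
LINK_TBB.LIB = -ltbb

# Target-specific override: in effect only while this target (and its
# prerequisites) are being built.
test_whitebox.exe: LINK_TBB.LIB =

test_normal.exe test_whitebox.exe:
	@echo "$@ links with: [$(LINK_TBB.LIB)]"

# make test_normal.exe    -> test_normal.exe links with: [-ltbb]
# make test_whitebox.exe  -> test_whitebox.exe links with: []
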
- -$(SCHEDULER_DIRECTLY_INCLUDED): WARNING_KEY += $(WARNING_SUPPRESS) -$(SCHEDULER_DIRECTLY_INCLUDED): LIBS += $(LIBDL) -#tbb.lib must not be linked to scheduler white box tests in order to not violate ODR -$(SCHEDULER_DIRECTLY_INCLUDED): LINK_TBB.LIB = -$(SCHEDULER_DIRECTLY_INCLUDED): LINK_FILES += $(SCHEDULER_DEPENDENCIES) -$(SCHEDULER_DIRECTLY_INCLUDED): $(SCHEDULER_DEPENDENCIES) - -# Tests that use some features of C++11 -TEST_TBB_CPP11 = test_lambda.$(TEST_EXT) test_cache_aligned_allocator_STL.$(TEST_EXT) - -ifneq (0,$(cpp0x)) -# Made CPP11 tests use NOSTRICT flags because -strict-ansi combined with -# -std=c++0x on ICC 13.0 results in a compile error when stdlib is included -$(TEST_TBB_CPP11): CPLUS_FLAGS := $(CPLUS_FLAGS_NOSTRICT) -endif - -# test_tbb_header detects "multiple definition" linker error using the test that covers the whole library -TWICE_LINKED_TESTS = test_tbb_header.$(TEST_EXT) \ - test_concurrent_unordered_set.$(TEST_EXT) - -%_secondary.$(OBJ): CPLUS_FLAGS+=$(DEFINE_KEY)__TBB_TEST_SECONDARY=1 - -# Detecting "multiple definition" linker error using the test that covers the whole library -$(TWICE_LINKED_TESTS): %.$(TEST_EXT): %.$(OBJ) %_secondary.$(OBJ) -$(TWICE_LINKED_TESTS): LINK_FILES+=$(@:.$(TEST_EXT)=_secondary.$(OBJ)) - -# Checks that TBB works correctly in position independent code -%_pic.$(OBJ): CPLUS_FLAGS+=$(PIC_KEY) -%_pic.$(OBJ): CPLUS_FLAGS+=$(DEFINE_KEY)__TBB_TEST_PIC=1 - -# Test of generic gcc port and icc intrinsics port -%_compiler_builtins.$(TEST_EXT): LINK_TBB.LIB = -%_compiler_builtins.$(OBJ): CPLUS_FLAGS+=$(DEFINE_KEY)__TBB_TEST_BUILTINS=1 $(DEFINE_KEY)TBB_USE_ASSERT=0 - -# The test_dynamic_link test doesn't depend on the TBB library -test_dynamic_link.$(TEST_EXT): LINK_TBB.LIB = -test_dynamic_link.$(TEST_EXT): LIBS += $(LIBDL) - -# The main list of TBB tests -TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT) \ - test_tbb_fork.$(TEST_EXT) \ - test_assembly_compiler_builtins.$(TEST_EXT) \ - test_aligned_space.$(TEST_EXT) \ - test_atomic.$(TEST_EXT) \ - test_atomic_pic.$(TEST_EXT) \ - test_atomic_compiler_builtins.$(TEST_EXT) \ - test_blocked_range.$(TEST_EXT) \ - test_blocked_range2d.$(TEST_EXT) \ - test_blocked_range3d.$(TEST_EXT) \ - test_concurrent_queue.$(TEST_EXT) \ - test_concurrent_queue_whitebox.$(TEST_EXT) \ - test_concurrent_vector.$(TEST_EXT) \ - test_concurrent_unordered_set.$(TEST_EXT) \ - test_concurrent_unordered_map.$(TEST_EXT) \ - test_concurrent_hash_map.$(TEST_EXT) \ - test_enumerable_thread_specific.$(TEST_EXT) \ - test_handle_perror.$(TEST_EXT) \ - test_halt.$(TEST_EXT) \ - test_model_plugin.$(TEST_EXT) \ - test_mutex.$(TEST_EXT) \ - test_mutex_native_threads.$(TEST_EXT) \ - test_rwm_upgrade_downgrade.$(TEST_EXT) \ - test_cache_aligned_allocator.$(TEST_EXT) \ - test_parallel_for.$(TEST_EXT) \ - test_parallel_reduce.$(TEST_EXT) \ - test_parallel_sort.$(TEST_EXT) \ - test_parallel_scan.$(TEST_EXT) \ - test_parallel_while.$(TEST_EXT) \ - test_parallel_do.$(TEST_EXT) \ - test_pipeline.$(TEST_EXT) \ - test_pipeline_with_tbf.$(TEST_EXT) \ - test_parallel_pipeline.$(TEST_EXT) \ - test_task_scheduler_init.$(TEST_EXT) \ - test_task_scheduler_observer.$(TEST_EXT) \ - test_task.$(TEST_EXT) \ - test_tbb_thread.$(TEST_EXT) \ - test_std_thread.$(TEST_EXT) \ - test_tick_count.$(TEST_EXT) \ - test_inits_loop.$(TEST_EXT) \ - test_yield.$(TEST_EXT) \ - test_eh_tasks.$(TEST_EXT) \ - test_eh_algorithms.$(TEST_EXT) \ - test_eh_flow_graph.$(TEST_EXT) \ - test_parallel_invoke.$(TEST_EXT) \ - test_task_group.$(TEST_EXT) \ - test_ittnotify.$(TEST_EXT) \ - 
test_parallel_for_each.$(TEST_EXT) \ - test_tbb_header.$(TEST_EXT) \ - test_combinable.$(TEST_EXT) \ - test_task_auto_init.$(TEST_EXT) \ - test_task_arena.$(TEST_EXT) \ - test_concurrent_monitor.$(TEST_EXT) \ - test_semaphore.$(TEST_EXT) \ - test_critical_section.$(TEST_EXT) \ - test_reader_writer_lock.$(TEST_EXT) \ - test_tbb_condition_variable.$(TEST_EXT) \ - test_intrusive_list.$(TEST_EXT) \ - test_concurrent_priority_queue.$(TEST_EXT) \ - test_task_priority.$(TEST_EXT) \ - test_task_enqueue.$(TEST_EXT) \ - test_task_steal_limit.$(TEST_EXT) \ - test_hw_concurrency.$(TEST_EXT) \ - test_fp.$(TEST_EXT) \ - test_tuple.$(TEST_EXT) \ - test_flow_graph.$(TEST_EXT) \ - test_broadcast_node.$(TEST_EXT) \ - test_continue_node.$(TEST_EXT) \ - test_function_node.$(TEST_EXT) \ - test_limiter_node.$(TEST_EXT) \ - test_join_node.$(TEST_EXT) \ - test_buffer_node.$(TEST_EXT) \ - test_queue_node.$(TEST_EXT) \ - test_priority_queue_node.$(TEST_EXT) \ - test_sequencer_node.$(TEST_EXT) \ - test_source_node.$(TEST_EXT) \ - test_overwrite_node.$(TEST_EXT) \ - test_write_once_node.$(TEST_EXT) \ - test_indexer_node.$(TEST_EXT) \ - test_multifunction_node.$(TEST_EXT) \ - test_split_node.$(TEST_EXT) \ - test_static_assert.$(TEST_EXT) \ - test_aggregator.$(TEST_EXT) \ - test_concurrent_lru_cache.$(TEST_EXT) \ - test_examples_common_utility.$(TEST_EXT) \ - test_dynamic_link.$(TEST_EXT) \ - test_parallel_for_vectorization.$(TEST_EXT) \ - test_tagged_msg.$(TEST_EXT) \ - test_partitioner_whitebox.$(TEST_EXT) \ - test_flow_graph_whitebox.$(TEST_EXT) \ - test_tbb_version.$(TEST_EXT) # insert new files right above - -TEST_TBB_PLAIN.EXE += $(TEST_TBB_CPP11) - -ifdef OPENMP_FLAG -test_openmp.$(TEST_EXT): CPLUS_FLAGS += $(OPENMP_FLAG) - -test_tbb_openmp: $(TEST_PREREQUISITE) test_openmp.$(TEST_EXT) - $(run_cmd) ./test_openmp.$(TEST_EXT) 1:4 -else -test_tbb_openmp: - @echo "OpenMP is not available" -endif - -ifdef CILK_AVAILABLE -test_cilk_dynamic_load.$(TEST_EXT): LIBS += $(LIBDL) -test_cilk_dynamic_load.$(TEST_EXT): test_cilk_dynamic_load_dll.$(DLL) - -# Workaround on cilkrts linkage known issue (see Intel(R) C++ Composer XE 2011 Release Notes) -# The issue reveals itself if a version of binutils is prior to 2.17 -ifeq (linux_icc,$(tbb_os)_$(compiler)) -test_cilk_interop.$(TEST_EXT): LIBS += -lcilkrts -endif -test_tbb_cilk: test_cilk_interop.$(TEST_EXT) test_cilk_dynamic_load.$(TEST_EXT) - $(run_cmd) ./test_cilk_interop.$(TEST_EXT) $(args) - $(run_cmd) ./test_cilk_dynamic_load.$(TEST_EXT) $(args) -else -test_tbb_cilk: - @echo "Intel(R) Cilk(TM) Plus is not available" -endif - -$(TEST_TBB_PLAIN.EXE): WARNING_KEY += $(TEST_WARNING_KEY) - -# Run tests that are in SCHEDULER_DIRECTLY_INCLUDED and TEST_TBB_PLAIN.EXE -# Note that usually run_cmd is empty, and tests run directly -test_tbb_plain: $(TEST_PREREQUISITE) $(SCHEDULER_DIRECTLY_INCLUDED) $(TEST_TBB_PLAIN.EXE) - $(run_cmd) ./test_tbb_version.$(TEST_EXT) $(args) - # Checking TBB version first to make sure the following testing has anything in it - $(run_cmd) ./test_assembly.$(TEST_EXT) $(args) - $(run_cmd) ./test_atomic.$(TEST_EXT) $(args) - $(run_cmd) ./test_atomic_pic.$(TEST_EXT) $(args) - # Yes, 4:8 is intended on the next line. 
- $(run_cmd) ./test_yield.$(TEST_EXT) $(args) 4:8 - $(run_cmd) ./test_handle_perror.$(TEST_EXT) $(args) - $(run_cmd) ./test_dynamic_link.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_auto_init.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_arena.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_scheduler_init.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_scheduler_observer.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_assertions.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_leaks.$(TEST_EXT) $(args) - $(run_cmd) ./test_fast_random.$(TEST_EXT) $(args) 1:16 - $(run_cmd) ./test_eh_tasks.$(TEST_EXT) $(args) 2:4 - $(run_cmd) ./test_cache_aligned_allocator.$(TEST_EXT) $(args) - $(run_cmd) ./test_cache_aligned_allocator_STL.$(TEST_EXT) $(args) - $(run_cmd) ./test_blocked_range.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_blocked_range2d.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_blocked_range3d.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_for.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_partitioner_whitebox.$(TEST_EXT) $(args) - $(run_cmd) ./test_parallel_for_vectorization.$(TEST_EXT) $(args) - $(run_cmd) ./test_parallel_sort.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_aligned_space.$(TEST_EXT) $(args) - $(run_cmd) ./test_parallel_reduce.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_scan.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_while.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_do.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_inits_loop.$(TEST_EXT) $(args) - $(run_cmd) ./test_lambda.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_mutex.$(TEST_EXT) $(args) 1:3 - $(run_cmd) ./test_mutex_native_threads.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_rwm_upgrade_downgrade.$(TEST_EXT) $(args) 4 - # Yes, 4:8 is intended on the next line. 
- $(run_cmd) ./test_halt.$(TEST_EXT) $(args) 4:8 - $(run_cmd) ./test_pipeline.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_pipeline_with_tbf.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_pipeline.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tick_count.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_queue.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_vector.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_queue_whitebox.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_unordered_set.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_unordered_map.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_hash_map.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_enumerable_thread_specific.$(TEST_EXT) $(args) 0:4 - $(run_cmd) ./test_combinable.$(TEST_EXT) $(args) 0:4 - $(run_cmd) ./test_model_plugin.$(TEST_EXT) $(args) 4 - $(run_cmd) ./test_eh_algorithms.$(TEST_EXT) $(args) 2:4 - $(run_cmd) ./test_eh_flow_graph.$(TEST_EXT) $(args) 2:4 - $(run_cmd) ./test_tbb_thread.$(TEST_EXT) $(args) - $(run_cmd) ./test_std_thread.$(TEST_EXT) $(args) - $(run_cmd) ./test_parallel_invoke.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_group.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_ittnotify.$(TEST_EXT) $(args) 2:2 - $(run_cmd) ./test_parallel_for_each.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tbb_header.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_monitor.$(TEST_EXT) $(args) 6:8 - $(run_cmd) ./test_critical_section.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_semaphore.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_reader_writer_lock.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tbb_condition_variable.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tbb_fork.$(TEST_EXT) $(args) - $(run_cmd) ./test_intrusive_list.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_priority_queue.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_priority.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_enqueue.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_steal_limit.$(TEST_EXT) $(args) - $(run_cmd) ./test_hw_concurrency.$(TEST_EXT) $(args) - $(run_cmd) ./test_fp.$(TEST_EXT) $(args) - $(run_cmd) ./test_tuple.$(TEST_EXT) $(args) - $(run_cmd) ./test_flow_graph.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_broadcast_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_continue_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_function_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_limiter_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_join_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_buffer_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_queue_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_priority_queue_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_sequencer_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_source_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_overwrite_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_write_once_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_indexer_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_multifunction_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_split_node.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_examples_common_utility.$(TEST_EXT) $(args) - $(run_cmd) ./test_atomic_compiler_builtins.$(TEST_EXT) $(args) - $(run_cmd) ./test_assembly_compiler_builtins.$(TEST_EXT) $(args) - $(run_cmd) ./test_static_assert.$(TEST_EXT) $(args) - $(run_cmd) ./test_aggregator.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_lru_cache.$(TEST_EXT) $(args) - $(run_cmd) ./test_tagged_msg.$(TEST_EXT) $(args) - $(run_cmd) ./test_flow_graph_whitebox.$(TEST_EXT) $(args) - -# For 
deprecated files, we don't mind warnings etc., thus compilation rules are most relaxed -CPLUS_FLAGS_DEPRECATED = $(DEFINE_KEY)TBB_DEPRECATED=1 $(subst $(WARNING_KEY),,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS) $(INCLUDE_KEY)$(tbb_root)/src/test -TEST_TBB_OLD.OBJ = test_concurrent_vector_v2.$(OBJ) test_concurrent_queue_v2.$(OBJ) test_mutex_v2.$(OBJ) test_task_scheduler_observer_v3.$(OBJ) - -$(TEST_TBB_OLD.OBJ): CPLUS_FLAGS := $(CPLUS_FLAGS_DEPRECATED) - -TEST_TBB_OLD.EXE = $(subst .$(OBJ),.$(TEST_EXT),$(TEST_TBB_OLD.OBJ)) - -ifeq (,$(NO_LEGACY_TESTS)) -test_tbb_old: $(TEST_PREREQUISITE) $(TEST_TBB_OLD.EXE) - $(run_cmd) ./test_concurrent_vector_v2.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_queue_v2.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 1 - $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 2 - $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 4 - $(run_cmd) ./test_task_scheduler_observer_v3.$(TEST_EXT) $(args) 1:4 -else -test_tbb_old: - @echo Legacy tests skipped -endif - -ifneq (,$(codecov)) -codecov_gen: - profmerge - codecov $(if $(findstring -,$(codecov)),$(codecov),) -demang -comp $(tbb_root)/build/codecov.txt -endif - -time_%: time_%.$(TEST_EXT) $(TEST_PREREQUISITE) - $(run_cmd) ./$< $(args) - - -# for some reason, "perf_%.$(TEST_EXT): perf_dll.$(DLL)" does not work TODO: find out how to apply pattern here -perf_sched.$(TEST_EXT): perf_dll.$(DLL) -perf_%.$(TEST_EXT): TEST_LIBS = perf_dll.$(LIBEXT) -perf_%: perf_%.$(TEST_EXT) $(TEST_PREREQUISITE) - $(run_cmd) ./$< $(args) - -clean_%: - $(RM) $*.$(OBJ) $*.exe $*.$(DLL) $*.$(LIBEXT) $*.res $*.map $*.ilk $*.pdb $*.exp $*.*manifest $*.tmp $*.d *.ver - -clean: - $(RM) *.$(OBJ) *.exe *.$(DLL) *.$(LIBEXT) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d pgopti.* *.dyn core core.*[0-9][0-9] *.ver - -# Include automatically generated dependences --include *.d diff --git a/src/tbb/build/SunOS.gcc.inc b/src/tbb/build/SunOS.gcc.inc deleted file mode 100644 index cc47fde60..000000000 --- a/src/tbb/build/SunOS.gcc.inc +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
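
The SunOS and Android fragments deleted below appear to follow TBB's platform-include convention: build/common.inc (outside this hunk) pulls in `$(tbb_os).inc` for OS-level settings and then `$(tbb_os).$(compiler).inc` for compiler-level overrides. A sketch of just the naming scheme, under that assumption (the `show` target is illustrative; the real dispatch logic lives in common.inc, which this diff does not touch):

tbb_os   ?= SunOS
compiler ?= gcc

show:
	@echo "OS settings:       $(tbb_os).inc"
	@echo "Compiler settings: $(tbb_os).$(compiler).inc"

# make show                  -> SunOS.inc / SunOS.gcc.inc
# make show compiler=suncc   -> SunOS.inc / SunOS.suncc.inc
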
- -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor $(if $(findstring cc4., $(runtime)),-Wextra) -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -DYLIB_KEY = -shared -LIBDL = -ldl - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -LIBS = -lpthread -lrt -ldl -C_FLAGS = $(CPLUS_FLAGS) -x c - -ifeq ($(cfg), release) - CPLUS_FLAGS = -g -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must for IA-64 - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -# for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify) -ifeq (sparc,$(arch)) - CPLUS_FLAGS += -mcpu=v9 -m64 - LIB_LINK_FLAGS += -mcpu=v9 -m64 -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM=ias - TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/SunOS.inc b/src/tbb/build/SunOS.inc deleted file mode 100644 index 67439053f..000000000 --- a/src/tbb/build/SunOS.inc +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. 
This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -ifndef arch - arch:=$(shell uname -p) - ifeq ($(arch),i386) - ifeq ($(shell isainfo -b),64) - arch:=intel64 - else - arch:=ia32 - endif - endif - export arch -# For non-IA systems running Sun OS, 'arch' will contain whatever is printed by uname -p. -# In particular, for SPARC architecture it will contain "sparc". -endif - -ifndef runtime - gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) -endif - -ifeq ($(arch),sparc) - native_compiler := gcc - export compiler ?= gcc -else - native_compiler := suncc - export compiler ?= suncc -endif -# debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=bash $(tbb_root)/build/version_info_sunos.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=bash $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -LIBEXT=so - -TBB.LST = -TBB.DEF = -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) diff --git a/src/tbb/build/SunOS.suncc.inc b/src/tbb/build/SunOS.suncc.inc deleted file mode 100644 index 79015adac..000000000 --- a/src/tbb/build/SunOS.suncc.inc +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -COMPILE_ONLY = -c -xMMD -errtags -PREPROC_ONLY = -E -xMMD -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -KPIC -DYLIB_KEY = -G -LIBDL = -ldl -# WARNING_AS_ERROR_KEY = -errwarn=%all -WARNING_AS_ERROR_KEY = Warning as error -# Supported Solaris Studio* 12.2 and above, remove ',inlasmpnu' in the line below to build by compiler prior Solaris Studio* 12.2 -WARNING_SUPPRESS = -erroff=unassigned,attrskipunsup,badargtype2w,badbinaryopw,wbadasg,wvarhidemem,inlasmpnu -tbb_strict=0 - -CPLUS = CC -CONLY = cc -LIB_LINK_FLAGS = -G -R . -M$(tbb_root)/build/suncc.map.pause -LINK_FLAGS += -M$(tbb_root)/build/suncc.map.pause -LIBS = -lpthread -lrt -R . -C_FLAGS = $(CPLUS_FLAGS) - -#TODO: the $(stdlib) instead of hard-wiring STLPort -ifeq ($(cfg), release) - CPLUS_FLAGS = -mt -xO2 -g -library=stlport4 -DUSE_PTHREAD $(WARNING_SUPPRESS) -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -mt -DTBB_USE_DEBUG -g -library=stlport4 -DUSE_PTHREAD $(WARNING_SUPPRESS) -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - ASM_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -# TODO: verify whether -m64 implies V9 on relevant Sun Studio versions -# (those that handle gcc assembler syntax) -ifeq (sparc,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -export TBB_CUSTOM_VARS_SH=export CXXFLAGS="-I$${TBBROOT}/include -library=stlport4 $(CXXFLAGS) -M$${TBBROOT}/build/suncc.map.pause" -export TBB_CUSTOM_VARS_CSH=setenv CXXFLAGS "-I$${TBBROOT}/include -library=stlport4 $(CXXFLAGS) -M$${TBBROOT}/build/suncc.map.pause" - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-fbe -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_INCLUDES = $(INCLUDES) -I$(MALLOC_ROOT) -I$(MALLOC_SOURCE_ROOT) -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/android.gcc.inc b/src/tbb/build/android.gcc.inc deleted file mode 100644 index d5ac9c9b1..000000000 --- a/src/tbb/build/android.gcc.inc +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. 
You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -Wextra - -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -LIBDL = -ldl - -CPLUS = $(tbb_tool_prefix)g++ -CONLY = $(tbb_tool_prefix)gcc - -# -soname is necessary for proper linkage to TBB prebuilt libraries when building application with Android SDK -LIB_LINK_FLAGS = $(DYLIB_KEY) -Wl,-soname=$(BUILDING_LIBRARY) - -LINK_FLAGS = -Wl,-rpath-link=. -rdynamic -C_FLAGS = $(CPLUS_FLAGS) - -# gcc 4.4 and higher support C++11 -ifneq (,$(shell $(CPLUS) -dumpversion | egrep "^(4\.[4-9]|[5-9])")) - # On Android/gcc 4.4.3, -std=c++0x causes ::int64_t and ::uint64_t to be undefined. - CPP11_FLAGS = -std=gnu++0x $(DEFINE_KEY)_TBB_CPP0X -endif - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -g -O0 $(DEFINE_KEY)TBB_USE_DEBUG -endif - -CPLUS_FLAGS += $(DEFINE_KEY)USE_PTHREAD $(DEFINE_KEY)_GLIBCXX_HAVE_FENV_H - -ifneq (,$(findstring $(arch),ia32 intel64)) - CPLUS_FLAGS += $(DEFINE_KEY)DO_ITT_NOTIFY -endif - -ifneq (0, $(dlopen_workaround)) - CPLUS_FLAGS += $(DEFINE_KEY)__TBB_USE_DLOPEN_REENTRANCY_WORKAROUND=1 - CPLUS_FLAGS += $(DEFINE_KEY)__TBB_USE_DLOPEN_MAIN_PROGRAM_WORKAROUND=1 -else - CPLUS_FLAGS += $(DEFINE_KEY)__TBB_USE_DLOPEN_REENTRANCY_WORKAROUND=0 - CPLUS_FLAGS += $(DEFINE_KEY)__TBB_USE_DLOPEN_MAIN_PROGRAM_WORKAROUND=0 -endif - -ifeq (0, $(dynamic_load)) - CPLUS_FLAGS += $(DEFINE_KEY)__TBB_DYNAMIC_LOAD_ENABLED=0 -endif - - -# Paths to the NDK prebuilt tools and libraries -CPLUS_FLAGS += --sysroot=$(SYSROOT) -LIB_LINK_FLAGS += --sysroot=$(SYSROOT) -LIBS = -L$(CPLUS_LIB_PATH) -lgnustl_shared - -# This causes CPP11_FLAGS to be issued twice for test_lambda.cpp -# TODO: Fix this in general for all platforms once the correct strategy is determined. -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -ifeq (ia32,$(arch)) - # TODO: Determine best setting of -march and add to CPLUS_FLAGS - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -# Currently, no intel64 support for Android, but if added later, these flags may suffice -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (arm,$(findstring arm,$(arch))) - CPLUS_FLAGS += -march=armv7-a $(DEFINE_KEY)TBB_USE_GCC_BUILTINS=1 $(DEFINE_KEY)__TBB_64BIT_ATOMICS=0 -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. 
-#------------------------------------------------------------------------------ -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ASM = $(tbb_tool_prefix)as -ifeq (intel64,$(arch)) - ASM_FLAGS += --64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += --32 -endif -ifeq ($(cfg),debug) - ASM_FLAGS += -g -endif - -ASSEMBLY_SOURCE=$(arch)-gas -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/android.inc b/src/tbb/build/android.inc deleted file mode 100644 index 2c327d88a..000000000 --- a/src/tbb/build/android.inc +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# -# Extra gmake command-line parameters for use with Android: -# -# dlopen_workaround: Some OS versions need workaround for dlopen to avoid recursive calls. -# - -####### Detections and Commands ############################################### - -ifeq (android,$(findstring android,$(tbb_os))) - $(error TBB only supports cross-compilation for Android. Specify "target=android" instead.) 
-endif
-
-ifneq ("command line","$(origin arch)")
-    ifeq (icc,$(compiler))
-        export COMPILER_VERSION := ICC: $(shell icc -V 2>&1 | grep 'Version')
-        ifneq (,$(findstring IA-32, $(COMPILER_VERSION)))
-            export arch:=ia32
-        else
-            $(error "No support for Android in $(COMPILER_VERSION)")
-        endif
-
-    else
-        ifdef ANDROID_SERIAL
-            uname_m:=$(shell adb shell uname -m)
-            ifeq (i686,$(uname_m))
-                export arch:=ia32
-            else
-                export arch:=$(uname_m)
-            endif
-        else
-            ifndef arch
-                $(error "No target architecture specified and \'ANDROID_SERIAL\' environment variable specifying target device not set")
-            endif
-        endif
-    endif
-endif
-
-# Many OS versions (Android 4.0.[0-3] for example) need a workaround for dlopen to avoid a non-recursive loader lock hang
-export dlopen_workaround = 1
-
-# The Android platform is only supported from TBB 4.1 onward
-NO_LEGACY_TESTS = 1
-
-
diff --git a/src/tbb/build/android.linux.inc b/src/tbb/build/android.linux.inc
deleted file mode 100644
index d6a871b2d..000000000
--- a/src/tbb/build/android.linux.inc
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-#
-# This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-# you can redistribute it and/or modify it under the terms of the GNU General Public License
-# version 2 as published by the Free Software Foundation. Threading Building Blocks is
-# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details. You should have received a copy of
-# the GNU General Public License along with Threading Building Blocks; if not, write to the
-# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-# As a special exception, you may use this file as part of a free software library without
-# restriction. Specifically, if other files instantiate templates or use macros or inline
-# functions from this file, or you compile this file and link it with other files to produce
-# an executable, this file does not by itself cause the resulting executable to be covered
-# by the GNU General Public License. This exception does not however invalidate any other
-# reasons why the executable file might be covered by the GNU General Public License.
- -####### Detections and Commands ############################################### - -# Must set def_prefix according to target architecture detected above -ifeq (ia32,$(arch)) - def_prefix = lin32 -endif -ifeq (arm,$(findstring arm,$(arch))) - def_prefix = lin32 -endif -ifeq (64,$(findstring 64,$(arch))) - def_prefix = lin64 -endif - -gcc_version = $(shell $(tbb_tool_prefix)g++ -dumpversion) - -ifdef ANDROID_NDK_ROOT - $(warning "NDK version $(ANDROID_NDK_ROOT)") - ndk_version:= $(lastword $(subst -, ,$(ANDROID_NDK_ROOT))) -else - $(warning "NDK version not set in environment, using \'unknown\' instead.") - ndk_version:=unknown -endif - -export runtime:=$(target)_cc$(gcc_version)_NDK$(ndk_version)_version_$(target_os_version) - -AR = $(tbb_tool_prefix)ar -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_android.sh $(VERSION_FLAGS) >version_string.ver - -####### Build settings ######################################################## - -# No SONAME_SUFFIX for Android allowed in library names -TBB.LST = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.lst -TBB.DEF = $(TBB.LST:.lst=.def) -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -TBB_NO_VERSION.DLL= -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -MALLOC_NO_VERSION.DLL= -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DEF = $(MALLOC_ROOT)/$(def_prefix)-proxy-export.def -MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY_NO_VERSION.DLL= -MALLOCPROXY.LIB = $(MALLOCPROXY.DLL) -LINK_MALLOCPROXY.LIB = $(MALLOCPROXY.LIB) - -TEST_LAUNCHER= -run_cmd ?= -sh $(tbb_root)/build/android.linux.launcher.sh $(largs) diff --git a/src/tbb/build/android.linux.launcher.sh b/src/tbb/build/android.linux.launcher.sh deleted file mode 100644 index b818053df..000000000 --- a/src/tbb/build/android.linux.launcher.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -# Usage: -# android.linux.launcher.sh [-v] [-q] [-s] [-r ] [-u] [-l ] -# where: -v enables verbose output -# where: -q enables quiet mode -# where: -s runs the test in stress mode (until non-zero exit code or ctrl-c pressed) -# where: -r specifies number of times to repeat execution -# where: -u is ignored on Android -# where: -l specifies the library name to be assigned to LD_PRELOAD -# -# Libs and executable necessary for testing should be present in the current directory before running. -# ANDROID_SERIAL must be set to the connected Android target device name for file transfer and test runs. -# ANDROID_TEST_DIRECTORY may be set to the directory used for testing on the Android target device; otherwise, -# the default directory used is "/data/local/tmp/$(basename $PWD)". -# Note: Do not remove the redirections to '/dev/null' in the script, otherwise the nightly test system will fail. - -do_cleanup() # -{ # - adb pull $targetdir/events.txt events.txt > /dev/null 2>&1 # - # Remove target directory on the device - adb shell "rm -r ${targetdir}; mkdir -p ${targetdir}" > /dev/null 2>&1 # -} # -do_trap_cleanup() # -{ # - do_cleanup # - exit -1 # -} # -while getopts "qvsr:ul:" flag # -do case $flag in # - s ) # Stress testing mode - echo Doing stress testing. Press Ctrl-C to terminate - run_env='stressed() { while $*; do :; done; }; ' # - run_prefix="stressed $run_prefix" ;; # - r ) # Repeats test n times - run_env="repeated() { for i in $(seq -s ' ' 1 $OPTARG) ; do echo \$i of $OPTARG:; \$*; done; }; " # - run_prefix="repeated $run_prefix" ;; # - l ) # Additional library - ldpreload="$OPTARG " ;; # - u ) # Stack limit - ;; # - q ) # Quiet mode, removes 'done' but prepends any other output by test name - OUTPUT='2>&1 | sed -e "s/done//;/^[[:space:]]*$/d;s!^!$exename: !"' ;; # - v ) # Verbose mode - SUPPRESS='' # - verbose=1 ;; # -esac done # -shift `expr $OPTIND - 1` # -[ -z "$OUTPUT" ] && OUTPUT='| sed -e "s/\\r$//"' # -[ $verbose ] || SUPPRESS='>/dev/null' # -# Collect the executable name -exename=$(basename $1) # -shift # -# Prepare the target directory on the device -currentdir=$(basename $PWD) # -targetdir=${ANDROID_TEST_DIRECTORY:-/data/local/tmp/$currentdir} # -do_cleanup # -trap do_trap_cleanup INT # if someone hits control-c, cleanup the device -# Collect the list of files to transfer to the target device, starting with executable itself. -fnamelist="$exename" # -# Add the C++ standard library from the NDK, which is required for all tests on Android. -if [ ! -z "${LIB_GNU_STL_ANDROID}" ]; then # - fnamelist="$fnamelist ${LIB_GNU_STL_ANDROID}/libgnustl_shared.so" # -else # - fnamelist="$fnamelist libgnustl_shared.so" # -fi # -# Find the TBB libraries and add them to the list. -# Add TBB libraries from the current directory that contains libtbb* files -files="$(/bin/ls libtbb* 2> /dev/null)" # -[ -z "$files" ] || fnamelist="$fnamelist $files" # -# Add any libraries built for specific tests. -exeroot=${exename%\.*} # -files="$(/bin/ls ${exeroot}*.so ${exeroot}*.so.* 2> /dev/null)" # -[ -z "$files" ] || fnamelist="$fnamelist $files" # -# TODO: Add extra libraries from the Intel(R) Compiler for certain tests -# found=$(echo $exename | egrep 'test_malloc_atexit\|test_malloc_lib_unload' 2> /dev/null) -# if [ ! -z $found ] ; then -# fnamelist="$fnamelist ${compiler_path_lib}/libimf.so \ -# ${compiler_path_lib}/libsvml.so \ -# ${compiler_path_lib}/libintlc.so.5" -# fi - -# Transfer collected executable and library files to the target device. 
-transfers_ok=1 # -for fullname in $fnamelist; do { # - if [ -r $fullname ]; then { # - # Transfer the executable and libraries to top-level target directory - [ $verbose ] && echo -n "Pushing $fullname: " # - eval "adb push $fullname ${targetdir}/$(basename $fullname) $SUPPRESS 2>&1" # - }; else { # - echo "Error: required file ${currentdir}/${fullname} for test $exename not available for transfer." # - transfers_ok=0 # - }; fi # -}; done # -if [ "${transfers_ok}" = "0" ]; then { # - do_cleanup # - exit -1 # -}; fi # -# Transfer input files used by example codes by scanning the executable argument list. -for fullname in "$@"; do { # - if [ -r $fullname ]; then { # - directory=$(dirname $fullname) # - filename=$(basename $fullname) # - # strip leading "." from fullname if present - if [ "$directory" = "\." ]; then { # - directory="" # - fullname=$filename # - }; fi # - # Create the target directory to hold input file if necessary - if [ ! -z $directory ]; then { # - eval "adb shell 'mkdir $directory' $SUPPRESS 2>&1" # - }; fi # - # Transfer the input file to corresponding directory on target device - [ $verbose ] && echo -n "Pushing $fullname: " # - eval "adb push $fullname ${targetdir}/$fullname $SUPPRESS 2>&1" # - }; fi # -}; done # -# Set LD_PRELOAD if necessary -[ -z "$ldpreload" ] || run_prefix="LD_PRELOAD='$ldpreload' $run_prefix" # -[ $verbose ] && echo Running $run_prefix ./$exename $* # -run_env="$run_env cd $targetdir; export LD_LIBRARY_PATH=." # -# The return_code file is the best way found to return the status of the test execution when using adb shell. -eval 'adb shell "$run_env; $run_prefix ./$exename $* || echo -n \$? >error_code"' "${OUTPUT}" # -# Capture the return code string and remove the trailing \r from the return_code file contents -err=`adb shell "cat $targetdir/error_code 2>/dev/null"` # -[ -z $err ] || echo $exename: exited with error $err # -do_cleanup # -# Return the exit code of the test. -exit $err # diff --git a/src/tbb/build/android.macos.inc b/src/tbb/build/android.macos.inc deleted file mode 100644 index 9f7b9dd19..000000000 --- a/src/tbb/build/android.macos.inc +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -####### Detections and Commands ############################################### - -# Must set def_prefix according to target architecture detected above -ifeq (ia32,$(arch)) - def_prefix = lin32 -endif -ifeq (arm,$(findstring arm,$(arch))) - def_prefix = lin32 -endif -ifeq (64,$(findstring 64,$(arch))) - def_prefix = lin64 -endif - -gcc_version = $(shell $(tbb_tool_prefix)g++ -dumpversion) - -ifdef ANDROID_NDK_ROOT - $(warning "NDK version $(ANDROID_NDK_ROOT)") - ndk_version:= $(lastword $(subst -, ,$(ANDROID_NDK_ROOT))) -else - $(warning "NDK version not set in environment, using \'unknown\' instead.") - ndk_version:=unknown -endif - -export runtime:=$(target)_cc$(gcc_version)_NDK$(ndk_version)_version_$(target_os_version) - -AR = $(tbb_tool_prefix)ar -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_android.sh $(VERSION_FLAGS) >version_string.ver - -####### Build settings ######################################################## - -# No SONAME_SUFFIX for Android allowed in library names -TBB.LST = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.lst -TBB.DEF = $(TBB.LST:.lst=.def) -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -TBB_NO_VERSION.DLL= -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -MALLOC_NO_VERSION.DLL= -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DEF = $(MALLOC_ROOT)/$(def_prefix)-proxy-export.def -MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY_NO_VERSION.DLL= -MALLOCPROXY.LIB = $(MALLOCPROXY.DLL) -LINK_MALLOCPROXY.LIB = $(MALLOCPROXY.LIB) - -TBB.RES = -MALLOC.RES = -RML.RES = -TBB.MANIFEST = -MALLOC.MANIFEST = -RML.MANIFEST = -OBJ = o -DLL = so - -TEST_LAUNCHER= -run_cmd ?= -sh $(tbb_root)/build/android.linux.launcher.sh $(largs) diff --git a/src/tbb/build/android.windows.inc b/src/tbb/build/android.windows.inc deleted file mode 100644 index 840d73d16..000000000 --- a/src/tbb/build/android.windows.inc +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -####### Detections and Commands ############################################### - -# Must set def_prefix according to target architecture detected above -ifeq (ia32,$(arch)) - def_prefix = lin32 -endif -ifeq (arm,$(findstring arm,$(arch))) - def_prefix = lin32 -endif -ifeq (64,$(findstring 64,$(arch))) - def_prefix = lin64 -endif - -gcc_version = $(shell $(tbb_tool_prefix)g++ -dumpversion) - -ifdef ANDROID_NDK_ROOT - $(warning "NDK version $(ANDROID_NDK_ROOT)") - ndk_version:= $(lastword $(subst -, ,$(ANDROID_NDK_ROOT))) -else - $(warning "NDK version not set in environment, using \'unknown\' instead.") - ndk_version:=unknown -endif - -export runtime:=$(target)_cc$(gcc_version)_NDK$(ndk_version)_version_$(target_os_version) - -AR = $(tbb_tool_prefix)ar -MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(CONLY) $(arch) $(subst \,/,"$(VERSION_FLAGS)") > version_string.ver - -####### Build settings ######################################################## - -# No SONAME_SUFFIX for Android allowed in library names -TBB.LST = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.lst -TBB.DEF = $(TBB.LST:.lst=.def) -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -TBB_NO_VERSION.DLL= -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -MALLOC_NO_VERSION.DLL= -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DEF = $(MALLOC_ROOT)/$(def_prefix)-proxy-export.def -MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY_NO_VERSION.DLL= -MALLOCPROXY.LIB = $(MALLOCPROXY.DLL) - -TBB.RES = -MALLOC.RES = -RML.RES = -TBB.MANIFEST = -MALLOC.MANIFEST = -RML.MANIFEST = -OBJ = o -DLL = so - -TEST_LAUNCHER= -run_cmd ?= -sh $(tbb_root)/build/android.linux.launcher.sh $(largs) diff --git a/src/tbb/build/big_iron.inc b/src/tbb/build/big_iron.inc deleted file mode 100644 index 7840a2214..000000000 --- a/src/tbb/build/big_iron.inc +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -#------------------------------------------------------------------------------ -# Defines settings for building the TBB run-time as a static library. -# Use these only on platforms where dynamic linking is impractical. -# -# IF YOU USE TBB AS A STATIC LIBRARY, YOU MUST GUARANTEE THAT ONLY ONE COPY OF -# THE TBB RUN-TIME IS LINKED INTO AN APPLICATION! LINKING IN MULTIPLE COPIES -# OF THE TBB RUN-TIME, DIRECTLY OR INDIRECTLY, MAY CAUSE PROGRAM FAILURE! -#------------------------------------------------------------------------------ - -# Note that ITT_NOTIFY allows to selectively remove the definition of -# DO_ITT_NOTIFY without sabotaging deferred expansion of CPLUS_FLAGS. -# TODO: currently only in linux.{gcc,xl}.inc - -# Note that -pthread with xl gives "1501-210 (W) command option t contains an incorrect subargument"; -# multithreading is instead achieved by using the _r affix in the compiler name. -# TODO: is -lpthread still relevant/needed with XL and _r affix? - -# Note that usage of dynamic (shared) libraries is disabled -# (via -D__TBB_DYNAMIC_LOAD_ENABLED=0 and LIBDL emptied) primarily for performance. - -# OS specific settings => - LIB_LINK_CMD = ar rcs - LIB_LINK_FLAGS = - LIB_LINK_LIBS = - LIB_OUTPUT_KEY = - ifeq ($(tbb_os),linux) - ifeq ($(compiler),clang) - LIBS = -pthread -lrt - endif - ifeq ($(compiler),gcc) - LIBS = -pthread -lrt - endif - ifeq ($(compiler),xl) - LIBS = -lpthread -lrt - endif - LINK_FLAGS = - endif - override CXXFLAGS += -D__TBB_DYNAMIC_LOAD_ENABLED=0 -D__TBB_SOURCE_DIRECTLY_INCLUDED=1 - ITT_NOTIFY = - DLL = a - LIBEXT = a - LIBPREF = lib - LIBDL = -# <= OS specific settings - -TBB.DLL = $(LIBPREF)tbb$(DEBUG_SUFFIX).$(LIBEXT) -LINK_TBB.LIB = $(TBB.DLL) -TBB.LST = -TBB.DEF = -TBB_NO_VERSION.DLL = - -MALLOC.DLL = $(LIBPREF)tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT) -LINK_MALLOC.LIB = $(MALLOC.DLL) -MALLOC.DEF = -MALLOC_NO_VERSION.DLL = -MALLOCPROXY.DLL = -MALLOCPROXY.DEF = diff --git a/src/tbb/build/codecov.txt b/src/tbb/build/codecov.txt deleted file mode 100644 index e22f8059a..000000000 --- a/src/tbb/build/codecov.txt +++ /dev/null @@ -1,7 +0,0 @@ -src/tbb -src/tbbmalloc -include/tbb -src/rml/server -src/rml/client -src/rml/include -source/malloc diff --git a/src/tbb/build/common.inc b/src/tbb/build/common.inc deleted file mode 100644 index d338180c2..000000000 --- a/src/tbb/build/common.inc +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. 
Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -ifndef tbb_os - - # Windows sets environment variable OS; for other systems, ask uname - ifeq ($(OS),) - OS:=$(shell uname) - ifeq ($(OS),) - $(error "Cannot detect operating system") - endif - export tbb_os=$(OS) - endif - - ifeq ($(OS), Windows_NT) - export tbb_os=windows - endif - ifeq ($(OS), Linux) - export tbb_os=linux - endif - ifeq ($(OS), Darwin) - export tbb_os=macos - endif - -endif # !tbb_os - -ifeq ($(tbb_cpf),1) - export CPF_SUFFIX ?=_preview -endif - -ifeq (,$(wildcard $(tbb_root)/build/$(tbb_os).inc)) - $(error "$(tbb_os)" is not supported. Add build/$(tbb_os).inc file with os-specific settings ) -endif - -# detect arch and runtime versions, provide common host-specific definitions -include $(tbb_root)/build/$(tbb_os).inc - -ifeq ($(arch),) - $(error Architecture not detected) -endif -ifeq ($(runtime),) - $(error Runtime version not detected) -endif - -# process target-dependent compilation and testing configurations -ifdef target - # optionally process target-dependent options for compilation and testing - ifneq (,$(wildcard $(tbb_root)/build/$(target).inc)) - include $(tbb_root)/build/$(target).inc - endif - - # optionally process host-dependent environment for target-dependent compilation and testing - ifneq (,$(wildcard $(tbb_root)/build/$(target).$(tbb_os).inc)) - include $(tbb_root)/build/$(target).$(tbb_os).inc - endif - - # insure at least one target-dependent configuration file was found for compilation and testing - ifeq (,$(wildcard $(tbb_root)/build/$(target).inc)$(wildcard $(tbb_root)/build/$(target).$(tbb_os).inc)) - $(error "$(target)" is not supported. Add build/$(target).inc or build/$(target).$(tbb_os).inc file) - endif -endif #target - -# Support for running debug tests to release library and vice versa -flip_cfg=$(subst _flipcfg,_release,$(subst _release,_debug,$(subst _debug,_flipcfg,$(1)))) -cross_cfg = $(if $(crosstest),$(call flip_cfg,$(1)),$(1)) -# Setting default configuration to release -cfg?=release - -ifdef BUILDING_PHASE - # No lambdas or other C++0x extensions by default for compilers that implement them as experimental features - # TODO: it should become unnecessary when all relevant tests are "moved" to the TEST_TBB_CPP11 set - lambdas ?= 0 - cpp0x ?= 0 - - ifndef target - target:=$(tbb_os) - endif - # process host/target compiler-dependent build configuration - ifeq (,$(wildcard $(tbb_root)/build/$(target).$(compiler).inc)) - $(error "$(compiler)" is not supported on $(target). Add build/$(target).$(compiler).inc file with compiler-specific settings. 
) - endif - include $(tbb_root)/build/$(target).$(compiler).inc -endif - -ifneq ($(BUILDING_PHASE),1) - # definitions for top-level Makefiles - origin_build_dir:=$(origin tbb_build_dir) - tbb_build_dir?=$(tbb_root)$(SLASH)build - export tbb_build_prefix?=$(tbb_os)_$(arch)_$(compiler)_$(runtime)$(CPF_SUFFIX) - work_dir=$(tbb_build_dir)$(SLASH)$(tbb_build_prefix) -endif # BUILDING_PHASE != 1 - -ifdef offload - extra_inc=$(offload).offload.inc -endif -ifdef extra_inc - ifneq (,$(wildcard $(tbb_root)/build/$(extra_inc))) - include $(tbb_root)/build/$(extra_inc) - else - $(error specified build file: "build/$(extra_inc)" is not found. ) - endif -endif - -ifndef BUILDING_PHASE - work_dir:=$(work_dir) - # assign new value for tbb_root if path is not absolute (the filter keeps only /* paths) - ifeq ($(filter /% $(SLASH)%, $(subst :, ,$(tbb_root)) ),) - full_tbb_root:=$(CURDIR)/$(tbb_root) - ifeq ($(origin_build_dir),undefined) - #relative path are needed here as a workaround to support whitespaces in path - override tbb_root:=../.. - else - override tbb_root:=$(full_tbb_root) - endif - export tbb_root - endif - endif # !BUILDING_PHASE - -.DELETE_ON_ERROR: # Make will delete target if error occurred when building it. - -# MAKEOVERRIDES contains the command line variable definitions. Reseting it to -# empty allows propogating all exported overridden variables to nested makes. -# NOTEs: -# 1. All variable set in command line are propagated to nested makes. -# 2. All variables declared with the "export" keyword are propagated to -# nested makes. -# 3. "override" allows changing variables set in command line. But it doesn't -# propagate new values to nested makes. For propagation, the "export" keyword -# should be used. -# 4. gmake v3.80 doesn't support exporting of target-specific variables using -# the "export" keyword -MAKEOVERRIDES = diff --git a/src/tbb/build/common_rules.inc b/src/tbb/build/common_rules.inc deleted file mode 100644 index 571173831..000000000 --- a/src/tbb/build/common_rules.inc +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - - -ifeq ($(tbb_strict),1) - ifeq ($(WARNING_AS_ERROR_KEY),) - $(error WARNING_AS_ERROR_KEY is empty) - endif - # Do not remove line below! 
- WARNING_KEY += $(WARNING_AS_ERROR_KEY) -endif - -ifneq (,$(findstring s,$(MAKEFLAGS))) - override largs+=-q -endif -ifneq (,$(repeat)) - override largs+=-r $(repeat) -endif -ifneq (,$(largs)$(run_prefix)) - override run_cmd:=$(run_cmd) $(TEST_LAUNCHER) - TEST_LAUNCHER= - ifeq (,$(strip $(run_cmd))) - $(warning Test launcher is not defined for the platform, ignoring launcher arguments) - endif -endif - -ifndef TEST_EXT - TEST_EXT = exe -endif - -INCLUDES += $(INCLUDE_KEY)$(tbb_root)/src $(INCLUDE_KEY)$(tbb_root)/src/rml/include $(INCLUDE_KEY)$(tbb_root)/include - -CPLUS_FLAGS += $(WARNING_KEY) $(CXXFLAGS) -ifeq ($(tbb_cpf),1) -CPLUS_FLAGS += $(DEFINE_KEY)__TBB_CPF_BUILD=1 -endif -LINK_FLAGS += $(LDFLAGS) -LIB_LINK_FLAGS += $(LDFLAGS) -CPLUS_FLAGS_NOSTRICT = $(subst -strict-ansi,-ansi,$(CPLUS_FLAGS)) - -LIB_LINK_CMD ?= $(CPLUS) $(PIC_KEY) -ifeq ($(origin LIB_OUTPUT_KEY), undefined) - LIB_OUTPUT_KEY = $(OUTPUT_KEY) -endif -ifeq ($(origin LIB_LINK_LIBS), undefined) - LIB_LINK_LIBS = $(LIBDL) $(LIBS) -endif - -CONLY ?= $(CPLUS) - -# The most generic rules -#$(1) - is the target pattern -define make-cxx-obj -$1: %.cpp - $$(CPLUS) $$(OUTPUTOBJ_KEY)$$@ $$(COMPILE_ONLY) $$(CPLUS_FLAGS) $$(CXX_ONLY_FLAGS) $$(CXX_WARN_SUPPRESS) $$(INCLUDES) $$< -endef - -TEST_AFFIXES_OBJS=$(addsuffix .$(OBJ),$(addprefix %_,$(TEST_SUFFIXES)) $(addsuffix _%,$(TEST_PREFIXES))) - -# Make will not process the same recipe for each test pattern (since the dependency on the same %.cpp) -# thus the separated recipes should be provided -$(foreach t,%.$(OBJ) $(TEST_AFFIXES_OBJS),$(eval $(call make-cxx-obj,$(t)))) - -.PRECIOUS: %.$(OBJ) %.$(TEST_EXT) %.res $(TEST_AFFIXES_OBJS) - -# Rules for generating a test DLL -%_dll.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $(DEFINE_KEY)_USRDLL $(INCLUDES) $< - -#$(1) - is the binary name -#$(2) - is the input obj files and libraries -define make-test-binary - $(CPLUS) $(OUTPUT_KEY)$(strip $1) $(CPLUS_FLAGS) $(2) $(LIBS) $(LINK_FLAGS) -endef - -# LINK_FILES the list of options to link test specific files (libraries and object files) -LINK_FILES+=$(TEST_LIBS) -# Rule for generating executable test -%.$(TEST_EXT): %.$(OBJ) $(TEST_LIBS) $(TEST_PREREQUISITE) $(if $(use_proxy),$(PROXY.LIB)) - $(call make-test-binary,$@,$< $(LINK_FILES)) - -# Rules for generating a test DLL -%_dll.$(DLL): LINK_FLAGS += $(PIC_KEY) $(DYLIB_KEY) -%_dll.$(DLL): TEST_LIBS := $(subst %_dll.$(DLL),,$(TEST_LIBS)) -%_dll.$(DLL): %_dll.$(OBJ) - $(call make-test-binary,$@,$< $(subst %_dll.$(DLL),,$(LINK_FILES))) -.PRECIOUS: %_dll.$(OBJ) %_dll.$(DLL) - -%.$(OBJ): %.c - $(CONLY) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(C_FLAGS) $(INCLUDES) $< - -%.$(OBJ): %.asm - $(ASM) $(ASM_FLAGS) $< - -%.$(OBJ): %.s - cpp <$< | grep -v '^#' >$*.tmp - $(ASM) $(ASM_FLAGS) -o $@ $*.tmp - -ifdef rtools -# Line 70 doesn't work with rtool's version of make. 
The symptom being that the asm rule kicks off instead, and these rules are cl only -%.$(OBJ): %.cpp - $(CPLUS) $(OUTPUTOBJ_KEY)$@ $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $< -endif - -# Rule for generating .E file if needed for visual inspection -# Note that due to mapping for ICL all uses of PREPROC_ONLY should be immediately followed by a file name -%.E: %.cpp - $(CPLUS) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $(PREPROC_ONLY) $< >$@ - -# TODO Rule for generating .asm file if needed for visual inspection -%.asm: %.cpp - $(CPLUS) /c /FAs /Fa $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $< - -# TODO Rule for generating .s file if needed for visual inspection -%.s: %.cpp - $(CPLUS) -S $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $< - -# Customizations -$(KNOWN_WARNINGS): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(subst $(WARNING_KEY),,$(CPLUS_FLAGS)) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $< - -tbb_misc.$(OBJ): version_string.ver -tbb_misc.$(OBJ): INCLUDES+=$(INCLUDE_KEY). - -tbb_misc.E: tbb_misc.cpp version_string.ver - $(CPLUS) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDE_KEY). $(INCLUDES) $(PREPROC_ONLY) $< >$@ - -%.res: %.rc version_string.ver $(TBB.MANIFEST) - rc /Fo$@ $(INCLUDES) $(filter /D%,$(CPLUS_FLAGS)) $< - -# TODO: add $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS) (in a separate line?) and remove useless $(INCLUDES) -VERSION_FLAGS=$(CPLUS) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) - -ifneq (,$(TBB.MANIFEST)) -$(TBB.MANIFEST): - cmd /C "echo #include ^ >tbbmanifest.c" - cmd /C "echo int main(){return 0;} >>tbbmanifest.c" - cl /nologo $(C_FLAGS) tbbmanifest.c - -version_string.ver: $(TBB.MANIFEST) - $(MAKE_VERSIONS) - cmd /C "echo #define TBB_MANIFEST 1 >> version_string.ver" -# TODO: fix parallel build by writting to a temporary file and rename it when complete -else -# TODO: make version strings directly representative for all the libraries -version_string.ver: - $(MAKE_VERSIONS) -endif - -test_% debug_%: test_%.$(TEST_EXT) $(TEST_PREREQUISITE) - $(run_cmd) ./$< $(args) -ifneq (,$(codecov)) - profmerge - codecov $(if $(findstring -,$(codecov)),$(codecov),) -demang -comp $(tbb_root)/build/codecov.txt -endif - diff --git a/src/tbb/build/detect.js b/src/tbb/build/detect.js deleted file mode 100644 index 9422b0627..000000000 --- a/src/tbb/build/detect.js +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. 
Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - -function doWork() { - var WshShell = WScript.CreateObject("WScript.Shell"); - - var fso = new ActiveXObject("Scripting.FileSystemObject"); - - var tmpExec; - tmpExec = WshShell.Run("cmd /c echo int main(){return 0;} >detect.c", 0, true); - - // The next block deals with GCC (MinGW) - if ( WScript.Arguments.Count() > 1 && WScript.Arguments(1) == "gcc" ) { - if ( WScript.Arguments(0) == "/arch" ) { - // Get predefined macros - tmpExec = WshShell.Run("cmd /C gcc -dM -E detect.c > detect.map", 0, true); - var file = fso.OpenTextFile("detect.map", 1, 0); - var defs = file.readAll(); - file.Close(); - - //detect target architecture - var intel64=/x86_64|amd64/mgi; - var ia32=/i386/mgi; - if ( defs.match(intel64) ) { - WScript.Echo( "intel64" ); - } else if ( defs.match(ia32) ) { - WScript.Echo( "ia32" ); - } else { - WScript.Echo( "unknown" ); - } - } else { - tmpExec = WshShell.Exec("gcc -dumpversion"); - var gcc_version = tmpExec.StdOut.ReadLine(); - if ( WScript.Arguments(0) == "/runtime" ) { - WScript.Echo( "mingw"+gcc_version ); - } - else if ( WScript.Arguments(0) == "/minversion" ) { - // Comparing strings, not numbers; will not work for two-digit versions - if ( gcc_version >= WScript.Arguments(2) ) { - WScript.Echo( "ok" ); - } else { - WScript.Echo( "fail" ); - } - } - } - return; - } - - //Compile binary - tmpExec = WshShell.Exec("cl /MD detect.c /link /MAP"); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - //compiler banner that includes version and target arch was printed to stderr - var clVersion = tmpExec.StdErr.ReadAll(); - - if ( WScript.Arguments(0) == "/arch" ) { - //detect target architecture - var intel64=/AMD64|EM64T|x64/mgi; - var ia32=/[80|\s]x86/mgi; - var arm=/ARM/mgi; - if ( clVersion.match(intel64) ) { - WScript.Echo( "intel64" ); - } else if ( clVersion.match(ia32) ) { - WScript.Echo( "ia32" ); - } else if ( clVersion.match(arm) ) { - WScript.Echo( "armv7" ); - } else { - WScript.Echo( "unknown" ); - } - return; - } - - if ( WScript.Arguments(0) == "/runtime" ) { - //read map-file - var map = fso.OpenTextFile("detect.map", 1, 0); - var mapContext = map.readAll(); - map.Close(); - - //detect runtime - var vc71=/MSVCR71\.DLL/mgi; - var vc80=/MSVCR80\.DLL/mgi; - var vc90=/MSVCR90\.DLL/mgi; - var vc100=/MSVCR100\.DLL/mgi; - var vc110=/MSVCR110\.DLL/mgi; - var vc120=/MSVCR120\.DLL/mgi; - var psdk=/MSVCRT\.DLL/mgi; - if ( mapContext.match(vc71) ) { - WScript.Echo( "vc7.1" ); - } else if ( mapContext.match(vc80) ) { - WScript.Echo( "vc8" ); - } else if ( mapContext.match(vc90) ) { - WScript.Echo( "vc9" ); - } else if ( mapContext.match(vc100) ) { - WScript.Echo( "vc10" ); - } else if ( mapContext.match(vc110) ) { - WScript.Echo( "vc11" ); - } else if ( mapContext.match(vc120) ) { - WScript.Echo( "vc12" ); - } else { - WScript.Echo( "unknown" ); - } - return; - } - - if ( WScript.Arguments(0) == "/minversion" ) { - var compiler_version; - if ( WScript.Arguments(1) == "cl" ) { - compiler_version = clVersion.match(/Compiler Version ([0-9.]+)\s/mi)[1]; - // compiler_version is in xx.xx.xxxxx.xx format, i.e. a string. 
- // It will compare well with major.minor versions where major has two digits, - // which is sufficient as the versions of interest start from 13 (for VC7). - } else if ( WScript.Arguments(1) == "icl" ) { - // Get predefined ICL macros - tmpExec = WshShell.Run("cmd /C icl /QdM /E detect.c > detect.map", 0, true); - var file = fso.OpenTextFile("detect.map", 1, 0); - var defs = file.readAll(); - file.Close(); - // In #define __INTEL_COMPILER XXYY, XX is the major ICL version, YY is minor - compiler_version = defs.match(/__INTEL_COMPILER[ \t]*([0-9]+).*$/mi)[1]/100; - // compiler version is a number; it compares well with another major.minor - // version number, where major has one, two, and perhaps more digits (9.1, 11, etc). - } - if ( compiler_version >= WScript.Arguments(2) ) { - WScript.Echo( "ok" ); - } else { - WScript.Echo( "fail" ); - } - return; - } -} - -function doClean() { - var fso = new ActiveXObject("Scripting.FileSystemObject"); - // delete intermediate files - if ( fso.FileExists("detect.c") ) - fso.DeleteFile ("detect.c", false); - if ( fso.FileExists("detect.obj") ) - fso.DeleteFile ("detect.obj", false); - if ( fso.FileExists("detect.map") ) - fso.DeleteFile ("detect.map", false); - if ( fso.FileExists("detect.exe") ) - fso.DeleteFile ("detect.exe", false); - if ( fso.FileExists("detect.exe.manifest") ) - fso.DeleteFile ("detect.exe.manifest", false); -} - -if ( WScript.Arguments.Count() > 0 ) { - - try { - doWork(); - } catch( error ) { - WScript.Echo( "unknown" ); - } - doClean(); - -} else { - WScript.Echo( "Supported options:\n" - + "\t/arch [compiler]\n" - + "\t/runtime [compiler]\n" - + "\t/minversion compiler version" ); -} - diff --git a/src/tbb/build/generate_tbbvars.bat b/src/tbb/build/generate_tbbvars.bat deleted file mode 100644 index 0029bf37c..000000000 --- a/src/tbb/build/generate_tbbvars.bat +++ /dev/null @@ -1,69 +0,0 @@ -@echo off -REM -REM Copyright 2005-2014 Intel Corporation. All Rights Reserved. -REM -REM This file is part of Threading Building Blocks. Threading Building Blocks is free software; -REM you can redistribute it and/or modify it under the terms of the GNU General Public License -REM version 2 as published by the Free Software Foundation. Threading Building Blocks is -REM distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -REM implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -REM See the GNU General Public License for more details. You should have received a copy of -REM the GNU General Public License along with Threading Building Blocks; if not, write to the -REM Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -REM -REM As a special exception, you may use this file as part of a free software library without -REM restriction. Specifically, if other files instantiate templates or use macros or inline -REM functions from this file, or you compile this file and link it with other files to produce -REM an executable, this file does not by itself cause the resulting executable to be covered -REM by the GNU General Public License. This exception does not however invalidate any other -REM reasons why the executable file might be covered by the GNU General Public License. 
-REM -setlocal -for %%D in ("%tbb_root%") do set actual_root=%%~fD -set fslash_root=%actual_root:\=/% -set bin_dir=%CD% -set fslash_bin_dir=%bin_dir:\=/% -set _INCLUDE=INCLUDE& set _LIB=LIB -if not x%UNIXMODE%==x set _INCLUDE=CPATH& set _LIB=LIBRARY_PATH - -if exist tbbvars.bat goto skipbat -echo Generating local tbbvars.bat -echo @echo off>tbbvars.bat -echo SET TBBROOT=%actual_root%>>tbbvars.bat -echo SET TBB_ARCH_PLATFORM=%arch%\%runtime%>>tbbvars.bat -echo SET TBB_TARGET_ARCH=%arch%>>tbbvars.bat -echo SET %_INCLUDE%=%%TBBROOT%%\include;%%%_INCLUDE%%%>>tbbvars.bat -echo SET %_LIB%=%bin_dir%;%%%_LIB%%%>>tbbvars.bat -echo SET PATH=%bin_dir%;%%PATH%%>>tbbvars.bat -if not x%UNIXMODE%==x echo SET LD_LIBRARY_PATH=%bin_dir%;%%LD_LIBRARY_PATH%%>>tbbvars.bat -:skipbat - -if exist tbbvars.sh goto skipsh -echo Generating local tbbvars.sh -echo #!/bin/sh>tbbvars.sh -echo export TBBROOT="%fslash_root%">>tbbvars.sh -echo export TBB_ARCH_PLATFORM="%arch%\%runtime%">>tbbvars.sh -echo export TBB_TARGET_ARCH="%arch%">>tbbvars.sh -echo export %_INCLUDE%="${TBBROOT}/include;$%_INCLUDE%">>tbbvars.sh -echo export %_LIB%="%fslash_bin_dir%;$%_LIB%">>tbbvars.sh -echo export PATH="%fslash_bin_dir%;$PATH">>tbbvars.sh -if not x%UNIXMODE%==x echo export LD_LIBRARY_PATH="%fslash_bin_dir%;$LD_LIBRARY_PATH">>tbbvars.sh -:skipsh - -if exist tbbvars.csh goto skipcsh -echo Generating local tbbvars.csh -echo #!/bin/csh>tbbvars.csh -echo setenv TBBROOT "%actual_root%">>tbbvars.csh -echo setenv TBB_ARCH_PLATFORM "%arch%\%runtime%">>tbbvars.csh -echo setenv TBB_TARGET_ARCH "%arch%">>tbbvars.csh -echo setenv %_INCLUDE% "${TBBROOT}\include;$%_INCLUDE%">>tbbvars.csh -echo setenv %_LIB% "%bin_dir%;$%_LIB%">>tbbvars.csh -echo setenv PATH "%bin_dir%;$PATH">>tbbvars.csh -if not x%UNIXMODE%==x echo setenv LD_LIBRARY_PATH "%bin_dir%;$LD_LIBRARY_PATH">>tbbvars.csh -:skipcsh - -REM Workaround for copying Android* specific libgnustl_shared.so library to work folder -if not x%LIB_GNU_STL_ANDROID%==x copy /Y "%LIB_GNU_STL_ANDROID%"\libgnustl_shared.so - -endlocal -exit diff --git a/src/tbb/build/generate_tbbvars.sh b/src/tbb/build/generate_tbbvars.sh deleted file mode 100644 index b5fde40b9..000000000 --- a/src/tbb/build/generate_tbbvars.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. 
This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Script used to generate tbbvars.[c]sh scripts -bin_dir="$PWD" # -cd "$tbb_root" # keep this comments here -tbb_root="$PWD" # to make it unsensible -cd "$bin_dir" # to EOL encoding -[ -f ./tbbvars.sh ] || cat >./tbbvars.sh <./tbbvars.csh < - - -

Overview

-This directory contains the internal Makefile infrastructure for Intel® Threading Building Blocks (Intel® TBB). - -

-See below for how to build Intel TBB and how to port it -to a new platform, operating system, or architecture. -

- -

Files

-The files here are not intended to be used directly. See below for usage. -
-
Makefile.tbb -
Main Makefile to build the Intel TBB library. - Invoked via 'make tbb' from top-level Makefile. -
Makefile.tbbmalloc -
Main Makefile to build the Intel TBB scalable memory allocator library as well as its tests. - Invoked via 'make tbbmalloc' from top-level Makefile. -
Makefile.test -
Main Makefile to build and run the tests for the Intel TBB library. - Invoked via 'make test' from top-level Makefile. -
common.inc -
Main common included Makefile that includes OS-specific and compiler-specific Makefiles. -
<os>.inc -
OS-specific Makefile for a particular <os>. -
<os>.<compiler>.inc -
Compiler-specific Makefile for a particular <os> / <compiler> combination. -
*.sh -
Infrastructure utilities for Linux* OS, OS X*, and UNIX*-related operating systems. -
*.js, *.bat -
Infrastructure utilities for Windows* OS. -
- -

To Build

-

-To port Intel TBB to a new platform, operating system or architecture, see the porting directions below. -

- -

Software prerequisites:

-
    -
  1. C++ compiler for the platform, operating system and architecture of interest. - Either the native compiler for your system, or, optionally, the appropriate Intel® C++ compiler, may be used. -
  2. GNU make utility. On Windows OS, if a UNIX* emulator is used to run GNU make, - it should be able to run Windows OS utilities and commands. On Linux OS, OS X, etc., - shell commands issued by GNU make should execute in a Bourne or BASH compatible shell. -
- -

-Intel TBB libraries can be built by performing the following steps. -On systems that support only one ABI (e.g., 32-bit), these steps build the libraries for that ABI. -On systems that support both 64-bit and 32-bit libraries, these steps build the 64-bit libraries -(Linux OS, OS X, and related systems) or whichever ABI is selected in the development environment (Windows OS). -

-
    -
  1. Change to the top-level directory of the installed software. -
  2. If using the Intel® C++ compiler, make sure the appropriate compiler is available in your PATH - (e.g., by sourcing the appropriate iccvars script for the compiler to be used). -
  3. Invoke GNU make using no arguments, for example, 'gmake'. -
- -
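As a concrete sketch of the default build described in the steps above (the install path and the iccvars location are hypothetical; adjust them to your system):

```sh
cd /opt/tbb                                # 1. top-level directory of the installed software
source /opt/intel/bin/iccvars.sh intel64   # 2. only if using the Intel C++ compiler
gmake                                      # 3. builds release and debug libraries for the default ABI
```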

-To build Intel TBB libraries for other than the default ABI (e.g., to build 32-bit libraries on Linux OS, OS X, -or related systems that support both 64-bit and 32-bit libraries), perform the following steps: -

-
    -
  1. Change to the top-level directory of the installed software. -
  2. If using the Intel® C++ compiler, make sure the appropriate compiler is available in your PATH - (e.g., by sourcing the appropriate iccvars script for the compiler to be used). -
  3. Invoke GNU make as follows, 'gmake arch=ia32'. -
- -
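Likewise, a minimal sketch of a 32-bit build on a host whose default ABI is 64-bit (path hypothetical):

```sh
cd /opt/tbb
gmake arch=ia32   # select the 32-bit ABI instead of the default
```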

The default make target will build the release and debug versions of the Intel TBB library.

-

Other targets are available in the top-level Makefile. You might find the following targets useful: -

    -
  • 'make test' will build and run Intel TBB unit-tests; -
  • 'make examples' will build and run Intel TBB examples; -
  • 'make all' will do all of the above. -
-See also the list of other targets below. -

- -

-By default, the libraries will be built in sub-directories within the build/ directory. -The sub-directories are named according to the operating system, architecture, compiler and software environment used -(the sub-directory names also distinguish release vs. debug libraries). On Linux OS, the software environment comprises -the GCC, libc and kernel version used. On OS X, the software environment comprises the GCC and OS version used. -On Windows OS, the software environment comprises the Microsoft* Visual Studio* version used. -See below for how to change the default build directory. -
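For example, on a Linux host the release and debug sub-directories might be named as follows (the exact tokens depend on the GCC, libc, and kernel versions detected; these names are illustrative only):

```
build/linux_intel64_gcc_cc4.8_libc2.19_kernel3.13_release/
build/linux_intel64_gcc_cc4.8_libc2.19_kernel3.13_debug/
```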

- -

-To perform different build and/or test operations, use the following steps. -

-
    -
  1. Change to the top-level directory of the installed software. -
  2. If using the Intel® C++ compiler, make sure the appropriate compiler is available in your PATH - (e.g., by sourcing the appropriate iccvars script for the compiler to be used). -
  3. Invoke GNU make by using one or more of the following commands. -
    -
    make -
    Default build. Equivalent to 'make tbb tbbmalloc'. -
    make all -
    Equivalent to 'make tbb tbbmalloc test examples'. -
    cd src;make release -
    Build and test release libraries only. -
    cd src;make debug -
    Build and test debug libraries only. -
    make tbb -
    Make Intel TBB release and debug libraries. -
    make tbbmalloc -
    Make Intel TBB scalable memory allocator libraries. -
    make test -
    Compile and run the unit tests. -
    make examples -
    Build the libraries and run all examples, as if doing 'make debug clean release' from - the general example Makefile. -
    make compiler={icl, icc, gcc, clang} [(above options or targets)] -
    Build and run as above, but use the specified compiler instead of the default native compiler: -
      • {icl, icc} - the Intel® compilers (icl on Windows OS, icc on Linux OS or OS X). -
      • gcc - g++ (e.g., MinGW on Windows OS). -
      • clang - the Clang compiler. -
    make compiler=clang stdlib=libc++ [(above options or targets)] -
    Build and run as above, but use libc++ as the C++ standard library with clang. -
    make target_ui=win8ui [target_ui_mode=production] [(above options or targets)] -
    Build and run as above, but use an API that is compliant with Windows Store* applications. - target_ui_mode=production is used to produce binaries that are compliant with the Windows Store* application container. - In the latter case the binaries won't work with the Intel TBB unit tests, but only with Windows Store* applications. -
    ndk-build target=android [(above options or targets)] -
    Build and run as above, but build libraries for Android* OS using the Android NDK, which must be installed. The Makefiles were tested with NDK revision 8. -
    make arch={ia32, intel64, ia64} [(above options or targets)] -
    Build and run as above, but build libraries for the selected ABI. - This can be useful for cross-compilation; ensure the proper environment is set before running this command. -
    make tbb_root={(Intel TBB directory)} [(above options or targets)] -
    Build and run as above; for use when invoking 'make' from a directory other than - the top-level directory. -
    make tbb_build_dir={(build directory)} [(above options or targets)] -
    Build and run as above, but place the built libraries in the specified directory, rather than in the default - sub-directory within the build/ directory. This command may fail if the sources are installed in a directory whose path contains spaces. -
    make tbb_build_prefix={(build sub-directory)} [(above options or targets)] -
    Build and run as above, but place the built libraries in the specified sub-directory within the build/ directory, - rather than using the default sub-directory name. -
    make tbb_cpf=1 [(above options or targets)] -
    Build and run as above, but build and use libraries with the Community Preview Features enabled, - rather than the default libraries. -
    make [(above options)] clean -
    Remove any executables or intermediate files produced by the above commands. - Includes build directories, object files, libraries and test executables. -
    -
- -
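For instance, several of the options above can be combined in a single invocation (the build directory below is hypothetical):

```sh
# Build the release and debug libraries with clang against libc++,
# placing all outputs under /tmp/tbb-build instead of build/.
make compiler=clang stdlib=libc++ tbb_build_dir=/tmp/tbb-build tbb
# Remove everything the previous command produced.
make compiler=clang stdlib=libc++ tbb_build_dir=/tmp/tbb-build clean
```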

To Port

-

-This section provides information on how to port Intel TBB to a new platform, operating system or architecture. -A subset or a superset of these steps may be required for porting to a given platform. -

- -

To port the Intel TBB source code:

-
    -
  1. If porting to a new architecture, create a file that describes the architecture-specific details for that architecture. -
      -
    • Create a <os>_<architecture>.h file in the include/tbb/machine directory - that describes these details. -
        -
      • The <os>_<architecture>.h is named after the operating system and architecture as recognized by - include/tbb/tbb_machine.h and the Makefile infrastructure. -
      • This file defines the implementations of synchronization operations, and also the - scheduler yield function, for the operating system and architecture. -
      • Several examples of <os>_<architecture>.h files can be found in the - include/tbb/machine directory. -
          -
        • A minimal implementation defines the 4-byte and 8-byte compare-and-swap operations, - and the scheduler yield function. See include/tbb/machine/mac_ppc.h - for an example of a minimal implementation. -
        • More complex implementation examples can also be found in the - include/tbb/machine directory - that implement all the individual variants of synchronization operations that Intel TBB uses. - Such implementations are more verbose but may achieve better performance on a given architecture. -
        • In a given implementation, any synchronization operation that is not defined is implemented, by default, - in terms of 4-byte or 8-byte compare-and-swap. More operations can thus be added incrementally to increase - the performance of an implementation. -
        • In most cases, synchronization operations are implemented as inline assembly code; examples also exist, - (e.g., for Intel® Itanium® processors) that use out-of-line assembly code in *.s or *.asm files - (see the assembly code sub-directories in the src/tbb directory). -
        -
      -
    • Modify include/tbb/tbb_machine.h, if needed, to invoke the appropriate - <os>_<architecture>.h file in the include/tbb/machine directory. -
    -
  2. If the current implementation does not support your platform, add an implementation of DetectNumberOfWorkers() in - src/tbb/tbb_misc.h that returns the number of cores found on the system. - This is used to determine the default number of threads for the Intel TBB task scheduler. -
  3. Either properly define FillDynamicLinks for use in - src/tbb/cache_aligned_allocator.cpp, - or hardcode the allocator to be used. -
  4. Additional types might be required in the union defined in - include/tbb/aligned_space.h - to ensure proper alignment on your platform. -
  5. Changes may be required in include/tbb/tick_count.h - for systems that do not provide gettimeofday. -
- -
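To make step 1 concrete, below is a hedged, minimal sketch of such a machine header, assuming a GCC-compatible compiler (__sync builtins) and POSIX sched_yield(); the file name is hypothetical, and the exact macro names expected by include/tbb/tbb_machine.h should be checked against the existing headers:

```cpp
// Hypothetical minimal machine header: include/tbb/machine/myos_myarch.h
// Assumes a GCC-compatible compiler and a POSIX scheduler-yield call.
#include <sched.h>    // sched_yield()
#include <stdint.h>   // int32_t, int64_t

// 4-byte compare-and-swap: atomically stores 'value' into *ptr if *ptr
// equals 'comparand'; returns the value observed at *ptr beforehand.
static inline int32_t __TBB_machine_cmpswp4(volatile void* ptr,
                                            int32_t value, int32_t comparand) {
    return __sync_val_compare_and_swap(
        reinterpret_cast<volatile int32_t*>(ptr), comparand, value);
}

// 8-byte compare-and-swap, same contract as above.
static inline int64_t __TBB_machine_cmpswp8(volatile void* ptr,
                                            int64_t value, int64_t comparand) {
    return __sync_val_compare_and_swap(
        reinterpret_cast<volatile int64_t*>(ptr), comparand, value);
}

#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
#define __TBB_Yield()                sched_yield()
```

Any synchronization operation not defined in such a header would be synthesized from these compare-and-swap primitives, as noted above.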

To port the Makefile infrastructure:

-Modify the appropriate files in the Makefile infrastructure to add a new platform, operating system or architecture as needed. -See the Makefile infrastructure files for examples. -
    -
  1. The top-level Makefile includes common.inc to determine the operating system. -
      -
    • To add a new operating system, add the appropriate test to common.inc, - and create the needed <os>.inc and <os>.<compiler>.inc files (see below). -
    -
  2. The <os>.inc file makes OS-specific settings for a particular operating system. -
      -
    • For example, linux.inc makes settings specific to Linux operating systems. -
    • This file performs OS-dependent tests to determine the specific platform and/or architecture, - and sets other platform-dependent values. -
    • Add a new <os>.inc file for each new operating system added. -
    -
  3. The <os>.<compiler>.inc file makes compiler-specific settings for a particular - <os> / <compiler> combination. -
      -
    • For example, linux.gcc.inc makes specific settings for using GCC on Linux OS, - and linux.icc.inc makes specific settings for using the Intel® C++ compiler on Linux OS. -
    • This file sets particular compiler, assembler and linker options required when using a particular - <os> / <compiler> combination. -
    • Add a new <os>.<compiler>.inc file for each new <os> / <compiler> combination added. -
    -
- -
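As an illustrative sketch of steps 2 and 3 (every name below is a placeholder, not part of the existing infrastructure), a new build/myos.inc would at minimum detect the arch and runtime values that common.inc requires:

```make
# Hypothetical skeleton for a new build/myos.inc.
ifndef arch
  export arch:=$(shell uname -m)            # detect the target architecture
endif
ifndef runtime
  gcc_version:=$(shell gcc -dumpversion)
  # Tag the build with the toolchain and OS version, in the spirit of linux.inc.
  export runtime:=cc$(gcc_version)_$(shell uname -r)
endif
```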
-Up to parent directory -

-Copyright © 2005-2014 Intel Corporation. All Rights Reserved. -

-Intel and Itanium are registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - diff --git a/src/tbb/build/linux.clang.inc b/src/tbb/build/linux.clang.inc deleted file mode 100644 index b7155af83..000000000 --- a/src/tbb/build/linux.clang.inc +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -Wno-dangling-else -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -LIBDL = -ldl - -CPLUS = clang++ -CONLY = clang -LIB_LINK_FLAGS = $(DYLIB_KEY) -Wl,-soname=$(BUILDING_LIBRARY) -LIBS += -lpthread -lrt -LINK_FLAGS = -Wl,-rpath-link=. -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(ITT_NOTIFY) -g -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG $(ITT_NOTIFY) -g -O0 -DUSE_PTHREAD -endif - -ifeq (libc++,$(stdlib)) - CPLUS_FLAGS += -stdlib=libc++ - LIB_LINK_FLAGS += -stdlib=libc++ -endif - -CPP11_FLAGS = -std=c++11 -D_TBB_CPP0X - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ifeq (intel64,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -m32 -march=pentium4 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (bg,$(arch)) - CPLUS = bgclang++ - CONLY = bgclang -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. 
-#------------------------------------------------------------------------------ -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += --64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += --32 -endif -ifeq ($(cfg),debug) - ASM_FLAGS += -g -endif - -ASSEMBLY_SOURCE=$(arch)-gas -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/linux.gcc.inc b/src/tbb/build/linux.gcc.inc deleted file mode 100644 index a4a30d25e..000000000 --- a/src/tbb/build/linux.gcc.inc +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor $(if $(findstring cc4., $(runtime)),-Wextra) - -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -LIBDL = -ldl - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = $(DYLIB_KEY) -Wl,-soname=$(BUILDING_LIBRARY) -LIBS += -lpthread -lrt -LINK_FLAGS = -Wl,-rpath-link=. 
-rdynamic -C_FLAGS = $(CPLUS_FLAGS) -# gcc 4.4 and higher support -std=c++0x -ifneq (,$(shell gcc -dumpversion | egrep "^(4\.[4-9]|[5-9])")) - CPP11_FLAGS = -std=c++0x -D_TBB_CPP0X -endif - -# gcc 4.2 and higher support OpenMP -ifneq (,$(shell gcc -dumpversion | egrep "^(4\.[2-9]|[5-9])")) - OPENMP_FLAG = -fopenmp -endif - -# gcc 4.8 and later support RTM intrinsics, but require command line switch to enable them -ifneq (,$(shell gcc -dumpversion | egrep "^4\.[8-9]")) - RTM_KEY = -mrtm -endif - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(ITT_NOTIFY) -g -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG $(ITT_NOTIFY) -g -O0 -DUSE_PTHREAD -endif - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must on IA-64 architecture, even for regular (not shared) executables - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifeq (intel64,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -m64 $(RTM_KEY) - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -m32 -march=pentium4 $(RTM_KEY) - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (bg,$(arch)) - CPLUS = mpicxx - CONLY = mpicc -endif - -# for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify) -ifeq (sparc,$(arch)) - CPLUS_FLAGS += -mcpu=v9 -m64 - LIB_LINK_FLAGS += -mcpu=v9 -m64 -endif - -# automatically generate "IT" instructions when compiling for Thumb ISA -ifeq (armv7,$(arch)) - CPLUS_FLAGS += -Wa,-mimplicit-it=thumb -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += --64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += --32 -endif -ifeq ($(cfg),debug) - ASM_FLAGS += -g -endif - -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM_FLAGS += -xexplicit - TBB_ASM.OBJ += atomic_support.o lock_byte.o log2.o pause.o ia64_misc.o - MALLOC_ASM.OBJ += atomic_support.o lock_byte.o pause.o log2.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/linux.icc.inc b/src/tbb/build/linux.icc.inc deleted file mode 100644 index 06624ae8e..000000000 --- a/src/tbb/build/linux.icc.inc +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. 
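The linux.gcc.inc file deleted just above probes the compiler with `gcc -dumpversion` and regex matches to decide whether -std=c++0x, -fopenmp, and -mrtm may be passed. A sketch of that probe, with the regexes copied verbatim (note they predate two-digit GCC major versions):

    gcc_version=$(gcc -dumpversion)
    # gcc 4.4 and higher understand -std=c++0x
    echo "$gcc_version" | egrep -q "^(4\.[4-9]|[5-9])" && CPP11_FLAGS="-std=c++0x -D_TBB_CPP0X"
    # gcc 4.8-4.9 need an explicit switch to enable RTM intrinsics
    echo "$gcc_version" | egrep -q "^4\.[8-9]" && RTM_KEY="-mrtm"
    echo "CPP11_FLAGS=${CPP11_FLAGS:-<none>} RTM_KEY=${RTM_KEY:-<none>}"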
Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -w1 -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -NOINTRINSIC_KEY = -fno-builtin -LIBDL = -ldl - -CPLUS = icpc -CONLY = icc - -ITT_NOTIFY = -DDO_ITT_NOTIFY -ifeq (release,$(cfg)) -CPLUS_FLAGS = $(ITT_NOTIFY) -O2 -g -DUSE_PTHREAD -else -CPLUS_FLAGS = $(ITT_NOTIFY) -O0 -g -DUSE_PTHREAD -DTBB_USE_DEBUG -endif - -OPENMP_FLAG = -openmp -LIB_LINK_FLAGS = -shared -i-static -Wl,-soname=$(BUILDING_LIBRARY) -LIBS += -lpthread -lrt -LINK_FLAGS = -rdynamic -C_FLAGS = $(CPLUS_FLAGS) -# ICC 11.0 and higher support -std=c++0x -ifneq (,$(shell icc -dumpversion | egrep "^1[1-9]\.")) - CPP11_FLAGS = -std=c++0x -D_TBB_CPP0X -endif - -# ICC 12.0 and higher provide Intel(R) Cilk(TM) Plus -ifneq (,$(shell icc -dumpversion | egrep "^1[2-9]\.")) - CILK_AVAILABLE = yes -endif - -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 -falign-stack=maintain-16-byte - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ia64,$(arch)) - ITT_NOTIFY = -# Position-independent code (PIC) is a must on IA-64 architecture, even for regular (not shared) executables -# strict-ansi does not work with on RHEL 4 AS - CPLUS_FLAGS += $(PIC_KEY) $(if $(findstring cc3.,$(runtime)),-ansi,-strict-ansi) -else - CPLUS_FLAGS += -strict-ansi -endif - -ifneq (,$(codecov)) -# no tool support for code coverage, need profile data generation - ITT_NOTIFY = -prof-genx -endif - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += --64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += --32 -endif -ifeq ($(cfg),debug) - ASM_FLAGS += -g -endif - -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM_FLAGS += -xexplicit - TBB_ASM.OBJ += atomic_support.o lock_byte.o log2.o pause.o ia64_misc.o - MALLOC_ASM.OBJ += atomic_support.o lock_byte.o pause.o log2.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. 
-#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - diff --git a/src/tbb/build/linux.inc b/src/tbb/build/linux.inc deleted file mode 100644 index 90fee5f99..000000000 --- a/src/tbb/build/linux.inc +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
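The linux.icc.inc file removed above applies the same pattern to the Intel compiler: ICC 11 and higher unlock -std=c++0x, and ICC 12 and higher ship Cilk Plus. A sketch, assuming icc is on PATH:

    icc_version=$(icc -dumpversion 2>/dev/null || echo 0)
    echo "$icc_version" | egrep -q '^1[1-9]\.' && CPP11_FLAGS='-std=c++0x -D_TBB_CPP0X'
    echo "$icc_version" | egrep -q '^1[2-9]\.' && CILK_AVAILABLE=yes
    echo "CPP11_FLAGS=${CPP11_FLAGS:-<none>} CILK_AVAILABLE=${CILK_AVAILABLE:-no}"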
- -####### Detections and Commands ############################################### - -ifeq (icc,$(compiler)) - export COMPILER_VERSION := ICC: $(shell icc -V &1 | grep 'Version') - ifneq (,$(findstring IA-32, $(COMPILER_VERSION))) - export arch:=ia32 - endif - ifneq (,$(findstring Intel(R) 64, $(COMPILER_VERSION))) - export arch:=intel64 - endif - ifneq (,$(findstring IA-64, $(COMPILER_VERSION))) - export arch:=ia64 - endif - ifeq (,$(arch)) - $(warning "Unknown Intel compiler") - endif -endif - -ifndef arch - uname_m:=$(shell uname -m) - ifeq ($(uname_m),i686) - export arch:=ia32 - endif - ifeq ($(uname_m),ia64) - export arch:=ia64 - endif - ifeq ($(uname_m),x86_64) - export arch:=intel64 - endif - ifeq ($(uname_m),sparc64) - export arch:=sparc - endif - ifeq ($(uname_m),armv7l) - export arch:=armv7 - endif - ifndef arch - export arch:=$(uname_m) - endif -endif - -ifndef runtime - gcc_version = $(shell gcc -dumpversion) - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export os_glibc_version_full:=$(shell getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //') - os_glibc_version:=$(shell echo "$(os_glibc_version_full)" | sed -e '2,$$d' -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_libc$(os_glibc_version)_kernel$(os_kernel_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=sh -c -CWD=$(shell pwd) -CP=cp -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -MALLOC_DLL?=$(DLL) -LIBEXT = so -SONAME_SUFFIX =$(shell grep TBB_COMPATIBLE_INTERFACE_VERSION $(tbb_root)/include/tbb/tbb_stddef.h | egrep -o [0-9.]+) - -ifeq ($(arch),ia64) - def_prefix = lin64ipf -endif -ifeq ($(arch),sparc) - def_prefix = lin64 -endif -ifeq ($(arch),armv7) - def_prefix = lin32 -endif -ifeq (,$(def_prefix)) - ifeq (64,$(findstring 64,$(arch))) - def_prefix = lin64 - else - def_prefix = lin32 - endif -endif -TBB.LST = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.lst -TBB.DEF = $(TBB.LST:.lst=.def) - -TBB.DLL = $(TBB_NO_VERSION.DLL).$(SONAME_SUFFIX) -TBB.LIB = $(TBB.DLL) -TBB_NO_VERSION.DLL=libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -LINK_TBB.LIB = $(TBB_NO_VERSION.DLL) - -MALLOC_NO_VERSION.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(MALLOC_DLL) -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = $(MALLOC_NO_VERSION.DLL).$(SONAME_SUFFIX) -MALLOC.LIB = $(MALLOC_NO_VERSION.DLL) -LINK_MALLOC.LIB = $(MALLOC_NO_VERSION.DLL) - -MALLOCPROXY_NO_VERSION.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY.DEF = $(MALLOC_ROOT)/$(def_prefix)-proxy-export.def -MALLOCPROXY.DLL = $(MALLOCPROXY_NO_VERSION.DLL).$(SONAME_SUFFIX) -MALLOCPROXY.LIB = $(MALLOCPROXY_NO_VERSION.DLL) -LINK_MALLOCPROXY.LIB = $(MALLOCPROXY.LIB) - -RML_NO_VERSION.DLL = libirml$(DEBUG_SUFFIX).$(DLL) -RML.DEF = $(RML_SERVER_ROOT)/lin-rml-export.def -RML.DLL = $(RML_NO_VERSION.DLL).1 -RML.LIB = $(RML_NO_VERSION.DLL) - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) diff --git a/src/tbb/build/linux.xl.inc b/src/tbb/build/linux.xl.inc deleted file mode 100644 index a9db55ce5..000000000 --- a/src/tbb/build/linux.xl.inc +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 
2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -####### Detections and Commands ############################################### - -COMPILE_ONLY = -c -PREPROC_ONLY = -E -qsourcetype=c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -qpic -WARNING_AS_ERROR_KEY = -qhalt=w -WARNING_KEY = -TEST_WARNING_KEY = - -WARNING_SUPPRESS = -DYLIB_KEY = -qmkshrobj -EXPORT_KEY = -Wl,--version-script, -LIBDL = -ldl - -CPLUS = xlc++_r -CONLY = xlc_r -LIB_LINK_FLAGS = $(DYLIB_KEY) -Wl,-soname=$(BUILDING_LIBRARY) -LIBS = -lpthread -lrt -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(ITT_NOTIFY) -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG $(ITT_NOTIFY) -g -O0 -DUSE_PTHREAD -endif - -# Adding directly to CPLUS_FLAGS instead of to WARNING_SUPPRESS because otherwise it would not be used in several tests (why not?). -# Suppress warnings like: -# - "1500-029: (W) WARNING: subprogram [...] could not be inlined into [...]." -# - "1501-201: (W) Maximum number of common component diagnostics, 10 has been exceeded." -# see http://www-01.ibm.com/support/docview.wss?uid=swg1LI72843 -# it seems that the internal compiler error that would ensue has now been avoided, making the condition harmless -# - "1540-0198 (W) The omitted keyword "private" is assumed for base class "no_copy"." -# - "1540-0822 (W) The name "__FUNCTION__" must not be defined as a macro." -CPLUS_FLAGS += -qsuppress=1500-029:1501-201:1540-0198:1540-0822 - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (intel64,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -q64 - LIB_LINK_FLAGS += -q64 -endif - -# TODO: equivalent for -march=pentium4 in CPLUS_FLAGS -ifeq (ia32,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -q32 -qarch=pentium4 - LIB_LINK_FLAGS += -q32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -q64 - LIB_LINK_FLAGS += -q64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -q32 - LIB_LINK_FLAGS += -q32 -endif - -ifeq (bg,$(arch)) - CPLUS = bgxlC_r - CONLY = bgxlc_r -endif - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. 
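The linux.inc file deleted above autodetects the target architecture from `uname -m` and composes a runtime tag in the form cc<gcc>_libc<glibc>_kernel<kernel>. The same logic as a standalone sketch (the uname-to-arch mapping is copied from the makefile):

    case "$(uname -m)" in
      i686)    arch=ia32 ;;
      ia64)    arch=ia64 ;;
      x86_64)  arch=intel64 ;;
      sparc64) arch=sparc ;;
      armv7l)  arch=armv7 ;;
      *)       arch=$(uname -m) ;;
    esac
    glibc=$(getconf GNU_LIBC_VERSION | sed -e 's/^glibc //' -e 's/-.*$//')
    kernel=$(uname -r | sed -e 's/-.*$//')
    echo "arch=$arch runtime=cc$(gcc -dumpversion)_libc${glibc}_kernel${kernel}"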
-#------------------------------------------------------------------------------ - -# Suppress innumerable warnings like "1540-1088 (W) The exception specification is being ignored." -# Suppress warnings like "1540-1090 (I) The destructor of "lock" might not be called." -# TODO: aren't these warnings an indication that -qnoeh might not be appropriate? -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -qnortti -qnoeh -qsuppress=1540-1088:1540-1090 - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/macos.clang.inc b/src/tbb/build/macos.clang.inc deleted file mode 100644 index da29a5099..000000000 --- a/src/tbb/build/macos.clang.inc +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -CPLUS = clang++ -CONLY = clang -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -WARNING_SUPPRESS = -Wno-non-virtual-dtor -Wno-dangling-else -DYLIB_KEY = -dynamiclib -EXPORT_KEY = -Wl,-exported_symbols_list, -LIBDL = -ldl - -LIBS = -lpthread -LINK_FLAGS = -LIB_LINK_FLAGS = -dynamiclib -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -g -O2 -else - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif - -CPLUS_FLAGS += -DUSE_PTHREAD - -# For Clang, we add the option to support RTM intrinsics *iff* xtest is found in -ifneq (,$(shell grep xtest `echo "\#include" | clang -E -M - 2>&1 | grep immintrin.h` 2>/dev/null)) - RTM_KEY = -mrtm -endif - -ifeq (libc++,$(stdlib)) - CPLUS_FLAGS += -stdlib=libc++ - LIB_LINK_FLAGS += -stdlib=libc++ -endif - -CPP11_FLAGS = -std=c++11 -D_TBB_CPP0X - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 $(RTM_KEY) - LINK_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 $(RTM_KEY) - LINK_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -arch ppc64 - LINK_FLAGS += -arch ppc64 - LIB_LINK_FLAGS += -arch ppc64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -arch ppc - LINK_FLAGS += -arch ppc - LIB_LINK_FLAGS += -arch ppc -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ - -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += -arch x86_64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += -arch i386 -endif -ifeq ($(cfg), debug) - ASM_FLAGS += -g -endif - -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - diff --git a/src/tbb/build/macos.gcc.inc b/src/tbb/build/macos.gcc.inc deleted file mode 100644 index 5f921f4de..000000000 --- a/src/tbb/build/macos.gcc.inc +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. 
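In macos.clang.inc above, the RTM probe reads garbled because the diff lost an angle-bracketed include: the original pipes `#include <immintrin.h>` through `clang -E -M` to locate the header, then greps it for the xtest intrinsic. A sketch of that probe with the include restored (an assumption inferred from the surrounding text):

    header=$(echo '#include <immintrin.h>' | clang -E -M - 2>&1 | grep -o '[^ ]*immintrin\.h' | head -1)
    if [ -n "$header" ] && grep -q xtest "$header" 2>/dev/null; then
      RTM_KEY=-mrtm
    fi
    echo "RTM_KEY=${RTM_KEY:-<none>}"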
You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -CPLUS = g++ -CONLY = gcc -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -WARNING_SUPPRESS = -Wno-non-virtual-dtor -DYLIB_KEY = -dynamiclib -EXPORT_KEY = -Wl,-exported_symbols_list, -LIBDL = -ldl - -LIBS = -lpthread -LINK_FLAGS = -LIB_LINK_FLAGS = -dynamiclib -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -g -O2 -else - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif - -CPLUS_FLAGS += -DUSE_PTHREAD - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LINK_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LINK_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -arch ppc64 - LINK_FLAGS += -arch ppc64 - LIB_LINK_FLAGS += -arch ppc64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -arch ppc - LINK_FLAGS += -arch ppc - LIB_LINK_FLAGS += -arch ppc -endif - -ifeq (armv7,$(arch)) - CPLUS_FLAGS += -arch armv7 - LINK_FLAGS += -arch armv7 - LIB_LINK_FLAGS += -arch armv7 -endif - -ifdef SDKROOT - CPLUS_FLAGS += -isysroot $(SDKROOT) - LIB_LINK_FLAGS += -L$(SDKROOT)/usr/lib/system -L$(SDKROOT)/usr/lib/ -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ - -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += -arch x86_64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += -arch i386 -endif -ifeq ($(cfg), debug) - ASM_FLAGS += -g -endif - -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - diff --git a/src/tbb/build/macos.icc.inc b/src/tbb/build/macos.icc.inc deleted file mode 100644 index 3452de3cc..000000000 --- a/src/tbb/build/macos.icc.inc +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
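macos.gcc.inc, deleted above, also honors an externally supplied SDKROOT, adding -isysroot and the SDK library paths when it is set. A sketch; the xcrun fallback is an addition for illustration, not part of the original makefile:

    SDKROOT=${SDKROOT:-$(xcrun --show-sdk-path 2>/dev/null)}
    if [ -n "$SDKROOT" ]; then
      CPLUS_FLAGS="$CPLUS_FLAGS -isysroot $SDKROOT"
      LIB_LINK_FLAGS="$LIB_LINK_FLAGS -L$SDKROOT/usr/lib/system -L$SDKROOT/usr/lib/"
    fi
    echo "compile: g++ $CPLUS_FLAGS / link: $LIB_LINK_FLAGS"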
Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -CPLUS = icpc -CONLY = icc -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -w1 -DYLIB_KEY = -dynamiclib -EXPORT_KEY = -Wl,-exported_symbols_list, -LIBDL = -ldl - -OPENMP_FLAG = -openmp -LIBS = -lpthread -LINK_FLAGS = -LIB_LINK_FLAGS = -dynamiclib -i-static -C_FLAGS = $(CPLUS_FLAGS) -# ICC 11.0 and higher support -std=c++0x -ifneq (,$(shell icc -dumpversion | egrep "^1[1-9]\.")) - CPP11_FLAGS = -std=c++0x -D_TBB_CPP0X -endif - -# ICC 12.0 and higher provide Intel(R) Cilk(TM) Plus -ifneq (,$(shell icc -dumpversion | egrep "^1[2-9]\.")) - CILK_AVAILABLE = yes -endif - -ifeq ($(cfg), release) - CPLUS_FLAGS = -g -O2 -fno-omit-frame-pointer -else - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif - -CPLUS_FLAGS += -DUSE_PTHREAD - -ifneq (,$(codecov)) - CPLUS_FLAGS += -prof-genx -endif - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -# ICC 14.0 and higher support clang environment -ifneq (,$(shell icc -dumpversion | egrep "^1[4-9]\.")) - ifeq (libc++,$(stdlib)) - CPLUS_FLAGS += -use-clang-env -stdlib=libc++ - LIB_LINK_FLAGS += -use-clang-env -stdlib=libc++ - endif -endif -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ - -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += -arch x86_64 -endif -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LINK_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 - ASM_FLAGS += -arch i386 -endif -ifeq ($(cfg), debug) - ASM_FLAGS += -g -endif - -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. 
-#------------------------------------------------------------------------------ diff --git a/src/tbb/build/macos.inc b/src/tbb/build/macos.inc deleted file mode 100644 index f9f2f278c..000000000 --- a/src/tbb/build/macos.inc +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -####### Detections and Commands ############################################### - -ifeq (icc,$(compiler)) - export COMPILER_VERSION := ICC: $(shell icc -V &1 | grep 'Version') - ifneq (,$(findstring IA-32, $(COMPILER_VERSION))) - export arch:=ia32 - endif - ifneq (,$(findstring Intel(R) 64, $(COMPILER_VERSION))) - export arch:=intel64 - endif - ifeq (,$(arch)) - $(warning "Unknown Intel compiler") - endif -endif - -ifndef arch - ifeq ($(shell /usr/sbin/sysctl -n hw.machine),Power Macintosh) - ifeq ($(shell /usr/sbin/sysctl -n hw.optional.64bitops),1) - export arch:=ppc64 - else - export arch:=ppc32 - endif - else - ifeq ($(shell /usr/sbin/sysctl -n hw.optional.x86_64 2>/dev/null),1) - export arch:=intel64 - else - export arch:=ia32 - endif - endif -endif - -ifeq (ios,$(target)) - ifneq (armv7, $(arch)) - $(error $(arch) not supported for target 'ios') - endif - export SDKROOT?=$(shell xcodebuild -sdk -version | grep -o -E '/.*SDKs/iPhoneOS.*' 2>/dev/null) - ifeq (,$(SDKROOT)) - $(error iOS SDK not found) - endif - # next, use a single compiler include file for both iOS* and OS X* builds. - override target:=macos - export target -endif - -ifndef runtime - gcc_version = $(shell gcc -dumpversion) - os_version:=$(shell /usr/bin/sw_vers -productVersion) - export runtime:=cc$(gcc_version)_os$(os_version) -endif - -native_compiler := clang -export compiler ?= clang -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_macos.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh DY - -ifdef DYLD_LIBRARY_PATH - export DYLD_LIBRARY_PATH := .:$(DYLD_LIBRARY_PATH) -else - export DYLD_LIBRARY_PATH := . 
-endif - -####### Build settings ######################################################## - -OBJ=o -DLL=dylib -MALLOC_DLL?=$(DLL) -LIBEXT=dylib - -def_prefix = $(if $(findstring 64,$(arch)),mac64,mac32) - -TBB.LST = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.lst -TBB.DEF = $(TBB.LST:.lst=.def) -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(MALLOC_DLL) -MALLOC.LIB = $(MALLOC.DLL) -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(MALLOC_DLL) -MALLOCPROXY.LIB = $(MALLOCPROXY.DLL) -LINK_MALLOCPROXY.LIB = $(MALLOCPROXY.LIB) - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) diff --git a/src/tbb/build/mic.icc.inc b/src/tbb/build/mic.icc.inc deleted file mode 100644 index 2e1afb954..000000000 --- a/src/tbb/build/mic.icc.inc +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -w1 -DYLIB_KEY = -shared -Wl,-soname=$@ -EXPORT_KEY = -Wl,--version-script, -NOINTRINSIC_KEY = -fno-builtin -LIBDL = -ldl - -CPLUS = icpc -CONLY = icc - -ifeq (release,$(cfg)) - CPLUS_FLAGS = -O2 -g -DUSE_PTHREAD -else - CPLUS_FLAGS = -O0 -g -DUSE_PTHREAD -DTBB_USE_DEBUG -endif - -ifneq (,$(codecov)) - CPLUS_FLAGS += -prof-genx -endif - -OPENMP_FLAG = -openmp -LIB_LINK_FLAGS = -shared -i-static -Wl,-soname=$(BUILDING_LIBRARY) -LIBS += -lpthread -lrt -C_FLAGS = $(CPLUS_FLAGS) -CPP11_FLAGS = -std=c++0x -D_TBB_CPP0X -CILK_AVAILABLE = yes - -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -CPLUS_FLAGS += -DHARNESS_INCOMPLETE_SOURCES=1 -D__TBB_MIC_NATIVE -DTBB_USE_EXCEPTIONS=0 -opt-streaming-stores never -CPLUS += -mmic -CONLY += -mmic -LINK_FLAGS = -Wl,-rpath-link=. -rdynamic -# Tell the icc to not link against libcilk*. Otherwise icc tries to link and emits a warning message. -LIB_LINK_FLAGS += -no-intel-extensions -# Do not depend on libirc etc dynamic libs. 
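The macos.inc file deleted above picks the architecture by querying sysctl: PowerPC hosts are split on hw.optional.64bitops, Intel hosts on hw.optional.x86_64. As a sketch:

    if [ "$(/usr/sbin/sysctl -n hw.machine 2>/dev/null)" = "Power Macintosh" ]; then
      if [ "$(/usr/sbin/sysctl -n hw.optional.64bitops)" = 1 ]; then arch=ppc64; else arch=ppc32; fi
    elif [ "$(/usr/sbin/sysctl -n hw.optional.x86_64 2>/dev/null)" = 1 ]; then
      arch=intel64
    else
      arch=ia32
    fi
    echo "arch=$arch"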
It makes 'native' execution easier for the users. -LIB_LINK_FLAGS += -static-intel - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - - - diff --git a/src/tbb/build/mic.linux.inc b/src/tbb/build/mic.linux.inc deleted file mode 100644 index 6c61e7a8b..000000000 --- a/src/tbb/build/mic.linux.inc +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -ifeq ($(tbb_os),mic) - $(error MIC supports only cross-compilation. Specify "target=mic" instead.) -endif - -ifneq ($(BUILDING_PHASE),1) - # The same build prefix should be used in offload.inc - ifeq (,$(tbb_build_prefix)) - tbb_build_prefix=mic_icc$(CPF_SUFFIX) - endif - # For examples - mic_tbb_build_prefix=$(tbb_build_prefix) -endif - -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh MIC_ MIC_ -def_prefix=lin64 - -TEST_LAUNCHER= -run_cmd ?= bash $(tbb_root)/build/mic.linux.launcher.sh $(largs) - -# detects whether examples are being built. -ifeq ($(BUILDING_PHASE),0) - export UI = con - export x64 = 64 -endif # examples diff --git a/src/tbb/build/mic.linux.launcher.sh b/src/tbb/build/mic.linux.launcher.sh deleted file mode 100644 index 33fdba3b2..000000000 --- a/src/tbb/build/mic.linux.launcher.sh +++ /dev/null @@ -1,157 +0,0 @@ -#!/bin/bash -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Usage: -# mic.linux.launcher.sh [-v] [-q] [-s] [-r ] [-u] [-l ] -# where: -v enables verbose output -# where: -q enables quiet mode -# where: -s runs the test in stress mode (until non-zero exit code or ctrl-c pressed) -# where: -r specifies number of times to repeat execution -# where: -u limits stack size -# where: -l specifies the library name to be assigned to LD_PRELOAD -# -# Libs and executable necessary for testing should be present in the current directory before running. -# Note: Do not remove the redirections to '/dev/null' in the script, otherwise the nightly test system will fail. -# -trap 'echo Error at line $LINENO while executing "$BASH_COMMAND"' ERR # -trap 'echo -e "\n*** Interrupted ***" && exit 1' SIGINT SIGQUIT # -# Process the optional arguments if present -while getopts "qvsr:ul:" flag # -do case $flag in # - s ) # Stress testing mode - echo Doing stress testing. Press Ctrl-C to terminate - run_env='stressed() { while $*; do :; done; };' # - run_prefix="stressed $run_prefix" ;; # - r ) # Repeats test n times - run_env="repeated() { for i in \$(seq 1 $OPTARG); do echo \$i of $OPTARG:; \$*; done; };" # - run_prefix="repeated $run_prefix" ;; # - l ) # Additional library - ldd_list+="$OPTARG " # - run_prefix+=" LD_PRELOAD=$OPTARG" ;; # - u ) # Set stack limit - run_prefix="ulimit -s 10240; $run_prefix" ;; # - q ) # Quiet mode, removes 'done' but prepends any other output by test name - SUPPRESS='>/dev/null' # - verbose=1 ;; # TODO: implement a better quiet mode - v ) # Verbose mode - verbose=1 ;; # -esac done # -shift `expr $OPTIND - 1` # -[ $verbose ] || SUPPRESS='>/dev/null' # -# -# Collect the executable name -fexename="$1" # -exename=`basename $1` # -shift # -# -: ${MICDEV:=mic0} # -RSH="sudo ssh $MICDEV" # -RCP="sudo scp" # -currentdir=$PWD # -# -# Prepare the target directory on the device -targetdir="`$RSH mktemp -d /tmp/tbbtestXXXXXX 2>/dev/null`" # -# Prepare the temporary directory on the host -hostdir="`mktemp -d /tmp/tbbtestXXXXXX 2>/dev/null`" # -# -function copy_files { # - eval "cp $* $hostdir/ $SUPPRESS 2>/dev/null || exit \$?" # - eval "$RCP $hostdir/* $MICDEV:$targetdir/ $SUPPRESS 2>/dev/null || exit \$?" # - eval "rm $hostdir/* $SUPPRESS 2>/dev/null || exit \$?" 
# -} # copy files -# -function clean_all() { # - eval "$RSH rm -fr $targetdir $SUPPRESS" ||: # - eval "rm -fr $hostdir $SUPPRESS" ||: # -} # clean all temporary files -# -function kill_interrupt() { # - echo -e "\n*** Killing remote $exename ***" && $RSH "killall $exename" # - clean_all # -} # kill target process -# -trap 'clean_all' SIGINT SIGQUIT # trap keyboard interrupt (control-c) -# -# Transfer the test executable file and its auxiliary libraries (named as {test}_dll.so) to the target device. -copy_files $fexename `ls ${exename%\.*}*.so 2>/dev/null ||:` # -# -# Collect all dependencies of the test and its auxiliary libraries to transfer them to the target device. -ldd_list+="libtbbmalloc*.so* libirml*.so* `$RSH ldd $targetdir/\* | grep = | cut -d= -f1 2>/dev/null`" # -fnamelist="" # -# -# Find the libraries and add them to the list. -# For example, go through MIC_LD_LIBRARY_PATH and add TBB libraries from the first -# directory that contains tbb files -mic_dir_list=`echo .:$MIC_LD_LIBRARY_PATH | tr : " "` # -for name in $ldd_list; do # adds the first matched name in specified dirs - fnamelist+="`find $mic_dir_list -name $name -a -readable -print -quit 2>/dev/null` "||: # -done # -# -# Remove extra spaces. -fnamelist=`echo $fnamelist` # -# Transfer collected executable and library files to the target device. -[ -n "$fnamelist" ] && copy_files $fnamelist -# -# Transfer input files used by example codes by scanning the executable argument list. -argfiles= # -args= # -for arg in "$@"; do # - if [ -r $arg ]; then # - argfiles+="$arg " # - args+="$(basename $arg) " # - else # - args+="$arg " # - fi # -done # -[ -n "$argfiles" ] && copy_files $argfiles # -# -# Get the list of transferred files -testfiles="`$RSH find $targetdir/ -type f | tr '\n' ' ' 2>/dev/null`" # -# -[ $verbose ] && echo Running $run_prefix ./$exename $args # -# Run the test on the target device -trap 'kill_interrupt' SIGINT SIGQUIT # trap keyboard interrupt (control-c) -trap - ERR # -run_env+="cd $targetdir; export LD_LIBRARY_PATH=.:\$LD_LIBRARY_PATH;" # -$RSH "$run_env $run_prefix ./$exename $args" # -# -# Delete the test files and get the list of output files -outfiles=`$RSH rm $testfiles 2>/dev/null; find $targetdir/ -type f 2>/dev/null` ||: # -if [ -n "$outfiles" ]; then # - for outfile in $outfiles; do # - filename=$(basename $outfile) # - subdir=$(dirname $outfile) # - subdir="${subdir#$targetdir}" # - [ -n $subdir ] subdir=$subdir/ # - # Create directories on host - [ ! -d "$hostdir/$subdir" ] && mkdir -p "$hostdir/$subdir" # - [ ! -d "$currentdir/$subdir" ] && mkdir -p "$currentdir/$subdir" # - # Copy the output file to the temporary directory on host - eval "$RCP -r '$MICDEV:${outfile#}' '$hostdir/$subdir$filename' $SUPPRESS 2>&1 || exit \$?" # - # Copy the output file from the temporary directory to the current directory - eval "cp '$hostdir/$subdir$filename' '$currentdir/$subdir$filename' $SUPPRESS 2>&1 || exit \$?" # - done # -fi # -# -# Clean up temporary directories -clean_all -# -# Return the exit code of the test. -exit $? # diff --git a/src/tbb/build/mic.offload.inc b/src/tbb/build/mic.offload.inc deleted file mode 100644 index e52c8100c..000000000 --- a/src/tbb/build/mic.offload.inc +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
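The MIC launcher deleted above wraps the remote test command in small shell functions selected by -s (stress) and -r (repeat); the usage text lost its <n> and <library> placeholders for -r and -l to the same bracket stripping seen elsewhere in this hunk. The wrappers, extracted and made standalone (the demo command is illustrative):

    stressed() { while "$@"; do :; done; }   # rerun until non-zero exit or Ctrl-C
    repeated() {
      n=$1; shift; i=0
      while [ "$i" -lt "$n" ]; do i=$((i + 1)); echo "$i of $n:"; "$@"; done
    }
    repeated 3 echo hello                    # demo invocation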
Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -ifneq (mic,$(offload)) - $(error File mic.offload.inc should not be included directly. Use offload=mic instead.) -endif -ifneq (icc,$(compiler)) - $(error Only Intel(R) Compiler is supported for MIC offload compilation) -endif - -# The same build prefix should be used in mic.linux.inc -mic_tbb_build_prefix=mic_icc$(CPF_SUFFIX) -MIC_OFFLOAD_NATIVE_PATH?=../$(mic_tbb_build_prefix)_$(cfg) - -ifdef BUILDING_PHASE - ifeq ($(BUILDING_PHASE),1) - # Tests - export MIC_OFFLOAD_NATIVE_PATH - LINK_TBB_NATIVE.LIB=$(MIC_OFFLOAD_NATIVE_PATH)/$(TBB.LIB) - LINK_TBB.LIB=-offload-option,mic,ld,"$(LINK_TBB_NATIVE.LIB)" $(TBB.LIB) - LINK_MALLOC_NATIVE.LIB=$(MIC_OFFLOAD_NATIVE_PATH)/$(MALLOC.DLL) - LINK_MALLOC.LIB=-offload-option,mic,ld,"$(LINK_MALLOC_NATIVE.LIB)" $(MALLOC.LIB) - LINK_MALLOCPROXY_NATIVE.LIB=$(MIC_OFFLOAD_NATIVE_PATH)/$(MALLOCPROXY.DLL) - LINK_MALLOCPROXY.LIB=-offload-option,mic,ld,"$(LINK_MALLOCPROXY_NATIVE.LIB)" $(MALLOCPROXY.LIB) - - # Export extensions for test_launcher - export DLL - export TEST_EXT=offload.exe - OBJ=offload.o - - # Do not use -Werror because it is too strict for the early offload compiler. - # Need to set anything because WARNING_AS_ERROR_KEY should not be empty. - # Treat #2426 as a warning. Print errors only. 
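The mic.offload.inc file being deleted here links each test twice: the host link line carries an -offload-option,mic,ld,... clause pointing at the MIC-native TBB build so the offloaded side resolves against the right binaries. A sketch of how that clause is composed (paths and library name are illustrative):

    NATIVE_PATH=../mic_icc_release           # assumed native build directory
    LINK_TBB_LIB="-offload-option,mic,ld,\"$NATIVE_PATH/libtbb.so.2\" libtbb.so.2"
    echo "icpc -o test.offload.exe test.o $LINK_TBB_LIB"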
- tbb_strict=0 - WARNING_AS_ERROR_KEY = Warning as error - WARNING_KEY = -diag-warning 2426 -w0 - - CXX_MIC_STUFF = -offload-attribute-target=mic -D__TBB_MIC_OFFLOAD=1 -offload-option,mic,compiler,"-D__TBB_MIC_OFFLOAD=1 $(CXX_MIC_NATIVE_STUFF)" - CXX_MIC_NATIVE_STUFF = -DHARNESS_INCOMPLETE_SOURCES=1 -D__TBB_MIC_NATIVE -DTBB_USE_EXCEPTIONS=0 - CPLUS_FLAGS += $(CXX_MIC_STUFF) - - .PHONY: FORCE - FORCE: - - $(MIC_OFFLOAD_NATIVE_PATH)/%_dll.$(DLL): FORCE - @$(MAKE) --no-print-directory -C "$(MIC_OFFLOAD_NATIVE_PATH)" target=mic offload= -f$(tbb_root)/build/Makefile.$(TESTFILE) $*_dll.$(DLL) - %_dll.$(DLL): $(MIC_OFFLOAD_NATIVE_PATH)/%_dll.$(DLL) FORCE - @$(MAKE) --no-print-directory offload= -f$(tbb_root)/build/Makefile.$(TESTFILE) $*_dll.$(DLL) - - .PRECIOUS: $(MIC_OFFLOAD_NATIVE_PATH)/%_dll.$(DLL) - - %.$(TEST_EXT): LINK_FILES+=-offload-option,mic,ld,"$(addprefix $(MIC_OFFLOAD_NATIVE_PATH)/,$(TEST_LIBS))" - - TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) - - ifdef MIC_LD_LIBRARY_PATH - export MIC_LD_LIBRARY_PATH := $(MIC_OFFLOAD_NATIVE_PATH):$(MIC_LD_LIBRARY_PATH) - else - export MIC_LD_LIBRARY_PATH := $(MIC_OFFLOAD_NATIVE_PATH) - endif - else - # Examples - export UI = con - export x64 = 64 - endif -else - # Libraries - LIB_TARGETS = tbb tbbmalloc tbbproxy rml - addsuffixes = $(foreach suff,$(1),$(addsuffix $(suff),$(2))) - - .PHONY: $(call addsuffixes, _debug _release _debug_mic _release_mic,$(LIB_TARGETS)) - - # The dependence on *_debug and *_release targets unifies the offload support - # for top-level Makefile and src/Makefile - $(LIB_TARGETS): %: %_release %_debug - - # "override offload=" suppresses the "offload" variable value for nested makes - $(LIB_TARGETS) $(call addsuffixes, _debug _release,$(LIB_TARGETS)): override offload= - # Apply overriding for library builds - export offload - export tbb_build_prefix - # Add the dependency on target libraries - $(call addsuffixes, _debug _release,$(LIB_TARGETS)): %: %_mic - - # tbb_build_prefix should be overriden since we want to restart make in "clear" enviroment - $(call addsuffixes, _debug_mic _release_mic,$(LIB_TARGETS)): override tbb_build_prefix= - $(call addsuffixes, _debug_mic _release_mic,$(LIB_TARGETS)): %_mic: - @$(MAKE) --no-print-directory -C "$(full_tbb_root)/src" $* target=mic tbb_root=.. - - mic_clean: override tbb_build_prefix= - mic_clean: - @$(MAKE) --no-print-directory -C "$(full_tbb_root)/src" clean offload= target=mic tbb_root=.. - clean: mic_clean -endif diff --git a/src/tbb/build/suncc.map.pause b/src/tbb/build/suncc.map.pause deleted file mode 100644 index a92d08eb1..000000000 --- a/src/tbb/build/suncc.map.pause +++ /dev/null @@ -1 +0,0 @@ -hwcap_1 = OVERRIDE; \ No newline at end of file diff --git a/src/tbb/build/test_launcher.bat b/src/tbb/build/test_launcher.bat deleted file mode 100644 index 315c24866..000000000 --- a/src/tbb/build/test_launcher.bat +++ /dev/null @@ -1,74 +0,0 @@ -@echo off -REM -REM Copyright 2005-2014 Intel Corporation. All Rights Reserved. -REM -REM This file is part of Threading Building Blocks. Threading Building Blocks is free software; -REM you can redistribute it and/or modify it under the terms of the GNU General Public License -REM version 2 as published by the Free Software Foundation. Threading Building Blocks is -REM distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -REM implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -REM See the GNU General Public License for more details. 
You should have received a copy of -REM the GNU General Public License along with Threading Building Blocks; if not, write to the -REM Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -REM -REM As a special exception, you may use this file as part of a free software library without -REM restriction. Specifically, if other files instantiate templates or use macros or inline -REM functions from this file, or you compile this file and link it with other files to produce -REM an executable, this file does not by itself cause the resulting executable to be covered -REM by the GNU General Public License. This exception does not however invalidate any other -REM reasons why the executable file might be covered by the GNU General Public License. -REM - -set cmd_line= -if DEFINED run_prefix set cmd_line=%run_prefix% -:while -if NOT "%1"=="" ( - REM Verbose mode - if "%1"=="-v" ( - set verbose=yes - GOTO continue - ) - REM Silent mode of 'make' requires additional support for associating - REM of test output with the test name. Verbose mode is the simplest way - if "%1"=="-q" ( - set verbose=yes - GOTO continue - ) - REM Run in stress mode - if "%1"=="-s" ( - echo Doing stress testing. Press Ctrl-C to terminate - set stress=yes - GOTO continue - ) - REM Repeat execution specified number of times - if "%1"=="-r" ( - set repeat=%2 - SHIFT - GOTO continue - ) - REM no LD_PRELOAD under Windows - REM but run the test to check "#pragma comment" construction - if "%1"=="-l" ( - REM The command line may specify -l with empty dll name, - REM e.g. "test_launcher.bat -l app.exe". If the dll name is - REM empty then %2 contains the application name and the SHIFT - REM operation is not necessary. - if exist "%3" SHIFT - GOTO continue - ) - REM no need to setup up stack size under Windows - if "%1"=="-u" GOTO continue - set cmd_line=%cmd_line% %1 -:continue - SHIFT - GOTO while -) -set cmd_line=%cmd_line:./=.\% -if DEFINED verbose echo Running %cmd_line% -if DEFINED stress set cmd_line=%cmd_line% ^& IF NOT ERRORLEVEL 1 GOTO stress -:stress -if DEFINED repeat ( - for /L %%i in (1,1,%repeat%) do echo %%i of %repeat%: & %cmd_line% -) else ( - %cmd_line% -) diff --git a/src/tbb/build/test_launcher.sh b/src/tbb/build/test_launcher.sh deleted file mode 100644 index 39c7ee00d..000000000 --- a/src/tbb/build/test_launcher.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. 
Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Usage: -# test_launcher.sh [-v] [-q] [-s] [-r ] [-u] [-l ] -# where: -v enables verbose output -# where: -q enables quiet mode -# where: -s runs the test in stress mode (until non-zero exit code or ctrl-c pressed) -# where: -r specifies number of times to repeat execution -# where: -u limits stack size -# where: -l specifies the library name to be assigned to LD_PRELOAD - -while getopts "qvsr:ul:" flag # -do case $flag in # - s ) # Stress testing mode - run_prefix="stressed $run_prefix" ;; # - r ) # Repeats test n times - repeat=$OPTARG # - run_prefix="repeated $run_prefix" ;; # - l ) if [ `uname` = 'Linux' ] ; then # - LD_PRELOAD=$OPTARG # - elif [ `uname` = 'Darwin' ] ; then # - DYLD_INSERT_LIBRARIES=$OPTARG # - else # - echo 'skip' # - exit # - fi ;; # - u ) # Set stack limit - ulimit -s 10240 ;; # - q ) # Quiet mode, removes 'done' but prepends any other output by test name - OUTPUT='2>&1 | sed -e "s/done//;/^[[:space:]]*$/d;s!^!$1: !"' ;; # - v ) # Verbose mode - verbose=1 ;; # -esac done # -shift `expr $OPTIND - 1` # -if [ $MIC_OFFLOAD_NATIVE_PATH ] ; then # - LIB_NAME=${1/%.$TEST_EXT/_dll.$DLL} # - if [ -f "$MIC_OFFLOAD_NATIVE_PATH/$LIB_NAME" ]; then # - [ -z "$MIC_CARD" ] && MIC_CARD=mic0 # - TMPDIR_HOST=`mktemp -d /tmp/tbbtestXXXXXX` # - TMPDIR_MIC=`sudo ssh $MIC_CARD mktemp -d /tmp/tbbtestXXXXXX` # - sudo ssh $MIC_CARD "chmod +x $TMPDIR_MIC" # - # Test specific library may depend on libtbbmalloc* - cp "$MIC_OFFLOAD_NATIVE_PATH/$LIB_NAME" "$MIC_OFFLOAD_NATIVE_PATH"/libtbbmalloc* "$TMPDIR_HOST" >/dev/null 2>/dev/null # - sudo scp "$TMPDIR_HOST"/* $MIC_CARD:"$TMPDIR_MIC" >/dev/null 2>/dev/null # - - LD_LIBRARY_PATH=$TMPDIR_MIC:$LD_LIBRARY_PATH # - export LD_LIBRARY_PATH # - fi # -fi # -stressed() { echo Doing stress testing. Press Ctrl-C to terminate # - while :; do $*; done;# -} # -repeated() { # - i=0; while [ "$i" -lt $repeat ]; do i=`expr $i + 1`; echo $i of $repeat:; $*; done # -} # -# Run the command line passed via parameters -[ $verbose ] && echo Running $run_prefix $* # -if [ -n "$LD_PRELOAD" ] ; then # - export LD_PRELOAD # -elif [ -n "$DYLD_INSERT_LIBRARIES" ] ; then # - export DYLD_INSERT_LIBRARIES # -fi -exec 4>&1 # extracting exit code of the first command in pipeline needs duplicated stdout -# custom redirection needs eval, otherwise shell cannot parse it -err=`eval '( $run_prefix $* || echo \$? >&3; )' ${OUTPUT} 3>&1 >&4` # -[ -z "$err" ] || echo $1: exited with error $err # -if [ $MIC_OFFLOAD_NATIVE_PATH ] ; then # - sudo ssh $MIC_CARD rm -fr "$TMPDIR_MIC" >/dev/null 2>/dev/null # - rm -fr "$TMPDIR_HOST" >/dev/null 2>/dev/null # -fi # -exit $err # diff --git a/src/tbb/build/version_info_aix.sh b/src/tbb/build/version_info_aix.sh deleted file mode 100644 index 78d951406..000000000 --- a/src/tbb/build/version_info_aix.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS(N) \\" -echo '#N": BUILD_HOST'"\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \' -# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out -echo '#N": BUILD_OS'"\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \' -echo '#N": BUILD_KERNEL'"\t"`uname -srv`'" ENDL \' -echo '#N": BUILD_GCC'"\t\t"`g++ -v </dev/null 2>&1 | grep 'gcc.*version'`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '#N": BUILD_COMPILER'"\t"$COMPILER_VERSION'" ENDL \' -echo '#N": BUILD_LIBC'"\t"`getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //'`'" ENDL \' -echo '#N": BUILD_LD'"\t\t"`ld -v 2>&1 | grep 'version'`'" ENDL \' -echo '#N": BUILD_TARGET'"\t$arch on $runtime"'" ENDL \' -echo '#N": BUILD_COMMAND'"\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/src/tbb/build/version_info_android.sh b/src/tbb/build/version_info_android.sh deleted file mode 100644 index 8ffd9fb8a..000000000 --- a/src/tbb/build/version_info_android.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction.
Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Script used to generate version info string -# remove extraneous cntrl-M characters from ends of lines generated by adb shell command -android_os=`(adb shell getprop ro.build.version.release) | sed -e 's/\\r$//g'` -android_kernel=`(adb shell uname -srv) | sed -e 's/\\r$//g'` -echo "#define __TBB_VERSION_STRINGS(N) \\" -echo '#N": BUILD_HOST'"\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \' -# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out -echo '#N": BUILD_OS'"\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \' -echo '#N": BUILD_TARGET_OS'"\t\tAndroid ${android_os}"'" ENDL \' -echo '#N": BUILD_TARGET_KERNEL'"\t${android_kernel}"'" ENDL \' -echo '#N": BUILD_GCC'"\t\t"`${tbb_tool_prefix}g++ -dumpversion`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '#N": BUILD_COMPILER'"\t"$COMPILER_VERSION'" ENDL \' -[ -z "$ndk_version" ] || echo '#N": BUILD_NDK'"\t\t$ndk_version"'" ENDL \' -echo '#N": BUILD_LD'"\t\t"`${tbb_tool_prefix}ld -v 2>&1 | grep 'ld'`'" ENDL \' -echo '#N": BUILD_TARGET'"\t$arch on $runtime"'" ENDL \' -echo '#N": BUILD_COMMAND'"\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/src/tbb/build/version_info_linux.sh b/src/tbb/build/version_info_linux.sh deleted file mode 100644 index b7c3315b4..000000000 --- a/src/tbb/build/version_info_linux.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. 
- -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS(N) \\" -echo '#N": BUILD_HOST'"\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \' -# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out -echo '#N": BUILD_OS'"\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \' -echo '#N": BUILD_KERNEL'"\t"`uname -srv`'" ENDL \' -echo '#N": BUILD_GCC'"\t\t"`g++ -v </dev/null 2>&1 | grep 'gcc.*version '`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '#N": BUILD_COMPILER'"\t"$COMPILER_VERSION'" ENDL \' -echo '#N": BUILD_LIBC'"\t"`getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //'`'" ENDL \' -echo '#N": BUILD_LD'"\t\t"`ld -v 2>&1 | grep 'version'`'" ENDL \' -echo '#N": BUILD_TARGET'"\t$arch on $runtime"'" ENDL \' -echo '#N": BUILD_COMMAND'"\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/src/tbb/build/version_info_macos.sh b/src/tbb/build/version_info_macos.sh deleted file mode 100644 index d07b3865f..000000000 --- a/src/tbb/build/version_info_macos.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS(N) \\" -echo '#N": BUILD_HOST'"\t\t"`hostname -s`" ("`arch`")"'" ENDL \' -echo '#N": BUILD_OS'"\t\t"`sw_vers -productName`" version "`sw_vers -productVersion`'" ENDL \' -echo '#N": BUILD_KERNEL'"\t"`uname -v`'" ENDL \' -echo '#N": BUILD_GCC'"\t\t"`gcc -v </dev/null 2>&1 | grep 'version '`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '#N": BUILD_COMPILER'"\t"$COMPILER_VERSION'" ENDL \' -echo '#N": BUILD_TARGET'"\t$arch on $runtime"'" ENDL \' -echo '#N": BUILD_COMMAND'"\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/src/tbb/build/version_info_sunos.sh b/src/tbb/build/version_info_sunos.sh deleted file mode 100644 index 7306ee299..000000000 --- a/src/tbb/build/version_info_sunos.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks.
Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS(N) \\" -echo '#N": BUILD_HOST'"\t"`hostname`" ("`arch`")"'" ENDL \' -echo '#N": BUILD_OS'"\t\t"`uname`'" ENDL \' -echo '#N": BUILD_KERNEL'"\t"`uname -srv`'" ENDL \' -echo '#N": BUILD_SUNCC'"\t"`CC -V </dev/null 2>&1 | grep 'C++'`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '#N": BUILD_COMPILER'"\t"$COMPILER_VERSION'" ENDL \' -echo '#N": BUILD_TARGET'"\t$arch on $runtime"'" ENDL \' -echo '#N": BUILD_COMMAND'"\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/src/tbb/build/version_info_windows.js b/src/tbb/build/version_info_windows.js deleted file mode 100644 index c57fe7a78..000000000 --- a/src/tbb/build/version_info_windows.js +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License.
- -var WshShell = WScript.CreateObject("WScript.Shell"); - -var tmpExec; - -WScript.Echo("#define __TBB_VERSION_STRINGS(N) \\"); - -//Getting BUILD_HOST -WScript.echo( "#N \": BUILD_HOST\\t\\t" + - WshShell.ExpandEnvironmentStrings("%COMPUTERNAME%") + - "\" ENDL \\" ); - -//Getting BUILD_OS -tmpExec = WshShell.Exec("cmd /c ver"); -while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); -} -tmpExec.StdOut.ReadLine(); - -WScript.echo( "#N \": BUILD_OS\\t\\t" + - tmpExec.StdOut.ReadLine() + - "\" ENDL \\" ); - -if ( WScript.Arguments(0).toLowerCase().match("gcc") ) { - tmpExec = WshShell.Exec(WScript.Arguments(0) + " --version"); - WScript.echo( "#N \": BUILD_COMPILER\\t" + - tmpExec.StdOut.ReadLine() + - "\" ENDL \\" ); - -} else { // MS / Intel compilers - //Getting BUILD_CL - tmpExec = WshShell.Exec("cmd /c echo #define 0 0>empty.cpp"); - tmpExec = WshShell.Exec("cl -c empty.cpp "); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - var clVersion = tmpExec.StdErr.ReadLine(); - WScript.echo( "#N \": BUILD_CL\\t\\t" + - clVersion + - "\" ENDL \\" ); - - //Getting BUILD_COMPILER - if ( WScript.Arguments(0).toLowerCase().match("icl") ) { - tmpExec = WshShell.Exec("icl -c empty.cpp "); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - WScript.echo( "#N \": BUILD_COMPILER\\t" + - tmpExec.StdErr.ReadLine() + - "\" ENDL \\" ); - } else { - WScript.echo( "#N \": BUILD_COMPILER\\t\\t" + - clVersion + - "\" ENDL \\" ); - } - tmpExec = WshShell.Exec("cmd /c del /F /Q empty.obj empty.cpp"); -} - -//Getting BUILD_TARGET -WScript.echo( "#N \": BUILD_TARGET\\t" + - WScript.Arguments(1) + - "\" ENDL \\" ); - -//Getting BUILD_COMMAND -WScript.echo( "#N \": BUILD_COMMAND\\t" + WScript.Arguments(2) + "\" ENDL" ); - -//Getting __TBB_DATETIME and __TBB_VERSION_YMD -var date = new Date(); -WScript.echo( "#define __TBB_DATETIME \"" + date.toUTCString() + "\"" ); -WScript.echo( "#define __TBB_VERSION_YMD " + date.getUTCFullYear() + ", " + - (date.getUTCMonth() > 8 ? (date.getUTCMonth()+1):("0"+(date.getUTCMonth()+1))) + - (date.getUTCDate() > 9 ? date.getUTCDate():("0"+date.getUTCDate())) ); - - -/* - -Original strings - -#define __TBB_VERSION_STRINGS \ -"TBB: BUILD_HOST\t\tvpolin-mobl1 (ia32)" ENDL \ -"TBB: BUILD_OS\t\tMicrosoft Windows XP [Version 5.1.2600]" ENDL \ -"TBB: BUILD_CL\t\tMicrosoft (R) 32-bit C/C++ Optimizing Compiler Version 13.10.3077 for 80x86" ENDL \ -"TBB: BUILD_COMPILER\tIntel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070109Z Package ID: W_CC_C_9.1.034 " ENDL \ -"TBB: BUILD_TARGET\t" ENDL \ -"TBB: BUILD_COMMAND\t" ENDL \ - -#define __TBB_DATETIME "Mon Jun 4 10:16:07 UTC 2007" -#define __TBB_VERSION_YMD 2007, 0604 - - - -# The script must be run from two directory levels below this level. -x='"TBB: ' -y='" ENDL \' -echo "#define __TBB_VERSION_STRINGS \\" -echo $x "BUILD_HOST\t\t"`hostname`" ("`../../arch.exe`")"$y -echo $x "BUILD_OS\t\t"`../../win_version.bat|grep -i 'Version'`$y -echo >empty.cpp -echo $x "BUILD_CL\t\t"`cl -c empty.cpp 2>&1 | grep -i Version`$y -echo $x "BUILD_COMPILER\t"`icl -c empty.cpp 2>&1 | grep -i Version`$y -echo $x "BUILD_TARGET\t"$TBB_ARCH$y -echo $x "BUILD_COMMAND\t"$*$y -echo "" -# A workaround for MKS 8.6 where `date -u` crashes. 
-date -u > date.tmp -echo "#define __TBB_DATETIME \""`cat date.tmp`"\"" -echo "#define __TBB_VERSION_YMD "`date '+%Y, %m%d'` -rm empty.cpp -rm empty.obj -rm date.tmp -*/ diff --git a/src/tbb/build/vs2010/index.html b/src/tbb/build/vs2010/index.html deleted file mode 100644 index 1bcbc7e8b..000000000 --- a/src/tbb/build/vs2010/index.html +++ /dev/null @@ -1,30 +0,0 @@ - - -

-Overview
-
-This directory contains the visual studio* 2010 solution to build Intel® Threading Building Blocks.
-
-Files
-
-makefile.sln
-Solution file.
-tbb.vcxproj
-Library project file.
-tbbmalloc.vcxproj
-Scalable allocator library project file. Allocator sources are expected to be located in ../../src/tbbmalloc folder.
-tbbmalloc_proxy.vcxproj
-Standard allocator replacement project file.
-
-Up to parent directory
-
-Copyright © 2005-2014 Intel Corporation. All Rights Reserved.
-
-Intel is a registered trademark or trademark of Intel Corporation
-or its subsidiaries in the United States and other countries.
-
-* Other names and brands may be claimed as the property of others. - - diff --git a/src/tbb/build/vs2010/makefile.sln b/src/tbb/build/vs2010/makefile.sln deleted file mode 100644 index 1174098cc..000000000 --- a/src/tbb/build/vs2010/makefile.sln +++ /dev/null @@ -1,78 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 11.00 -# Visual Studio 2010 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{8898CE0B-0BFB-45AE-AA71-83735ED2510D}" - ProjectSection(SolutionItems) = preProject - index.html = index.html - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbb", "tbb.vcxproj", "{F62787DD-1327-448B-9818-030062BCFAA5}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbbmalloc", "tbbmalloc.vcxproj", "{B15F131E-328A-4D42-ADC2-9FF4CA6306D8}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbbmalloc_proxy", "tbbmalloc_proxy.vcxproj", "{02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Debug-MT|Win32 = Debug-MT|Win32 - Debug-MT|x64 = Debug-MT|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - Release-MT|Win32 = Release-MT|Win32 - Release-MT|x64 = Release-MT|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|Win32.ActiveCfg = Debug|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|Win32.Build.0 = Debug|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|x64.ActiveCfg = Debug|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|x64.Build.0 = Debug|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|Win32.Build.0 = Debug-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|x64.ActiveCfg = Debug-MT|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|x64.Build.0 = Debug-MT|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|Win32.ActiveCfg = Release|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|Win32.Build.0 = Release|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|x64.ActiveCfg = Release|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|x64.Build.0 = Release|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|Win32.ActiveCfg = Release-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|Win32.Build.0 = Release-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|x64.ActiveCfg = Release-MT|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|x64.Build.0 = Release-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|Win32.ActiveCfg = Debug|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|Win32.Build.0 = Debug|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|x64.ActiveCfg = Debug|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|x64.Build.0 = Debug|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|Win32.Build.0 = Debug-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|x64.ActiveCfg = Debug-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|x64.Build.0 = Debug-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|Win32.ActiveCfg = Release|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|Win32.Build.0 = Release|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|x64.ActiveCfg = Release|x64 - 
{B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|x64.Build.0 = Release|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|Win32.ActiveCfg = Release-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|Win32.Build.0 = Release-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|x64.ActiveCfg = Release-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|x64.Build.0 = Release-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|Win32.ActiveCfg = Debug|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|Win32.Build.0 = Debug|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|x64.ActiveCfg = Debug|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|x64.Build.0 = Debug|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|Win32.Build.0 = Debug-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|x64.ActiveCfg = Debug-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|x64.Build.0 = Debug-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|Win32.ActiveCfg = Release|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|Win32.Build.0 = Release|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|x64.ActiveCfg = Release|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|x64.Build.0 = Release|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|Win32.ActiveCfg = Release-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|Win32.Build.0 = Release-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|x64.ActiveCfg = Release-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|x64.Build.0 = Release-MT|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/src/tbb/build/vs2010/tbb.vcxproj b/src/tbb/build/vs2010/tbb.vcxproj deleted file mode 100644 index 4ba5a5911..000000000 --- a/src/tbb/build/vs2010/tbb.vcxproj +++ /dev/null @@ -1,712 +0,0 @@ - - - - - Debug-MT - Win32 - - - Debug-MT - x64 - - - Debug - Win32 - - - Debug - x64 - - - Release-MT - Win32 - - - Release-MT - x64 - - - Release - Win32 - - - Release - x64 - - - - {F62787DD-1327-448B-9818-030062BCFAA5} - tbb - Win32Proj - - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - <_ProjectFileVersion>10.0.40219.1 - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - $(ProjectName)_debug - $(ProjectName)_debug 
- $(ProjectName)_debug - $(ProjectName)_debug - - - - /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - Disabled - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - true - EnableFastChecks - MultiThreadedDebugDLL - - - Level4 - ProgramDatabase - - - /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb_debug.dll - true - Windows - false - - - MachineX86 - - - - - X64 - - - /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - Disabled - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - true - EnableFastChecks - MultiThreadedDebugDLL - false - - - Level4 - ProgramDatabase - false - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb_debug.dll - true - Windows - false - - - MachineX64 - false - - - - - /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - MultiThreadedDLL - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb.dll - true - Windows - true - true - false - - - MachineX86 - - - - - X64 - - - /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - MultiThreadedDLL - false - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb.dll - true - Windows - true - true - false - - - MachineX64 - false - - - - - /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - Disabled - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - true - EnableFastChecks - MultiThreadedDebug - - - Level4 - ProgramDatabase - - - /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb_debug.dll - true - Windows - false - - - MachineX86 - - - - - X64 - - - /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - Disabled - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - true - EnableFastChecks - MultiThreadedDebug - false - - - Level4 - ProgramDatabase - false - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO 
/DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb_debug.dll - true - Windows - false - - - MachineX64 - false - - - - - /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - MultiThreaded - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb.dll - true - Windows - true - true - false - - - MachineX86 - - - - - X64 - - - /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - MultiThreaded - false - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbb.def" %(AdditionalOptions) - $(OutDir)tbb.dll - true - Windows - true - true - false - - - MachineX64 - false - - - - - /coff /Zi - true - true - /coff /Zi - true - true - /coff - true - true - /coff - true - true - - - true - building atomic_support.obj - ml64 /Fo"intel64\Debug-MT\atomic_support.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Debug-MT\atomic_support.obj;%(Outputs) - true - building atomic_support.obj - ml64 /Fo"intel64\Debug\atomic_support.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Debug\atomic_support.obj;%(Outputs) - true - building atomic_support.obj - ml64 /Fo"intel64\Release-MT\atomic_support.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Release-MT\atomic_support.obj;%(Outputs) - true - building atomic_support.obj - ml64 /Fo"intel64\Release\atomic_support.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Release\atomic_support.obj;%(Outputs) - - - true - building intel64_misc.obj - ml64 /Fo"intel64\Debug-MT\intel64_misc.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/intel64_misc.asm - - intel64\Debug-MT\intel64_misc.obj;%(Outputs) - true - building intel64_misc.obj - ml64 /Fo"intel64\Debug\intel64_misc.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/intel64_misc.asm - - intel64\Debug\intel64_misc.obj;%(Outputs) - true - building intel64_misc.obj - ml64 /Fo"intel64\Release-MT\intel64_misc.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/intel64_misc.asm - - intel64\Release-MT\intel64_misc.obj;%(Outputs) - true - building intel64_misc.obj - ml64 /Fo"intel64\Release\intel64_misc.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/intel64_misc.asm - - intel64\Release\intel64_misc.obj;%(Outputs) - - - /coff /Zi - true - true - /coff /Zi - true - true - /coff - true - true - /coff - true - true - - - true - building itsx.obj - ml64 /Fo"intel64\Debug-MT\itsx.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/itsx.asm - - intel64\Debug-MT\itsx.obj;%(Outputs) - true - building itsx.obj - ml64 /Fo"intel64\Debug\itsx.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/itsx.asm - - intel64\Debug\itsx.obj;%(Outputs) - true - building itsx.obj - ml64 /Fo"intel64\Release-MT\itsx.obj" /DEM64T=1 /c /Zi 
../../src/tbb/intel64-masm/itsx.asm - - intel64\Release-MT\itsx.obj;%(Outputs) - true - building itsx.obj - ml64 /Fo"intel64\Release\itsx.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/itsx.asm - - intel64\Release\itsx.obj;%(Outputs) - - - /coff /Zi - true - true - /coff /Zi - /coff /Zi - true - true - /coff /Zi - /coff - true - true - /coff - true - true - - - - - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - - - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - 
generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - true - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - generating tbb.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include >"$(IntDir)tbb.def" - - $(IntDir)tbb.def;%(Outputs) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - - - - - - - diff --git a/src/tbb/build/vs2010/tbbmalloc.vcxproj b/src/tbb/build/vs2010/tbbmalloc.vcxproj deleted file mode 100644 index ad9a793d3..000000000 --- a/src/tbb/build/vs2010/tbbmalloc.vcxproj +++ /dev/null @@ -1,584 +0,0 @@ - - - - - Debug-MT - Win32 - - - Debug-MT - x64 - - - Debug - Win32 - - - Debug - x64 - - - Release-MT - Win32 - - - Release-MT - x64 - - - Release - Win32 - - - Release - x64 - - - - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8} - tbbmalloc - Win32Proj - - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - DynamicLibrary - NotSet - true - - - DynamicLibrary - NotSet - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - <_ProjectFileVersion>10.0.40219.1 - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - 
$(SolutionDir)intel64\$(Configuration)\ - false - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - $(SolutionDir)ia32\$(Configuration)\ - $(SolutionDir)ia32\$(Configuration)\ - false - $(SolutionDir)intel64\$(Configuration)\ - $(SolutionDir)intel64\$(Configuration)\ - false - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - AllRules.ruleset - - - $(ProjectName)_debug - $(ProjectName)_debug - $(ProjectName)_debug - $(ProjectName)_debug - - - - /c /MDd /Od /Ob0 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. - Disabled - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - true - - - Default - MultiThreadedDebugDLL - - - Level4 - ProgramDatabase - - - /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc_debug.dll - true - Windows - false - - - MachineX86 - - - - - X64 - - - /c /MDd /Od /Ob0 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. - Disabled - .;%(AdditionalIncludeDirectories) - false - - - Default - MultiThreadedDebugDLL - false - true - - - Level4 - ProgramDatabase - false - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc_debug.dll - true - Windows - false - - - MachineX64 - - - - - /c /MD /O2 /Zi /EHs- /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - - - MultiThreadedDLL - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc.dll - true - Windows - true - true - false - - - MachineX86 - - - - - X64 - - - /c /MD /O2 /Zi /EHs- /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. 
- .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - - - MultiThreadedDLL - false - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc.dll - true - Windows - true - true - false - - - MachineX64 - - - - - /c /MTd /Od /Ob0 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. - Disabled - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - true - - - Default - MultiThreadedDebug - - - Level4 - ProgramDatabase - - - /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc_debug.dll - true - Windows - false - - - MachineX86 - - - - - X64 - - - /c /MTd /Od /Ob0 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=tbb_debug.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. - Disabled - .;%(AdditionalIncludeDirectories) - false - - - Default - MultiThreadedDebug - false - true - - - Level4 - ProgramDatabase - false - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc_debug.dll - true - Windows - false - - - MachineX64 - - - - - /c /MT /O2 /Zi /EHs- /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. - .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - - - MultiThreaded - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc.dll - true - Windows - true - true - false - - - MachineX86 - - - - - X64 - - - /c /MT /O2 /Zi /EHs- /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=tbb.lib /DDO_ITT_NOTIFY /GS /volatile:iso /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0502 /D__TBBMALLOC_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc /I. 
- .;%(AdditionalIncludeDirectories) - %(PreprocessorDefinitions) - - - MultiThreaded - false - - - Level4 - ProgramDatabase - - - /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:"$(IntDir)tbbmalloc.def" %(AdditionalOptions) - $(OutDir)tbbmalloc.dll - true - Windows - true - true - false - - - MachineX64 - - - - - true - building atomic_support.obj - ml64 /Fo"intel64\Debug-MT\atomic_support.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Debug-MT\atomic_support.obj;%(Outputs) - true - building atomic_support.obj - ml64 /Fo"intel64\Debug\atomic_support.obj" /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Debug\atomic_support.obj;%(Outputs) - true - building atomic_support.obj - ml64 /Fo"intel64\Release-MT\atomic_support.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Release-MT\atomic_support.obj;%(Outputs) - true - building atomic_support.obj - ml64 /Fo"intel64\Release\atomic_support.obj" /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm - - intel64\Release\atomic_support.obj;%(Outputs) - - - - - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbb/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - - - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbbmalloc-export.def 
/DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - true - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbb/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - generating tbbmalloc.def file - cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBBMALLOC_BUILD=1 >"$(IntDir)tbbmalloc.def" - - $(IntDir)tbbmalloc.def;%(Outputs) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - /I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 %(AdditionalOptions) - - - - - {f62787dd-1327-448b-9818-030062bcfaa5} - false - - - - - - - diff --git a/src/tbb/build/vs2010/tbbmalloc_proxy.vcxproj b/src/tbb/build/vs2010/tbbmalloc_proxy.vcxproj deleted file 
mode 100644
index 6462df2cf..000000000
--- a/src/tbb/build/vs2010/tbbmalloc_proxy.vcxproj
+++ /dev/null
@@ -1,421 +0,0 @@
[deleted file src/tbb/build/vs2010/tbbmalloc_proxy.vcxproj: the MSBuild XML markup was lost in extraction. The surviving fragments cover the Debug/Release and -MT configurations for Win32 and x64, the project GUID {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}, the per-configuration compile and link options producing tbbmalloc_proxy.dll and tbbmalloc_proxy_debug.dll, and a project reference to {b15f131e-328a-4d42-adc2-9ff4ca6306d8}.]
diff --git a/src/tbb/build/vs2010/version_string.ver b/src/tbb/build/vs2010/version_string.ver
deleted file mode 100644
index 5d8f04e5d..000000000
--- a/src/tbb/build/vs2010/version_string.ver
+++ /dev/null
@@ -1 +0,0 @@
-#define __TBB_VERSION_STRINGS(N) "Empty"
diff --git a/src/tbb/build/windows.cl.inc b/src/tbb/build/windows.cl.inc
deleted file mode 100644
index 312871a22..000000000
--- a/src/tbb/build/windows.cl.inc
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-#
-# This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-# you can redistribute it and/or modify it under the terms of the GNU General Public License
-# version 2 as published by the Free Software Foundation. Threading Building Blocks is
-# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details. You should have received a copy of
-# the GNU General Public License along with Threading Building Blocks; if not, write to the
-# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-# As a special exception, you may use this file as part of a free software library without
-# restriction. Specifically, if other files instantiate templates or use macros or inline
-# functions from this file, or you compile this file and link it with other files to produce
-# an executable, this file does not by itself cause the resulting executable to be covered
-# by the GNU General Public License. This exception does not however invalidate any other
-# reasons why the executable file might be covered by the GNU General Public License.
-
-#------------------------------------------------------------------------------
-# Define compiler-specific variables.
-#------------------------------------------------------------------------------
-
-
-#------------------------------------------------------------------------------
-# Setting compiler flags.
-#------------------------------------------------------------------------------ -CPLUS = cl /nologo -LINK_FLAGS = /link /nologo -LIB_LINK_FLAGS=/link /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DYNAMICBASE /NXCOMPAT - -ifeq ($(arch), ia32) - LIB_LINK_FLAGS += /SAFESEH -endif - -ifeq ($(runtime), vc_mt) - MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d) -else - MS_CRT_KEY = /MD$(if $(findstring debug,$(cfg)),d) -endif -EH_FLAGS = /EHsc /GR - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=$(TBB.LIB) - ASM_FLAGS = -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=$(TBB.LIB) - ASM_FLAGS = /DUSE_FRAME_POINTER -endif - -ifeq ($(target_ui), win8ui) - CPLUS_FLAGS += /D "_UNICODE" /D "UNICODE" /D "WINAPI_FAMILY=WINAPI_FAMILY_APP" - _WIN32_WINNT=0x0602 -ifeq ($(target_ui_mode), production) - LIB_LINK_FLAGS += /APPCONTAINER -endif -else - CPLUS_FLAGS += /DDO_ITT_NOTIFY -endif - -CPLUS_FLAGS += /GS - -COMPILE_ONLY = /c -PREPROC_ONLY = /TP /EP -INCLUDE_KEY = /I -DEFINE_KEY = /D -OUTPUT_KEY = /Fe -OUTPUTOBJ_KEY = /Fo -WARNING_AS_ERROR_KEY = /WX - -ifeq ($(runtime),vc7.1) - WARNING_KEY = /W3 -else - WARNING_KEY = /W4 - OPENMP_FLAG = /openmp -endif - -DYLIB_KEY = /DLL -EXPORT_KEY = /DEF: -NODEFAULTLIB_KEY = /Zl -NOINTRINSIC_KEY = /Oi- - -ifeq ($(runtime),vc8) - WARNING_KEY += /Wp64 - CPLUS_FLAGS += /D_USE_RTM_VERSION -endif - -# Since VS2012, VC++ provides /volatile option to control semantics of volatile variables. -# We want to use strict ISO semantics in the library and tests -ifeq (ok,$(call detect_js,/minversion cl 17)) - CPLUS_FLAGS += /volatile:iso -endif - -CPLUS_FLAGS += /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \ - /D_WIN32_WINNT=$(_WIN32_WINNT) -C_FLAGS = $(CPLUS_FLAGS) - -#------------------------------------------------------------------------------ -# End of setting compiler flags. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-masm -ifeq (intel64,$(arch)) - ASM=ml64 /nologo - ASM_FLAGS += /DEM64T=1 /c /Zi - TBB_ASM.OBJ = atomic_support.obj intel64_misc.obj itsx.obj - MALLOC_ASM.OBJ = atomic_support.obj -else -ifeq (armv7,$(arch)) - ASM= - TBB_ASM.OBJ= -else - ASM=ml /nologo - ASM_FLAGS += /c /coff /Zi /safeseh - TBB_ASM.OBJ = atomic_support.obj lock_byte.obj itsx.obj -endif -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -ifneq ($(target_ui), win8ui) -M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS)) -else -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -endif -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# End of define compiler-specific variables. 
-#------------------------------------------------------------------------------ diff --git a/src/tbb/build/windows.gcc.inc b/src/tbb/build/windows.gcc.inc deleted file mode 100644 index 00f3375fb..000000000 --- a/src/tbb/build/windows.gcc.inc +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -#------------------------------------------------------------------------------ -# Overriding settings from windows.inc -#------------------------------------------------------------------------------ - -SLASH= $(strip \) -OBJ = o -LIBEXT = dll # MinGW allows linking with DLLs directly - -TBB.RES = -MALLOC.RES = -RML.RES = -TBB.MANIFEST = -MALLOC.MANIFEST = -RML.MANIFEST = - -ifeq (ia32,$(arch)) - TBB.LST = $(tbb_root)/src/tbb/lin32-tbb-export.lst -else - TBB.LST = $(tbb_root)/src/tbb/win64-gcc-tbb-export.lst -endif -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-gcc-tbbmalloc-export.def -RML.DEF = $(RML_SERVER_ROOT)/lin-rml-export.def - -LINK_TBB.LIB = $(TBB.LIB) -# no TBB proxy for the configuration -PROXY.LIB = - -#------------------------------------------------------------------------------ -# End of overridden settings -#------------------------------------------------------------------------------ -# Compiler-specific variables -#------------------------------------------------------------------------------ - -CPLUS = g++ -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -Wno-uninitialized -WARNING_SUPPRESS = -Wno-parentheses -Wno-uninitialized -DYLIB_KEY = -shared -LIBDL = -EXPORT_KEY = -Wl,--version-script, -LIBS = -lpsapi - -#------------------------------------------------------------------------------ -# End of compiler-specific variables -#------------------------------------------------------------------------------ -# Command lines -#------------------------------------------------------------------------------ - -LINK_FLAGS = -Wl,--enable-auto-import -LIB_LINK_FLAGS = $(DYLIB_KEY) -# gcc 4.4 and higher support -std=c++0x -ifeq (ok,$(call detect_js,/minversion gcc 4.4)) 
- CPP11_FLAGS = -std=c++0x -D_TBB_CPP0X -endif - -# gcc 4.8 and later support RTM intrinsics, but require command line switch to enable them -ifeq (ok,$(call detect_js,/minversion gcc 4.8)) - RTM_KEY = -mrtm -endif - -ifeq ($(cfg), release) - CPLUS_FLAGS = -g -O2 -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -CPLUS_FLAGS += -DUSE_WINTHREAD -CPLUS_FLAGS += -D_WIN32_WINNT=$(_WIN32_WINNT) - -# MinGW specific -CPLUS_FLAGS += -DMINGW_HAS_SECURE_API=1 -D__MSVCRT_VERSION__=0x0700 -msse -mthreads - -CONLY = gcc -debugger = gdb -C_FLAGS = $(CPLUS_FLAGS) - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 $(RTM_KEY) - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 -march=i686 $(RTM_KEY) - LIB_LINK_FLAGS += -m32 -endif - -# For examples -export UNIXMODE = 1 - -#------------------------------------------------------------------------------ -# End of command lines -#------------------------------------------------------------------------------ -# Setting assembler data -#------------------------------------------------------------------------------ - -ASM= -ASM_FLAGS= -TBB_ASM.OBJ= -ASSEMBLY_SOURCE=$(arch)-gas - -#------------------------------------------------------------------------------ -# End of setting assembler data -#------------------------------------------------------------------------------ -# Setting tbbmalloc data -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/windows.icl.inc b/src/tbb/build/windows.icl.inc deleted file mode 100644 index dd7690378..000000000 --- a/src/tbb/build/windows.icl.inc +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define compiler-specific variables. 
-#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting default configuration to release. -#------------------------------------------------------------------------------ -cfg ?= release -#------------------------------------------------------------------------------ -# End of setting default configuration to release. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting compiler flags. -#------------------------------------------------------------------------------ -CPLUS = icl /nologo $(VCCOMPAT_FLAG) -LINK_FLAGS = /link /nologo -LIB_LINK_FLAGS= /link /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DYNAMICBASE /NXCOMPAT - -ifeq ($(arch), ia32) - LIB_LINK_FLAGS += /SAFESEH -endif - - -# ICC 11.0 and higher support -std=c++0x -ifeq (ok,$(call detect_js,/minversion icl 11)) - CPP11_FLAGS = /Qstd=c++0x /D_TBB_CPP0X -endif - -# ICC 12.0 and higher provide Intel(R) Cilk(TM) Plus -ifeq (ok,$(call detect_js,/minversion icl 12)) - CILK_AVAILABLE = yes -endif - -ifeq ($(runtime), vc_mt) - MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d) -else - MS_CRT_KEY = /MD$(if $(findstring debug,$(cfg)),d) -endif -EH_FLAGS = /EHsc /GR - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /D__TBB_LIB_NAME=$(TBB.LIB) - ASM_FLAGS = -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D__TBB_LIB_NAME=$(TBB.LIB) - ASM_FLAGS = /DUSE_FRAME_POINTER -endif -CPLUS_FLAGS += /GS - -COMPILE_ONLY = /c /QMMD -# PREPROC_ONLY should really use /TP which applies to all files in the command line. -# But with /TP, ICL does not preprocess *.def files. -PREPROC_ONLY = /EP /Tp -INCLUDE_KEY = /I -DEFINE_KEY = /D -OUTPUT_KEY = /Fe -OUTPUTOBJ_KEY = /Fo -WARNING_AS_ERROR_KEY = /WX -WARNING_KEY = /W3 -DYLIB_KEY = /DLL -EXPORT_KEY = /DEF: -NODEFAULTLIB_KEY = /Zl -NOINTRINSIC_KEY = /Oi- - - -ifneq (,$(codecov)) - CPLUS_FLAGS += /Qprof-genx -else - CPLUS_FLAGS += /DDO_ITT_NOTIFY -endif - -OPENMP_FLAG = /Qopenmp -CPLUS_FLAGS += /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \ - /D_WIN32_WINNT=$(_WIN32_WINNT) - -ifeq ($(runtime),vc8) - CPLUS_FLAGS += /D_USE_RTM_VERSION -endif - - -C_FLAGS = $(CPLUS_FLAGS) - -ifneq (00,$(lambdas)$(cpp0x)) - CXX_ONLY_FLAGS += $(CPP11_FLAGS) -endif - -VCVERSION:=$(runtime) -VCCOMPAT_FLAG ?= $(if $(findstring vc7.1, $(VCVERSION)),/Qvc7.1) -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc8, $(VCVERSION)),/Qvc8) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc_mt, $(VCVERSION)),/Qvc10) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc9, $(VCVERSION)),/Qvc9) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc10, $(VCVERSION)),/Qvc10) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc11, $(VCVERSION)),/Qvc11) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc12, $(VCVERSION)),/Qvc12) -endif -ifeq ($(VCCOMPAT_FLAG),) - $(error VC version not detected correctly: $(VCVERSION) ) -endif -export VCCOMPAT_FLAG - -#------------------------------------------------------------------------------ -# End of setting compiler flags. 
-#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-masm -ifeq (intel64,$(arch)) - ASM=ml64 /nologo - ASM_FLAGS += /DEM64T=1 /c /Zi - TBB_ASM.OBJ = atomic_support.obj intel64_misc.obj itsx.obj - MALLOC_ASM.OBJ = atomic_support.obj -else - ASM=ml /nologo - ASM_FLAGS += /c /coff /Zi /safeseh - TBB_ASM.OBJ = atomic_support.obj lock_byte.obj itsx.obj -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS)) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# End of define compiler-specific variables. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/windows.inc b/src/tbb/build/windows.inc deleted file mode 100644 index 3b8b70914..000000000 --- a/src/tbb/build/windows.inc +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -ifdef tbb_build_dir - test_dir:=$(tbb_build_dir) -else - test_dir:=. -endif - -ifndef rtools - export SHELL = cmd - CMD:=cmd /C - SLASH=\\ - RM=cmd /C del /Q /F - RD=cmd /C rmdir - MD=cmd /c mkdir -else - CMD:=cmd /C - export SHELL = sh.exe - SLASH=/ - RD=rmdir - MD=mkdir - RM=rm -endif - -# A convenience wrapper for calls to detect.js. -# $(1) is the full command line for the script, e.g. 
/minversion icl 12 -detect_js = $(shell cmd /C "cscript /nologo /E:jscript $(tbb_root)/build/detect.js $(1)") - -# TODO give an error if archs doesn't match -ifndef arch - export arch:=$(call detect_js, /arch $(compiler)) -endif - -ifndef runtime - export runtime:=$(call detect_js, /runtime $(compiler)) -endif - -native_compiler := cl -export compiler ?= cl -debugger ?= devenv /debugexe - -CMD=cmd /C -CWD=$(shell cmd /C echo %CD%) -NUL = nul - -AR=lib -AR_OUTPUT_KEY=/out: -AR_FLAGS=/nologo /nodefaultlib - -OBJ = obj -DLL = dll -LIBEXT = lib -ASMEXT = asm - -def_prefix = $(if $(findstring intel64,$(arch)),win64,win32) - -# Target Windows version. Do not increase beyond 0x0502 without prior discussion! -# Used as the value for macro definition opiton in windows.cl.inc etc. -# For tests, we need at least Windows XP SP2 for sake of enabling stack backtraces. -_WIN32_WINNT=0x0502 - -TBB.LST = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.lst -TBB.DEF = $(TBB.LST:.lst=.def) -TBB.DLL = tbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = tbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(LIBEXT) -TBB.RES = tbb_resource.res -# On Windows, we use #pragma comment to set the proper TBB lib to link with. -# But for cross-configuration testing, need to link explicitly. -# Tests use this variable to detect dependency on TBB binary, so have to be non-empty. -LINK_TBB.LIB = $(if $(crosstest),$(TBB.LIB),$(DEFINE_KEY)__TBB_IMPLICITLY_LINKED) -TBB.MANIFEST = -ifneq ($(filter vc8 vc9,$(runtime)),) - TBB.MANIFEST = tbbmanifest.exe.manifest -endif - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = tbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT) -MALLOC.RES = tbbmalloc.res -MALLOC.MANIFEST = -ifneq ($(filter vc8 vc9,$(runtime)),) -MALLOC.MANIFEST = tbbmanifest.exe.manifest -endif -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DLL = tbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY.LIB = tbbmalloc_proxy$(DEBUG_SUFFIX).$(LIBEXT) -LINK_MALLOCPROXY.LIB = $(MALLOCPROXY.LIB) - -PROXY.LIB = tbbproxy$(DEBUG_SUFFIX).$(LIBEXT) - -RML.DEF = $(RML_SERVER_ROOT)/$(def_prefix)-rml-export.def -RML.DLL = irml$(DEBUG_SUFFIX).$(DLL) -RML.LIB = irml$(DEBUG_SUFFIX).$(LIBEXT) -RML.RES = irml.res -ifneq ($(filter vc8 vc9,$(runtime)),) -RML.MANIFEST = tbbmanifest.exe.manifest -endif - -MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(compiler) $(arch) $(subst \,/,"$(VERSION_FLAGS)") > version_string.ver -MAKE_TBBVARS = cmd /C "$(subst /,\,$(tbb_root))\build\generate_tbbvars.bat" - -TEST_LAUNCHER = $(subst /,\,$(tbb_root))\build\test_launcher.bat $(largs) diff --git a/src/tbb/build/xbox360.cl.inc b/src/tbb/build/xbox360.cl.inc deleted file mode 100644 index 640990f40..000000000 --- a/src/tbb/build/xbox360.cl.inc +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. 
You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define compiler-specific variables. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting compiler flags. -#------------------------------------------------------------------------------ -CPLUS = cl /nologo -LINK_FLAGS = /link /nologo -LIB_LINK_FLAGS=/link /nologo /DLL /MAP /DEBUG -MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d) -EH_FLAGS = /EHsc /GR - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /D_XBOX /DTBB_NO_LEGACY=1 - ASM_FLAGS = -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope \ - /DTBB_USE_DEBUG /D_XBOX /DTBB_NO_LEGACY=1 - ASM_FLAGS = /DUSE_FRAME_POINTER -endif - - -COMPILE_ONLY = /c -PREPROC_ONLY = /TP /EP -INCLUDE_KEY = /I -DEFINE_KEY = /D -OUTPUT_KEY = /Fe -OUTPUTOBJ_KEY = /Fo -WARNING_AS_ERROR_KEY = /WX -WARNING_KEY = /W3 -DYLIB_KEY = /DLL -EXPORT_KEY = /DEF: - -ifeq (em64t,$(arch)) - CPLUS_FLAGS += /GS- -endif - -CPLUS_FLAGS += /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=$(_WIN32_WINNT) -C_FLAGS = $(CPLUS_FLAGS) /TC -#------------------------------------------------------------------------------ -# End of setting compiler flags. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -# nothing for XBOX360 -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS)) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# End of define compiler-specific variables. -#------------------------------------------------------------------------------ diff --git a/src/tbb/build/xbox360.inc b/src/tbb/build/xbox360.inc deleted file mode 100644 index 4b5fbadce..000000000 --- a/src/tbb/build/xbox360.inc +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. 
-# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -ifdef tbb_build_dir - test_dir:=$(tbb_build_dir) -else - test_dir:=. -endif - -# TODO give an error if archs doesn't match -ifndef arch - export arch:=xbox360 -endif - -ifndef runtime - export runtime:=xdk -endif - -native_compiler := cl -export compiler ?= cl -debugger ?= devenv /debugexe - -CMD=cmd /C -CWD=$(shell cmd /C echo %CD%) -RM=cmd /C del /Q /F -RD=cmd /C rmdir -MD=cmd /c mkdir -SLASH=\\ -NUL = nul - -OBJ = obj -DLL = dll -LIBEXT = lib - -def_prefix = $(arch) - -# Target Windows version. Do not increase beyond 0x0500 without prior discussion! -# Used as the value for macro definition opiton in compiler specific inc files. -_WIN32_WINNT=0x0400 - -TBB.LST = -TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def -TBB.DLL = tbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = tbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(LIBEXT) -TBB.RES = -#On Windows we specify appropriate tbb library using #pragma comment -LINK_TBB.LIB = - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = tbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT) -#On Windows we specify appropriate tbbmalloc library using #pragma comment -LINK_MALLOC.LIB = -MALLOC.RES = - -MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(compiler) $(arch) $(subst \,/,"$(VERSION_FLAGS)") > version_string.ver -MAKE_TBBVARS = cmd /C "$(subst /,\,$(tbb_root))\build\generate_tbbvars.bat" diff --git a/src/tbb/cmake/README.md b/src/tbb/cmake/README.md new file mode 100644 index 000000000..3a357218d --- /dev/null +++ b/src/tbb/cmake/README.md @@ -0,0 +1,332 @@ +# Build System Description + +The project uses CMake* build configuration. + +The following controls are available during the configure stage: +``` +TBB_TEST:BOOL - Enable testing (ON by default) +TBB_STRICT:BOOL - Treat compiler warnings as errors (ON by default) +TBB_SANITIZE:STRING - Sanitizer parameter, passed to compiler/linker +TBB_SIGNTOOL:FILEPATH - Tool for digital signing, used in post-install step for libraries if provided. +TBB_SIGNTOOL_ARGS:STRING - Additional arguments for TBB_SIGNTOOL, used if TBB_SIGNTOOL is set. 
+TBB_BUILD:BOOL - Enable Intel(R) oneAPI Threading Building Blocks (oneTBB) build (ON by default)
+TBB_FIND_PACKAGE - Enable search for external oneTBB using find_package instead of build from sources (OFF by default)
+TBBMALLOC_BUILD:BOOL - Enable Intel(R) oneAPI Threading Building Blocks (oneTBB) memory allocator build (ON by default)
+TBBMALLOC_PROXY_BUILD:BOOL - Enable Intel(R) oneAPI Threading Building Blocks (oneTBB) memory allocator proxy build (requires TBBMALLOC_BUILD; ON by default)
+TBB4PY_BUILD:BOOL - Enable Intel(R) oneAPI Threading Building Blocks (oneTBB) Python module build (OFF by default)
+TBB_CPF:BOOL - Enable preview features of the library (OFF by default)
+TBB_INSTALL:BOOL - Enable installation (ON by default)
+TBB_INSTALL_VARS:BOOL - Enable auto-generated vars installation (packages generated by `cpack` and `make install` will also include the vars script) (OFF by default)
+TBB_VALGRIND_MEMCHECK:BOOL - Enable scan for memory leaks using Valgrind (OFF by default)
+TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH - Disable automatic search for HWLOC via the pkg-config tool (OFF by default)
+TBB_ENABLE_IPO - Enable Interprocedural Optimization (IPO) during the compilation (ON by default)
+TBB_BUILD_APPLE_FRAMEWORKS - Build Apple* frameworks instead of dylibs; only available on the Apple platform (OFF by default)
+```
+
+## Configure, Build, and Test
+
+### Preparation
+
+To perform an out-of-source build, create a build directory and go there:
+
+```bash
+mkdir /tmp/my-build
+cd /tmp/my-build
+```
+
+### Configure
+
+```bash
+cmake <options> <repo_root>
+```
+
+Some useful options:
+- `-G <generator_name>` - specify a particular project generator. See `cmake --help` for details.
+- `-DCMAKE_BUILD_TYPE=Debug` - specify for a Debug build. It is not applicable for multi-config generators, e.g., the Microsoft* Visual Studio* generator.
+
+#### TBBBind Library Configuration
+
+> **_TIP:_** It is recommended to install the HWLOC* library. See [oneTBB documentation](https://oneapi-src.github.io/oneTBB/GSG/next_steps.html#hybrid-cpu-and-numa-support) for details.
+
+The TBBBind library has three versions: `tbbbind`, `tbbbind_2_0`, and `tbbbind_2_5`. Each of these versions is linked with the corresponding HWLOC* library version:
+- `tbbbind` links with `HWLOC 1.11.x`
+- `tbbbind_2_0` links with `HWLOC 2.1–2.4`
+- `tbbbind_2_5` links with `HWLOC 2.5` and later
+
+The search for a suitable version of the HWLOC library is enabled by default. If you want to use a specific version of the library, you can specify the path to it manually using the following CMake variables:
+
+ - `CMAKE_HWLOC_<HWLOC_VER>_LIBRARY_PATH` - path to the corresponding HWLOC version shared library on Linux* OS, or path to the `.lib` file on Windows* OS
+ - `CMAKE_HWLOC_<HWLOC_VER>_INCLUDE_PATH` - path to the corresponding HWLOC version include directory
+
+---
+**NOTE:** Automatic HWLOC searching requires CMake version 3.6 or higher.
+
+---
+
+Windows* OS requires an additional variable for correct TBBBind library building:
+ - `CMAKE_HWLOC_<HWLOC_VER>_DLL_PATH` - path to the corresponding HWLOC version `.dll` file.
+
+The `<HWLOC_VER>` substring used above can be replaced with one of three values:
+- `1_11` for the `tbbbind` library configuration
+- `2` for the `tbbbind_2_0` library configuration
+- `2_5` for the `tbbbind_2_5` library configuration
+
+If you specify variables for several TBBBind versions, all of those versions are built during a single build session.
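+
+For example, a configure line that pins the `tbbbind_2_5` configuration to a manually installed HWLOC might look like the following sketch (the `/opt/hwloc` install location is a placeholder for this illustration, not a oneTBB default):
+
+```bash
+# Point the tbbbind_2_5 build at a HWLOC 2.5+ installed under /opt/hwloc.
+cmake -DCMAKE_HWLOC_2_5_LIBRARY_PATH=/opt/hwloc/lib/libhwloc.so \
+      -DCMAKE_HWLOC_2_5_INCLUDE_PATH=/opt/hwloc/include \
+      <repo_root>
+```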
+
+---
+**TIP**
+
+Specify `TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH` to turn off the automatic search for the HWLOC library.
+
+---
+
+### Build
+
+```bash
+cmake --build .
+```
+
+Some useful options:
+- `--target <target_name>` - specific target; "all" is the default.
+- `--config <config_name>` - build configuration, applicable only for multi-config generators, e.g., the Visual Studio* generator.
+
+The binaries are placed in `./<compiler_id>_<compiler_ver>_cxx<standard>_<build_type>`. For example, `./gnu_4.8_cxx11_release`.
+
+#### Build For 32-bit
+
+* **Intel(R) Compiler**. Source the Intel(R) C++ Compiler with `ia32` and build as usual.
+* **MSVC**. Use the architecture switch for the [generator](https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html) (e.g., `-A Win32` for [VS2019](https://cmake.org/cmake/help/latest/generator/Visual%20Studio%2016%202019.html)) during the configuration stage and then build as usual.
+* **GCC/Clang**. Specify `-m32` during the configuration, either as `CXXFLAGS=-m32 cmake ..` or `cmake -DCMAKE_CXX_FLAGS=-m32 ..`
+* For any other compiler that builds for 64-bit by default, specify a 32-bit compiler key during the configuration as above.
+
+#### Windows* OS-Specific Builds
+
+---
+**NOTE**
+
+The following builds require CMake version 3.15 or higher.
+
+---
+
+* **Dynamic linkage with C Runtime Library (CRT)**. The default behavior can be explicitly specified by setting `CMAKE_MSVC_RUNTIME_LIBRARY` to `MultiThreadedDLL` or `MultiThreadedDebugDLL`.
+```bash
+cmake .. # dynamic linkage is used by default
+```
+```bash
+cmake -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreadedDLL ..
+```
+```bash
+cmake -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreadedDebugDLL -DCMAKE_BUILD_TYPE=Debug ..
+```
+* **Static linkage with CRT**. Set `CMAKE_MSVC_RUNTIME_LIBRARY` to `MultiThreaded` or `MultiThreadedDebug`.
+```bash
+cmake -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded ..
+```
+```bash
+cmake -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreadedDebug -DCMAKE_BUILD_TYPE=Debug ..
+```
+* **Windows OS 10 Universal Windows application build**. Set `CMAKE_SYSTEM_NAME` to `WindowsStore` and `CMAKE_SYSTEM_VERSION` to `10.0`.
+
+---
+**NOTE**
+
+Set `TBB_NO_APPCONTAINER` to `ON` to apply the `/APPCONTAINER:NO` option during the compilation (used for testing).
+
+---
+
+```bash
+cmake -DCMAKE_SYSTEM_NAME:STRING=WindowsStore -DCMAKE_SYSTEM_VERSION:STRING=10.0 ..
+```
+
+* **Universal Windows OS Driver build**. Set `TBB_WINDOWS_DRIVER` to `ON` and use static linkage with the CRT.
+
+```bash
+cmake -DTBB_WINDOWS_DRIVER=ON -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded ..
+```
+
+#### Example
+
+```bash
+cmake -DCMAKE_CXX_COMPILER=icpc -DCMAKE_C_COMPILER=icc -DTBB_TEST=off -DCMAKE_HWLOC_1_11_LIBRARY_PATH=<hwloc_library_dir>/libhwloc.so.15 \
+      -DCMAKE_HWLOC_1_11_INCLUDE_PATH=<hwloc_include_dir> -DCMAKE_INSTALL_PREFIX=/oneTBB_install ..
+make -j8 && make install
+```
+
+---
+**NOTE**
+
+The library path points to a file, while the include path points to a directory and not to ``hwloc.h``.
+
+---
+
+### Test
+
+#### Build Test
+To build a test, use the default target ``all``:
+```
+cmake --build .
+```
+
+Or use a specific test target:
+```
+cmake --build . --target <test_name> # e.g. test_version
+```
+
+#### Run Test
+
+You can run a test by using CTest:
+```bash
+ctest
+```
+
+Or by using the ``test`` target:
+```bash
+cmake --build . --target test # currently does not work on Windows* OS
+```
+
+## Installation
+See [Installation from Sources](../INSTALL.md) to learn how to install oneTBB.
+
+To install oneTBB from the release packages, use the following commands:
+```bash
+tar -xvf oneapi-tbb-xxx.xx.x-*.tgz
+source env/vars.sh
+```
+
+## Sanitizers - Configure, Build, and Run
+
+```bash
+mkdir build
+cd build
+cmake -DTBB_SANITIZE=thread .. # or -DTBB_SANITIZE=memory or any other sanitizer
+make -j
+ctest -V
+```
+
+## Valgrind Memcheck - Configure, Build, and Run
+
+### Prerequisites
+* Valgrind tool executable
+
+### Example
+```bash
+mkdir build
+cd build
+cmake -DTBB_VALGRIND_MEMCHECK=ON ..
+make -j memcheck-<test_name> # or memcheck-all to scan all tests
+```
+
+## Test Specification
+
+Use Doxygen* to generate the oneTBB test specification:
+
+```bash
+mkdir build
+cd build
+cmake -DTBB_TEST_SPEC=ON ..
+make test_spec
+```
+
+## TBBConfig - Integration of Binary Packages
+
+TBBConfig is a configuration module used for the integration of prebuilt oneTBB. It consists of two files (``TBBConfig.cmake`` and ``TBBConfigVersion.cmake``) and can be used via the [find_package](https://cmake.org/cmake/help/latest/command/find_package.html) function.
+
+To use this module in your CMake project:
+ 1. Let CMake know where to search for TBBConfig, e.g. specify the location of ``TBBConfig.cmake`` in `TBB_DIR` (for more details about search paths, see [find_package](https://cmake.org/cmake/help/latest/command/find_package.html)).
+ 2. Use [find_package](https://cmake.org/cmake/help/latest/command/find_package.html) to find oneTBB.
+ 3. Use the provided variables and/or imported targets (described below) to work with the found oneTBB.
+
+Example:
+
+```cmake
+add_executable(foo foo.cpp)
+find_package(TBB)
+target_link_libraries(foo TBB::tbb)
+```
+
+oneTBB components can be passed to [find_package](https://cmake.org/cmake/help/latest/command/find_package.html) after the keyword ``COMPONENTS`` or ``REQUIRED``.
+Use the basic names of the components (`tbb`, `tbbmalloc`, etc.).
+
+If components are not specified, the default set is used: `tbb`, `tbbmalloc`, and ``tbbmalloc_proxy``.
+
+If `tbbmalloc_proxy` is requested, the `tbbmalloc` component is also added and set as a dependency for `tbbmalloc_proxy`.
+
+TBBConfig creates [imported targets](https://cmake.org/cmake/help/latest/manual/cmake-buildsystem.7.html#imported-targets) as
+shared libraries using the following format: `TBB::<component>`. For example, `TBB::tbb` or `TBB::tbbmalloc`.
+
+To search only for the release oneTBB version, set `TBB_FIND_RELEASE_ONLY` to `TRUE` before calling `find_package`. This variable helps to avoid simultaneous linkage of release and debug oneTBB versions when the CMake configuration is `Debug` but a third-party component depends on the release oneTBB version.
+
+Variables set during TBB configuration:
+
+Variable | Description
+--- | ---
+`TBB_FOUND` | oneTBB is found
+`TBB_<component>_FOUND` | The specific oneTBB component is found
+`TBB_VERSION` | oneTBB version (format: `<major>.<minor>.<patch>.<tweak>`)
+`TBB_IMPORTED_TARGETS` | All created oneTBB imported targets (not supported for builds from source code)
+
+Starting from [oneTBB 2021.1](https://github.com/oneapi-src/oneTBB/releases/tag/v2021.1), the TBBConfig files in the GitHub* release binary packages are located under `<tbb_root>/lib/cmake/TBB`.
+For example, `TBB_DIR` should be set to `<tbb_root>/lib/cmake/TBB`.
+
+TBBConfig files are automatically created during the build from source code and can be installed together with the library.
+oneTBB also provides a helper function that creates TBBConfig files from predefined templates: see `tbb_generate_config` in `cmake/config_generation.cmake`.
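+
+As an illustrative sketch of a consumer project combining the pieces above (the project name `foo` and its source file are placeholders; `TBB_FIND_RELEASE_ONLY`, the `COMPONENTS` keyword, and the `TBB::<component>` targets are documented above):
+
+```cmake
+cmake_minimum_required(VERSION 3.5)
+project(foo CXX)
+
+# Avoid mixing debug and release oneTBB when this project is configured as Debug.
+set(TBB_FIND_RELEASE_ONLY TRUE)
+
+# Request specific components; tbbmalloc_proxy, if listed, would pull in tbbmalloc.
+find_package(TBB REQUIRED COMPONENTS tbb tbbmalloc)
+
+add_executable(foo foo.cpp)
+
+# Link against the imported targets created by TBBConfig.
+target_link_libraries(foo TBB::tbb TBB::tbbmalloc)
+```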
+
+## oneTBB Python Module Support
+The `TBB4PY_BUILD` CMake option provides the ability to build a Python module for oneTBB.
+
+### Targets:
+ - `irml` - build IPC RML server
+ - `python_build` - build oneTBB module for Python
+
+`python_build` target requirements:
+ - Python version 3.5 or newer
+ - SWIG version 3.0.6 or newer
+
+## CMake Files
+
+### Compile and Link Options
+
+Compile and link options may be specific to certain compilers. This part is handled in the `cmake/compilers/*` files.
+
+Options in TBB CMake are handled via variables in two ways for convenience:
+
+* by options group
+* by the specific option
+
+#### Options Group
+
+The naming convention is `TBB_<SCOPE>_<ACTION>_<SUBJECT>`, where:
+
+* `<SCOPE>` can be:
+  * `LIB` - options applied during libraries build.
+  * `TEST` - options applied during test build.
+  * `BENCH` - options applied during benchmarks build.
+  * `COMMON` - options applied during all (libraries, test, benchmarks) builds.
+* `<ACTION>` can be:
+  * `COMPILE` - options applied during the compilation.
+  * `LINK` - options applied during the linkage.
+* `<SUBJECT>` can be:
+  * `FLAGS` - list of flags
+  * `LIBS` - list of libraries
+
+*Examples*
+
+Variable | Description
+--- | ---
+`TBB_COMMON_COMPILE_FLAGS` | Applied to libraries, tests, and benchmarks as compile options
+`TBB_LIB_LINK_FLAGS` | Applied to libraries as link options
+`TBB_LIB_LINK_LIBS` | Applied to libraries as link libraries
+`TBB_TEST_COMPILE_FLAGS` | Applied to tests as compile options
+
+For the MSVC (Visual Studio) compiler with CMake < 3.13, specify `LINK` options prefixed with a dash (-) to avoid issues caused by the `target_link_libraries` CMake command.
+
+#### Specific Options
+
+If an option is used in only some of the places (library, tests, benchmarks) and cannot be added to one of the option groups, name it using common sense.
+
+Warning suppressions should be added to the `TBB_WARNING_SUPPRESS` variable, which is applied during the compilation of libraries, tests, and benchmarks.
+Additional warnings should be added to the `TBB_WARNING_TEST_FLAGS` variable, which is applied during the compilation of tests.
diff --git a/src/tbb/cmake/android/device_environment_cleanup.cmake b/src/tbb/cmake/android/device_environment_cleanup.cmake
new file mode 100644
index 000000000..53bd9e54f
--- /dev/null
+++ b/src/tbb/cmake/android/device_environment_cleanup.cmake
@@ -0,0 +1,17 @@
+# Copyright (c) 2020-2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +include(${CMAKE_CURRENT_LIST_DIR}/environment.cmake) + +execute_on_device("rm -rf ${ANDROID_DEVICE_TESTING_DIRECTORY}") diff --git a/src/tbb/cmake/android/environment.cmake b/src/tbb/cmake/android/environment.cmake new file mode 100644 index 000000000..c209dfc5c --- /dev/null +++ b/src/tbb/cmake/android/environment.cmake @@ -0,0 +1,35 @@ +# Copyright (c) 2020-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set(ANDROID_DEVICE_TESTING_DIRECTORY "/data/local/tmp/tbb_testing") + +find_program(adb_executable adb) +if (NOT adb_executable) + message(FATAL_ERROR "Could not find adb") +endif() + +macro(execute_on_device cmd) + execute_process(COMMAND ${adb_executable} shell ${cmd} RESULT_VARIABLE CMD_RESULT) + if (CMD_RESULT) + message(FATAL_ERROR "Error while on device execution: ${cmd} error_code: ${CMD_RESULT}") + endif() +endmacro() + +macro(transfer_data data_path) + execute_process(COMMAND ${adb_executable} push --sync ${data_path} ${ANDROID_DEVICE_TESTING_DIRECTORY} + RESULT_VARIABLE CMD_RESULT OUTPUT_QUIET) + if (CMD_RESULT) + message(FATAL_ERROR "Error while data transferring: ${data_path} error_code: ${CMD_RESULT}") + endif() +endmacro() diff --git a/src/tbb/cmake/android/test_launcher.cmake b/src/tbb/cmake/android/test_launcher.cmake new file mode 100644 index 000000000..843d9bef1 --- /dev/null +++ b/src/tbb/cmake/android/test_launcher.cmake @@ -0,0 +1,27 @@ +# Copyright (c) 2020-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +include(${CMAKE_CURRENT_LIST_DIR}/environment.cmake) + +# transfer data to device +execute_on_device("mkdir -m 755 -p ${ANDROID_DEVICE_TESTING_DIRECTORY}") + +file (GLOB_RECURSE BINARIES_LIST "${BINARIES_PATH}/*.so*" "${BINARIES_PATH}/${TEST_NAME}") +foreach(BINARY_FILE ${BINARIES_LIST}) + transfer_data(${BINARY_FILE}) +endforeach() + +# execute binary +execute_on_device("chmod -R 755 ${ANDROID_DEVICE_TESTING_DIRECTORY}") +execute_on_device("LD_LIBRARY_PATH=${ANDROID_DEVICE_TESTING_DIRECTORY} ${ANDROID_DEVICE_TESTING_DIRECTORY}/${TEST_NAME}") diff --git a/src/tbb/cmake/compilers/AppleClang.cmake b/src/tbb/cmake/compilers/AppleClang.cmake new file mode 100644 index 000000000..5ebbdbd1a --- /dev/null +++ b/src/tbb/cmake/compilers/AppleClang.cmake @@ -0,0 +1,47 @@ +# Copyright (c) 2020-2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set(TBB_LINK_DEF_FILE_FLAG -Wl,-exported_symbols_list,)
+set(TBB_DEF_FILE_PREFIX mac${TBB_ARCH})
+set(TBB_WARNING_LEVEL -Wall -Wextra $<$<BOOL:${TBB_STRICT}>:-Werror>)
+set(TBB_TEST_WARNING_FLAGS -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor)
+set(TBB_WARNING_SUPPRESS -Wno-parentheses -Wno-non-virtual-dtor -Wno-dangling-else)
+# For correct ucontext.h structures layout
+set(TBB_COMMON_COMPILE_FLAGS -D_XOPEN_SOURCE)
+
+# Depfile options (e.g. -MD) are inserted automatically in some cases.
+# Don't add -MMD to avoid conflicts in such cases.
+if (NOT CMAKE_GENERATOR MATCHES "Ninja" AND NOT CMAKE_CXX_DEPENDS_USE_COMPILER)
+    set(TBB_MMD_FLAG -MMD)
+endif()
+
+# Ignore -Werror set through add_compile_options() or added to CMAKE_CXX_FLAGS if TBB_STRICT is disabled.
+if (NOT TBB_STRICT AND COMMAND tbb_remove_compile_flag)
+    tbb_remove_compile_flag(-Werror)
+endif()
+
+# Enable Intel(R) Transactional Synchronization Extensions (-mrtm) and WAITPKG instructions support (-mwaitpkg) on relevant processors
+if (CMAKE_OSX_ARCHITECTURES)
+    set(_tbb_target_architectures "${CMAKE_OSX_ARCHITECTURES}")
+else()
+    set(_tbb_target_architectures "${CMAKE_SYSTEM_PROCESSOR}")
+endif()
+if ("${_tbb_target_architectures}" MATCHES "(x86_64|amd64|AMD64)") # OSX systems are 64-bit only
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -mrtm $<$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},12.0>>:-mwaitpkg>)
+endif()
+unset(_tbb_target_architectures)
+
+# TBB malloc settings
+set(TBBMALLOC_LIB_COMPILE_FLAGS -fno-rtti -fno-exceptions)
+
diff --git a/src/tbb/cmake/compilers/Clang.cmake b/src/tbb/cmake/compilers/Clang.cmake new file mode 100644 index 000000000..dcd66634f --- /dev/null +++ b/src/tbb/cmake/compilers/Clang.cmake @@ -0,0 +1,87 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if (EMSCRIPTEN)
+    set(TBB_EMSCRIPTEN 1)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -fexceptions)
+    set(TBB_TEST_LINK_FLAGS ${TBB_COMMON_LINK_FLAGS} -fexceptions -sINITIAL_MEMORY=65536000 -sALLOW_MEMORY_GROWTH=1 -sMALLOC=mimalloc -sEXIT_RUNTIME=1)
+    if (NOT EMSCRIPTEN_WITHOUT_PTHREAD)
+        set_property(TARGET Threads::Threads PROPERTY INTERFACE_LINK_LIBRARIES "-pthread")
+    endif()
+    set(TBB_EMSCRIPTEN_STACK_SIZE 65536)
+    set(TBB_LIB_COMPILE_FLAGS -D__TBB_EMSCRIPTEN_STACK_SIZE=${TBB_EMSCRIPTEN_STACK_SIZE})
+    set(TBB_TEST_LINK_FLAGS ${TBB_TEST_LINK_FLAGS} -sTOTAL_STACK=${TBB_EMSCRIPTEN_STACK_SIZE})
+    unset(TBB_EMSCRIPTEN_STACK_SIZE)
+endif()
+
+if (MINGW)
+    set(TBB_LINK_DEF_FILE_FLAG "")
+    set(TBB_DEF_FILE_PREFIX "")
+elseif (APPLE)
+    set(TBB_LINK_DEF_FILE_FLAG -Wl,-exported_symbols_list,)
+    set(TBB_DEF_FILE_PREFIX mac${TBB_ARCH})
+
+    # For correct ucontext.h structures layout
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -D_XOPEN_SOURCE)
+elseif (MSVC)
+    include(${CMAKE_CURRENT_LIST_DIR}/MSVC.cmake)
+    return()
+else()
+    set(TBB_LINK_DEF_FILE_FLAG -Wl,--version-script=)
+    set(TBB_DEF_FILE_PREFIX lin${TBB_ARCH})
+    set(TBB_TEST_COMPILE_FLAGS ${TBB_TEST_COMPILE_FLAGS} $<$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},18.0>>:-ffp-model=precise>)
+endif()
+
+# Depfile options (e.g. -MD) are inserted automatically in some cases.
+# Don't add -MMD to avoid conflicts in such cases.
+if (NOT CMAKE_GENERATOR MATCHES "Ninja" AND NOT CMAKE_CXX_DEPENDS_USE_COMPILER)
+    set(TBB_MMD_FLAG -MMD)
+endif()
+
+set(TBB_WARNING_LEVEL -Wall -Wextra $<$<BOOL:${TBB_STRICT}>:-Werror>)
+set(TBB_TEST_WARNING_FLAGS -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor)
+
+# Ignore -Werror set through add_compile_options() or added to CMAKE_CXX_FLAGS if TBB_STRICT is disabled.
+if (NOT TBB_STRICT AND COMMAND tbb_remove_compile_flag)
+    tbb_remove_compile_flag(-Werror)
+endif()
+
+# Enable Intel(R) Transactional Synchronization Extensions (-mrtm) and WAITPKG instructions support (-mwaitpkg) on relevant processors
+if (CMAKE_SYSTEM_PROCESSOR MATCHES "(AMD64|amd64|i.86|x86)" AND NOT EMSCRIPTEN)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -mrtm $<$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},12.0>>:-mwaitpkg>)
+endif()
+
+# Clang flags to prevent compiler from optimizing out security checks
+set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -Wformat -Wformat-security -Werror=format-security -fPIC $<$<NOT:$<BOOL:${EMSCRIPTEN}>>:-fstack-protector-strong>)
+
+# -z switch is not supported on MacOS
+if (NOT APPLE)
+    set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} -Wl,-z,relro,-z,now)
+endif()
+
+set(TBB_COMMON_LINK_LIBS ${CMAKE_DL_LIBS})
+
+if (NOT CMAKE_CXX_FLAGS MATCHES "_FORTIFY_SOURCE")
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} $<$<NOT:$<CONFIG:Debug>>:-D_FORTIFY_SOURCE=2>)
+endif ()
+
+if (MINGW)
+    list(APPEND TBB_COMMON_COMPILE_FLAGS -U__STRICT_ANSI__)
+endif()
+
+set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:-flto>)
+set(TBB_IPO_LINK_FLAGS $<$<NOT:$<CONFIG:Debug>>:-flto>)
+
+# TBB malloc settings
+set(TBBMALLOC_LIB_COMPILE_FLAGS -fno-rtti -fno-exceptions)
diff --git a/src/tbb/cmake/compilers/GNU.cmake b/src/tbb/cmake/compilers/GNU.cmake new file mode 100644 index 000000000..cf6d8bdbc --- /dev/null +++ b/src/tbb/cmake/compilers/GNU.cmake @@ -0,0 +1,118 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if (MINGW)
+    set(TBB_LINK_DEF_FILE_FLAG "")
+    set(TBB_DEF_FILE_PREFIX "")
+elseif (APPLE)
+    set(TBB_LINK_DEF_FILE_FLAG -Wl,-exported_symbols_list,)
+    set(TBB_DEF_FILE_PREFIX mac${TBB_ARCH})
+
+    # For correct ucontext.h structures layout
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -D_XOPEN_SOURCE)
+else()
+    set(TBB_LINK_DEF_FILE_FLAG -Wl,--version-script=)
+    set(TBB_DEF_FILE_PREFIX lin${TBB_ARCH})
+endif()
+
+set(TBB_WARNING_LEVEL -Wall -Wextra $<$<BOOL:${TBB_STRICT}>:-Werror> -Wfatal-errors)
+set(TBB_TEST_WARNING_FLAGS -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor)
+
+# Depfile options (e.g. -MD) are inserted automatically in some cases.
+# Don't add -MMD to avoid conflicts in such cases.
+if (NOT CMAKE_GENERATOR MATCHES "Ninja" AND NOT CMAKE_CXX_DEPENDS_USE_COMPILER)
+    set(TBB_MMD_FLAG -MMD)
+endif()
+
+
+# Binutils < 2.31.1 do not support the tpause instruction. When compiling with
+# a modern version of GCC (supporting it) but relying on an outdated assembler,
+# compilation will fail with an error reporting "no such instruction: tpause".
+# The following code invokes the GNU assembler to extract the version number
+# and convert it to an integer that can be used in the C++ code to compare
+# against, and conditionally disable the __TBB_WAITPKG_INTRINSICS_PRESENT
+# macro if the version is incompatible. Binutils only report the version in the
+# MAJOR.MINOR format, therefore the version checked is >=2.32 (instead of
+# >=2.31.1). Capturing the output in CMake can be done like below. The version
+# information is written to either stdout or stderr. To not make any
+# assumptions, both are captured.
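To make the packing just described concrete: the assembler's MAJOR.MINOR pair is folded into the single integer MAJOR * 1000 + MINOR, so the ">= 2.32" cutoff becomes an integer comparison against 2032. A standalone sketch of the same arithmetic (illustrative only; the variable names here are hypothetical and not part of the upstream file):

    # GNU assembler 2.38 packs to 2 * 1000 + 38 = 2038, which clears the 2032 bar.
    set(_asm_major 2)
    set(_asm_minor 38)
    math(EXPR _asm_version "${_asm_major} * 1000 + ${_asm_minor}")
    if (_asm_version GREATER_EQUAL 2032) # i.e. binutils >= 2.32, so tpause is available
        message(STATUS "Assembler supports waitpkg instructions (${_asm_version})")
    endif()

The detection itself follows.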
+execute_process(
+    COMMAND ${CMAKE_COMMAND} -E env "LANG=C" ${CMAKE_CXX_COMPILER} -xc -c /dev/null -Wa,-v -o/dev/null
+    OUTPUT_VARIABLE ASSEMBLER_VERSION_LINE_OUT
+    ERROR_VARIABLE ASSEMBLER_VERSION_LINE_ERR
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+    ERROR_STRIP_TRAILING_WHITESPACE
+)
+set(ASSEMBLER_VERSION_LINE ${ASSEMBLER_VERSION_LINE_OUT}${ASSEMBLER_VERSION_LINE_ERR})
+string(REGEX REPLACE ".*GNU assembler version ([0-9]+)\\.([0-9]+).*" "\\1" _tbb_gnu_asm_major_version "${ASSEMBLER_VERSION_LINE}")
+string(REGEX REPLACE ".*GNU assembler version ([0-9]+)\\.([0-9]+).*" "\\2" _tbb_gnu_asm_minor_version "${ASSEMBLER_VERSION_LINE}")
+unset(ASSEMBLER_VERSION_LINE_OUT)
+unset(ASSEMBLER_VERSION_LINE_ERR)
+unset(ASSEMBLER_VERSION_LINE)
+message(TRACE "Extracted GNU assembler version: major=${_tbb_gnu_asm_major_version} minor=${_tbb_gnu_asm_minor_version}")
+
+math(EXPR _tbb_gnu_asm_version_number "${_tbb_gnu_asm_major_version} * 1000 + ${_tbb_gnu_asm_minor_version}")
+set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} "-D__TBB_GNU_ASM_VERSION=${_tbb_gnu_asm_version_number}")
+message(STATUS "GNU Assembler version: ${_tbb_gnu_asm_major_version}.${_tbb_gnu_asm_minor_version} (${_tbb_gnu_asm_version_number})")
+
+# Enable Intel(R) Transactional Synchronization Extensions (-mrtm) and WAITPKG instructions support (-mwaitpkg) on relevant processors
+if (CMAKE_SYSTEM_PROCESSOR MATCHES "(AMD64|amd64|i.86|x86)" AND NOT EMSCRIPTEN)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -mrtm $<$<AND:$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},11.0>>,$<NOT:$<VERSION_LESS:${_tbb_gnu_asm_version_number},2032>>>:-mwaitpkg>)
+endif()
+
+set(TBB_COMMON_LINK_LIBS ${CMAKE_DL_LIBS})
+
+# Ignore -Werror set through add_compile_options() or added to CMAKE_CXX_FLAGS if TBB_STRICT is disabled.
+if (NOT TBB_STRICT AND COMMAND tbb_remove_compile_flag)
+    tbb_remove_compile_flag(-Werror)
+endif()
+
+if (NOT ${CMAKE_CXX_COMPILER_ID} STREQUAL Intel)
+    # gcc 6.0 and later have -flifetime-dse option that controls elimination of stores done outside the object lifetime
+    set(TBB_DSE_FLAG $<$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},6.0>>:-flifetime-dse=1>)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} $<$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},8.0>>:-fstack-clash-protection>)
+
+    # Suppress GCC 12.x-13.x warning here that to_wait_node(n)->my_is_in_list might have size 0
+    set(TBB_COMMON_LINK_FLAGS ${TBB_COMMON_LINK_FLAGS} $<$<AND:$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},12.0>>,$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},14.0>>:-Wno-stringop-overflow>)
+endif()
+
+# Workaround for heavy tests and too many symbols in debug (relocation truncated to fit: R_MIPS_CALL16)
+if ("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "mips")
+    set(TBB_TEST_COMPILE_FLAGS ${TBB_TEST_COMPILE_FLAGS} -DTBB_TEST_LOW_WORKLOAD $<$<CONFIG:Debug>:-fPIE -mxgot>)
+    set(TBB_TEST_LINK_FLAGS ${TBB_TEST_LINK_FLAGS} $<$<CONFIG:Debug>:-pie>)
+endif()
+
+set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:-flto>)
+set(TBB_IPO_LINK_FLAGS $<$<NOT:$<CONFIG:Debug>>:-flto>)
+
+
+if (MINGW AND CMAKE_SYSTEM_PROCESSOR MATCHES "i.86")
+    list (APPEND TBB_COMMON_COMPILE_FLAGS -msse2)
+endif ()
+
+# Gnu flags to prevent compiler from optimizing out security checks
+set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -fno-strict-overflow -fno-delete-null-pointer-checks -fwrapv)
+set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -Wformat -Wformat-security -Werror=format-security
+    -fstack-protector-strong )
+# -z switch is not supported on MacOS and MinGW
+if (NOT APPLE AND NOT MINGW)
+    set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} -Wl,-z,relro,-z,now,-z,noexecstack)
+endif()
+if (NOT CMAKE_CXX_FLAGS MATCHES "_FORTIFY_SOURCE")
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} $<$<NOT:$<CONFIG:Debug>>:-D_FORTIFY_SOURCE=2> )
+endif ()
+
+# TBB malloc settings
+set(TBBMALLOC_LIB_COMPILE_FLAGS -fno-rtti -fno-exceptions)
+set(TBB_OPENMP_FLAG -fopenmp)
diff --git a/src/tbb/cmake/compilers/Intel.cmake b/src/tbb/cmake/compilers/Intel.cmake new file mode 100644 index 000000000..531e078ec --- /dev/null +++ b/src/tbb/cmake/compilers/Intel.cmake @@ -0,0 +1,38 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if (MSVC)
+    include(${CMAKE_CURRENT_LIST_DIR}/MSVC.cmake)
+    set(TBB_WARNING_LEVEL ${TBB_WARNING_LEVEL} /W3)
+    set(TBB_OPENMP_FLAG /Qopenmp)
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:/Qipo>)
+    set(TBB_IPO_LINK_FLAGS $<$<NOT:$<CONFIG:Debug>>:/INCREMENTAL:NO>)
+elseif (APPLE)
+    include(${CMAKE_CURRENT_LIST_DIR}/AppleClang.cmake)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -fstack-protector -Wformat -Wformat-security
+        $<$<NOT:$<CONFIG:Debug>>:-fno-omit-frame-pointer -qno-opt-report-embed>)
+    if (NOT CMAKE_CXX_FLAGS MATCHES "_FORTIFY_SOURCE")
+        set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} $<$<NOT:$<CONFIG:Debug>>:-D_FORTIFY_SOURCE=2>)
+    endif ()
+
+    set(TBB_OPENMP_FLAG -qopenmp)
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:-ipo>)
+else()
+    include(${CMAKE_CURRENT_LIST_DIR}/GNU.cmake)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} $<$<EQUAL:${TBB_ARCH},32>:-falign-stack=maintain-16-byte>)
+    set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} -static-intel)
+    set(TBB_OPENMP_FLAG -qopenmp)
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:-ipo>)
+endif()
+set(TBB_IPO_LINK_FLAGS ${TBB_IPO_LINK_FLAGS} ${TBB_IPO_COMPILE_FLAGS})
diff --git a/src/tbb/cmake/compilers/IntelLLVM.cmake b/src/tbb/cmake/compilers/IntelLLVM.cmake new file mode 100644 index 000000000..b51437816 --- /dev/null +++ b/src/tbb/cmake/compilers/IntelLLVM.cmake @@ -0,0 +1,28 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if (WIN32)
+    include(${CMAKE_CURRENT_LIST_DIR}/MSVC.cmake)
+    set(TBB_OPENMP_FLAG /Qopenmp)
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:/Qipo>)
+    set(TBB_IPO_LINK_FLAGS $<$<NOT:$<CONFIG:Debug>>:/INCREMENTAL:NO>)
+else()
+    include(${CMAKE_CURRENT_LIST_DIR}/Clang.cmake)
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:-ipo>)
+    # "--exclude-libs,ALL" is used to avoid accidental exporting of symbols
+    # from statically linked libraries
+    set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} -static-intel -Wl,--exclude-libs,ALL)
+    set(TBB_OPENMP_FLAG -qopenmp)
+endif()
+set(TBB_IPO_LINK_FLAGS ${TBB_IPO_LINK_FLAGS} ${TBB_IPO_COMPILE_FLAGS})
diff --git a/src/tbb/cmake/compilers/MSVC.cmake b/src/tbb/cmake/compilers/MSVC.cmake new file mode 100644 index 000000000..6568ec7eb --- /dev/null +++ b/src/tbb/cmake/compilers/MSVC.cmake @@ -0,0 +1,92 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set(TBB_LINK_DEF_FILE_FLAG ${CMAKE_LINK_DEF_FILE_FLAG})
+set(TBB_DEF_FILE_PREFIX win${TBB_ARCH})
+
+# Workaround for CMake issue https://gitlab.kitware.com/cmake/cmake/issues/18317.
+# TODO: consider use of CMP0092 CMake policy.
+string(REGEX REPLACE "/W[0-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+
+set(TBB_WARNING_LEVEL $<$<NOT:$<CXX_COMPILER_ID:IntelLLVM>>:/W4> $<$<BOOL:${TBB_STRICT}>:/WX>)
+
+# Warning suppression C4324: structure was padded due to alignment specifier
+set(TBB_WARNING_SUPPRESS /wd4324)
+
+set(TBB_TEST_COMPILE_FLAGS ${TBB_TEST_COMPILE_FLAGS} /bigobj)
+if (MSVC_VERSION LESS_EQUAL 1900)
+    # Warning suppression C4503 for VS2015 and earlier:
+    # decorated name length exceeded, name was truncated.
+    # More info can be found at
+    # https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-1-c4503
+    set(TBB_TEST_COMPILE_FLAGS ${TBB_TEST_COMPILE_FLAGS} /wd4503)
+endif()
+set(TBB_LIB_COMPILE_FLAGS -D_CRT_SECURE_NO_WARNINGS /GS)
+set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} /volatile:iso /FS /EHsc)
+
+set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} /DEPENDENTLOADFLAG:0x2000 /DYNAMICBASE /NXCOMPAT)
+
+if (TBB_ARCH EQUAL 32)
+    set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} /SAFESEH )
+endif()
+
+# Ignore /WX set through add_compile_options() or added to CMAKE_CXX_FLAGS if TBB_STRICT is disabled.
+if (NOT TBB_STRICT AND COMMAND tbb_remove_compile_flag)
+    tbb_remove_compile_flag(/WX)
+endif()
+
+if (WINDOWS_STORE OR TBB_WINDOWS_DRIVER)
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} /D_WIN32_WINNT=0x0A00)
+    set(TBB_COMMON_LINK_FLAGS -NODEFAULTLIB:kernel32.lib -INCREMENTAL:NO)
+    set(TBB_COMMON_LINK_LIBS OneCore.lib)
+endif()
+
+if (WINDOWS_STORE)
+    if (NOT CMAKE_SYSTEM_VERSION EQUAL 10.0)
+        message(FATAL_ERROR "CMAKE_SYSTEM_VERSION must be equal to 10.0")
+    endif()
+
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} /ZW /ZW:nostdlib)
+
+    # CMake define this extra lib, remove it for this build type
+    string(REGEX REPLACE "WindowsApp.lib" "" CMAKE_CXX_STANDARD_LIBRARIES "${CMAKE_CXX_STANDARD_LIBRARIES}")
+
+    if (TBB_NO_APPCONTAINER)
+        set(TBB_LIB_LINK_FLAGS ${TBB_LIB_LINK_FLAGS} -APPCONTAINER:NO)
+    endif()
+endif()
+
+if (TBB_WINDOWS_DRIVER)
+    # Since this is universal driver disable this variable
+    set(CMAKE_SYSTEM_PROCESSOR "")
+
+    # CMake define list additional libs, remove it for this build type
+    set(CMAKE_CXX_STANDARD_LIBRARIES "")
+
+    set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} /D _UNICODE /DUNICODE /DWINAPI_FAMILY=WINAPI_FAMILY_APP /D__WRL_NO_DEFAULT_LIB__)
+endif()
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "(Clang|IntelLLVM)")
+    if (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86|AMD64|i.86)")
+        set(TBB_COMMON_COMPILE_FLAGS ${TBB_COMMON_COMPILE_FLAGS} -mrtm -mwaitpkg)
+    endif()
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:-flto>)
+    set(TBB_IPO_LINK_FLAGS $<$<NOT:$<CONFIG:Debug>>:-flto>)
+else()
+    set(TBB_IPO_COMPILE_FLAGS $<$<NOT:$<CONFIG:Debug>>:/GL>)
+    set(TBB_IPO_LINK_FLAGS $<$<NOT:$<CONFIG:Debug>>:-LTCG> $<$<NOT:$<CONFIG:Debug>>:-INCREMENTAL:NO>)
+endif()
+
+set(TBB_OPENMP_FLAG /openmp)
+set(TBB_OPENMP_NO_LINK_FLAG TRUE) # TBB_OPENMP_FLAG will be used only on compilation but not on linkage
diff --git a/src/tbb/cmake/compilers/QCC.cmake b/src/tbb/cmake/compilers/QCC.cmake new file mode 100644 index 000000000..52285a616 --- /dev/null +++ b/src/tbb/cmake/compilers/QCC.cmake @@ -0,0 +1,18 @@
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/compilers/GNU.cmake)
+
+# Remove dl library not present in QNX systems
+unset(TBB_COMMON_LINK_LIBS)
diff --git a/src/tbb/cmake/config_generation.cmake b/src/tbb/cmake/config_generation.cmake new file mode 100644 index 000000000..e4ef7bcea --- /dev/null +++ b/src/tbb/cmake/config_generation.cmake @@ -0,0 +1,142 @@
+# Copyright (c) 2020-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
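A recurring idiom in the compiler configuration files above (AppleClang, Clang, GNU, Intel, IntelLLVM, MSVC) is gating optimization and hardening flags on the build configuration through generator expressions, so that they drop out of Debug builds. A minimal self-contained illustration of the pattern (the target name is hypothetical, not from the diff):

    add_library(example SHARED example.cpp)
    # $<$<NOT:$<CONFIG:Debug>>:-flto> expands to -flto in every configuration except Debug.
    target_compile_options(example PRIVATE $<$<NOT:$<CONFIG:Debug>>:-flto>)
    target_link_options(example PRIVATE $<$<NOT:$<CONFIG:Debug>>:-flto>)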
+ +# Save current location, +# see for details: https://cmake.org/cmake/help/latest/variable/CMAKE_CURRENT_LIST_DIR.html +set(_tbb_gen_cfg_path ${CMAKE_CURRENT_LIST_DIR}) + +include(CMakeParseArguments) + +function(tbb_generate_config) + set(options HANDLE_SUBDIRS) + set(oneValueArgs INSTALL_DIR + SYSTEM_NAME + LIB_REL_PATH INC_REL_PATH + VERSION + TBB_BINARY_VERSION + TBBMALLOC_BINARY_VERSION + TBBMALLOC_PROXY_BINARY_VERSION + TBBBIND_BINARY_VERSION) + + cmake_parse_arguments(tbb_gen_cfg "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + get_filename_component(config_install_dir ${tbb_gen_cfg_INSTALL_DIR} ABSOLUTE) + file(MAKE_DIRECTORY ${config_install_dir}) + + file(TO_CMAKE_PATH "${tbb_gen_cfg_LIB_REL_PATH}" TBB_LIB_REL_PATH) + file(TO_CMAKE_PATH "${tbb_gen_cfg_INC_REL_PATH}" TBB_INC_REL_PATH) + + set(TBB_VERSION ${tbb_gen_cfg_VERSION}) + + set(_tbb_pc_lib_name tbb) + set(_prefix_for_pc_file "\${pcfiledir}/../../") + set(_includedir_for_pc_file "\${prefix}/include") + + set(TBB_COMPONENTS_BIN_VERSION " +set(_tbb_bin_version ${tbb_gen_cfg_TBB_BINARY_VERSION}) +set(_tbbmalloc_bin_version ${tbb_gen_cfg_TBBMALLOC_BINARY_VERSION}) +set(_tbbmalloc_proxy_bin_version ${tbb_gen_cfg_TBBMALLOC_PROXY_BINARY_VERSION}) +set(_tbbbind_bin_version ${tbb_gen_cfg_TBBBIND_BINARY_VERSION}) +") + + if (tbb_gen_cfg_SYSTEM_NAME STREQUAL "Linux") + set(TBB_LIB_PREFIX "lib") + set(TBB_LIB_EXT "so.\${_\${_tbb_component}_bin_version}") + + set (TBB_HANDLE_IMPLIB " + set (_tbb_release_dll \${_tbb_release_lib}) + set (_tbb_debug_dll \${_tbb_debug_lib}) +") + if (tbb_gen_cfg_HANDLE_SUBDIRS) + set(TBB_HANDLE_SUBDIRS "set(_tbb_subdir gcc4.8)") + + set(_libdir_for_pc_file "\${prefix}/lib/intel64/gcc4.8") + set(_tbb_pc_extra_libdir "-L\${prefix}/lib") + configure_file(${_tbb_gen_cfg_path}/../integration/pkg-config/tbb.pc.in ${config_install_dir}/tbb.pc @ONLY) + + set(_libdir_for_pc_file "\${prefix}/lib/ia32/gcc4.8") + set(_tbb_pc_extra_libdir "-L\${prefix}/lib32") + configure_file(${_tbb_gen_cfg_path}/../integration/pkg-config/tbb.pc.in ${config_install_dir}/tbb32.pc @ONLY) + endif() + elseif (tbb_gen_cfg_SYSTEM_NAME STREQUAL "Darwin") + set(TBB_LIB_PREFIX "lib") + set(TBB_LIB_EXT "\${_\${_tbb_component}_bin_version}.dylib") + + set (TBB_HANDLE_IMPLIB " + set (_tbb_release_dll \${_tbb_release_lib}) + set (_tbb_debug_dll \${_tbb_debug_lib}) +") + set(_libdir_for_pc_file "\${prefix}/lib") + configure_file(${_tbb_gen_cfg_path}/../integration/pkg-config/tbb.pc.in ${config_install_dir}/tbb.pc @ONLY) + elseif (tbb_gen_cfg_SYSTEM_NAME STREQUAL "Windows") + set(TBB_LIB_PREFIX "") + set(TBB_LIB_EXT "lib") + set(TBB_COMPILE_DEFINITIONS " + INTERFACE_COMPILE_DEFINITIONS \"__TBB_NO_IMPLICIT_LINKAGE=1\"") + + # .lib - installed to TBB_LIB_REL_PATH (e.g. 
<tbb_root>/lib) and are passed as IMPORTED_IMPLIB_<CONFIG> property to target
+    # .dll - installed to <tbb_root>/bin or <tbb_root>/redist and are passed as IMPORTED_LOCATION_<CONFIG> property to target
+        set (TBB_HANDLE_IMPLIB "
+    find_file(_tbb_release_dll
+        NAMES \${_tbb_component}\${_bin_version}.dll
+        PATHS \${_tbb_root}
+        PATH_SUFFIXES \"redist/\${_tbb_intel_arch}/\${_tbb_subdir}\" \"bin\${_tbb_arch_suffix}/\${_tbb_subdir}\" \"bin\${_tbb_arch_suffix}/\" \"bin\"
+        NO_DEFAULT_PATH
+    )
+
+    if (EXISTS \"\${_tbb_debug_lib}\")
+        find_file(_tbb_debug_dll
+            NAMES \${_tbb_component}\${_bin_version}_debug.dll
+            PATHS \${_tbb_root}
+            PATH_SUFFIXES \"redist/\${_tbb_intel_arch}/\${_tbb_subdir}\" \"bin\${_tbb_arch_suffix}/\${_tbb_subdir}\" \"bin\${_tbb_arch_suffix}/\" \"bin\"
+            NO_DEFAULT_PATH
+        )
+    endif()
+")
+        set(TBB_IMPLIB_RELEASE "
+                                      IMPORTED_IMPLIB_RELEASE \"\${_tbb_release_lib}\"")
+        set(TBB_IMPLIB_DEBUG "
+                                      IMPORTED_IMPLIB_DEBUG \"\${_tbb_debug_lib}\"")
+
+        if (tbb_gen_cfg_HANDLE_SUBDIRS)
+            set(TBB_HANDLE_SUBDIRS "
+set(_tbb_subdir vc14)
+if (WINDOWS_STORE)
+    set(_tbb_subdir \${_tbb_subdir}_uwp)
+endif()
+")
+            set(_tbb_pc_lib_name ${_tbb_pc_lib_name}${TBB_BINARY_VERSION})
+
+            set(_libdir_for_pc_file "\${prefix}/lib/intel64/vc14")
+            set(_tbb_pc_extra_libdir "-L\${prefix}/lib")
+            configure_file(${_tbb_gen_cfg_path}/../integration/pkg-config/tbb.pc.in ${config_install_dir}/tbb.pc @ONLY)
+
+            set(_libdir_for_pc_file "\${prefix}/lib/ia32/vc14")
+            set(_tbb_pc_extra_libdir "-L\${prefix}/lib32")
+            configure_file(${_tbb_gen_cfg_path}/../integration/pkg-config/tbb.pc.in ${config_install_dir}/tbb32.pc @ONLY)
+        endif()
+
+        set(TBB_HANDLE_BIN_VERSION "
+    unset(_bin_version)
+    if (_tbb_component STREQUAL tbb)
+        set(_bin_version \${_tbb_bin_version})
+    endif()
+")
+    else()
+        message(FATAL_ERROR "Unsupported OS name: ${tbb_system_name}")
+    endif()
+
+    configure_file(${_tbb_gen_cfg_path}/templates/TBBConfig.cmake.in ${config_install_dir}/TBBConfig.cmake @ONLY)
+    configure_file(${_tbb_gen_cfg_path}/templates/TBBConfigVersion.cmake.in ${config_install_dir}/TBBConfigVersion.cmake @ONLY)
+endfunction()
diff --git a/src/tbb/cmake/hwloc_detection.cmake b/src/tbb/cmake/hwloc_detection.cmake new file mode 100644 index 000000000..aaca5a599 --- /dev/null +++ b/src/tbb/cmake/hwloc_detection.cmake @@ -0,0 +1,64 @@
+# Copyright (c) 2020-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+list(APPEND HWLOC_REQUIRED_VERSIONS 1_11 2 2_5)
+
+foreach(hwloc_version ${HWLOC_REQUIRED_VERSIONS})
+    if (NOT WIN32)
+        set(CMAKE_HWLOC_${hwloc_version}_DLL_PATH STUB)
+    endif()
+    set(HWLOC_TARGET_NAME HWLOC::hwloc_${hwloc_version})
+
+    if (NOT TARGET ${HWLOC_TARGET_NAME} AND
+        CMAKE_HWLOC_${hwloc_version}_LIBRARY_PATH AND
+        CMAKE_HWLOC_${hwloc_version}_DLL_PATH AND
+        CMAKE_HWLOC_${hwloc_version}_INCLUDE_PATH
+    )
+        add_library(${HWLOC_TARGET_NAME} SHARED IMPORTED)
+        set_target_properties(${HWLOC_TARGET_NAME} PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
+            "${CMAKE_HWLOC_${hwloc_version}_INCLUDE_PATH}")
+        if (WIN32)
+            set_target_properties(${HWLOC_TARGET_NAME} PROPERTIES
+                IMPORTED_LOCATION "${CMAKE_HWLOC_${hwloc_version}_DLL_PATH}"
+                IMPORTED_IMPLIB "${CMAKE_HWLOC_${hwloc_version}_LIBRARY_PATH}")
+        else()
+            set_target_properties(${HWLOC_TARGET_NAME} PROPERTIES
+                IMPORTED_LOCATION "${CMAKE_HWLOC_${hwloc_version}_LIBRARY_PATH}")
+        endif()
+    endif()
+
+    if (TARGET ${HWLOC_TARGET_NAME})
+        set(HWLOC_TARGET_EXPLICITLY_DEFINED TRUE)
+    endif()
+endforeach()
+
+unset(HWLOC_TARGET_NAME)
+
+if (NOT HWLOC_TARGET_EXPLICITLY_DEFINED AND
+    NOT TBB_DISABLE_HWLOC_AUTOMATIC_SEARCH
+)
+    find_package(PkgConfig QUIET)
+    if (PKG_CONFIG_FOUND AND NOT CMAKE_VERSION VERSION_LESS 3.6)
+        pkg_search_module(HWLOC hwloc IMPORTED_TARGET)
+        if (TARGET PkgConfig::HWLOC)
+            if (HWLOC_VERSION VERSION_LESS 2)
+                set(TBBBIND_LIBRARY_NAME tbbbind)
+            elseif(HWLOC_VERSION VERSION_LESS 2.5)
+                set(TBBBIND_LIBRARY_NAME tbbbind_2_0)
+            else()
+                set(TBBBIND_LIBRARY_NAME tbbbind_2_5)
+            endif()
+        endif()
+    endif()
+endif()
diff --git a/src/tbb/cmake/memcheck.cmake b/src/tbb/cmake/memcheck.cmake new file mode 100644 index 000000000..fd5e920c5 --- /dev/null +++ b/src/tbb/cmake/memcheck.cmake @@ -0,0 +1,112 @@
+# Copyright (c) 2020-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+option(TBB_VALGRIND_MEMCHECK "Enable scan for memory leaks using Valgrind" OFF)
+
+if (NOT TBB_VALGRIND_MEMCHECK)
+    return()
+endif()
+
+add_custom_target(memcheck-all
+    COMMENT "Run memcheck on all tests")
+
+find_program(VALGRIND_EXE valgrind)
+
+if (NOT VALGRIND_EXE)
+    message(FATAL_ERROR "Valgrind executable is not found, add tool to PATH or turn off TBB_VALGRIND_MEMCHECK")
+else()
+    message(STATUS "Found Valgrind to run memory leak scan")
+endif()
+
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/memcheck)
+
+function(_tbb_run_memcheck test_target subdir)
+    set(target_name memcheck-${test_target})
+    if(${subdir} STREQUAL "tbbmalloc")
+        # Valgrind intercepts all allocation symbols with its own by default,
+        # so it disables using tbbmalloc. In case of tbbmalloc tests
+        # intercept allocation symbols only in the default system libraries,
+        # but not in any other shared library or the executable
+        # defining public malloc or operator new related functions.
+        set(option "--soname-synonyms=somalloc=nouserintercepts")
+    endif()
+    add_custom_target(${target_name}
+        COMMAND ${VALGRIND_EXE} ${option} --leak-check=full --show-leak-kinds=all --log-file=${CMAKE_BINARY_DIR}/memcheck/${target_name}.log -v $<TARGET_FILE:${test_target}>)
+    add_dependencies(memcheck-all ${target_name})
+endfunction()
+
+add_custom_target(memcheck-short
+    COMMENT "Run memcheck scan on specified list")
+
+# List of reasonable and quick enough tests to use in automated memcheck
+add_dependencies(memcheck-short
+    memcheck-test_allocators
+    memcheck-test_arena_constraints
+    memcheck-test_dynamic_link
+    memcheck-test_concurrent_lru_cache
+    memcheck-conformance_concurrent_unordered_map
+    memcheck-conformance_concurrent_unordered_set
+    memcheck-conformance_concurrent_map
+    memcheck-conformance_concurrent_set
+    memcheck-conformance_concurrent_priority_queue
+    memcheck-conformance_concurrent_vector
+    memcheck-conformance_concurrent_queue
+    memcheck-conformance_concurrent_hash_map
+    memcheck-test_parallel_for
+    memcheck-test_parallel_for_each
+    memcheck-test_parallel_reduce
+    memcheck-test_parallel_sort
+    memcheck-test_parallel_invoke
+    memcheck-test_parallel_scan
+    memcheck-test_parallel_pipeline
+    memcheck-test_eh_algorithms
+    memcheck-test_task_group
+    memcheck-test_task_arena
+    memcheck-test_enumerable_thread_specific
+    memcheck-test_resumable_tasks
+    memcheck-conformance_mutex
+    memcheck-test_function_node
+    memcheck-test_multifunction_node
+    memcheck-test_broadcast_node
+    memcheck-test_buffer_node
+    memcheck-test_composite_node
+    memcheck-test_continue_node
+    memcheck-test_eh_flow_graph
+    memcheck-test_flow_graph
+    memcheck-test_flow_graph_priorities
+    memcheck-test_flow_graph_whitebox
+    memcheck-test_indexer_node
+    memcheck-test_join_node
+    memcheck-test_join_node_key_matching
+    memcheck-test_join_node_msg_key_matching
+    memcheck-test_priority_queue_node
+    memcheck-test_sequencer_node
+    memcheck-test_split_node
+    memcheck-test_tagged_msg
+    memcheck-test_overwrite_node
+    memcheck-test_write_once_node
+    memcheck-test_async_node
+    memcheck-test_input_node
+    memcheck-test_profiling
+    memcheck-test_concurrent_queue_whitebox
+    memcheck-test_intrusive_list
+    memcheck-test_semaphore
+    memcheck-test_environment_whitebox
+    memcheck-test_handle_perror
+    memcheck-test_hw_concurrency
+    memcheck-test_eh_thread
+    memcheck-test_global_control
+    memcheck-test_task
+    memcheck-test_concurrent_monitor
+)
diff --git a/src/tbb/cmake/packaging.cmake b/src/tbb/cmake/packaging.cmake new file mode 100644 index 000000000..aa2acc4d4 --- /dev/null +++ b/src/tbb/cmake/packaging.cmake @@ -0,0 +1,24 @@
+# Copyright (c) 2020-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: current implementation uses CMAKE_BUILD_TYPE,
+# this parameter is not defined for multi-config generators.
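Wiring an additional test into the memcheck machinery defined above is a single call to the in-file helper; the scans themselves are then driven through the generated build targets. A hedged usage sketch (the test target name is hypothetical):

    # Registers a 'memcheck-test_mynew' target that wraps the test executable in Valgrind.
    _tbb_run_memcheck(test_mynew tbb)

After configuring with -DTBB_VALGRIND_MEMCHECK=ON, building the memcheck-all (or memcheck-short) target runs the registered tests and writes per-test logs under the build tree's memcheck/ directory.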
+set(CPACK_PACKAGE_NAME "${PROJECT_NAME}") +set(CPACK_PACKAGE_VERSION "${TBB_VERSION}") +string(TOLOWER ${CPACK_PACKAGE_NAME}-${PROJECT_VERSION}-${CMAKE_SYSTEM_NAME}_${TBB_OUTPUT_DIR_BASE}_${CMAKE_BUILD_TYPE} CPACK_PACKAGE_FILE_NAME) +set(CPACK_GENERATOR ZIP) +# Note: this is an internal non-documented variable set by CPack +if (NOT CPack_CMake_INCLUDED) + include(CPack) +endif() diff --git a/src/tbb/cmake/post_install/CMakeLists.txt b/src/tbb/cmake/post_install/CMakeLists.txt new file mode 100644 index 000000000..a7a025457 --- /dev/null +++ b/src/tbb/cmake/post_install/CMakeLists.txt @@ -0,0 +1,22 @@ +# Copyright (c) 2020-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add code signing as post-install step. +if (DEFINED TBB_SIGNTOOL) + file(TO_CMAKE_PATH "${TBB_SIGNTOOL}" TBB_SIGNTOOL) + install(CODE " + file(GLOB_RECURSE FILES_TO_SIGN \${CMAKE_INSTALL_PREFIX}/*${CMAKE_SHARED_LIBRARY_SUFFIX}) + execute_process(COMMAND ${TBB_SIGNTOOL} \${FILES_TO_SIGN} ${TBB_SIGNTOOL_ARGS}) +") +endif() diff --git a/src/tbb/cmake/python/test_launcher.cmake b/src/tbb/cmake/python/test_launcher.cmake new file mode 100644 index 000000000..9b1bdde98 --- /dev/null +++ b/src/tbb/cmake/python/test_launcher.cmake @@ -0,0 +1,38 @@ +# Copyright (c) 2020-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+find_package(PythonInterp 3.5 REQUIRED)
+
+file(GLOB_RECURSE MODULES_LIST "${PYTHON_MODULE_BUILD_PATH}/*TBB.py*" )
+list(LENGTH MODULES_LIST MODULES_COUNT)
+
+if (MODULES_COUNT EQUAL 0)
+    message(FATAL_ERROR "Cannot find oneTBB Python module")
+elseif (MODULES_COUNT GREATER 1)
+    message(WARNING "Found more than one oneTBB Python module; only the first module found will be tested")
+endif()
+
+list(GET MODULES_LIST 0 PYTHON_MODULE)
+get_filename_component(PYTHON_MODULE_PATH ${PYTHON_MODULE} DIRECTORY)
+
+execute_process(
+    COMMAND
+        ${CMAKE_COMMAND} -E env LD_LIBRARY_PATH=${TBB_BINARIES_PATH}
+        ${PYTHON_EXECUTABLE} -m tbb test
+    WORKING_DIRECTORY ${PYTHON_MODULE_PATH}
+    RESULT_VARIABLE CMD_RESULT
+)
+if (CMD_RESULT)
+    message(FATAL_ERROR "Error while test execution: ${cmd} error_code: ${CMD_RESULT}")
+endif()
diff --git a/src/tbb/cmake/resumable_tasks.cmake b/src/tbb/cmake/resumable_tasks.cmake new file mode 100644 index 000000000..d379d4ed3 --- /dev/null +++ b/src/tbb/cmake/resumable_tasks.cmake @@ -0,0 +1,31 @@
+# Copyright (c) 2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include(CheckSymbolExists)
+
+if (UNIX)
+    set(CMAKE_REQUIRED_FLAGS -Wno-deprecated-declarations)
+    if (APPLE)
+        set(CMAKE_REQUIRED_DEFINITIONS -D_XOPEN_SOURCE)
+    endif()
+
+    check_symbol_exists("getcontext" "ucontext.h" _tbb_have_ucontext)
+    if (NOT _tbb_have_ucontext)
+        set(TBB_RESUMABLE_TASKS_USE_THREADS "__TBB_RESUMABLE_TASKS_USE_THREADS=1")
+    endif()
+
+    unset(_tbb_have_ucontext)
+    unset(CMAKE_REQUIRED_DEFINITIONS)
+    unset(CMAKE_REQUIRED_FLAGS)
+endif()
diff --git a/src/tbb/cmake/sanitize.cmake b/src/tbb/cmake/sanitize.cmake new file mode 100644 index 000000000..d07b32986 --- /dev/null +++ b/src/tbb/cmake/sanitize.cmake @@ -0,0 +1,43 @@
+# Copyright (c) 2020-2022 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set(TBB_SANITIZE ${TBB_SANITIZE} CACHE STRING "Sanitizer parameter passed to compiler/linker" FORCE)
+# Possible values of sanitizer parameter for cmake-gui for convenience, user still can use any other value.
+set_property(CACHE TBB_SANITIZE PROPERTY STRINGS "thread" "memory" "leak" "address -fno-omit-frame-pointer")
+
+if (NOT TBB_SANITIZE)
+    return()
+endif()
+
+set(TBB_SANITIZE_OPTION -fsanitize=${TBB_SANITIZE})
+
+# It is required to add the sanitizer option to CMAKE_REQUIRED_LIBRARIES to make check_cxx_compiler_flag work properly:
+# the sanitizer option should be passed during the compilation phase as well as during the linkage phase.
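The CMAKE_REQUIRED_LIBRARIES note above matters because check_cxx_compiler_flag() both compiles and links a small test program: a -fsanitize=... option that reaches only the compile step would make the trial link fail and the probe report a false negative. A minimal sketch of the same idiom in isolation (the flag and result names here are hypothetical):

    include(CheckCXXCompilerFlag)
    set(_san_flag -fsanitize=thread)
    # Carry the sanitizer option into the trial link step as well.
    set(CMAKE_REQUIRED_LIBRARIES "${_san_flag} ${CMAKE_REQUIRED_LIBRARIES}")
    check_cxx_compiler_flag(${_san_flag} HAVE_FSANITIZE_THREAD)

The file's own implementation of this check follows, driven by the TBB_SANITIZE cache variable (for example, configuring with -DTBB_SANITIZE=thread).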
+set(CMAKE_REQUIRED_LIBRARIES "${TBB_SANITIZE_OPTION} ${CMAKE_REQUIRED_LIBRARIES}")
+
+string(MAKE_C_IDENTIFIER ${TBB_SANITIZE_OPTION} FLAG_DISPLAY_NAME)
+check_cxx_compiler_flag(${TBB_SANITIZE_OPTION} ${FLAG_DISPLAY_NAME})
+if (NOT ${FLAG_DISPLAY_NAME})
+    message(FATAL_ERROR
+        "${TBB_SANITIZE_OPTION} is not supported by compiler ${CMAKE_CXX_COMPILER_ID}:${CMAKE_CXX_COMPILER_VERSION}, "
+        "please try another compiler or omit TBB_SANITIZE variable")
+endif()
+
+set(TBB_TESTS_ENVIRONMENT ${TBB_TESTS_ENVIRONMENT}
+    "TSAN_OPTIONS=suppressions=${CMAKE_CURRENT_SOURCE_DIR}/cmake/suppressions/tsan.suppressions"
+    "LSAN_OPTIONS=suppressions=${CMAKE_CURRENT_SOURCE_DIR}/cmake/suppressions/lsan.suppressions")
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TBB_SANITIZE_OPTION}")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${TBB_SANITIZE_OPTION}")
+set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${TBB_SANITIZE_OPTION}")
diff --git a/src/tbb/cmake/scripts/cmake_gen_github_configs.cmake b/src/tbb/cmake/scripts/cmake_gen_github_configs.cmake new file mode 100644 index 000000000..4d9eb73d6 --- /dev/null +++ b/src/tbb/cmake/scripts/cmake_gen_github_configs.cmake @@ -0,0 +1,49 @@
+# Copyright (c) 2020-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include(${CMAKE_CURRENT_LIST_DIR}/../config_generation.cmake)
+
+# TBBConfig in TBB provided packages are expected to be placed into: <tbb-root>/lib/cmake/tbb*
+set(TBB_ROOT_REL_PATH "../../..")
+
+# Paths relative to TBB root directory
+set(INC_REL_PATH "include")
+set(LIB_REL_PATH "lib")
+
+# Parse version info
+file(READ ${CMAKE_CURRENT_LIST_DIR}/../../include/oneapi/tbb/version.h _tbb_version_info)
+string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" _tbb_ver_major "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" _tbb_ver_minor "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define TBB_VERSION_PATCH ([0-9]+).*" "\\1" _tbb_ver_patch "${_tbb_version_info}")
+string(REGEX REPLACE ".*#define __TBB_BINARY_VERSION ([0-9]+).*" "\\1" TBB_BINARY_VERSION "${_tbb_version_info}")
+file(READ ${CMAKE_CURRENT_LIST_DIR}/../../CMakeLists.txt _tbb_cmakelist)
+string(REGEX REPLACE ".*TBBMALLOC_BINARY_VERSION ([0-9]+).*" "\\1" TBBMALLOC_BINARY_VERSION "${_tbb_cmakelist}")
+set(TBBMALLOC_PROXY_BINARY_VERSION ${TBBMALLOC_BINARY_VERSION})
+string(REGEX REPLACE ".*TBBBIND_BINARY_VERSION ([0-9]+).*" "\\1" TBBBIND_BINARY_VERSION "${_tbb_cmakelist}")
+
+set(COMMON_ARGS
+    TBB_ROOT_REL_PATH ${TBB_ROOT_REL_PATH}
+    INC_REL_PATH ${INC_REL_PATH}
+    LIB_REL_PATH ${LIB_REL_PATH}
+    VERSION ${_tbb_ver_major}.${_tbb_ver_minor}.${_tbb_ver_patch}
+    TBB_BINARY_VERSION ${TBB_BINARY_VERSION}
+    TBBMALLOC_BINARY_VERSION ${TBBMALLOC_BINARY_VERSION}
+    TBBMALLOC_PROXY_BINARY_VERSION ${TBBMALLOC_PROXY_BINARY_VERSION}
+    TBBBIND_BINARY_VERSION ${TBBBIND_BINARY_VERSION}
+)
+
+tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/linux SYSTEM_NAME Linux HANDLE_SUBDIRS ${COMMON_ARGS})
+tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/windows SYSTEM_NAME
Windows HANDLE_SUBDIRS ${COMMON_ARGS})
+tbb_generate_config(INSTALL_DIR ${INSTALL_DIR}/darwin SYSTEM_NAME Darwin ${COMMON_ARGS})
+message(STATUS "TBBConfig files were created in ${INSTALL_DIR}")
diff --git a/src/tbb/cmake/suppressions/lsan.suppressions b/src/tbb/cmake/suppressions/lsan.suppressions new file mode 100644 index 000000000..4a2dea29b --- /dev/null +++ b/src/tbb/cmake/suppressions/lsan.suppressions @@ -0,0 +1,2 @@
+# LSAN suppression for ltdl library known issue.
+leak:libltdl.so
diff --git a/src/tbb/cmake/suppressions/tsan.suppressions b/src/tbb/cmake/suppressions/tsan.suppressions new file mode 100644 index 000000000..1bbb833b6 --- /dev/null +++ b/src/tbb/cmake/suppressions/tsan.suppressions @@ -0,0 +1,4 @@
+# TSAN suppression for known issues.
+# Possible data race during ittnotify initialization. Low impact.
+race:__itt_nullify_all_pointers
+race:__itt_init_ittlib
diff --git a/src/tbb/cmake/templates/TBBConfig.cmake.in b/src/tbb/cmake/templates/TBBConfig.cmake.in new file mode 100644 index 000000000..3131e3dd7 --- /dev/null +++ b/src/tbb/cmake/templates/TBBConfig.cmake.in @@ -0,0 +1,128 @@
+# Copyright (c) 2017-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# It defines the following variables:
+# TBB_<component>_FOUND
+# TBB_IMPORTED_TARGETS
+#
+# TBBConfigVersion.cmake defines TBB_VERSION
+#
+# Initialize to default values
+if (NOT TBB_IMPORTED_TARGETS)
+    set(TBB_IMPORTED_TARGETS "")
+endif()
+
+if (NOT TBB_FIND_COMPONENTS)
+    set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
+    foreach (_tbb_component ${TBB_FIND_COMPONENTS})
+        set(TBB_FIND_REQUIRED_${_tbb_component} 1)
+    endforeach()
+endif()
+
+get_filename_component(_tbb_root "${CMAKE_CURRENT_LIST_DIR}" REALPATH)
+get_filename_component(_tbb_root "${_tbb_root}/@TBB_ROOT_REL_PATH@" ABSOLUTE)
+
+set(TBB_INTERFACE_VERSION @TBB_INTERFACE_VERSION@)
+@TBB_COMPONENTS_BIN_VERSION@
+# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
+list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
+if (NOT _tbbmalloc_proxy_ix EQUAL -1)
+    list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
+    list(REMOVE_DUPLICATES TBB_FIND_COMPONENTS)
+    set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
+endif()
+unset(_tbbmalloc_proxy_ix)
+
+if (CMAKE_SIZEOF_VOID_P STREQUAL "8")
+    set(_tbb_intel_arch intel64)
+else ()
+    set(_tbb_intel_arch ia32)
+    set(_tbb_arch_suffix 32)
+endif()
+
+@TBB_HANDLE_SUBDIRS@
+foreach (_tbb_component ${TBB_FIND_COMPONENTS})
+    unset(_tbb_release_dll CACHE)
+    unset(_tbb_debug_dll CACHE)
+    unset(_tbb_release_lib CACHE)
+    unset(_tbb_debug_lib CACHE)
+
+    set(TBB_${_tbb_component}_FOUND 0)
+    @TBB_HANDLE_BIN_VERSION@
+
+    find_library(_tbb_release_lib
+        NAMES @TBB_LIB_PREFIX@${_tbb_component}${_bin_version}.@TBB_LIB_EXT@
+        PATHS ${_tbb_root}
+        PATH_SUFFIXES "@TBB_LIB_REL_PATH@/${_tbb_intel_arch}/${_tbb_subdir}" "@TBB_LIB_REL_PATH@${_tbb_arch_suffix}/${_tbb_subdir}" "@TBB_LIB_REL_PATH@${_tbb_arch_suffix}" "@TBB_LIB_REL_PATH@"
+        NO_DEFAULT_PATH
+    )
+
+    if (NOT
TBB_FIND_RELEASE_ONLY) + find_library(_tbb_debug_lib + NAMES @TBB_LIB_PREFIX@${_tbb_component}${_bin_version}_debug.@TBB_LIB_EXT@ + PATHS ${_tbb_root} + PATH_SUFFIXES "@TBB_LIB_REL_PATH@/${_tbb_intel_arch}/${_tbb_subdir}" "@TBB_LIB_REL_PATH@${_tbb_arch_suffix}/${_tbb_subdir}" "@TBB_LIB_REL_PATH@${_tbb_arch_suffix}" "@TBB_LIB_REL_PATH@" + NO_DEFAULT_PATH + ) + endif() + + if (EXISTS "${_tbb_release_lib}" OR EXISTS "${_tbb_debug_lib}") + if (NOT TARGET TBB::${_tbb_component}) + add_library(TBB::${_tbb_component} SHARED IMPORTED) + + get_filename_component(_tbb_include_dir "${_tbb_root}/@TBB_INC_REL_PATH@" ABSOLUTE) + set_target_properties(TBB::${_tbb_component} PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_tbb_include_dir}"@TBB_COMPILE_DEFINITIONS@) + unset(_tbb_current_realpath) + unset(_tbb_include_dir) + + @TBB_HANDLE_IMPLIB@ + + if (EXISTS "${_tbb_release_dll}") + set_target_properties(TBB::${_tbb_component} PROPERTIES + IMPORTED_LOCATION_RELEASE "${_tbb_release_dll}"@TBB_IMPLIB_RELEASE@) + set_property(TARGET TBB::${_tbb_component} APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) + endif() + + if (EXISTS "${_tbb_debug_dll}") + set_target_properties(TBB::${_tbb_component} PROPERTIES + IMPORTED_LOCATION_DEBUG "${_tbb_debug_dll}"@TBB_IMPLIB_DEBUG@) + set_property(TARGET TBB::${_tbb_component} APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG) + endif() + + # Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc + if (_tbb_component STREQUAL tbbmalloc_proxy) + set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc) + endif() + endif() + list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component}) + set(TBB_${_tbb_component}_FOUND 1) + elseif (TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component}) + message(STATUS "Missed required oneTBB component: ${_tbb_component}") + if (TBB_FIND_RELEASE_ONLY) + message(STATUS " ${_tbb_release_lib} must exist.") + else() + message(STATUS " one or both of:\n ${_tbb_release_lib}\n ${_tbb_debug_lib}\n files must exist.") + endif() + set(TBB_FOUND FALSE) + endif() +endforeach() +list(REMOVE_DUPLICATES TBB_IMPORTED_TARGETS) +unset(_tbb_release_dll) +unset(_tbb_debug_dll) +unset(_tbb_release_lib) +unset(_tbb_debug_lib) +unset(_tbb_root) +unset(_tbb_intel_arch) +unset(_tbb_arch_suffix) diff --git a/src/tbb/cmake/templates/TBBConfigVersion.cmake.in b/src/tbb/cmake/templates/TBBConfigVersion.cmake.in new file mode 100644 index 000000000..d97bc3fea --- /dev/null +++ b/src/tbb/cmake/templates/TBBConfigVersion.cmake.in @@ -0,0 +1,24 @@ +# Copyright (c) 2017-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
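On the consuming side, the TBBConfig.cmake generated from the template above is what a plain find_package() call resolves to: it defines TBB::<component> imported targets that carry include paths and library locations as usage requirements. A hedged sketch of typical downstream usage (project and target names are hypothetical):

    find_package(TBB REQUIRED COMPONENTS tbb tbbmalloc)
    add_executable(app main.cpp)
    # No manual include/library path handling; the imported targets supply it.
    target_link_libraries(app PRIVATE TBB::tbb TBB::tbbmalloc)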
+ +set(PACKAGE_VERSION @TBB_VERSION@) + +if ("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_EXACT TRUE) + endif() +endif() diff --git a/src/tbb/cmake/test_spec.cmake b/src/tbb/cmake/test_spec.cmake new file mode 100644 index 000000000..9ac811f50 --- /dev/null +++ b/src/tbb/cmake/test_spec.cmake @@ -0,0 +1,35 @@ +# Copyright (c) 2020-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +option(TBB_TEST_SPEC "Generate test specification (Doxygen)" OFF) + +if (TBB_TEST_SPEC) + find_package(Doxygen REQUIRED) + + set(DOXYGEN_PREDEFINED_MACROS + "TBB_USE_EXCEPTIONS \ + __TBB_RESUMABLE_TASKS \ + __TBB_HWLOC_PRESENT \ + __TBB_CPP17_DEDUCTION_GUIDES_PRESENT \ + __TBB_CPP17_MEMORY_RESOURCE_PRESENT \ + __TBB_CPP14_GENERIC_LAMBDAS_PRESENT" + ) + + add_custom_target( + test_spec ALL + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + COMMENT "Generating test specification with Doxygen" + VERBATIM) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/doc/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) +endif() diff --git a/src/tbb/cmake/toolchains/mips.cmake b/src/tbb/cmake/toolchains/mips.cmake new file mode 100644 index 000000000..13af9519b --- /dev/null +++ b/src/tbb/cmake/toolchains/mips.cmake @@ -0,0 +1,40 @@ +# Copyright (c) 2020-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Prevent double invocation. +if (MIPS_TOOLCHAIN_INCLUDED) + return() +endif() +set(MIPS_TOOLCHAIN_INCLUDED TRUE) + +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_VERSION 1) +set(CMAKE_SYSTEM_PROCESSOR mips) + +set(CMAKE_C_COMPILER ${CMAKE_FIND_ROOT_PATH}/bin/mips-img-linux-gnu-gcc) +set(CMAKE_CXX_COMPILER ${CMAKE_FIND_ROOT_PATH}/bin/mips-img-linux-gnu-g++) +set(CMAKE_LINKER ${CMAKE_FIND_ROOT_PATH}/bin/mips-img-linux-gnu-ld) + +# Define result for try_run used in find_package(Threads). +# In old CMake versions (checked on 3.5) there is invocation of try_run command in FindThreads.cmake module. +# It can't be executed on host system in case of cross-compilation for MIPS architecture. +# Define return code for this try_run as 0 since threads are expected to be available on target machine. 
+set(THREADS_PTHREAD_ARG "0" CACHE STRING "Result from TRY_RUN" FORCE)
+
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -EL -mabi=64 -march=mips64r6 -mcrc -mfp64 -mmt -mtune=mips64r6 -ggdb -ffp-contract=off -mhard-float" CACHE INTERNAL "")
+set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -mvirt -mxpa" CACHE INTERNAL "")
+set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -mvirt -mxpa" CACHE INTERNAL "") # for tests
diff --git a/src/tbb/cmake/toolchains/riscv64.cmake b/src/tbb/cmake/toolchains/riscv64.cmake new file mode 100644 index 000000000..96c0014b6 --- /dev/null +++ b/src/tbb/cmake/toolchains/riscv64.cmake @@ -0,0 +1,34 @@
+# Copyright (c) 2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Prevent double invocation.
+if (RISCV_TOOLCHAIN_INCLUDED)
+    return()
+endif()
+set(RISCV_TOOLCHAIN_INCLUDED TRUE)
+
+set(CMAKE_SYSTEM_NAME Linux)
+set(CMAKE_SYSTEM_VERSION 1)
+set(CMAKE_SYSTEM_PROCESSOR riscv)
+
+# User can use -DCMAKE_FIND_ROOT_PATH to specify the toolchain path
+set(CMAKE_C_COMPILER ${CMAKE_FIND_ROOT_PATH}/bin/riscv64-unknown-linux-gnu-clang)
+set(CMAKE_CXX_COMPILER ${CMAKE_FIND_ROOT_PATH}/bin/riscv64-unknown-linux-gnu-clang++)
+set(CMAKE_LINKER ${CMAKE_FIND_ROOT_PATH}/bin/riscv64-unknown-linux-gnu-ld)
+
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+# Most Linux distributions on riscv64 support the rv64imafd_zba_zbb extensions
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=rv64imafd_zba_zbb -mabi=lp64d " CACHE INTERNAL "")
diff --git a/src/tbb/cmake/utils.cmake b/src/tbb/cmake/utils.cmake new file mode 100644 index 000000000..211019896 --- /dev/null +++ b/src/tbb/cmake/utils.cmake @@ -0,0 +1,74 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
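Among the utility macros defined in the file that follows, tbb_remove_compile_flag() is the counterpart to the "ignore -Werror (or /WX) when TBB_STRICT is disabled" blocks in the compiler files earlier: it scrubs a flag from both the directory-level COMPILE_OPTIONS property and CMAKE_CXX_FLAGS. A minimal usage sketch (illustrative):

    add_compile_options(-Werror)
    # ...later, opt the current directory back out of -Werror:
    tbb_remove_compile_flag(-Werror)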
+
+macro(tbb_remove_compile_flag flag)
+    get_property(_tbb_compile_options DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY COMPILE_OPTIONS)
+    list(REMOVE_ITEM _tbb_compile_options ${flag})
+    set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY COMPILE_OPTIONS ${_tbb_compile_options})
+    unset(_tbb_compile_options)
+    if (CMAKE_CXX_FLAGS)
+        string(REGEX REPLACE "(^|[ \t\r\n]+)${flag}($|[ \t\r\n]+)" " " CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+    endif()
+endmacro()
+
+macro(tbb_install_target target)
+    if (TBB_INSTALL)
+        install(TARGETS ${target}
+            EXPORT TBBTargets
+            LIBRARY
+                DESTINATION ${CMAKE_INSTALL_LIBDIR}
+                NAMELINK_SKIP
+                COMPONENT runtime
+            RUNTIME
+                DESTINATION ${CMAKE_INSTALL_BINDIR}
+                COMPONENT runtime
+            ARCHIVE
+                DESTINATION ${CMAKE_INSTALL_LIBDIR}
+                COMPONENT devel
+            FRAMEWORK
+                DESTINATION ${CMAKE_INSTALL_LIBDIR}
+                COMPONENT runtime
+            OPTIONAL)
+
+        if (BUILD_SHARED_LIBS)
+            install(TARGETS ${target}
+                LIBRARY
+                    DESTINATION ${CMAKE_INSTALL_LIBDIR}
+                    NAMELINK_ONLY
+                    COMPONENT devel)
+        endif()
+        if (MSVC AND BUILD_SHARED_LIBS)
+            install(FILES $<TARGET_PDB_FILE:${target}>
+                DESTINATION ${CMAKE_INSTALL_BINDIR}
+                COMPONENT devel
+                OPTIONAL)
+        endif()
+    endif()
+endmacro()
+
+macro(tbb_handle_ipo target)
+    if (TBB_IPO_PROPERTY)
+        set_target_properties(${target} PROPERTIES
+            INTERPROCEDURAL_OPTIMIZATION TRUE
+            INTERPROCEDURAL_OPTIMIZATION_DEBUG FALSE
+        )
+    elseif (TBB_IPO_FLAGS)
+        target_compile_options(${target} PRIVATE ${TBB_IPO_COMPILE_FLAGS})
+        if (COMMAND target_link_options)
+            target_link_options(${target} PRIVATE ${TBB_IPO_LINK_FLAGS})
+        else()
+            target_link_libraries(${target} PRIVATE ${TBB_IPO_LINK_FLAGS})
+        endif()
+    endif()
+endmacro()
diff --git a/src/tbb/cmake/vars_utils.cmake b/src/tbb/cmake/vars_utils.cmake new file mode 100644 index 000000000..54a9fda18 --- /dev/null +++ b/src/tbb/cmake/vars_utils.cmake @@ -0,0 +1,56 @@
+# Copyright (c) 2020-2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+option(TBB_INSTALL_VARS "Enable auto-generated vars installation" OFF)
+
+if (WIN32)
+    set(TBB_VARS_TEMPLATE "windows/env/vars.bat.in")
+elseif (APPLE)
+    set(TBB_VARS_TEMPLATE "mac/env/vars.sh.in")
+else()
+    set(TBB_VARS_TEMPLATE "linux/env/vars.sh.in")
+endif()
+
+get_filename_component(TBB_VARS_TEMPLATE_NAME ${PROJECT_SOURCE_DIR}/integration/${TBB_VARS_TEMPLATE} NAME)
+string(REPLACE ".in" "" TBB_VARS_NAME ${TBB_VARS_TEMPLATE_NAME})
+
+macro(tbb_gen_vars target)
+    if (NOT TBB_BUILD_APPLE_FRAMEWORKS)
+        set(BIN_PATH $<TARGET_FILE_DIR:${target}>)
+    else()
+        # For Apple* frameworks, the binaries are placed in a framework bundle.
+        # When using an Apple* framework, you refer to the bundle, not the binary inside, so we take the bundle's path and go up one level.
+        # This path will then be used to generate the vars file, and the contents of the vars file will use the bundle's parent directory.
+        set(BIN_PATH $<TARGET_BUNDLE_DIR:${target}>/..)
+ endif() + if (${CMAKE_PROJECT_NAME} STREQUAL ${PROJECT_NAME}) + add_custom_command(TARGET ${target} POST_BUILD COMMAND + ${CMAKE_COMMAND} + -DBINARY_DIR=${CMAKE_BINARY_DIR} + -DSOURCE_DIR=${PROJECT_SOURCE_DIR} + -DBIN_PATH=${BIN_PATH} + -DVARS_TEMPLATE=${TBB_VARS_TEMPLATE} + -DVARS_NAME=${TBB_VARS_NAME} + -DTBB_INSTALL_VARS=${TBB_INSTALL_VARS} + -DTBB_CMAKE_INSTALL_LIBDIR=${CMAKE_INSTALL_LIBDIR} + -P ${PROJECT_SOURCE_DIR}/integration/cmake/generate_vars.cmake + ) + endif() +endmacro(tbb_gen_vars) + +if (TBB_INSTALL_VARS) + install(PROGRAMS "${CMAKE_BINARY_DIR}/internal_install_vars" + DESTINATION env + RENAME ${TBB_VARS_NAME}) +endif() diff --git a/src/tbb/include/index.html b/src/tbb/include/index.html deleted file mode 100644 index 0c85b47f8..000000000 --- a/src/tbb/include/index.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

-Overview
-
-Include files for Intel® Threading Building Blocks (Intel® TBB).
-
-Directories
-
-tbb
-Include files for Intel TBB classes and functions.
-
-Up to parent directory
-
-Copyright © 2005-2014 Intel Corporation. All Rights Reserved.
-
-Intel is a registered trademark or trademark of Intel Corporation
-or its subsidiaries in the United States and other countries.
-
-* Other names and brands may be claimed as the property of others. - - diff --git a/src/tbb/include/oneapi/tbb.h b/src/tbb/include/oneapi/tbb.h new file mode 100644 index 000000000..ad9601137 --- /dev/null +++ b/src/tbb/include/oneapi/tbb.h @@ -0,0 +1,76 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_H +#define __TBB_tbb_H + +/** + This header bulk-includes declarations or definitions of all the functionality + provided by TBB (save for tbbmalloc and 3rd party dependent headers). + + If you use only a few TBB constructs, consider including specific headers only. + Any header listed below can be included independently of others. +**/ + +#include "oneapi/tbb/blocked_range.h" +#include "oneapi/tbb/blocked_range2d.h" +#include "oneapi/tbb/blocked_range3d.h" +#if TBB_PREVIEW_BLOCKED_RANGE_ND +#include "tbb/blocked_rangeNd.h" +#endif +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/combinable.h" +#include "oneapi/tbb/concurrent_hash_map.h" +#if TBB_PREVIEW_CONCURRENT_LRU_CACHE +#include "tbb/concurrent_lru_cache.h" +#endif +#include "oneapi/tbb/collaborative_call_once.h" +#include "oneapi/tbb/concurrent_priority_queue.h" +#include "oneapi/tbb/concurrent_queue.h" +#include "oneapi/tbb/concurrent_unordered_map.h" +#include "oneapi/tbb/concurrent_unordered_set.h" +#include "oneapi/tbb/concurrent_map.h" +#include "oneapi/tbb/concurrent_set.h" +#include "oneapi/tbb/concurrent_vector.h" +#include "oneapi/tbb/enumerable_thread_specific.h" +#include "oneapi/tbb/flow_graph.h" +#include "oneapi/tbb/global_control.h" +#include "oneapi/tbb/info.h" +#include "oneapi/tbb/null_mutex.h" +#include "oneapi/tbb/null_rw_mutex.h" +#include "oneapi/tbb/parallel_for.h" +#include "oneapi/tbb/parallel_for_each.h" +#include "oneapi/tbb/parallel_invoke.h" +#include "oneapi/tbb/parallel_pipeline.h" +#include "oneapi/tbb/parallel_reduce.h" +#include "oneapi/tbb/parallel_scan.h" +#include "oneapi/tbb/parallel_sort.h" +#include "oneapi/tbb/partitioner.h" +#include "oneapi/tbb/queuing_mutex.h" +#include "oneapi/tbb/queuing_rw_mutex.h" +#include "oneapi/tbb/spin_mutex.h" +#include "oneapi/tbb/spin_rw_mutex.h" +#include "oneapi/tbb/mutex.h" +#include "oneapi/tbb/rw_mutex.h" +#include "oneapi/tbb/task.h" +#include "oneapi/tbb/task_arena.h" +#include "oneapi/tbb/task_group.h" +#include "oneapi/tbb/task_scheduler_observer.h" +#include "oneapi/tbb/tbb_allocator.h" +#include "oneapi/tbb/tick_count.h" +#include "oneapi/tbb/version.h" + +#endif /* __TBB_tbb_H */ diff --git a/inst/include/tbb/blocked_range.h b/src/tbb/include/oneapi/tbb/blocked_range.h similarity index 65% rename from inst/include/tbb/blocked_range.h rename to src/tbb/include/oneapi/tbb/blocked_range.h index 4b95bf17c..12862fa2a 100644 --- a/inst/include/tbb/blocked_range.h +++ b/src/tbb/include/oneapi/tbb/blocked_range.h @@ -1,29 +1,32 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_blocked_range_H #define __TBB_blocked_range_H -#include "tbb_stddef.h" +#include + +#include "detail/_range_common.h" +#include "detail/_namespace_injection.h" + +#include "version.h" namespace tbb { +namespace detail { +namespace d1 { /** \page range_req Requirements on range concept Class \c R implementing the concept of range must define: @@ -37,19 +40,16 @@ namespace tbb { //! A range over which to iterate. /** @ingroup algorithms */ template + __TBB_requires(blocked_range_value) class blocked_range { public: //! Type of a value /** Called a const_iterator for sake of algorithms that need to treat a blocked_range as an STL container. */ - typedef Value const_iterator; + using const_iterator = Value; //! Type for size of a range - typedef std::size_t size_type; - - //! Construct range with default-constructed values for begin and end. - /** Requires that Value have a default constructor. */ - blocked_range() : my_end(), my_begin() {} + using size_type = std::size_t; //! Construct range over half-open interval [begin,end), with the given grainsize. blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) : @@ -59,10 +59,10 @@ class blocked_range { } //! Beginning of range. - const_iterator begin() const {return my_begin;} + const_iterator begin() const { return my_begin; } //! One past last value in range. - const_iterator end() const {return my_end;} + const_iterator end() const { return my_end; } //! 
Size of the range /** Unspecified if end() + __TBB_requires(blocked_range_value && + blocked_range_value) friend class blocked_range2d; template + __TBB_requires(blocked_range_value && + blocked_range_value && + blocked_range_value) friend class blocked_range3d; + + template + __TBB_requires(blocked_range_value) + friend class blocked_rangeNd_impl; }; +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::blocked_range; +// Split types +using detail::split; +using detail::proportional_split; +} // namespace v1 + } // namespace tbb #endif /* __TBB_blocked_range_H */ diff --git a/src/tbb/include/oneapi/tbb/blocked_range2d.h b/src/tbb/include/oneapi/tbb/blocked_range2d.h new file mode 100644 index 000000000..41385db98 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/blocked_range2d.h @@ -0,0 +1,111 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_range2d_H +#define __TBB_blocked_range2d_H + +#include + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_range_common.h" + +#include "blocked_range.h" + +namespace tbb { +namespace detail { +namespace d1 { + +//! A 2-dimensional range that models the Range concept. +/** @ingroup algorithms */ +template + __TBB_requires(blocked_range_value && + blocked_range_value) +class blocked_range2d { +public: + //! Type for size of an iteration range + using row_range_type = blocked_range; + using col_range_type = blocked_range; + +private: + row_range_type my_rows; + col_range_type my_cols; + +public: + blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, + ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : + my_rows(row_begin,row_end,row_grainsize), + my_cols(col_begin,col_end,col_grainsize) + {} + + blocked_range2d( RowValue row_begin, RowValue row_end, + ColValue col_begin, ColValue col_end ) : + my_rows(row_begin,row_end), + my_cols(col_begin,col_end) + {} + + //! True if range is empty + bool empty() const { + // Range is empty if at least one dimension is empty. + return my_rows.empty() || my_cols.empty(); + } + + //! True if range is divisible into two pieces. + bool is_divisible() const { + return my_rows.is_divisible() || my_cols.is_divisible(); + } + + blocked_range2d( blocked_range2d& r, split ) : + my_rows(r.my_rows), + my_cols(r.my_cols) + { + split split_obj; + do_split(r, split_obj); + } + + blocked_range2d( blocked_range2d& r, proportional_split& proportion ) : + my_rows(r.my_rows), + my_cols(r.my_cols) + { + do_split(r, proportion); + } + + //! The rows of the iteration space + const row_range_type& rows() const { return my_rows; } + + //! 
The columns of the iteration space + const col_range_type& cols() const { return my_cols; } + +private: + template + void do_split( blocked_range2d& r, Split& split_obj ) { + if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { + my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); + } else { + my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); + } + } +}; + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::blocked_range2d; +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_blocked_range2d_H */ diff --git a/inst/include/tbb/blocked_range3d.h b/src/tbb/include/oneapi/tbb/blocked_range3d.h similarity index 55% rename from inst/include/tbb/blocked_range3d.h rename to src/tbb/include/oneapi/tbb/blocked_range3d.h index 1557d72d0..d8932192b 100644 --- a/inst/include/tbb/blocked_range3d.h +++ b/src/tbb/include/oneapi/tbb/blocked_range3d.h @@ -1,40 +1,45 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_blocked_range3d_H #define __TBB_blocked_range3d_H -#include "tbb_stddef.h" +#include + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" + #include "blocked_range.h" namespace tbb { +namespace detail { +namespace d1 { //! A 3-dimensional range that models the Range concept. /** @ingroup algorithms */ -template +template + __TBB_requires(blocked_range_value && + blocked_range_value && + blocked_range_value) class blocked_range3d { public: //! 
Type for size of an iteration range - typedef blocked_range page_range_type; - typedef blocked_range row_range_type; - typedef blocked_range col_range_type; + using page_range_type = blocked_range; + using row_range_type = blocked_range; + using col_range_type = blocked_range; private: page_range_type my_pages; @@ -49,8 +54,7 @@ class blocked_range3d { my_pages(page_begin,page_end), my_rows(row_begin,row_end), my_cols(col_begin,col_end) - { - } + {} blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, @@ -58,12 +62,11 @@ class blocked_range3d { my_pages(page_begin,page_end,page_grainsize), my_rows(row_begin,row_end,row_grainsize), my_cols(col_begin,col_end,col_grainsize) - { - } + {} //! True if range is empty bool empty() const { - // Yes, it is a logical OR here, not AND. + // Range is empty if at least one dimension is empty. return my_pages.empty() || my_rows.empty() || my_cols.empty(); } @@ -72,19 +75,14 @@ class blocked_range3d { return my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible(); } - blocked_range3d( blocked_range3d& r, split ) : + blocked_range3d( blocked_range3d& r, split split_obj ) : my_pages(r.my_pages), my_rows(r.my_rows), my_cols(r.my_cols) { - split split_obj; do_split(r, split_obj); } -#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES - //! Static field to support proportional split - static const bool is_divisible_in_proportion = true; - blocked_range3d( blocked_range3d& r, proportional_split& proportion ) : my_pages(r.my_pages), my_rows(r.my_rows), @@ -92,18 +90,26 @@ class blocked_range3d { { do_split(r, proportion); } -#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ + //! The pages of the iteration space + const page_range_type& pages() const { return my_pages; } + + //! The rows of the iteration space + const row_range_type& rows() const { return my_rows; } + + //! The columns of the iteration space + const col_range_type& cols() const { return my_cols; } + +private: template - void do_split( blocked_range3d& r, Split& split_obj) - { + void do_split( blocked_range3d& r, Split& split_obj) { if ( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) { if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); } else { my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); } - } else { + } else { if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) { my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); } else { @@ -111,18 +117,14 @@ class blocked_range3d { } } } - - //! The pages of the iteration space - const page_range_type& pages() const {return my_pages;} - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! 
The columns of the iteration space - const col_range_type& cols() const {return my_cols;} - }; +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::blocked_range3d; +} // namespace v1 } // namespace tbb #endif /* __TBB_blocked_range3d_H */ diff --git a/src/tbb/include/oneapi/tbb/blocked_rangeNd.h b/src/tbb/include/oneapi/tbb/blocked_rangeNd.h new file mode 100644 index 000000000..a7ba13750 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/blocked_rangeNd.h @@ -0,0 +1,147 @@ +/* + Copyright (c) 2017-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_rangeNd_H +#define __TBB_blocked_rangeNd_H + +#if !TBB_PREVIEW_BLOCKED_RANGE_ND + #error Set TBB_PREVIEW_BLOCKED_RANGE_ND to include blocked_rangeNd.h +#endif + +#include // std::any_of +#include +#include +#include // std::is_same, std::enable_if + +#include "detail/_config.h" +#include "detail/_template_helpers.h" // index_sequence, make_index_sequence +#include "detail/_range_common.h" + +#include "blocked_range.h" + +namespace tbb { +namespace detail { +namespace d1 { + +/* + The blocked_rangeNd_impl uses make_index_sequence to automatically generate a ctor with + exactly N arguments of the type tbb::blocked_range. Such ctor provides an opportunity + to use braced-init-list parameters to initialize each dimension. + Use of parameters, whose representation is a braced-init-list, but they're not + std::initializer_list or a reference to one, produces a non-deduced context + within template argument deduction. + + NOTE: blocked_rangeNd must be exactly a templated alias to the blocked_rangeNd_impl + (and not e.g. a derived class), otherwise it would need to declare its own ctor + facing the same problem that the impl class solves. +*/ + +template> + __TBB_requires(blocked_range_value) +class blocked_rangeNd_impl; + +template + __TBB_requires(blocked_range_value) +class blocked_rangeNd_impl> { +public: + //! Type of a value. + using value_type = Value; + +private: + //! Helper type to construct range with N tbb::blocked_range objects. + template + using dim_type_helper = tbb::blocked_range; + +public: + blocked_rangeNd_impl() = delete; + + //! Constructs N-dimensional range over N half-open intervals each represented as tbb::blocked_range. + blocked_rangeNd_impl(const dim_type_helper&... args) : my_dims{ {args...} } {} + + //! Dimensionality of a range. + static constexpr unsigned int ndims() { return N; } + + //! Range in certain dimension. + const tbb::blocked_range& dim(unsigned int dimension) const { + __TBB_ASSERT(dimension < N, "out of bound"); + return my_dims[dimension]; + } + + //------------------------------------------------------------------------ + // Methods that implement Range concept + //------------------------------------------------------------------------ + + //! True if at least one dimension is empty. + bool empty() const { + return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& d) { + return d.empty(); + }); + } + + //! 
True if at least one dimension is divisible. + bool is_divisible() const { + return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& d) { + return d.is_divisible(); + }); + } + + blocked_rangeNd_impl(blocked_rangeNd_impl& r, proportional_split proportion) : my_dims(r.my_dims) { + do_split(r, proportion); + } + + blocked_rangeNd_impl(blocked_rangeNd_impl& r, split proportion) : my_dims(r.my_dims) { + do_split(r, proportion); + } + +private: + static_assert(N != 0, "zero dimensional blocked_rangeNd can't be constructed"); + + //! Ranges in each dimension. + std::array, N> my_dims; + + template + void do_split(blocked_rangeNd_impl& r, split_type proportion) { + static_assert((std::is_same::value || std::is_same::value), "type of split object is incorrect"); + __TBB_ASSERT(r.is_divisible(), "can't split not divisible range"); + + auto my_it = std::max_element(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& first, const tbb::blocked_range& second) { + return (first.size() * second.grainsize() < second.size() * first.grainsize()); + }); + + auto r_it = r.my_dims.begin() + (my_it - my_dims.begin()); + + my_it->my_begin = tbb::blocked_range::do_split(*r_it, proportion); + + // (!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin)) equals to + // (my_it->my_begin == r_it->my_end), but we can't use operator== due to Value concept + __TBB_ASSERT(!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin), + "blocked_range has been split incorrectly"); + } +}; + +template +using blocked_rangeNd = blocked_rangeNd_impl; + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::blocked_rangeNd; +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_blocked_rangeNd_H */ + diff --git a/src/tbb/include/oneapi/tbb/cache_aligned_allocator.h b/src/tbb/include/oneapi/tbb/cache_aligned_allocator.h new file mode 100644 index 000000000..0ff3972d2 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/cache_aligned_allocator.h @@ -0,0 +1,189 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_cache_aligned_allocator_H +#define __TBB_cache_aligned_allocator_H + +#include "detail/_utils.h" +#include "detail/_namespace_injection.h" +#include +#include + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include +#endif + +namespace tbb { +namespace detail { + +namespace r1 { +TBB_EXPORT void* __TBB_EXPORTED_FUNC cache_aligned_allocate(std::size_t size); +TBB_EXPORT void __TBB_EXPORTED_FUNC cache_aligned_deallocate(void* p); +TBB_EXPORT std::size_t __TBB_EXPORTED_FUNC cache_line_size(); +} + +namespace d1 { + +template +class cache_aligned_allocator { +public: + using value_type = T; + using propagate_on_container_move_assignment = std::true_type; + + //! 
Always defined for TBB containers (supported since C++17 for std containers) + using is_always_equal = std::true_type; + + cache_aligned_allocator() = default; + template cache_aligned_allocator(const cache_aligned_allocator&) noexcept {} + + //! Allocate space for n objects, starting on a cache/sector line. + __TBB_nodiscard T* allocate(std::size_t n) { + return static_cast(r1::cache_aligned_allocate(n * sizeof(value_type))); + } + + //! Free block of memory that starts on a cache line + void deallocate(T* p, std::size_t) { + r1::cache_aligned_deallocate(p); + } + + //! Largest value for which method allocate might succeed. + std::size_t max_size() const noexcept { + return (~std::size_t(0) - r1::cache_line_size()) / sizeof(value_type); + } + +#if TBB_ALLOCATOR_TRAITS_BROKEN + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using difference_type = std::ptrdiff_t; + using size_type = std::size_t; + template struct rebind { + using other = cache_aligned_allocator; + }; + template + void construct(U *p, Args&&... args) + { ::new (p) U(std::forward(args)...); } + void destroy(pointer p) { p->~value_type(); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +#endif // TBB_ALLOCATOR_TRAITS_BROKEN +}; + +#if TBB_ALLOCATOR_TRAITS_BROKEN + template<> + class cache_aligned_allocator { + public: + using pointer = void*; + using const_pointer = const void*; + using value_type = void; + template struct rebind { + using other = cache_aligned_allocator; + }; + }; +#endif + +template +bool operator==(const cache_aligned_allocator&, const cache_aligned_allocator&) noexcept { return true; } + +#if !__TBB_CPP20_COMPARISONS_PRESENT +template +bool operator!=(const cache_aligned_allocator&, const cache_aligned_allocator&) noexcept { return false; } +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +//! C++17 memory resource wrapper to ensure cache line size alignment +class cache_aligned_resource : public std::pmr::memory_resource { +public: + cache_aligned_resource() : cache_aligned_resource(std::pmr::get_default_resource()) {} + explicit cache_aligned_resource(std::pmr::memory_resource* upstream) : m_upstream(upstream) {} + + std::pmr::memory_resource* upstream_resource() const { + return m_upstream; + } + +private: + //! We don't know what memory resource set. Use padding to guarantee alignment + void* do_allocate(std::size_t bytes, std::size_t alignment) override { + // TODO: make it common with tbb_allocator.cpp + std::size_t cache_line_alignment = correct_alignment(alignment); + std::size_t space = correct_size(bytes) + cache_line_alignment; + std::uintptr_t base = reinterpret_cast(m_upstream->allocate(space)); + __TBB_ASSERT(base != 0, "Upstream resource returned nullptr."); + + // Round up to the next cache line (align the base address) + std::uintptr_t result = (base + cache_line_alignment) & ~(cache_line_alignment - 1); + __TBB_ASSERT((result - base) >= sizeof(std::uintptr_t), "Can`t store a base pointer to the header"); + __TBB_ASSERT(space - (result - base) >= bytes, "Not enough space for the storage"); + + // Record where block actually starts. 
+ (reinterpret_cast(result))[-1] = base; + return reinterpret_cast(result); + } + + void do_deallocate(void* ptr, std::size_t bytes, std::size_t alignment) override { + if (ptr) { + // Recover where block actually starts + std::uintptr_t base = (reinterpret_cast(ptr))[-1]; + m_upstream->deallocate(reinterpret_cast(base), correct_size(bytes) + correct_alignment(alignment)); + } + } + + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + if (this == &other) { return true; } +#if __TBB_USE_OPTIONAL_RTTI + const cache_aligned_resource* other_res = dynamic_cast(&other); + return other_res && (upstream_resource() == other_res->upstream_resource()); +#else + return false; +#endif + } + + std::size_t correct_alignment(std::size_t alignment) { + __TBB_ASSERT(tbb::detail::is_power_of_two(alignment), "Alignment is not a power of 2"); +#if __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT + std::size_t cache_line_size = std::hardware_destructive_interference_size; +#else + std::size_t cache_line_size = r1::cache_line_size(); +#endif + return alignment < cache_line_size ? cache_line_size : alignment; + } + + std::size_t correct_size(std::size_t bytes) { + // To handle the case, when small size requested. There could be not + // enough space to store the original pointer. + return bytes < sizeof(std::uintptr_t) ? sizeof(std::uintptr_t) : bytes; + } + + std::pmr::memory_resource* m_upstream; +}; + +#endif // __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::cache_aligned_allocator; +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +using detail::d1::cache_aligned_resource; +#endif +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_cache_aligned_allocator_H */ + diff --git a/src/tbb/include/oneapi/tbb/collaborative_call_once.h b/src/tbb/include/oneapi/tbb/collaborative_call_once.h new file mode 100644 index 000000000..0a24f17d2 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/collaborative_call_once.h @@ -0,0 +1,256 @@ +/* + Copyright (c) 2021-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
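The `cache_aligned_allocator` added above starts every allocation on a cache line (and `cache_aligned_resource` does the same for a C++17 `std::pmr` upstream resource), which avoids false sharing at the start of the block. A small usage sketch:

```cpp
// Sketch: the vector's backing allocation begins on a cache-line
// boundary, so its first element never shares a line with unrelated data.
#include <oneapi/tbb/cache_aligned_allocator.h>

#include <vector>

int main() {
    std::vector<double, tbb::cache_aligned_allocator<double>> v(1024, 0.0);
    v[0] = 1.0;
    return 0;
}
```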
+*/ + +#ifndef __TBB_collaborative_call_once_H +#define __TBB_collaborative_call_once_H + +#include "task_arena.h" +#include "task_group.h" + +#include + +namespace tbb { +namespace detail { +namespace d1 { + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Suppress warning: structure was padded due to alignment specifier + // #pragma warning (push) + // #pragma warning (disable: 4324) +#endif + +template +class collaborative_call_stack_task : public task { + const F& m_func; + wait_context& m_wait_ctx; + + void finalize() { + m_wait_ctx.release(); + } + task* execute(d1::execution_data&) override { + task* res = d2::task_ptr_or_nullptr(m_func); + finalize(); + return res; + } + task* cancel(d1::execution_data&) override { + finalize(); + return nullptr; + } +public: + collaborative_call_stack_task(const F& f, wait_context& wctx) : m_func(f), m_wait_ctx(wctx) {} +}; + +constexpr std::uintptr_t collaborative_once_max_references = max_nfs_size; +constexpr std::uintptr_t collaborative_once_references_mask = collaborative_once_max_references-1; + +class alignas(max_nfs_size) collaborative_once_runner : no_copy { + + struct storage_t { + task_arena m_arena{ task_arena::attach{} }; + wait_context m_wait_context{1}; + }; + + std::atomic m_ref_count{0}; + std::atomic m_is_ready{false}; + + // Storage with task_arena and wait_context must be initialized only by winner thread + union { + storage_t m_storage; + }; + + template + void isolated_execute(Fn f) { + auto func = [f] { + f(); + // delegate_base requires bool returning functor while isolate_within_arena ignores the result + return true; + }; + + delegated_function delegate(func); + + r1::isolate_within_arena(delegate, reinterpret_cast(this)); + } + +public: + class lifetime_guard : no_copy { + collaborative_once_runner& m_runner; + public: + lifetime_guard(collaborative_once_runner& r) : m_runner(r) { + m_runner.m_ref_count++; + } + ~lifetime_guard() { + m_runner.m_ref_count--; + } + }; + + collaborative_once_runner() {} + + ~collaborative_once_runner() { + spin_wait_until_eq(m_ref_count, 0, std::memory_order_acquire); + if (m_is_ready.load(std::memory_order_relaxed)) { + m_storage.~storage_t(); + } + } + + std::uintptr_t to_bits() { + return reinterpret_cast(this); + } + + static collaborative_once_runner* from_bits(std::uintptr_t bits) { + __TBB_ASSERT( (bits & collaborative_once_references_mask) == 0, "invalid pointer, last log2(max_nfs_size) bits must be zero" ); + return reinterpret_cast(bits); + } + + template + void run_once(F&& f) { + __TBB_ASSERT(!m_is_ready.load(std::memory_order_relaxed), "storage with task_arena and wait_context is already initialized"); + // Initialize internal state + new(&m_storage) storage_t(); + m_storage.m_arena.execute([&] { + isolated_execute([&] { + task_group_context context{ task_group_context::bound, + task_group_context::default_traits | task_group_context::concurrent_wait }; + + collaborative_call_stack_task t{ std::forward(f), m_storage.m_wait_context }; + + // Set the ready flag after entering the execute body to prevent + // moonlighting threads from occupying all slots inside the arena. + m_is_ready.store(true, std::memory_order_release); + execute_and_wait(t, context, m_storage.m_wait_context, context); + }); + }); + } + + void assist() noexcept { + // Do not join the arena until the winner thread takes the slot + spin_wait_while_eq(m_is_ready, false); + m_storage.m_arena.execute([&] { + isolated_execute([&] { + // We do not want to get an exception from user functor on moonlighting threads. 
+ // The exception is handled with the winner thread + task_group_context stub_context; + wait(m_storage.m_wait_context, stub_context); + }); + }); + } + +}; + +class collaborative_once_flag : no_copy { + enum state : std::uintptr_t { + uninitialized, + done, +#if TBB_USE_ASSERT + dead +#endif + }; + std::atomic m_state{ state::uninitialized }; + + template + friend void collaborative_call_once(collaborative_once_flag& flag, Fn&& f, Args&&... args); + + void set_completion_state(std::uintptr_t runner_bits, std::uintptr_t desired) { + std::uintptr_t expected = runner_bits; + do { + expected = runner_bits; + // Possible inefficiency: when we start waiting, + // some moonlighting threads might continue coming that will prolong our waiting. + // Fortunately, there are limited number of threads on the system so wait time is limited. + spin_wait_until_eq(m_state, expected); + } while (!m_state.compare_exchange_strong(expected, desired)); + } + + template + void do_collaborative_call_once(Fn&& f) { + std::uintptr_t expected = m_state.load(std::memory_order_acquire); + collaborative_once_runner runner; + + do { + if (expected == state::uninitialized && m_state.compare_exchange_strong(expected, runner.to_bits())) { + // Winner thread + runner.run_once([&] { + try_call([&] { + std::forward(f)(); + }).on_exception([&] { + // Reset the state to uninitialized to allow other threads to try initialization again + set_completion_state(runner.to_bits(), state::uninitialized); + }); + // We successfully executed functor + set_completion_state(runner.to_bits(), state::done); + }); + break; + } else { + // Moonlighting thread: we need to add a reference to the state to prolong runner lifetime. + // However, the maximum number of references are limited with runner alignment. + // So, we use CAS loop and spin_wait to guarantee that references never exceed "max_value". + do { + auto max_value = expected | collaborative_once_references_mask; + expected = spin_wait_while_eq(m_state, max_value); + // "expected > state::done" prevents storing values, when state is uninitialized or done + } while (expected > state::done && !m_state.compare_exchange_strong(expected, expected + 1)); + + if (auto shared_runner = collaborative_once_runner::from_bits(expected & ~collaborative_once_references_mask)) { + collaborative_once_runner::lifetime_guard guard{*shared_runner}; + m_state.fetch_sub(1); + + // The moonlighting threads are not expected to handle exceptions from user functor. + // Therefore, no exception is expected from assist(). + shared_runner->assist(); + } + } + __TBB_ASSERT(m_state.load(std::memory_order_relaxed) != state::dead, + "collaborative_once_flag has been prematurely destroyed"); + } while (expected != state::done); + } + +#if TBB_USE_ASSERT +public: + ~collaborative_once_flag() { + m_state.store(state::dead, std::memory_order_relaxed); + } +#endif +}; + + +template +void collaborative_call_once(collaborative_once_flag& flag, Fn&& fn, Args&&... 
args) { + __TBB_ASSERT(flag.m_state.load(std::memory_order_relaxed) != collaborative_once_flag::dead, + "collaborative_once_flag has been prematurely destroyed"); + if (flag.m_state.load(std::memory_order_acquire) != collaborative_once_flag::done) { + #if __TBB_GCC_PARAMETER_PACK_IN_LAMBDAS_BROKEN + // Using stored_pack to suppress bug in GCC 4.8 + // with parameter pack expansion in lambda + auto stored_pack = save_pack(std::forward(args)...); + auto func = [&] { call(std::forward(fn), std::move(stored_pack)); }; + #else + auto func = [&] { fn(std::forward(args)...); }; + #endif + flag.do_collaborative_call_once(func); + } +} + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // #pragma warning (pop) // 4324 warning +#endif + +} // namespace d1 +} // namespace detail + +using detail::d1::collaborative_call_once; +using detail::d1::collaborative_once_flag; +} // namespace tbb + +#endif // __TBB_collaborative_call_once_H diff --git a/src/tbb/include/oneapi/tbb/combinable.h b/src/tbb/include/oneapi/tbb/combinable.h new file mode 100644 index 000000000..b676a30cc --- /dev/null +++ b/src/tbb/include/oneapi/tbb/combinable.h @@ -0,0 +1,69 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_combinable_H +#define __TBB_combinable_H + +#include "detail/_namespace_injection.h" + +#include "enumerable_thread_specific.h" +#include "cache_aligned_allocator.h" + +namespace tbb { +namespace detail { +namespace d1 { +/** \name combinable **/ +//@{ +//! Thread-local storage with optional reduction +/** @ingroup containers */ +template +class combinable { + using my_alloc = typename tbb::cache_aligned_allocator; + using my_ets_type = typename tbb::enumerable_thread_specific; + my_ets_type my_ets; + +public: + combinable() = default; + + template + explicit combinable(Finit _finit) : my_ets(_finit) { } + + void clear() { my_ets.clear(); } + + T& local() { return my_ets.local(); } + + T& local(bool& exists) { return my_ets.local(exists); } + + // combine_func_t has signature T(T,T) or T(const T&, const T&) + template + T combine(CombineFunc f_combine) { return my_ets.combine(f_combine); } + + // combine_func_t has signature void(T) or void(const T&) + template + void combine_each(CombineFunc f_combine) { my_ets.combine_each(f_combine); } +}; + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::combinable; +} // inline namespace v1 + +} // namespace tbb + +#endif /* __TBB_combinable_H */ + diff --git a/src/tbb/include/oneapi/tbb/concurrent_hash_map.h b/src/tbb/include/oneapi/tbb/concurrent_hash_map.h new file mode 100644 index 000000000..b30033742 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_hash_map.h @@ -0,0 +1,1664 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
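The `collaborative_call_once.h` header added above provides once-only semantics in which losing threads (the "moonlighting" threads in the comments) join the winner's arena and assist with the initialization instead of blocking idle. A hedged usage sketch; `expensive_init()` is a hypothetical stand-in for a parallel workload:

```cpp
// Sketch: one-time, parallelism-friendly initialization.
// Threads that lose the race help execute the winner's parallel work.
#include <oneapi/tbb/collaborative_call_once.h>
#include <oneapi/tbb/parallel_reduce.h>
#include <oneapi/tbb/blocked_range.h>

static tbb::collaborative_once_flag once;
static double cached;

double expensive_init() {
    // Stand-in workload: a parallel sum that assisting threads can join.
    return tbb::parallel_reduce(
        tbb::blocked_range<long>(0, 1 << 20), 0.0,
        [](const tbb::blocked_range<long>& r, double acc) {
            for (long i = r.begin(); i != r.end(); ++i) acc += 1.0 / (i + 1);
            return acc;
        },
        [](double a, double b) { return a + b; });
}

double get_cached() {
    tbb::collaborative_call_once(once, [] { cached = expensive_init(); });
    return cached;
}
```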
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_hash_map_H +#define __TBB_concurrent_hash_map_H + +#include "detail/_namespace_injection.h" +#include "detail/_utils.h" +#include "detail/_assert.h" +#include "detail/_allocator_traits.h" +#include "detail/_containers_helpers.h" +#include "detail/_template_helpers.h" +#include "detail/_hash_compare.h" +#include "detail/_range_common.h" +#include "tbb_allocator.h" +#include "spin_rw_mutex.h" + +#include +#include +#include +#include +#include // Need std::pair +#include // Need std::memset + +namespace tbb { +namespace detail { +namespace d2 { + +#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS && __TBB_CPP20_CONCEPTS_PRESENT +template +concept ch_map_rw_scoped_lockable = rw_scoped_lockable && + requires(const typename Mutex::scoped_lock& sl) { + { sl.is_writer() } -> std::convertible_to; +}; +#endif + +template +struct hash_map_node_base : no_copy { + using mutex_type = MutexType; + // Scoped lock type for mutex + using scoped_type = typename MutexType::scoped_lock; + // Next node in chain + hash_map_node_base* next; + mutex_type mutex; +}; + +// Incompleteness flag value +static void* const rehash_req_flag = reinterpret_cast(std::size_t(3)); +// Rehashed empty bucket flag +static void* const empty_rehashed_flag = reinterpret_cast(std::size_t(0)); + +template +bool rehash_required( hash_map_node_base* node_ptr ) { + return reinterpret_cast(node_ptr) == rehash_req_flag; +} + +#if TBB_USE_ASSERT +template +bool empty_rehashed( hash_map_node_base* node_ptr ) { + return reinterpret_cast(node_ptr) == empty_rehashed_flag; +} +#endif + +// base class of concurrent_hash_map + +template +class hash_map_base { +public: + using size_type = std::size_t; + using hashcode_type = std::size_t; + using segment_index_type = std::size_t; + using node_base = hash_map_node_base; + + struct bucket : no_copy { + using mutex_type = MutexType; + using scoped_type = typename mutex_type::scoped_lock; + + bucket() : node_list(nullptr) {} + bucket( node_base* ptr ) : node_list(ptr) {} + + mutex_type mutex; + std::atomic node_list; + }; + + using allocator_type = Allocator; + using allocator_traits_type = tbb::detail::allocator_traits; + using bucket_allocator_type = typename allocator_traits_type::template rebind_alloc; + using bucket_allocator_traits = tbb::detail::allocator_traits; + + // Count of segments in the first block + static constexpr size_type embedded_block = 1; + // Count of segments in the first block + static constexpr size_type embedded_buckets = 1 << embedded_block; + // Count of segments in the first block + static constexpr size_type first_block = 8; //including embedded_block. 
perfect with bucket size 16, so the allocations are power of 4096 + // Size of a pointer / table size + static constexpr size_type pointers_per_table = sizeof(segment_index_type) * 8; // one segment per bit + + using segment_ptr_type = bucket*; + using atomic_segment_type = std::atomic; + using segments_table_type = atomic_segment_type[pointers_per_table]; + + hash_map_base( const allocator_type& alloc ) : my_allocator(alloc), my_mask(embedded_buckets - 1), my_size(0) { + for (size_type i = 0; i != embedded_buckets; ++i) { + my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed); + } + + for (size_type segment_index = 0; segment_index < pointers_per_table; ++segment_index) { + auto argument = segment_index < embedded_block ? my_embedded_segment + segment_base(segment_index) : nullptr; + my_table[segment_index].store(argument, std::memory_order_relaxed); + } + + __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); + } + + // segment index of given index in the array + static segment_index_type segment_index_of( size_type index ) { + return segment_index_type(tbb::detail::log2( index|1 )); + } + + // the first array index of given segment + static segment_index_type segment_base( segment_index_type k ) { + return (segment_index_type(1) << k & ~segment_index_type(1)); + } + + // segment size except for k == 0 + static size_type segment_size( segment_index_type k ) { + return size_type(1) << k; // fake value for k==0 + } + + // true if ptr is valid pointer + static bool is_valid( void* ptr ) { + return reinterpret_cast(ptr) > uintptr_t(63); + } + + template + void init_buckets_impl( segment_ptr_type ptr, size_type sz, const Args&... args ) { + for (size_type i = 0; i < sz; ++i) { + bucket_allocator_traits::construct(my_allocator, ptr + i, args...); + } + } + + // Initialize buckets + void init_buckets( segment_ptr_type ptr, size_type sz, bool is_initial ) { + if (is_initial) { + init_buckets_impl(ptr, sz); + } else { + init_buckets_impl(ptr, sz, reinterpret_cast(rehash_req_flag)); + } + } + + // Add node n to bucket b + static void add_to_bucket( bucket* b, node_base* n ) { + __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr); + n->next = b->node_list.load(std::memory_order_relaxed); + b->node_list.store(n, std::memory_order_relaxed); // its under lock and flag is set + } + + const bucket_allocator_type& get_allocator() const { + return my_allocator; + } + + bucket_allocator_type& get_allocator() { + return my_allocator; + } + + // Enable segment + void enable_segment( segment_index_type k, bool is_initial = false ) { + __TBB_ASSERT( k, "Zero segment must be embedded" ); + size_type sz; + __TBB_ASSERT( !is_valid(my_table[k].load(std::memory_order_relaxed)), "Wrong concurrent assignment"); + if (k >= first_block) { + sz = segment_size(k); + segment_ptr_type ptr = nullptr; + try_call( [&] { + ptr = bucket_allocator_traits::allocate(my_allocator, sz); + } ).on_exception( [&] { + my_table[k].store(nullptr, std::memory_order_relaxed); + }); + + __TBB_ASSERT(ptr, nullptr); + init_buckets(ptr, sz, is_initial); + my_table[k].store(ptr, std::memory_order_release); + sz <<= 1;// double it to get entire capacity of the container + } else { // the first block + __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); + sz = segment_size(first_block); + segment_ptr_type ptr = nullptr; + try_call( [&] { + ptr = bucket_allocator_traits::allocate(my_allocator, sz - embedded_buckets); + } ).on_exception( 
[&] { + my_table[k].store(nullptr, std::memory_order_relaxed); + }); + + __TBB_ASSERT(ptr, nullptr); + init_buckets(ptr, sz - embedded_buckets, is_initial); + ptr -= segment_base(embedded_block); + for(segment_index_type i = embedded_block; i < first_block; i++) // calc the offsets + my_table[i].store(ptr + segment_base(i), std::memory_order_release); + } + my_mask.store(sz-1, std::memory_order_release); + } + + void delete_segment( segment_index_type s ) { + segment_ptr_type buckets_ptr = my_table[s].load(std::memory_order_relaxed); + size_type sz = segment_size( s ? s : 1 ); + + size_type deallocate_size = 0; + + if (s >= first_block) { // the first segment or the next + deallocate_size = sz; + } else if (s == embedded_block && embedded_block != first_block) { + deallocate_size = segment_size(first_block) - embedded_buckets; + } + + for (size_type i = 0; i < deallocate_size; ++i) { + bucket_allocator_traits::destroy(my_allocator, buckets_ptr + i); + } + if (deallocate_size != 0) { + bucket_allocator_traits::deallocate(my_allocator, buckets_ptr, deallocate_size); + } + + if (s >= embedded_block) my_table[s].store(nullptr, std::memory_order_relaxed); + } + + // Get bucket by (masked) hashcode + bucket *get_bucket( hashcode_type h ) const noexcept { + segment_index_type s = segment_index_of( h ); + h -= segment_base(s); + segment_ptr_type seg = my_table[s].load(std::memory_order_acquire); + __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); + return &seg[h]; + } + + // detail serial rehashing helper + void mark_rehashed_levels( hashcode_type h ) noexcept { + segment_index_type s = segment_index_of( h ); + while (segment_ptr_type seg = my_table[++s].load(std::memory_order_relaxed)) + if (rehash_required(seg[h].node_list.load(std::memory_order_relaxed))) { + seg[h].node_list.store(reinterpret_cast(empty_rehashed_flag), std::memory_order_relaxed); + mark_rehashed_levels( h + ((hashcode_type)1<node_list.load(std::memory_order_acquire))) { + return true; + } + } + return false; + } + + // Insert a node and check for load factor. @return segment index to enable. 
+ segment_index_type insert_new_node( bucket *b, node_base *n, hashcode_type mask ) { + size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted + add_to_bucket( b, n ); + // check load factor + if( sz >= mask ) { // TODO: add custom load_factor + segment_index_type new_seg = tbb::detail::log2( mask+1 ); //optimized segment_index_of + __TBB_ASSERT( is_valid(my_table[new_seg-1].load(std::memory_order_relaxed)), "new allocations must not publish new mask until segment has allocated"); + static const segment_ptr_type is_allocating = segment_ptr_type(2); + segment_ptr_type disabled = nullptr; + if (!(my_table[new_seg].load(std::memory_order_acquire)) + && my_table[new_seg].compare_exchange_strong(disabled, is_allocating)) + return new_seg; // The value must be processed + } + return 0; + } + + // Prepare enough segments for number of buckets + void reserve(size_type buckets) { + if( !buckets-- ) return; + bool is_initial = !my_size.load(std::memory_order_relaxed); + for (size_type m = my_mask.load(std::memory_order_relaxed); buckets > m; + m = my_mask.load(std::memory_order_relaxed)) + { + enable_segment( segment_index_of( m+1 ), is_initial ); + } + } + + // Swap hash_map_bases + void internal_swap_content(hash_map_base &table) { + using std::swap; + swap_atomics_relaxed(my_mask, table.my_mask); + swap_atomics_relaxed(my_size, table.my_size); + + for(size_type i = 0; i < embedded_buckets; i++) { + auto temp = my_embedded_segment[i].node_list.load(std::memory_order_relaxed); + my_embedded_segment[i].node_list.store(table.my_embedded_segment[i].node_list.load(std::memory_order_relaxed), + std::memory_order_relaxed); + table.my_embedded_segment[i].node_list.store(temp, std::memory_order_relaxed); + } + for(size_type i = embedded_block; i < pointers_per_table; i++) { + auto temp = my_table[i].load(std::memory_order_relaxed); + my_table[i].store(table.my_table[i].load(std::memory_order_relaxed), + std::memory_order_relaxed); + table.my_table[i].store(temp, std::memory_order_relaxed); + } + } + + void internal_move(hash_map_base&& other) { + my_mask.store(other.my_mask.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_mask.store(embedded_buckets - 1, std::memory_order_relaxed); + + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_size.store(0, std::memory_order_relaxed); + + for (size_type i = 0; i < embedded_buckets; ++i) { + my_embedded_segment[i].node_list.store(other.my_embedded_segment[i].node_list, std::memory_order_relaxed); + other.my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed); + } + + for (size_type i = embedded_block; i < pointers_per_table; ++i) { + my_table[i].store(other.my_table[i].load(std::memory_order_relaxed), + std::memory_order_relaxed); + other.my_table[i].store(nullptr, std::memory_order_relaxed); + } + } + +protected: + bucket_allocator_type my_allocator; + // Hash mask = sum of allocated segment sizes - 1 + std::atomic my_mask; + // Size of container in stored items + std::atomic my_size; // It must be in separate cache line from my_mask due to performance effects + // Zero segment + bucket my_embedded_segment[embedded_buckets]; + // Segment pointers table. Also prevents false sharing between my_mask and my_size + segments_table_type my_table; +}; + +template +class hash_map_range; + +// Meets requirements of a forward iterator for STL +// Value is either the T or const T type of the container. 
+template +class hash_map_iterator { + using map_type = Container; + using node = typename Container::node; + using map_base = typename Container::base_type; + using node_base = typename map_base::node_base; + using bucket = typename map_base::bucket; +public: + using value_type = Value; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using pointer = value_type*; + using reference = value_type&; + using iterator_category = std::forward_iterator_tag; + + // Construct undefined iterator + hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {} + hash_map_iterator( const hash_map_iterator& other ) : + my_map(other.my_map), + my_index(other.my_index), + my_bucket(other.my_bucket), + my_node(other.my_node) + {} + + hash_map_iterator& operator=( const hash_map_iterator& other ) { + my_map = other.my_map; + my_index = other.my_index; + my_bucket = other.my_bucket; + my_node = other.my_node; + return *this; + } + + Value& operator*() const { + __TBB_ASSERT( map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); + return my_node->value(); + } + + Value* operator->() const {return &operator*();} + + hash_map_iterator& operator++() { + my_node = static_cast( my_node->next ); + if( !my_node ) advance_to_next_bucket(); + return *this; + } + + // Post increment + hash_map_iterator operator++(int) { + hash_map_iterator old(*this); + operator++(); + return old; + } +private: + template + friend bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ); + + template + friend bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ); + + template + friend ptrdiff_t operator-( const hash_map_iterator& i, const hash_map_iterator& j ); + + template + friend class hash_map_iterator; + + template + friend class hash_map_range; + + void advance_to_next_bucket() { // TODO?: refactor to iterator_base class + size_t k = my_index+1; + __TBB_ASSERT( my_bucket, "advancing an invalid iterator?"); + while (k <= my_map->my_mask.load(std::memory_order_relaxed)) { + // Following test uses 2's-complement wizardry + if( k&(k-2) ) // not the beginning of a segment + ++my_bucket; + else my_bucket = my_map->get_bucket( k ); + node_base *n = my_bucket->node_list.load(std::memory_order_relaxed); + if( map_base::is_valid(n) ) { + my_node = static_cast(n); + my_index = k; + return; + } + ++k; + } + my_bucket = nullptr; my_node = nullptr; my_index = k; // the end + } + + template + __TBB_requires(tbb::detail::hash_compare && + ch_map_rw_scoped_lockable) +#else + > + __TBB_requires(tbb::detail::hash_compare) +#endif + friend class concurrent_hash_map; + + hash_map_iterator( const Container &map, std::size_t index, const bucket *b, node_base *n ) : + my_map(&map), my_index(index), my_bucket(b), my_node(static_cast(n)) + { + if( b && !map_base::is_valid(n) ) + advance_to_next_bucket(); + } + + // concurrent_hash_map over which we are iterating. 
+ const Container *my_map; + // Index in hash table for current item + size_t my_index; + // Pointer to bucket + const bucket* my_bucket; + // Pointer to node that has current item + node* my_node; +}; + +template +bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ) { + return i.my_node == j.my_node && i.my_map == j.my_map; +} + +template +bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ) { + return i.my_node != j.my_node || i.my_map != j.my_map; +} + +// Range class used with concurrent_hash_map +template +class hash_map_range { + using map_type = typename Iterator::map_type; +public: + // Type for size of a range + using size_type = std::size_t; + using value_type = typename Iterator::value_type; + using reference = typename Iterator::reference; + using difference_type = typename Iterator::difference_type; + using iterator = Iterator; + + // True if range is empty. + bool empty() const { return my_begin == my_end; } + + // True if range can be partitioned into two subranges. + bool is_divisible() const { + return my_midpoint != my_end; + } + + // Split range. + hash_map_range( hash_map_range& r, split ) : + my_end(r.my_end), + my_grainsize(r.my_grainsize) + { + r.my_end = my_begin = r.my_midpoint; + __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); + __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); + set_midpoint(); + r.set_midpoint(); + } + + // Init range with container and grainsize specified + hash_map_range( const map_type &map, size_type grainsize_ = 1 ) : + my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list.load(std::memory_order_relaxed) ) ), + my_end( Iterator( map, map.my_mask.load(std::memory_order_relaxed) + 1, nullptr, nullptr ) ), + my_grainsize( grainsize_ ) + { + __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); + set_midpoint(); + } + + Iterator begin() const { return my_begin; } + Iterator end() const { return my_end; } + // The grain size for this range. + size_type grainsize() const { return my_grainsize; } + +private: + Iterator my_begin; + Iterator my_end; + mutable Iterator my_midpoint; + size_t my_grainsize; + // Set my_midpoint to point approximately half way between my_begin and my_end. 
+ void set_midpoint() const; + template friend class hash_map_range; +}; + +template +void hash_map_range::set_midpoint() const { + // Split by groups of nodes + size_t m = my_end.my_index-my_begin.my_index; + if( m > my_grainsize ) { + m = my_begin.my_index + m/2u; + auto b = my_begin.my_map->get_bucket(m); + my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list.load(std::memory_order_relaxed)); + } else { + my_midpoint = my_end; + } + __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, + "my_begin is after my_midpoint" ); + __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, + "my_midpoint is after my_end" ); + __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end, + "[my_begin, my_midpoint) range should not be empty" ); +} + +template , + typename Allocator = tbb_allocator> +#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS + , typename MutexType = spin_rw_mutex + > + __TBB_requires(tbb::detail::hash_compare && + ch_map_rw_scoped_lockable) +#else + > + __TBB_requires(tbb::detail::hash_compare) +#endif +class concurrent_hash_map +#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS + : protected hash_map_base +#else + : protected hash_map_base +#endif +{ + template + friend class hash_map_iterator; + + template + friend class hash_map_range; + using allocator_traits_type = tbb::detail::allocator_traits; + +#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS + using base_type = hash_map_base; +#else + using base_type = hash_map_base; +#endif +public: + using key_type = Key; + using mapped_type = T; + // type_identity is needed to disable implicit deduction guides for std::initializer_list constructors + // and copy/move constructor with explicit allocator argument + using allocator_type = tbb::detail::type_identity_t; + using hash_compare_type = tbb::detail::type_identity_t; + using value_type = std::pair; + using size_type = typename base_type::size_type; + using difference_type = std::ptrdiff_t; +#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS + using mutex_type = MutexType; +#endif + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + using reference = value_type&; + using const_reference = const value_type&; + using iterator = hash_map_iterator; + using const_iterator = hash_map_iterator; + using range_type = hash_map_range; + using const_range_type = hash_map_range; + +protected: + static_assert(std::is_same::value, + "value_type of the container must be the same as its allocator's"); + + friend class const_accessor; + class node; + using segment_index_type = typename base_type::segment_index_type; + using segment_ptr_type = typename base_type::segment_ptr_type; + using node_base = typename base_type::node_base; + using bucket = typename base_type::bucket; + using hashcode_type = typename base_type::hashcode_type; + using bucket_allocator_type = typename base_type::bucket_allocator_type; + using node_allocator_type = typename base_type::allocator_traits_type::template rebind_alloc; + using node_allocator_traits = tbb::detail::allocator_traits; + hash_compare_type my_hash_compare; + + class node : public node_base { + public: + node() {} + ~node() {} + pointer storage() { return &my_value; } + value_type& value() { return *storage(); } + private: + union { + value_type my_value; + }; + }; + + void delete_node( node_base *n ) { + node_allocator_type node_allocator(this->get_allocator()); + node_allocator_traits::destroy(node_allocator, static_cast(n)->storage()); + 
node_allocator_traits::destroy(node_allocator, static_cast(n)); + node_allocator_traits::deallocate(node_allocator, static_cast(n), 1); + } + + template + static node* create_node(bucket_allocator_type& allocator, Args&&... args) { + node_allocator_type node_allocator(allocator); + node* node_ptr = node_allocator_traits::allocate(node_allocator, 1); + auto guard = make_raii_guard([&] { + node_allocator_traits::destroy(node_allocator, node_ptr); + node_allocator_traits::deallocate(node_allocator, node_ptr, 1); + }); + + node_allocator_traits::construct(node_allocator, node_ptr); + node_allocator_traits::construct(node_allocator, node_ptr->storage(), std::forward(args)...); + guard.dismiss(); + return node_ptr; + } + + static node* allocate_node_copy_construct(bucket_allocator_type& allocator, const Key &key, const T * t){ + return create_node(allocator, key, *t); + } + + static node* allocate_node_move_construct(bucket_allocator_type& allocator, const Key &key, const T * t){ + return create_node(allocator, key, std::move(*const_cast(t))); + } + + template + static node* allocate_node_default_construct(bucket_allocator_type& allocator, const K &key, const T * ){ + // Emplace construct an empty T object inside the pair + return create_node(allocator, std::piecewise_construct, + std::forward_as_tuple(key), std::forward_as_tuple()); + } + + static node* do_not_allocate_node(bucket_allocator_type& , const Key &, const T * ){ + __TBB_ASSERT(false,"this dummy function should not be called"); + return nullptr; + } + + template + node *search_bucket( const K &key, bucket *b ) const { + node *n = static_cast( b->node_list.load(std::memory_order_relaxed) ); + while (this->is_valid(n) && !my_hash_compare.equal(key, n->value().first)) + n = static_cast( n->next ); + __TBB_ASSERT(!rehash_required(n), "Search can be executed only for rehashed bucket"); + return n; + } + + // bucket accessor is to find, rehash, acquire a lock, and access a bucket + class bucket_accessor : public bucket::scoped_type { + bucket *my_b; + public: + bucket_accessor( concurrent_hash_map *base, const hashcode_type h, bool writer = false ) { acquire( base, h, writer ); } + // find a bucket by masked hashcode, optionally rehash, and acquire the lock + inline void acquire( concurrent_hash_map *base, const hashcode_type h, bool writer = false ) { + my_b = base->get_bucket( h ); + // TODO: actually, notification is unnecessary here, just hiding double-check + if (rehash_required(my_b->node_list.load(std::memory_order_acquire)) + && bucket::scoped_type::try_acquire( my_b->mutex, /*write=*/true ) ) + { + if (rehash_required(my_b->node_list.load(std::memory_order_relaxed))) base->rehash_bucket(my_b, h); // recursive rehashing + } + else bucket::scoped_type::acquire( my_b->mutex, writer ); + __TBB_ASSERT(!rehash_required(my_b->node_list.load(std::memory_order_relaxed)), nullptr); + } + + // get bucket pointer + bucket *operator() () { return my_b; } + }; + + // TODO refactor to hash_base + void rehash_bucket( bucket *b_new, const hashcode_type hash ) { + __TBB_ASSERT( hash > 1, "The lowermost buckets can't be rehashed" ); + b_new->node_list.store(reinterpret_cast(empty_rehashed_flag), std::memory_order_release); // mark rehashed + hashcode_type mask = (hashcode_type(1) << tbb::detail::log2(hash)) - 1; // get parent mask from the topmost bit + bucket_accessor b_old( this, hash & mask ); + + mask = (mask<<1) | 1; // get full mask for new bucket + __TBB_ASSERT( (mask&(mask+1))==0 && (hash & mask) == hash, nullptr ); + restart: + node_base* 
prev = nullptr; + node_base* curr = b_old()->node_list.load(std::memory_order_acquire); + while (this->is_valid(curr)) { + hashcode_type curr_node_hash = my_hash_compare.hash(static_cast(curr)->value().first); + + if ((curr_node_hash & mask) == hash) { + if (!b_old.is_writer()) { + if (!b_old.upgrade_to_writer()) { + goto restart; // node ptr can be invalid due to concurrent erase + } + } + node_base* next = curr->next; + // exclude from b_old + if (prev == nullptr) { + b_old()->node_list.store(curr->next, std::memory_order_relaxed); + } else { + prev->next = curr->next; + } + this->add_to_bucket(b_new, curr); + curr = next; + } else { + prev = curr; + curr = curr->next; + } + } + } + + template + using hash_compare_is_transparent = dependent_bool, U>; + +public: + + class accessor; + // Combines data access, locking, and garbage collection. + class const_accessor : private node::scoped_type /*which derived from no_copy*/ { +#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS + friend class concurrent_hash_map; +#else + friend class concurrent_hash_map; +#endif + friend class accessor; + public: + // Type of value + using value_type = const typename concurrent_hash_map::value_type; + + // True if result is empty. + bool empty() const { return !my_node; } + + // Set to null + void release() { + if( my_node ) { + node::scoped_type::release(); + my_node = nullptr; + } + } + + // Return reference to associated value in hash table. + const_reference operator*() const { + __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); + return my_node->value(); + } + + // Return pointer to associated value in hash table. + const_pointer operator->() const { + return &operator*(); + } + + // Create empty result + const_accessor() : my_node(nullptr), my_hash() {} + + // Destroy result after releasing the underlying reference. + ~const_accessor() { + my_node = nullptr; // scoped lock's release() is called in its destructor + } + protected: + bool is_writer() { return node::scoped_type::is_writer(); } + node *my_node; + hashcode_type my_hash; + }; + + // Allows write access to elements and combines data access, locking, and garbage collection. + class accessor: public const_accessor { + public: + // Type of value + using value_type = typename concurrent_hash_map::value_type; + + // Return reference to associated value in hash table. + reference operator*() const { + __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); + return this->my_node->value(); + } + + // Return pointer to associated value in hash table. + pointer operator->() const { + return &operator*(); + } + }; + + explicit concurrent_hash_map( const hash_compare_type& compare, const allocator_type& a = allocator_type() ) + : base_type(a) + , my_hash_compare(compare) + {} + + concurrent_hash_map() : concurrent_hash_map(hash_compare_type()) {} + + explicit concurrent_hash_map( const allocator_type& a ) + : concurrent_hash_map(hash_compare_type(), a) + {} + + // Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. 
+ concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() ) + : concurrent_hash_map(a) + { + this->reserve(n); + } + + concurrent_hash_map( size_type n, const hash_compare_type& compare, const allocator_type& a = allocator_type() ) + : concurrent_hash_map(compare, a) + { + this->reserve(n); + } + + // Copy constructor + concurrent_hash_map( const concurrent_hash_map &table ) + : concurrent_hash_map(node_allocator_traits::select_on_container_copy_construction(table.get_allocator())) + { + try_call( [&] { + internal_copy(table); + }).on_exception( [&] { + this->clear(); + }); + } + + concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a) + : concurrent_hash_map(a) + { + try_call( [&] { + internal_copy(table); + }).on_exception( [&] { + this->clear(); + }); + } + + // Move constructor + concurrent_hash_map( concurrent_hash_map &&table ) + : concurrent_hash_map(std::move(table.get_allocator())) + { + this->internal_move(std::move(table)); + } + + // Move constructor + concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a ) + : concurrent_hash_map(a) + { + using is_equal_type = typename node_allocator_traits::is_always_equal; + internal_move_construct_with_allocator(std::move(table), a, is_equal_type()); + } + + // Construction with copying iteration range and given allocator instance + template + concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() ) + : concurrent_hash_map(a) + { + try_call( [&] { + internal_copy(first, last, std::distance(first, last)); + }).on_exception( [&] { + this->clear(); + }); + } + + template + concurrent_hash_map( I first, I last, const hash_compare_type& compare, const allocator_type& a = allocator_type() ) + : concurrent_hash_map(compare, a) + { + try_call( [&] { + internal_copy(first, last, std::distance(first, last)); + }).on_exception( [&] { + this->clear(); + }); + } + + concurrent_hash_map( std::initializer_list il, const hash_compare_type& compare = hash_compare_type(), const allocator_type& a = allocator_type() ) + : concurrent_hash_map(compare, a) + { + try_call( [&] { + internal_copy(il.begin(), il.end(), il.size()); + }).on_exception( [&] { + this->clear(); + }); + } + + concurrent_hash_map( std::initializer_list il, const allocator_type& a ) + : concurrent_hash_map(il, hash_compare_type(), a) {} + + // Assignment + concurrent_hash_map& operator=( const concurrent_hash_map &table ) { + if( this != &table ) { + clear(); + copy_assign_allocators(this->my_allocator, table.my_allocator); + internal_copy(table); + } + return *this; + } + + // Move Assignment + concurrent_hash_map& operator=( concurrent_hash_map &&table ) { + if( this != &table ) { + using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment; + using is_equal_type = typename node_allocator_traits::is_always_equal; + move_assign_allocators(this->my_allocator, table.my_allocator); + internal_move_assign(std::move(table), tbb::detail::disjunction()); + } + return *this; + } + + // Assignment + concurrent_hash_map& operator=( std::initializer_list il ) { + clear(); + internal_copy(il.begin(), il.end(), il.size()); + return *this; + } + + // Rehashes and optionally resizes the whole table. + /** Useful to optimize performance before or after concurrent operations. + Also enables using of find() and count() concurrent methods in serial context. 
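+
+       Illustrative sketch (not from the original sources; 'table' and 'key'
+       are hypothetical):
+
+           table.rehash();                    // complete any deferred bucket splits
+           std::size_t n = table.count(key);  // count()/find() now usable serially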
*/ + void rehash(size_type sz = 0) { + this->reserve(sz); // TODO: add reduction of number of buckets as well + hashcode_type mask = this->my_mask.load(std::memory_order_relaxed); + hashcode_type b = (mask+1)>>1; // size or first index of the last segment + __TBB_ASSERT((b&(b-1))==0, nullptr); // zero or power of 2 + bucket *bp = this->get_bucket( b ); // only the last segment should be scanned for rehashing + for(; b <= mask; b++, bp++ ) { + node_base *n = bp->node_list.load(std::memory_order_relaxed); + __TBB_ASSERT( this->is_valid(n) || empty_rehashed(n) || rehash_required(n), "Broken internal structure" ); + __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); + if (rehash_required(n)) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one + hashcode_type h = b; bucket *b_old = bp; + do { + __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); + hashcode_type m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit + b_old = this->get_bucket( h &= m ); + } while( rehash_required(b_old->node_list.load(std::memory_order_relaxed)) ); + // now h - is index of the root rehashed bucket b_old + this->mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments + node_base* prev = nullptr; + node_base* curr = b_old->node_list.load(std::memory_order_relaxed); + while (this->is_valid(curr)) { + hashcode_type curr_node_hash = my_hash_compare.hash(static_cast(curr)->value().first); + + if ((curr_node_hash & mask) != h) { // should be rehashed + node_base* next = curr->next; + // exclude from b_old + if (prev == nullptr) { + b_old->node_list.store(curr->next, std::memory_order_relaxed); + } else { + prev->next = curr->next; + } + bucket *b_new = this->get_bucket(curr_node_hash & mask); + __TBB_ASSERT(!rehash_required(b_new->node_list.load(std::memory_order_relaxed)), "hash() function changed for key in table or internal error"); + this->add_to_bucket(b_new, curr); + curr = next; + } else { + prev = curr; + curr = curr->next; + } + } + } + } + } + + // Clear table + void clear() { + hashcode_type m = this->my_mask.load(std::memory_order_relaxed); + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + this->my_size.store(0, std::memory_order_relaxed); + segment_index_type s = this->segment_index_of( m ); + __TBB_ASSERT( s+1 == this->pointers_per_table || !this->my_table[s+1].load(std::memory_order_relaxed), "wrong mask or concurrent grow" ); + do { + __TBB_ASSERT(this->is_valid(this->my_table[s].load(std::memory_order_relaxed)), "wrong mask or concurrent grow" ); + segment_ptr_type buckets_ptr = this->my_table[s].load(std::memory_order_relaxed); + size_type sz = this->segment_size( s ? s : 1 ); + for( segment_index_type i = 0; i < sz; i++ ) + for( node_base *n = buckets_ptr[i].node_list.load(std::memory_order_relaxed); + this->is_valid(n); n = buckets_ptr[i].node_list.load(std::memory_order_relaxed) ) + { + buckets_ptr[i].node_list.store(n->next, std::memory_order_relaxed); + delete_node( n ); + } + this->delete_segment(s); + } while(s-- > 0); + this->my_mask.store(this->embedded_buckets - 1, std::memory_order_relaxed); + } + + // Clear table and destroy it. 
+ ~concurrent_hash_map() { clear(); } + + //------------------------------------------------------------------------ + // Parallel algorithm support + //------------------------------------------------------------------------ + range_type range( size_type grainsize=1 ) { + return range_type( *this, grainsize ); + } + const_range_type range( size_type grainsize=1 ) const { + return const_range_type( *this, grainsize ); + } + + //------------------------------------------------------------------------ + // STL support - not thread-safe methods + //------------------------------------------------------------------------ + iterator begin() { return iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); } + const_iterator begin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); } + const_iterator cbegin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); } + iterator end() { return iterator( *this, 0, nullptr, nullptr ); } + const_iterator end() const { return const_iterator( *this, 0, nullptr, nullptr ); } + const_iterator cend() const { return const_iterator( *this, 0, nullptr, nullptr ); } + std::pair equal_range( const Key& key ) { return internal_equal_range( key, end() ); } + std::pair equal_range( const Key& key ) const { return internal_equal_range( key, end() ); } + + template + typename std::enable_if::value, + std::pair>::type equal_range( const K& key ) { + return internal_equal_range(key, end()); + } + + template + typename std::enable_if::value, + std::pair>::type equal_range( const K& key ) const { + return internal_equal_range(key, end()); + } + + // Number of items in table. + size_type size() const { return this->my_size.load(std::memory_order_acquire); } + + // True if size()==0. + __TBB_nodiscard bool empty() const { return size() == 0; } + + // Upper bound on size. + size_type max_size() const { + return allocator_traits_type::max_size(base_type::get_allocator()); + } + + // Returns the current number of buckets + size_type bucket_count() const { return this->my_mask.load(std::memory_order_relaxed) + 1; } + + // return allocator object + allocator_type get_allocator() const { return base_type::get_allocator(); } + + // swap two instances. Iterators are invalidated + void swap(concurrent_hash_map& table) { + using pocs_type = typename node_allocator_traits::propagate_on_container_swap; + using is_equal_type = typename node_allocator_traits::is_always_equal; + swap_allocators(this->my_allocator, table.my_allocator); + internal_swap(table, tbb::detail::disjunction()); + } + + //------------------------------------------------------------------------ + // concurrent map operations + //------------------------------------------------------------------------ + + // Return count of items (0 or 1) + size_type count( const Key &key ) const { + return const_cast(this)->lookup(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node); + } + + template + typename std::enable_if::value, + size_type>::type count( const K& key ) const { + return const_cast(this)->lookup(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node); + } + + // Find item and acquire a read lock on the item. + /** Return true if item is found, false otherwise. 
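+
+       Illustrative usage (sketch; 'table' is a hypothetical
+       concurrent_hash_map<std::string, int>):
+
+           tbb::concurrent_hash_map<std::string, int>::const_accessor acc;
+           if (table.find(acc, "answer"))
+               std::printf("%d\n", acc->second);  // read lock held while 'acc' lives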
*/ + bool find( const_accessor &result, const Key &key ) const { + result.release(); + return const_cast(this)->lookup(key, nullptr, &result, /*write=*/false, &do_not_allocate_node ); + } + + // Find item and acquire a write lock on the item. + /** Return true if item is found, false otherwise. */ + bool find( accessor &result, const Key &key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/true, &do_not_allocate_node); + } + + template + typename std::enable_if::value, + bool>::type find( const_accessor& result, const K& key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/false, &do_not_allocate_node); + } + + template + typename std::enable_if::value, + bool>::type find( accessor& result, const K& key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/true, &do_not_allocate_node); + } + + // Insert item (if not already present) and acquire a read lock on the item. + /** Returns true if item is new. */ + bool insert( const_accessor &result, const Key &key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<>); + } + + // Insert item (if not already present) and acquire a write lock on the item. + /** Returns true if item is new. */ + bool insert( accessor &result, const Key &key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<>); + } + + template + typename std::enable_if::value && + std::is_constructible::value, + bool>::type insert( const_accessor& result, const K& key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct); + } + + template + typename std::enable_if::value && + std::is_constructible::value, + bool>::type insert( accessor& result, const K& key ) { + result.release(); + return lookup(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct); + } + + // Insert item by copying if there is no such key present already and acquire a read lock on the item. + /** Returns true if item is new. */ + bool insert( const_accessor &result, const value_type &value ) { + result.release(); + return lookup(value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct); + } + + // Insert item by copying if there is no such key present already and acquire a write lock on the item. + /** Returns true if item is new. */ + bool insert( accessor &result, const value_type &value ) { + result.release(); + return lookup(value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct); + } + + // Insert item by copying if there is no such key present already + /** Returns true if item is inserted. */ + bool insert( const value_type &value ) { + return lookup(value.first, &value.second, nullptr, /*write=*/false, &allocate_node_copy_construct); + } + + // Insert item by copying if there is no such key present already and acquire a read lock on the item. + /** Returns true if item is new. */ + bool insert( const_accessor &result, value_type && value ) { + return generic_move_insert(result, std::move(value)); + } + + // Insert item by copying if there is no such key present already and acquire a write lock on the item. + /** Returns true if item is new. */ + bool insert( accessor &result, value_type && value ) { + return generic_move_insert(result, std::move(value)); + } + + // Insert item by copying if there is no such key present already + /** Returns true if item is inserted. 
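+
+       Illustrative usage (sketch; 'table' as in the example above):
+
+           bool fresh = table.insert({"answer", 42});  // no accessor: insert, or
+                                                       // leave an existing value untouched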
*/ + bool insert( value_type && value ) { + return generic_move_insert(accessor_not_used(), std::move(value)); + } + + // Insert item by copying if there is no such key present already and acquire a read lock on the item. + /** Returns true if item is new. */ + template + bool emplace( const_accessor &result, Args&&... args ) { + return generic_emplace(result, std::forward(args)...); + } + + // Insert item by copying if there is no such key present already and acquire a write lock on the item. + /** Returns true if item is new. */ + template + bool emplace( accessor &result, Args&&... args ) { + return generic_emplace(result, std::forward(args)...); + } + + // Insert item by copying if there is no such key present already + /** Returns true if item is inserted. */ + template + bool emplace( Args&&... args ) { + return generic_emplace(accessor_not_used(), std::forward(args)...); + } + + // Insert range [first, last) + template + void insert( I first, I last ) { + for ( ; first != last; ++first ) + insert( *first ); + } + + // Insert initializer list + void insert( std::initializer_list il ) { + insert( il.begin(), il.end() ); + } + + // Erase item. + /** Return true if item was erased by particularly this call. */ + bool erase( const Key &key ) { + return internal_erase(key); + } + + template + typename std::enable_if::value, + bool>::type erase( const K& key ) { + return internal_erase(key); + } + + // Erase item by const_accessor. + /** Return true if item was erased by particularly this call. */ + bool erase( const_accessor& item_accessor ) { + return exclude( item_accessor ); + } + + // Erase item by accessor. + /** Return true if item was erased by particularly this call. */ + bool erase( accessor& item_accessor ) { + return exclude( item_accessor ); + } + +protected: + template + node* allocate_node_helper( const K& key, const T* t, AllocateNodeType allocate_node, std::true_type ) { + return allocate_node(base_type::get_allocator(), key, t); + } + + template + node* allocate_node_helper( const K&, const T*, AllocateNodeType, std::false_type ) { + __TBB_ASSERT(false, "allocate_node_helper with std::false_type should never been called"); + return nullptr; + } + + // Insert or find item and optionally acquire a lock on the item. 
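+ // All public find()/insert()/count()/emplace() overloads funnel into lookup():
+ // OpInsert selects insert-or-find versus find-only behaviour, 'result' (possibly
+ // nullptr) receives the locked item, and 'allocate_node' supplies the node
+ // construction policy used on the insert path.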
+ template + bool lookup( const K &key, const T *t, const_accessor *result, bool write, AllocateNodeType allocate_node, node *tmp_n = nullptr) + { + __TBB_ASSERT( !result || !result->my_node, nullptr ); + bool return_value; + hashcode_type const h = my_hash_compare.hash( key ); + hashcode_type m = this->my_mask.load(std::memory_order_acquire); + segment_index_type grow_segment = 0; + node *n; + restart: + {//lock scope + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + return_value = false; + // get bucket + bucket_accessor b( this, h & m ); + // find a node + n = search_bucket( key, b() ); + if( OpInsert ) { + // [opt] insert a key + if( !n ) { + if( !tmp_n ) { + tmp_n = allocate_node_helper(key, t, allocate_node, std::integral_constant{}); + } + while ( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion + // Rerun search list, in case another thread inserted the intem during the upgrade + n = search_bucket(key, b()); + if (this->is_valid(n)) { // unfortunately, it did + if (!b.downgrade_to_reader()) { + // If the lock was downgraded with reacquiring the mutex + // Rerun search list in case another thread removed the item during the downgrade + n = search_bucket(key, b()); + if (!this->is_valid(n)) { + // Unfortunately, it did + // We need to try upgrading to writer again + continue; + } + } + goto exists; + } + } + + if( this->check_mask_race(h, m) ) + goto restart; // b.release() is done in ~b(). + // insert and set flag to grow the container + grow_segment = this->insert_new_node( b(), n = tmp_n, m ); + tmp_n = nullptr; + return_value = true; + } + } else { // find or count + if( !n ) { + if( this->check_mask_race( h, m ) ) + goto restart; // b.release() is done in ~b(). TODO: replace by continue + return false; + } + return_value = true; + } + exists: + if( !result ) goto check_growth; + // TODO: the following seems as generic/regular operation + // acquire the item + if( !result->try_acquire( n->mutex, write ) ) { + for( tbb::detail::atomic_backoff backoff(true);; ) { + if( result->try_acquire( n->mutex, write ) ) break; + if( !backoff.bounded_pause() ) { + // the wait takes really long, restart the operation + b.release(); + __TBB_ASSERT( !OpInsert || !return_value, "Can't acquire new item in locked bucket?" ); + yield(); + m = this->my_mask.load(std::memory_order_acquire); + goto restart; + } + } + } + }//lock scope + result->my_node = n; + result->my_hash = h; + check_growth: + // [opt] grow the container + if( grow_segment ) { + this->enable_segment( grow_segment ); + } + if( tmp_n ) // if OpInsert only + delete_node( tmp_n ); + return return_value; + } + + struct accessor_not_used { void release(){}}; + friend const_accessor* accessor_location( accessor_not_used const& ){ return nullptr;} + friend const_accessor* accessor_location( const_accessor & a ) { return &a;} + + friend bool is_write_access_needed( accessor const& ) { return true;} + friend bool is_write_access_needed( const_accessor const& ) { return false;} + friend bool is_write_access_needed( accessor_not_used const& ) { return false;} + + template + bool generic_move_insert( Accessor && result, value_type && value ) { + result.release(); + return lookup(value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct); + } + + template + bool generic_emplace( Accessor && result, Args &&... 
args ) { + result.release(); + node * node_ptr = create_node(base_type::get_allocator(), std::forward(args)...); + return lookup(node_ptr->value().first, nullptr, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr); + } + + // delete item by accessor + bool exclude( const_accessor &item_accessor ) { + __TBB_ASSERT( item_accessor.my_node, nullptr ); + node_base *const exclude_node = item_accessor.my_node; + hashcode_type const hash = item_accessor.my_hash; + hashcode_type mask = this->my_mask.load(std::memory_order_acquire); + do { + // get bucket + bucket_accessor b( this, hash & mask, /*writer=*/true ); + node_base* prev = nullptr; + node_base* curr = b()->node_list.load(std::memory_order_relaxed); + + while (curr && curr != exclude_node) { + prev = curr; + curr = curr->next; + } + + if (curr == nullptr) { // someone else was first + if (this->check_mask_race(hash, mask)) + continue; + item_accessor.release(); + return false; + } + __TBB_ASSERT( curr == exclude_node, nullptr ); + // remove from container + if (prev == nullptr) { + b()->node_list.store(curr->next, std::memory_order_relaxed); + } else { + prev->next = curr->next; + } + + this->my_size--; + break; + } while(true); + if (!item_accessor.is_writer()) { // need to get exclusive lock + item_accessor.upgrade_to_writer(); // return value means nothing here + } + + item_accessor.release(); + delete_node(exclude_node); // Only one thread can delete it + return true; + } + + template + bool internal_erase( const K& key ) { + node_base *erase_node; + hashcode_type const hash = my_hash_compare.hash(key); + hashcode_type mask = this->my_mask.load(std::memory_order_acquire); + restart: + {//lock scope + // get bucket + bucket_accessor b( this, hash & mask ); + search: + node_base* prev = nullptr; + erase_node = b()->node_list.load(std::memory_order_relaxed); + while (this->is_valid(erase_node) && !my_hash_compare.equal(key, static_cast(erase_node)->value().first ) ) { + prev = erase_node; + erase_node = erase_node->next; + } + + if (erase_node == nullptr) { // not found, but mask could be changed + if (this->check_mask_race(hash, mask)) + goto restart; + return false; + } else if (!b.is_writer() && !b.upgrade_to_writer()) { + if (this->check_mask_race(hash, mask)) // contended upgrade, check mask + goto restart; + goto search; + } + + // remove from container + if (prev == nullptr) { + b()->node_list.store(erase_node->next, std::memory_order_relaxed); + } else { + prev->next = erase_node->next; + } + this->my_size--; + } + { + typename node::scoped_type item_locker( erase_node->mutex, /*write=*/true ); + } + // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! 
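+ // (the transient scoped_type acquire/release above simply waits for any
+ // accessor still holding the item's mutex to drain before the node dies)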
+ delete_node(erase_node); // Only one thread can delete it due to write lock on the bucket + return true; + } + + // Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) + template + std::pair internal_equal_range( const K& key, I end_ ) const { + hashcode_type h = my_hash_compare.hash( key ); + hashcode_type m = this->my_mask.load(std::memory_order_relaxed); + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + h &= m; + bucket *b = this->get_bucket( h ); + while (rehash_required(b->node_list.load(std::memory_order_relaxed))) { + m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit + b = this->get_bucket( h &= m ); + } + node *n = search_bucket( key, b ); + if( !n ) + return std::make_pair(end_, end_); + iterator lower(*this, h, b, n), upper(lower); + return std::make_pair(lower, ++upper); + } + + // Copy "source" to *this, where *this must start out empty. + void internal_copy( const concurrent_hash_map& source ) { + hashcode_type mask = source.my_mask.load(std::memory_order_relaxed); + if( this->my_mask.load(std::memory_order_relaxed) == mask ) { // optimized version + this->reserve(source.my_size.load(std::memory_order_relaxed)); // TODO: load_factor? + bucket *dst = nullptr, *src = nullptr; + bool rehashing_required = false; + for( hashcode_type k = 0; k <= mask; k++ ) { + if( k & (k-2) ) ++dst,src++; // not the beginning of a segment + else { dst = this->get_bucket( k ); src = source.get_bucket( k ); } + __TBB_ASSERT(!rehash_required(dst->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table"); + node *n = static_cast( src->node_list.load(std::memory_order_relaxed) ); + if (rehash_required(n)) { // source is not rehashed, items are in previous buckets + rehashing_required = true; + dst->node_list.store(reinterpret_cast(rehash_req_flag), std::memory_order_relaxed); + } else for(; n; n = static_cast( n->next ) ) { + node* node_ptr = create_node(base_type::get_allocator(), n->value().first, n->value().second); + this->add_to_bucket( dst, node_ptr); + this->my_size.fetch_add(1, std::memory_order_relaxed); + } + } + if( rehashing_required ) rehash(); + } else internal_copy(source.begin(), source.end(), source.my_size.load(std::memory_order_relaxed)); + } + + template + void internal_copy( I first, I last, size_type reserve_size ) { + this->reserve(reserve_size); // TODO: load_factor? 
+ hashcode_type m = this->my_mask.load(std::memory_order_relaxed); + for(; first != last; ++first) { + hashcode_type h = my_hash_compare.hash( (*first).first ); + bucket *b = this->get_bucket( h & m ); + __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table"); + node* node_ptr = create_node(base_type::get_allocator(), (*first).first, (*first).second); + this->add_to_bucket( b, node_ptr ); + ++this->my_size; // TODO: replace by non-atomic op + } + } + + void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type&, + /*is_always_equal=*/std::true_type ) + { + this->internal_move(std::move(other)); + } + + void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type& a, + /*is_always_equal=*/std::false_type ) + { + if (a == other.get_allocator()){ + this->internal_move(std::move(other)); + } else { + try_call( [&] { + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), + other.size()); + }).on_exception( [&] { + this->clear(); + }); + } + } + + void internal_move_assign( concurrent_hash_map&& other, + /*is_always_equal || POCMA = */std::true_type) + { + this->internal_move(std::move(other)); + } + + void internal_move_assign(concurrent_hash_map&& other, /*is_always_equal=*/ std::false_type) { + if (this->my_allocator == other.my_allocator) { + this->internal_move(std::move(other)); + } else { + //do per element move + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), + other.size()); + } + } + + void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::true_type) { + this->internal_swap_content(other); + } + + void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::false_type) { + __TBB_ASSERT(this->my_allocator == other.my_allocator, nullptr); + this->internal_swap_content(other); + } + + // Fast find when no concurrent erasure is used. For internal use inside TBB only! + /** Return pointer to item with given key, or nullptr if no such item exists. + Must not be called concurrently with erasure operations. 
*/ + const_pointer internal_fast_find( const Key& key ) const { + hashcode_type h = my_hash_compare.hash( key ); + hashcode_type m = this->my_mask.load(std::memory_order_acquire); + node *n; + restart: + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + bucket *b = this->get_bucket( h & m ); + // TODO: actually, notification is unnecessary here, just hiding double-check + if (rehash_required(b->node_list.load(std::memory_order_acquire))) + { + typename bucket::scoped_type lock; + if( lock.try_acquire( b->mutex, /*write=*/true ) ) { + if (rehash_required(b->node_list.load(std::memory_order_relaxed))) + const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing + } + else lock.acquire( b->mutex, /*write=*/false ); + __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr); + } + n = search_bucket( key, b ); + if( n ) + return n->storage(); + else if( this->check_mask_race( h, m ) ) + goto restart; + return nullptr; + } +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template >, + typename Alloc = tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_hash_map( It, It, HashCompare = HashCompare(), Alloc = Alloc() ) +-> concurrent_hash_map, iterator_mapped_t, HashCompare, Alloc>; + +template >, + typename = std::enable_if_t>> +concurrent_hash_map( It, It, Alloc ) +-> concurrent_hash_map, iterator_mapped_t, d1::tbb_hash_compare>, Alloc>; + +template >, + typename Alloc = tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_hash_map( std::initializer_list>, HashCompare = HashCompare(), Alloc = Alloc() ) +-> concurrent_hash_map, T, HashCompare, Alloc>; + +template >> +concurrent_hash_map( std::initializer_list>, Alloc ) +-> concurrent_hash_map, T, d1::tbb_hash_compare>, Alloc>; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +template +inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { + if(a.size() != b.size()) return false; + typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); + typename concurrent_hash_map::const_iterator j, j_end(b.end()); + for(; i != i_end; ++i) { + j = b.equal_range(i->first).first; + if( j == j_end || !(i->second == j->second) ) return false; + } + return true; +} + +#if !__TBB_CPP20_COMPARISONS_PRESENT +template +inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) +{ return !(a == b); } +#endif // !__TBB_CPP20_COMPARISONS_PRESENT + +template +inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) +{ a.swap( b ); } + +} // namespace d2 +} // namespace detail + +inline namespace v1 { + using detail::split; + using detail::d2::concurrent_hash_map; + using detail::d1::tbb_hash_compare; +} // namespace v1 + +} // namespace tbb + +#endif /* __TBB_concurrent_hash_map_H */ diff --git a/src/tbb/include/oneapi/tbb/concurrent_lru_cache.h b/src/tbb/include/oneapi/tbb/concurrent_lru_cache.h new file mode 100644 index 000000000..83d0576ea --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_lru_cache.h @@ -0,0 +1,374 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_lru_cache_H +#define __TBB_concurrent_lru_cache_H + +#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE + #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h +#endif + +#include "detail/_assert.h" +#include "detail/_aggregator.h" + +#include // for std::map +#include // for std::list +#include // for std::make_pair +#include // for std::find +#include // for std::atomic + +namespace tbb { + +namespace detail { +namespace d1 { + +//----------------------------------------------------------------------------- +// Concurrent LRU cache +//----------------------------------------------------------------------------- + +template +class concurrent_lru_cache : no_assign { +// incapsulated helper classes +private: + struct handle_object; + struct storage_map_value_type; + + struct aggregator_operation; + struct retrieve_aggregator_operation; + struct signal_end_of_usage_aggregator_operation; + +// typedefs +public: + using key_type = KeyT; + using value_type = ValT; + using pointer = ValT*; + using reference = ValT&; + using const_pointer = const ValT*; + using const_reference = const ValT&; + + using value_function_type = KeyToValFunctorT; + using handle = handle_object; +private: + using lru_cache_type = concurrent_lru_cache; + + using storage_map_type = std::map; + using storage_map_iterator_type = typename storage_map_type::iterator; + using storage_map_pointer_type = typename storage_map_type::pointer; + using storage_map_reference_type = typename storage_map_type::reference; + + using history_list_type = std::list; + using history_list_iterator_type = typename history_list_type::iterator; + + using aggregator_operation_type = aggregator_operation; + using aggregator_function_type = aggregating_functor; + using aggregator_type = aggregator; + + friend class aggregating_functor; + +// fields +private: + value_function_type my_value_function; + aggregator_type my_aggregator; + + storage_map_type my_storage_map; // storage map for used objects + history_list_type my_history_list; // history list for unused objects + const std::size_t my_history_list_capacity; // history list's allowed capacity + +// interface +public: + + concurrent_lru_cache(value_function_type value_function, std::size_t cache_capacity) + : my_value_function(value_function), my_history_list_capacity(cache_capacity) { + my_aggregator.initialize_handler(aggregator_function_type(this)); + } + + handle operator[](key_type key) { + retrieve_aggregator_operation op(key); + my_aggregator.execute(&op); + + if (op.is_new_value_needed()) { + op.result().second.my_value = my_value_function(key); + op.result().second.my_is_ready.store(true, std::memory_order_release); + } else { + spin_wait_while_eq(op.result().second.my_is_ready, false); + } + + return handle(*this, op.result()); + } + +private: + + void handle_operations(aggregator_operation* op_list) { + while (op_list) { + op_list->cast_and_handle(*this); + aggregator_operation* prev_op = op_list; + op_list = op_list->next; + + (prev_op->status).store(1, std::memory_order_release); + } + } + + void 
signal_end_of_usage(storage_map_reference_type map_record_ref) { + signal_end_of_usage_aggregator_operation op(map_record_ref); + my_aggregator.execute(&op); + } + + void signal_end_of_usage_serial(storage_map_reference_type map_record_ref) { + storage_map_iterator_type map_it = my_storage_map.find(map_record_ref.first); + + __TBB_ASSERT(map_it != my_storage_map.end(), + "cache should not return past-end iterators to outer world"); + __TBB_ASSERT(&(*map_it) == &map_record_ref, + "dangling reference has been returned to outside world: data race?"); + __TBB_ASSERT(std::find(my_history_list.begin(), my_history_list.end(), map_it) == my_history_list.end(), + "object in use should not be in list of unused objects "); + + // if it was the last reference, put it to the LRU history + if (! --(map_it->second.my_ref_counter)) { + // if the LRU history is full, evict the oldest items to get space + if (my_history_list.size() >= my_history_list_capacity) { + if (my_history_list_capacity == 0) { + // Since LRU history capacity is zero, there is no need to keep the element in history + my_storage_map.erase(map_it); + return; + } + std::size_t number_of_elements_to_evict = 1 + my_history_list.size() - my_history_list_capacity; + + for (std::size_t i = 0; i < number_of_elements_to_evict; ++i) { + storage_map_iterator_type map_it_to_evict = my_history_list.back(); + + __TBB_ASSERT(map_it_to_evict->second.my_ref_counter == 0, + "item to be evicted should not have a live references"); + + // TODO: can we use forward_list instead of list? pop_front / insert_after last + my_history_list.pop_back(); + my_storage_map.erase(map_it_to_evict); + } + } + + // TODO: can we use forward_list instead of list? pop_front / insert_after last + my_history_list.push_front(map_it); + map_it->second.my_history_list_iterator = my_history_list.begin(); + } + } + + storage_map_reference_type retrieve_serial(key_type key, bool& is_new_value_needed) { + storage_map_iterator_type map_it = my_storage_map.find(key); + + if (map_it == my_storage_map.end()) { + map_it = my_storage_map.emplace_hint( + map_it, std::piecewise_construct, std::make_tuple(key), std::make_tuple(value_type(), 0, my_history_list.end(), false)); + is_new_value_needed = true; + } else { + history_list_iterator_type list_it = map_it->second.my_history_list_iterator; + if (list_it != my_history_list.end()) { + __TBB_ASSERT(map_it->second.my_ref_counter == 0, + "item to be evicted should not have a live references"); + + // Item is going to be used. Therefore it is not a subject for eviction, + // so we remove it from LRU history. 
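+ // (an entry sits in my_history_list only while its reference count is zero;
+ // a retrieval pulls it back out, and the final release re-inserts it at the front)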
+ my_history_list.erase(list_it); + map_it->second.my_history_list_iterator = my_history_list.end(); + } + } + + ++(map_it->second.my_ref_counter); + return *map_it; + } +}; + +//----------------------------------------------------------------------------- +// Value type for storage map in concurrent LRU cache +//----------------------------------------------------------------------------- + +template +struct concurrent_lru_cache::storage_map_value_type { +//typedefs +public: + using ref_counter_type = std::size_t; + +// fields +public: + value_type my_value; + ref_counter_type my_ref_counter; + history_list_iterator_type my_history_list_iterator; + std::atomic my_is_ready; + +// interface +public: + storage_map_value_type( + value_type const& value, ref_counter_type ref_counter, + history_list_iterator_type history_list_iterator, bool is_ready) + : my_value(value), my_ref_counter(ref_counter), + my_history_list_iterator(history_list_iterator), my_is_ready(is_ready) {} +}; + +//----------------------------------------------------------------------------- +// Handle object for operator[] in concurrent LRU cache +//----------------------------------------------------------------------------- + +template +struct concurrent_lru_cache::handle_object { +// fields +private: + lru_cache_type* my_lru_cache_ptr; + storage_map_pointer_type my_map_record_ptr; + +// interface +public: + handle_object() + : my_lru_cache_ptr(nullptr), my_map_record_ptr(nullptr) {} + handle_object(lru_cache_type& lru_cache_ref, storage_map_reference_type map_record_ref) + : my_lru_cache_ptr(&lru_cache_ref), my_map_record_ptr(&map_record_ref) {} + + handle_object(handle_object&) = delete; + void operator=(handle_object&) = delete; + + handle_object(handle_object&& other) + : my_lru_cache_ptr(other.my_lru_cache_ptr), my_map_record_ptr(other.my_map_record_ptr) { + + __TBB_ASSERT( + (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) || + (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr), + "invalid state of moving object?"); + + other.my_lru_cache_ptr = nullptr; + other.my_map_record_ptr = nullptr; + } + + handle_object& operator=(handle_object&& other) { + __TBB_ASSERT( + (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) || + (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr), + "invalid state of moving object?"); + + if (my_lru_cache_ptr) + my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr); + + my_lru_cache_ptr = other.my_lru_cache_ptr; + my_map_record_ptr = other.my_map_record_ptr; + other.my_lru_cache_ptr = nullptr; + other.my_map_record_ptr = nullptr; + + return *this; + } + + ~handle_object() { + if (my_lru_cache_ptr) + my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr); + } + + operator bool() const { + return (my_lru_cache_ptr && my_map_record_ptr); + } + + value_type& value() { + __TBB_ASSERT(my_lru_cache_ptr, "get value from already moved object?"); + __TBB_ASSERT(my_map_record_ptr, "get value from an invalid or already moved object?"); + + return my_map_record_ptr->second.my_value; + } +}; + +//----------------------------------------------------------------------------- +// Aggregator operation for aggregator type in concurrent LRU cache +//----------------------------------------------------------------------------- + +template +struct concurrent_lru_cache::aggregator_operation + : aggregated_operation { +// incapsulated helper classes +public: + enum class op_type { retrieve, signal_end_of_usage }; + +// 
fields +private: + op_type my_op; + +// interface +public: + aggregator_operation(op_type op) : my_op(op) {} + + // TODO: aggregator_operation can be implemented + // - as a statically typed variant type or CRTP? (static, dependent on the use case) + // - or use pointer to function and apply_visitor (dynamic) + // - or use virtual functions (dynamic) + void cast_and_handle(lru_cache_type& lru_cache_ref) { + if (my_op == op_type::retrieve) + static_cast(this)->handle(lru_cache_ref); + else + static_cast(this)->handle(lru_cache_ref); + } +}; + +template +struct concurrent_lru_cache::retrieve_aggregator_operation + : aggregator_operation, private no_assign { +public: + key_type my_key; + storage_map_pointer_type my_map_record_ptr; + bool my_is_new_value_needed; + +public: + retrieve_aggregator_operation(key_type key) + : aggregator_operation(aggregator_operation::op_type::retrieve), + my_key(key), my_map_record_ptr(nullptr), my_is_new_value_needed(false) {} + + void handle(lru_cache_type& lru_cache_ref) { + my_map_record_ptr = &lru_cache_ref.retrieve_serial(my_key, my_is_new_value_needed); + } + + storage_map_reference_type result() { + __TBB_ASSERT(my_map_record_ptr, "Attempt to call result() before calling handle()"); + return *my_map_record_ptr; + } + + bool is_new_value_needed() { return my_is_new_value_needed; } +}; + +template +struct concurrent_lru_cache::signal_end_of_usage_aggregator_operation + : aggregator_operation, private no_assign { + +private: + storage_map_reference_type my_map_record_ref; + +public: + signal_end_of_usage_aggregator_operation(storage_map_reference_type map_record_ref) + : aggregator_operation(aggregator_operation::op_type::signal_end_of_usage), + my_map_record_ref(map_record_ref) {} + + void handle(lru_cache_type& lru_cache_ref) { + lru_cache_ref.signal_end_of_usage_serial(my_map_record_ref); + } +}; + +// TODO: if we have guarantees that KeyToValFunctorT always have +// ValT as a return type and KeyT as an argument type +// we can deduce template parameters of concurrent_lru_cache +// by pattern matching on KeyToValFunctorT + +} // namespace d1 +} // namespace detail + +inline namespace v1 { + +using detail::d1::concurrent_lru_cache; + +} // inline namespace v1 +} // namespace tbb + +#endif // __TBB_concurrent_lru_cache_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_map.h b/src/tbb/include/oneapi/tbb/concurrent_map.h new file mode 100644 index 000000000..c1817a18c --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_map.h @@ -0,0 +1,350 @@ +/* + Copyright (c) 2019-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_concurrent_map_H +#define __TBB_concurrent_map_H + +#include "detail/_namespace_injection.h" +#include "detail/_concurrent_skip_list.h" +#include "tbb_allocator.h" +#include +#include +#include + +namespace tbb { +namespace detail { +namespace d2 { + +template +struct map_traits { + static constexpr std::size_t max_level = RandomGenerator::max_level; + using random_level_generator_type = RandomGenerator; + using key_type = Key; + using mapped_type = Value; + using compare_type = KeyCompare; + using value_type = std::pair; + using reference = value_type&; + using const_reference = const value_type&; + using allocator_type = Allocator; + + static constexpr bool allow_multimapping = AllowMultimapping; + + class value_compare { + public: + bool operator()(const value_type& lhs, const value_type& rhs) const { + return comp(lhs.first, rhs.first); + } + + protected: + value_compare(compare_type c) : comp(c) {} + + friend struct map_traits; + + compare_type comp; + }; + + static value_compare value_comp(compare_type comp) { return value_compare(comp); } + + static const key_type& get_key(const_reference val) { + return val.first; + } +}; // struct map_traits + +template +class concurrent_multimap; + +template , typename Allocator = tbb::tbb_allocator>> +class concurrent_map : public concurrent_skip_list, Allocator, false>> { + using base_type = concurrent_skip_list, Allocator, false>>; +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Compare; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + + using node_type = typename base_type::node_type; + + // Include constructors of base type + using base_type::base_type; + + // Required for implicit deduction guides + concurrent_map() = default; + concurrent_map( const concurrent_map& ) = default; + concurrent_map( const concurrent_map& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_map( concurrent_map&& ) = default; + concurrent_map( concurrent_map&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_map& operator=( const concurrent_map& ) = default; + concurrent_map& operator=( concurrent_map&& ) = default; + + concurrent_map& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + // Observers + mapped_type& at(const key_type& key) { + iterator it = this->find(key); + + if (it == this->end()) { + throw_exception(exception_id::invalid_key); + } + return it->second; + } + + const mapped_type& at(const key_type& key) const { + return const_cast(this)->at(key); + } + + mapped_type& operator[](const key_type& key) { + iterator it = this->find(key); + + if (it == this->end()) { + it = this->emplace(std::piecewise_construct, std::forward_as_tuple(key), std::tuple<>()).first; + } + return it->second; + } + + mapped_type& operator[](key_type&& key) { + iterator it = this->find(key); + + if (it == this->end()) { + it = 
this->emplace(std::piecewise_construct, std::forward_as_tuple(std::move(key)), std::tuple<>()).first; + } + return it->second; + } + + using base_type::insert; + + template + typename std::enable_if::value, + std::pair>::type insert( P&& value ) + { + return this->emplace(std::forward
<P>
(value)); + } + + template + typename std::enable_if::value, + iterator>::type insert( const_iterator hint, P&& value ) + { + return this->emplace_hint(hint, std::forward
<P>
(value)); + } + + template + void merge(concurrent_map& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_map&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multimap& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multimap&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_map + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template >, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_map( It, It, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_map, iterator_mapped_t, Comp, Alloc>; + +template >, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_map( std::initializer_list>, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_map, T, Comp, Alloc>; + +template >, + typename = std::enable_if_t>> +concurrent_map( It, It, Alloc ) +-> concurrent_map, iterator_mapped_t, + std::less>, Alloc>; + +template >> +concurrent_map( std::initializer_list>, Alloc ) +-> concurrent_map, T, std::less>, Alloc>; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_map& lhs, + concurrent_map& rhs ) +{ + lhs.swap(rhs); +} + +template , typename Allocator = tbb::tbb_allocator>> +class concurrent_multimap : public concurrent_skip_list, Allocator, true>> { + using base_type = concurrent_skip_list, Allocator, true>>; +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Compare; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + + using node_type = typename base_type::node_type; + + // Include constructors of base_type + using base_type::base_type; + using base_type::insert; + + // Required for implicit deduction guides + concurrent_multimap() = default; + concurrent_multimap( const concurrent_multimap& ) = default; + concurrent_multimap( const concurrent_multimap& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_multimap( concurrent_multimap&& ) = default; + concurrent_multimap( concurrent_multimap&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_multimap& operator=( const concurrent_multimap& ) = default; + concurrent_multimap& operator=( concurrent_multimap&& ) = default; + + concurrent_multimap& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + template + typename std::enable_if::value, + std::pair>::type insert( P&& value ) + { + return this->emplace(std::forward
<P>
(value)); + } + + template + typename std::enable_if::value, + iterator>::type insert( const_iterator hint, P&& value ) + { + return this->emplace_hint(hint, std::forward
<P>
(value)); + } + + template + void merge(concurrent_multimap& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multimap&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_map& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_map&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_multimap + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template >, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_multimap( It, It, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_multimap, iterator_mapped_t, Comp, Alloc>; + +template >, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_multimap( std::initializer_list>, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_multimap, T, Comp, Alloc>; + +template >, + typename = std::enable_if_t>> +concurrent_multimap( It, It, Alloc ) +-> concurrent_multimap, iterator_mapped_t, + std::less>, Alloc>; + +template >> +concurrent_multimap( std::initializer_list>, Alloc ) +-> concurrent_multimap, T, std::less>, Alloc>; + + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_multimap& lhs, + concurrent_multimap& rhs ) +{ + lhs.swap(rhs); +} + +} // namespace d2 +} // namespace detail + +inline namespace v1 { + +using detail::d2::concurrent_map; +using detail::d2::concurrent_multimap; +using detail::split; + +} // inline namespace v1 +} // namespace tbb + +#endif // __TBB_concurrent_map_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_priority_queue.h b/src/tbb/include/oneapi/tbb/concurrent_priority_queue.h new file mode 100644 index 000000000..6d7eb3eb9 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_priority_queue.h @@ -0,0 +1,490 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_concurrent_priority_queue_H +#define __TBB_concurrent_priority_queue_H + +#include "detail/_namespace_injection.h" +#include "detail/_aggregator.h" +#include "detail/_template_helpers.h" +#include "detail/_allocator_traits.h" +#include "detail/_range_common.h" +#include "detail/_exception.h" +#include "detail/_utils.h" +#include "detail/_containers_helpers.h" +#include "cache_aligned_allocator.h" +#include +#include +#include +#include +#include +#include + +namespace tbb { +namespace detail { +namespace d1 { + +template , typename Allocator = cache_aligned_allocator> +class concurrent_priority_queue { +public: + using value_type = T; + using reference = T&; + using const_reference = const T&; + + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + using allocator_type = Allocator; + + concurrent_priority_queue() : concurrent_priority_queue(allocator_type{}) {} + + explicit concurrent_priority_queue( const allocator_type& alloc ) + : mark(0), my_size(0), my_compare(), data(alloc) + { + my_aggregator.initialize_handler(functor{this}); + } + + explicit concurrent_priority_queue( const Compare& compare, const allocator_type& alloc = allocator_type() ) + : mark(0), my_size(0), my_compare(compare), data(alloc) + { + my_aggregator.initialize_handler(functor{this}); + } + + explicit concurrent_priority_queue( size_type init_capacity, const allocator_type& alloc = allocator_type() ) + : mark(0), my_size(0), my_compare(), data(alloc) + { + data.reserve(init_capacity); + my_aggregator.initialize_handler(functor{this}); + } + + explicit concurrent_priority_queue( size_type init_capacity, const Compare& compare, const allocator_type& alloc = allocator_type() ) + : mark(0), my_size(0), my_compare(compare), data(alloc) + { + data.reserve(init_capacity); + my_aggregator.initialize_handler(functor{this}); + } + + template + concurrent_priority_queue( InputIterator begin, InputIterator end, const Compare& compare, const allocator_type& alloc = allocator_type() ) + : mark(0), my_compare(compare), data(begin, end, alloc) + { + my_aggregator.initialize_handler(functor{this}); + heapify(); + my_size.store(data.size(), std::memory_order_relaxed); + } + + template + concurrent_priority_queue( InputIterator begin, InputIterator end, const allocator_type& alloc = allocator_type() ) + : concurrent_priority_queue(begin, end, Compare(), alloc) {} + + concurrent_priority_queue( std::initializer_list init, const Compare& compare, const allocator_type& alloc = allocator_type() ) + : concurrent_priority_queue(init.begin(), init.end(), compare, alloc) {} + + concurrent_priority_queue( std::initializer_list init, const allocator_type& alloc = allocator_type() ) + : concurrent_priority_queue(init, Compare(), alloc) {} + + concurrent_priority_queue( const concurrent_priority_queue& other ) + : mark(other.mark), my_size(other.my_size.load(std::memory_order_relaxed)), my_compare(other.my_compare), + data(other.data) + { + my_aggregator.initialize_handler(functor{this}); + } + + concurrent_priority_queue( const concurrent_priority_queue& other, const allocator_type& alloc ) + : mark(other.mark), my_size(other.my_size.load(std::memory_order_relaxed)), my_compare(other.my_compare), + data(other.data, alloc) + { + my_aggregator.initialize_handler(functor{this}); + } + + concurrent_priority_queue( concurrent_priority_queue&& other ) + : mark(other.mark), my_size(other.my_size.load(std::memory_order_relaxed)), my_compare(other.my_compare), + data(std::move(other.data)) + { + 
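+ // (each constructor installs the aggregation handler before the queue can be
+ // used; every push/try_pop/emplace is then funneled through my_aggregator)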
my_aggregator.initialize_handler(functor{this}); + } + + concurrent_priority_queue( concurrent_priority_queue&& other, const allocator_type& alloc ) + : mark(other.mark), my_size(other.my_size.load(std::memory_order_relaxed)), my_compare(other.my_compare), + data(std::move(other.data), alloc) + { + my_aggregator.initialize_handler(functor{this}); + } + + concurrent_priority_queue& operator=( const concurrent_priority_queue& other ) { + if (this != &other) { + data = other.data; + mark = other.mark; + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + } + return *this; + } + + concurrent_priority_queue& operator=( concurrent_priority_queue&& other ) { + if (this != &other) { + // TODO: check if exceptions from std::vector::operator=(vector&&) should be handled separately + data = std::move(other.data); + mark = other.mark; + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + } + return *this; + } + + concurrent_priority_queue& operator=( std::initializer_list init ) { + assign(init.begin(), init.end()); + return *this; + } + + template + void assign( InputIterator begin, InputIterator end ) { + data.assign(begin, end); + mark = 0; + my_size.store(data.size(), std::memory_order_relaxed); + heapify(); + } + + void assign( std::initializer_list init ) { + assign(init.begin(), init.end()); + } + + /* Returned value may not reflect results of pending operations. + This operation reads shared data and will trigger a race condition. */ + __TBB_nodiscard bool empty() const { return size() == 0; } + + // Returns the current number of elements contained in the queue + /* Returned value may not reflect results of pending operations. + This operation reads shared data and will trigger a race condition. */ + size_type size() const { return my_size.load(std::memory_order_relaxed); } + + /* This operation can be safely used concurrently with other push, try_pop or emplace operations. */ + void push( const value_type& value ) { + cpq_operation op_data(value, PUSH_OP); + my_aggregator.execute(&op_data); + if (op_data.status == FAILED) + throw_exception(exception_id::bad_alloc); + } + + /* This operation can be safely used concurrently with other push, try_pop or emplace operations. */ + void push( value_type&& value ) { + cpq_operation op_data(value, PUSH_RVALUE_OP); + my_aggregator.execute(&op_data); + if (op_data.status == FAILED) + throw_exception(exception_id::bad_alloc); + } + + /* This operation can be safely used concurrently with other push, try_pop or emplace operations. */ + template + void emplace( Args&&... args ) { + // TODO: support uses allocator construction in this place + push(value_type(std::forward(args)...)); + } + + // Gets a reference to and removes highest priority element + /* If a highest priority element was found, sets elem and returns true, + otherwise returns false. + This operation can be safely used concurrently with other push, try_pop or emplace operations. 
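+
+       A minimal caller-side sketch (editor's illustration, not part of the
+       upstream header), assuming the default std::less<int> comparator:
+
+           tbb::concurrent_priority_queue<int> cpq;
+           cpq.push(3);            // safe to call from many threads at once
+           cpq.push(7);
+           int top;
+           if (cpq.try_pop(top)) { // succeeds here; top == 7
+               // ... consume top ...
+           }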
*/ + bool try_pop( value_type& value ) { + cpq_operation op_data(value, POP_OP); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + // This operation affects the whole container => it is not thread-safe + void clear() { + data.clear(); + mark = 0; + my_size.store(0, std::memory_order_relaxed); + } + + // This operation affects the whole container => it is not thread-safe + void swap( concurrent_priority_queue& other ) { + if (this != &other) { + using std::swap; + swap(data, other.data); + swap(mark, other.mark); + + size_type sz = my_size.load(std::memory_order_relaxed); + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_size.store(sz, std::memory_order_relaxed); + } + } + + allocator_type get_allocator() const { return data.get_allocator(); } +private: + enum operation_type {INVALID_OP, PUSH_OP, POP_OP, PUSH_RVALUE_OP}; + enum operation_status {WAIT = 0, SUCCEEDED, FAILED}; + + class cpq_operation : public aggregated_operation { + public: + operation_type type; + union { + value_type* elem; + size_type sz; + }; + cpq_operation( const value_type& value, operation_type t ) + : type(t), elem(const_cast(&value)) {} + }; // class cpq_operation + + class functor { + concurrent_priority_queue* my_cpq; + public: + functor() : my_cpq(nullptr) {} + functor( concurrent_priority_queue* cpq ) : my_cpq(cpq) {} + + void operator()(cpq_operation* op_list) { + __TBB_ASSERT(my_cpq != nullptr, "Invalid functor"); + my_cpq->handle_operations(op_list); + } + }; // class functor + + void handle_operations( cpq_operation* op_list ) { + call_itt_notify(acquired, this); + cpq_operation* tmp, *pop_list = nullptr; + __TBB_ASSERT(mark == data.size(), nullptr); + + // First pass processes all constant (amortized; reallocation may happen) time pushes and pops. + while(op_list) { + // ITT note: &(op_list->status) tag is used to cover accesses to op_list + // node. This thread is going to handle the operation, and so will acquire it + // and perform the associated operation w/o triggering a race condition; the + // thread that created the operation is waiting on the status field, so when + // this thread is done with the operation, it will perform a + // store_with_release to give control back to the waiting thread in + // aggregator::insert_operation. 
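+            // Editor's note (illustrative, not upstream): the aggregator hands a
+            // whole batch to a single thread, so `data` needs no lock here. E.g.
+            // given heap [8, 3] and a batch {push 5, push 9, pop}: the pushes
+            // append 5 and 9 past `mark`; a pop handled after them sees
+            // my_compare(data[0] == 8, data.back() == 9) hold and takes 9
+            // straight from the unheapified tail, otherwise it is deferred to
+            // the second pass; heapify() then folds any leftovers into the heap.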
+ // TODO: enable + call_itt_notify(acquired, &(op_list->status)); + __TBB_ASSERT(op_list->type != INVALID_OP, nullptr); + + tmp = op_list; + op_list = op_list->next.load(std::memory_order_relaxed); + if (tmp->type == POP_OP) { + if (mark < data.size() && + my_compare(data[0], data.back())) + { + // there are newly pushed elems and the last one is higher than top + *(tmp->elem) = std::move(data.back()); + my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed); + tmp->status.store(uintptr_t(SUCCEEDED), std::memory_order_release); + + data.pop_back(); + __TBB_ASSERT(mark <= data.size(), nullptr); + } else { // no convenient item to pop; postpone + tmp->next.store(pop_list, std::memory_order_relaxed); + pop_list = tmp; + } + } else { // PUSH_OP or PUSH_RVALUE_OP + __TBB_ASSERT(tmp->type == PUSH_OP || tmp->type == PUSH_RVALUE_OP, "Unknown operation"); +#if TBB_USE_EXCEPTIONS + try +#endif + { + if (tmp->type == PUSH_OP) { + push_back_helper(*(tmp->elem)); + } else { + data.push_back(std::move(*(tmp->elem))); + } + my_size.store(my_size.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); + tmp->status.store(uintptr_t(SUCCEEDED), std::memory_order_release); + } +#if TBB_USE_EXCEPTIONS + catch(...) { + tmp->status.store(uintptr_t(FAILED), std::memory_order_release); + } +#endif + } + } + + // Second pass processes pop operations + while(pop_list) { + tmp = pop_list; + pop_list = pop_list->next.load(std::memory_order_relaxed); + __TBB_ASSERT(tmp->type == POP_OP, nullptr); + if (data.empty()) { + tmp->status.store(uintptr_t(FAILED), std::memory_order_release); + } else { + __TBB_ASSERT(mark <= data.size(), nullptr); + if (mark < data.size() && + my_compare(data[0], data.back())) + { + // there are newly pushed elems and the last one is higher than top + *(tmp->elem) = std::move(data.back()); + my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed); + tmp->status.store(uintptr_t(SUCCEEDED), std::memory_order_release); + data.pop_back(); + } else { // extract top and push last element down heap + *(tmp->elem) = std::move(data[0]); + my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed); + tmp->status.store(uintptr_t(SUCCEEDED), std::memory_order_release); + reheap(); + } + } + } + + // heapify any leftover pushed elements before doing the next + // batch of operations + if (mark < data.size()) heapify(); + __TBB_ASSERT(mark == data.size(), nullptr); + call_itt_notify(releasing, this); + } + + // Merge unsorted elements into heap + void heapify() { + if (!mark && data.size() > 0) mark = 1; + for (; mark < data.size(); ++mark) { + // for each unheapified element under size + size_type cur_pos = mark; + value_type to_place = std::move(data[mark]); + do { // push to_place up the heap + size_type parent = (cur_pos - 1) >> 1; + if (!my_compare(data[parent], to_place)) + break; + data[cur_pos] = std::move(data[parent]); + cur_pos = parent; + } while(cur_pos); + data[cur_pos] = std::move(to_place); + } + } + + // Re-heapify after an extraction + // Re-heapify by pushing last element down the heap from the root. 
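+    // Editor's worked trace (illustrative, not upstream): with std::less and
+    // data == [9, 7, 5, 3], after the top element 9 has been moved out,
+    // reheap() sifts the back element 3 down from the root: 7 beats both 5
+    // and 3, so it is promoted to slot 0; 3 then settles into 7's old slot,
+    // the stale back slot is popped, and the valid heap [7, 3, 5] remains.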
+ void reheap() { + size_type cur_pos = 0, child = 1; + + while(child < mark) { + size_type target = child; + if (child + 1 < mark && my_compare(data[child], data[child + 1])) + ++target; + // target now has the higher priority child + if (my_compare(data[target], data.back())) + break; + data[cur_pos] = std::move(data[target]); + cur_pos = target; + child = (cur_pos << 1) + 1; + } + if (cur_pos != data.size() - 1) + data[cur_pos] = std::move(data.back()); + data.pop_back(); + if (mark > data.size()) mark = data.size(); + } + + void push_back_helper( const T& value ) { + push_back_helper_impl(value, std::is_copy_constructible{}); + } + + void push_back_helper_impl( const T& value, /*is_copy_constructible = */std::true_type ) { + data.push_back(value); + } + + void push_back_helper_impl( const T&, /*is_copy_constructible = */std::false_type ) { + __TBB_ASSERT(false, "error: calling tbb::concurrent_priority_queue.push(const value_type&) for move-only type"); + } + + using aggregator_type = aggregator; + + aggregator_type my_aggregator; + // Padding added to avoid false sharing + char padding1[max_nfs_size - sizeof(aggregator_type)]; + // The point at which unsorted elements begin + size_type mark; + std::atomic my_size; + Compare my_compare; + + // Padding added to avoid false sharing + char padding2[max_nfs_size - (2*sizeof(size_type)) - sizeof(Compare)]; + //! Storage for the heap of elements in queue, plus unheapified elements + /** data has the following structure: + + binary unheapified + heap elements + ____|_______|____ + | | | + v v v + [_|...|_|_|...|_| |...| ] + 0 ^ ^ ^ + | | |__capacity + | |__my_size + |__mark + + Thus, data stores the binary heap starting at position 0 through + mark-1 (it may be empty). Then there are 0 or more elements + that have not yet been inserted into the heap, in positions + mark through my_size-1. 
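+
+        Concretely (editor's illustration, not upstream): after five pushes
+        with mark == 3 and my_size == 5, data[0..2] form the binary heap and
+        data[3..4] hold recent pushes that heapify() has not yet folded in.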
*/ + + using vector_type = std::vector; + vector_type data; + + friend bool operator==( const concurrent_priority_queue& lhs, + const concurrent_priority_queue& rhs ) + { + return lhs.data == rhs.data; + } + +#if !__TBB_CPP20_COMPARISONS_PRESENT + friend bool operator!=( const concurrent_priority_queue& lhs, + const concurrent_priority_queue& rhs ) + { + return !(lhs == rhs); + } +#endif +}; // class concurrent_priority_queue + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template >, + typename Alloc = tbb::cache_aligned_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_priority_queue( It, It, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_priority_queue, Comp, Alloc>; + +template >, + typename = std::enable_if_t>> +concurrent_priority_queue( It, It, Alloc ) +-> concurrent_priority_queue, std::less>, Alloc>; + +template , + typename Alloc = tbb::cache_aligned_allocator, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_priority_queue( std::initializer_list, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_priority_queue; + +template >> +concurrent_priority_queue( std::initializer_list, Alloc ) +-> concurrent_priority_queue, Alloc>; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_priority_queue& lhs, + concurrent_priority_queue& rhs ) +{ + lhs.swap(rhs); +} + +} // namespace d1 +} // namespace detail +inline namespace v1 { +using detail::d1::concurrent_priority_queue; + +} // inline namespace v1 +} // namespace tbb + +#endif // __TBB_concurrent_priority_queue_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_queue.h b/src/tbb/include/oneapi/tbb/concurrent_queue.h new file mode 100644 index 000000000..cfd5db6a5 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_queue.h @@ -0,0 +1,700 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_queue_H +#define __TBB_concurrent_queue_H + +#include "detail/_namespace_injection.h" +#include "detail/_concurrent_queue_base.h" +#include "detail/_allocator_traits.h" +#include "detail/_exception.h" +#include "detail/_containers_helpers.h" +#include "cache_aligned_allocator.h" + +namespace tbb { +namespace detail { +namespace d2 { + +template +std::pair internal_try_pop_impl(void* dst, QueueRep& queue, Allocator& alloc ) { + ticket_type ticket{}; + do { + // Basically, we need to read `head_counter` before `tail_counter`. To achieve it we build happens-before on `head_counter` + ticket = queue.head_counter.load(std::memory_order_acquire); + do { + if (static_cast(queue.tail_counter.load(std::memory_order_relaxed) - ticket) <= 0) { // queue is empty + // Queue is empty + return { false, ticket }; + } + // Queue had item with ticket k when we looked. Attempt to get that item. + // Another thread snatched the item, retry. 
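+            // Editor's note (illustrative, not upstream): if two consumers read
+            // the same ticket k, exactly one CAS k -> k+1 succeeds; for the
+            // loser, compare_exchange_strong reloads `ticket` with the new head
+            // value, and the inner loop retries against that position.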
+ } while (!queue.head_counter.compare_exchange_strong(ticket, ticket + 1)); + } while (!queue.choose(ticket).pop(dst, ticket, queue, alloc)); + return { true, ticket }; +} + +// A high-performance thread-safe non-blocking concurrent queue. +// Multiple threads may each push and pop concurrently. +// Assignment construction is not allowed. +template > +class concurrent_queue { + using allocator_traits_type = tbb::detail::allocator_traits; + using queue_representation_type = concurrent_queue_rep; + using queue_allocator_type = typename allocator_traits_type::template rebind_alloc; + using queue_allocator_traits = tbb::detail::allocator_traits; +public: + using size_type = std::size_t; + using value_type = T; + using reference = T&; + using const_reference = const T&; + using difference_type = std::ptrdiff_t; + + using allocator_type = Allocator; + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + using iterator = concurrent_queue_iterator; + using const_iterator = concurrent_queue_iterator; + + concurrent_queue() : concurrent_queue(allocator_type()) {} + + explicit concurrent_queue(const allocator_type& a) : + my_allocator(a), my_queue_representation(nullptr) + { + my_queue_representation = static_cast(r1::cache_aligned_allocate(sizeof(queue_representation_type))); + queue_allocator_traits::construct(my_allocator, my_queue_representation); + + __TBB_ASSERT(is_aligned(my_queue_representation, max_nfs_size), "alignment error" ); + __TBB_ASSERT(is_aligned(&my_queue_representation->head_counter, max_nfs_size), "alignment error" ); + __TBB_ASSERT(is_aligned(&my_queue_representation->tail_counter, max_nfs_size), "alignment error" ); + __TBB_ASSERT(is_aligned(&my_queue_representation->array, max_nfs_size), "alignment error" ); + } + + template + concurrent_queue(InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : + concurrent_queue(a) + { + for (; begin != end; ++begin) + push(*begin); + } + + concurrent_queue( std::initializer_list init, const allocator_type& alloc = allocator_type() ) : + concurrent_queue(init.begin(), init.end(), alloc) + {} + + concurrent_queue(const concurrent_queue& src, const allocator_type& a) : + concurrent_queue(a) + { + my_queue_representation->assign(*src.my_queue_representation, my_allocator, copy_construct_item); + } + + concurrent_queue(const concurrent_queue& src) : + concurrent_queue(queue_allocator_traits::select_on_container_copy_construction(src.get_allocator())) + { + my_queue_representation->assign(*src.my_queue_representation, my_allocator, copy_construct_item); + } + + // Move constructors + concurrent_queue(concurrent_queue&& src) : + concurrent_queue(std::move(src.my_allocator)) + { + internal_swap(src); + } + + concurrent_queue(concurrent_queue&& src, const allocator_type& a) : + concurrent_queue(a) + { + // checking that memory allocated by one instance of allocator can be deallocated + // with another + if (my_allocator == src.my_allocator) { + internal_swap(src); + } else { + // allocators are different => performing per-element move + my_queue_representation->assign(*src.my_queue_representation, my_allocator, move_construct_item); + src.clear(); + } + } + + // Destroy queue + ~concurrent_queue() { + clear(); + my_queue_representation->clear(my_allocator); + queue_allocator_traits::destroy(my_allocator, my_queue_representation); + r1::cache_aligned_deallocate(my_queue_representation); + } + + concurrent_queue& operator=( const 
concurrent_queue& other ) { + //TODO: implement support for std::allocator_traits::propagate_on_container_copy_assignment + if (my_queue_representation != other.my_queue_representation) { + clear(); + my_allocator = other.my_allocator; + my_queue_representation->assign(*other.my_queue_representation, my_allocator, copy_construct_item); + } + return *this; + } + + concurrent_queue& operator=( concurrent_queue&& other ) { + //TODO: implement support for std::allocator_traits::propagate_on_container_move_assignment + if (my_queue_representation != other.my_queue_representation) { + clear(); + if (my_allocator == other.my_allocator) { + internal_swap(other); + } else { + my_queue_representation->assign(*other.my_queue_representation, other.my_allocator, move_construct_item); + other.clear(); + my_allocator = std::move(other.my_allocator); + } + } + return *this; + } + + concurrent_queue& operator=( std::initializer_list init ) { + assign(init); + return *this; + } + + template + void assign( InputIterator first, InputIterator last ) { + concurrent_queue src(first, last); + clear(); + my_queue_representation->assign(*src.my_queue_representation, my_allocator, move_construct_item); + } + + void assign( std::initializer_list init ) { + assign(init.begin(), init.end()); + } + + void swap ( concurrent_queue& other ) { + //TODO: implement support for std::allocator_traits::propagate_on_container_swap + __TBB_ASSERT(my_allocator == other.my_allocator, "unequal allocators"); + internal_swap(other); + } + + // Enqueue an item at tail of queue. + void push(const T& value) { + internal_push(value); + } + + void push(T&& value) { + internal_push(std::move(value)); + } + + template + void emplace( Args&&... args ) { + internal_push(std::forward(args)...); + } + + // Attempt to dequeue an item from head of queue. + /** Does not wait for item to become available. + Returns true if successful; false otherwise. */ + bool try_pop( T& result ) { + return internal_try_pop(&result); + } + + // Return the number of items in the queue; thread unsafe + size_type unsafe_size() const { + std::ptrdiff_t size = my_queue_representation->size(); + return size < 0 ? 0 : size_type(size); + } + + // Equivalent to size()==0. + __TBB_nodiscard bool empty() const { + return my_queue_representation->empty(); + } + + // Clear the queue. not thread-safe. + void clear() { + my_queue_representation->clear(my_allocator); + } + + // Return allocator object + allocator_type get_allocator() const { return my_allocator; } + + //------------------------------------------------------------------------ + // The iterators are intended only for debugging. They are slow and not thread safe. + //------------------------------------------------------------------------ + + iterator unsafe_begin() { return concurrent_queue_iterator_provider::get(*this); } + iterator unsafe_end() { return iterator(); } + const_iterator unsafe_begin() const { return concurrent_queue_iterator_provider::get(*this); } + const_iterator unsafe_end() const { return const_iterator(); } + const_iterator unsafe_cbegin() const { return concurrent_queue_iterator_provider::get(*this); } + const_iterator unsafe_cend() const { return const_iterator(); } + +private: + void internal_swap(concurrent_queue& src) { + using std::swap; + swap(my_queue_representation, src.my_queue_representation); + } + + template + void internal_push( Args&&... 
args ) { + ticket_type k = my_queue_representation->tail_counter++; + my_queue_representation->choose(k).push(k, *my_queue_representation, my_allocator, std::forward(args)...); + } + + bool internal_try_pop( void* dst ) { + return internal_try_pop_impl(dst, *my_queue_representation, my_allocator).first; + } + + template + friend class concurrent_queue_iterator; + + static void copy_construct_item(T* location, const void* src) { + // TODO: use allocator_traits for copy construction + new (location) value_type(*static_cast(src)); + // queue_allocator_traits::construct(my_allocator, location, *static_cast(src)); + } + + static void move_construct_item(T* location, const void* src) { + // TODO: use allocator_traits for move construction + new (location) value_type(std::move(*static_cast(const_cast(src)))); + } + + queue_allocator_type my_allocator; + queue_representation_type* my_queue_representation; + + friend void swap( concurrent_queue& lhs, concurrent_queue& rhs ) { + lhs.swap(rhs); + } + + friend bool operator==( const concurrent_queue& lhs, const concurrent_queue& rhs ) { + return lhs.unsafe_size() == rhs.unsafe_size() && std::equal(lhs.unsafe_begin(), lhs.unsafe_end(), rhs.unsafe_begin()); + } + +#if !__TBB_CPP20_COMPARISONS_PRESENT + friend bool operator!=( const concurrent_queue& lhs, const concurrent_queue& rhs ) { + return !(lhs == rhs); + } +#endif // __TBB_CPP20_COMPARISONS_PRESENT +}; // class concurrent_queue + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_queue( It, It, Alloc = Alloc() ) +-> concurrent_queue, Alloc>; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +class concurrent_monitor; + +// The concurrent monitor tags for concurrent_bounded_queue. +static constexpr std::size_t cbq_slots_avail_tag = 0; +static constexpr std::size_t cbq_items_avail_tag = 1; +} // namespace d2 + + +namespace r1 { + class concurrent_monitor; + + TBB_EXPORT std::uint8_t* __TBB_EXPORTED_FUNC allocate_bounded_queue_rep( std::size_t queue_rep_size ); + TBB_EXPORT void __TBB_EXPORTED_FUNC deallocate_bounded_queue_rep( std::uint8_t* mem, std::size_t queue_rep_size ); + TBB_EXPORT void __TBB_EXPORTED_FUNC abort_bounded_queue_monitors( concurrent_monitor* monitors ); + TBB_EXPORT void __TBB_EXPORTED_FUNC notify_bounded_queue_monitor( concurrent_monitor* monitors, std::size_t monitor_tag + , std::size_t ticket ); + TBB_EXPORT void __TBB_EXPORTED_FUNC wait_bounded_queue_monitor( concurrent_monitor* monitors, std::size_t monitor_tag, + std::ptrdiff_t target, d1::delegate_base& predicate ); +} // namespace r1 + + +namespace d2 { +// A high-performance thread-safe blocking concurrent bounded queue. +// Supports boundedness and blocking semantics. +// Multiple threads may each push and pop concurrently. +// Assignment construction is not allowed. 
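+// Editor's sketch (illustrative, not part of the upstream header): a minimal
+// producer/consumer pair; pop() blocks until an item arrives or abort() is
+// called (assumes <thread> and oneapi/tbb/concurrent_queue.h are included):
+//
+//   tbb::concurrent_bounded_queue<int> q;
+//   std::thread producer([&q] { for (int i = 0; i < 3; ++i) q.push(i); });
+//   std::thread consumer([&q] {
+//       int v;
+//       for (int n = 0; n < 3; ++n) q.pop(v);  // blocks until available
+//   });
+//   producer.join(); consumer.join();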
+template > +class concurrent_bounded_queue { + using allocator_traits_type = tbb::detail::allocator_traits; + using queue_representation_type = concurrent_queue_rep; + using queue_allocator_type = typename allocator_traits_type::template rebind_alloc; + using queue_allocator_traits = tbb::detail::allocator_traits; + + template + void internal_wait(r1::concurrent_monitor* monitors, std::size_t monitor_tag, std::ptrdiff_t target, FuncType pred) { + d1::delegated_function func(pred); + r1::wait_bounded_queue_monitor(monitors, monitor_tag, target, func); + } +public: + using size_type = std::ptrdiff_t; + using value_type = T; + using reference = T&; + using const_reference = const T&; + using difference_type = std::ptrdiff_t; + + using allocator_type = Allocator; + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + using iterator = concurrent_queue_iterator; + using const_iterator = concurrent_queue_iterator ; + + concurrent_bounded_queue() : concurrent_bounded_queue(allocator_type()) {} + + explicit concurrent_bounded_queue( const allocator_type& a ) : + my_allocator(a), my_capacity(0), my_abort_counter(0), my_queue_representation(nullptr) + { + my_queue_representation = reinterpret_cast( + r1::allocate_bounded_queue_rep(sizeof(queue_representation_type))); + my_monitors = reinterpret_cast(my_queue_representation + 1); + queue_allocator_traits::construct(my_allocator, my_queue_representation); + my_capacity = std::size_t(-1) / (queue_representation_type::item_size > 1 ? queue_representation_type::item_size : 2); + + __TBB_ASSERT(is_aligned(my_queue_representation, max_nfs_size), "alignment error" ); + __TBB_ASSERT(is_aligned(&my_queue_representation->head_counter, max_nfs_size), "alignment error" ); + __TBB_ASSERT(is_aligned(&my_queue_representation->tail_counter, max_nfs_size), "alignment error" ); + __TBB_ASSERT(is_aligned(&my_queue_representation->array, max_nfs_size), "alignment error" ); + } + + template + concurrent_bounded_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type() ) : + concurrent_bounded_queue(a) + { + for (; begin != end; ++begin) + push(*begin); + } + + concurrent_bounded_queue( std::initializer_list init, const allocator_type& alloc = allocator_type() ): + concurrent_bounded_queue(init.begin(), init.end(), alloc) + {} + + concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a ) : + concurrent_bounded_queue(a) + { + my_queue_representation->assign(*src.my_queue_representation, my_allocator, copy_construct_item); + } + + concurrent_bounded_queue( const concurrent_bounded_queue& src ) : + concurrent_bounded_queue(queue_allocator_traits::select_on_container_copy_construction(src.get_allocator())) + { + my_queue_representation->assign(*src.my_queue_representation, my_allocator, copy_construct_item); + } + + // Move constructors + concurrent_bounded_queue( concurrent_bounded_queue&& src ) : + concurrent_bounded_queue(std::move(src.my_allocator)) + { + internal_swap(src); + } + + concurrent_bounded_queue( concurrent_bounded_queue&& src, const allocator_type& a ) : + concurrent_bounded_queue(a) + { + // checking that memory allocated by one instance of allocator can be deallocated + // with another + if (my_allocator == src.my_allocator) { + internal_swap(src); + } else { + // allocators are different => performing per-element move + my_queue_representation->assign(*src.my_queue_representation, my_allocator, move_construct_item); 
+ src.clear(); + } + } + + // Destroy queue + ~concurrent_bounded_queue() { + clear(); + my_queue_representation->clear(my_allocator); + queue_allocator_traits::destroy(my_allocator, my_queue_representation); + r1::deallocate_bounded_queue_rep(reinterpret_cast(my_queue_representation), + sizeof(queue_representation_type)); + } + + concurrent_bounded_queue& operator=( const concurrent_bounded_queue& other ) { + //TODO: implement support for std::allocator_traits::propagate_on_container_copy_assignment + if (my_queue_representation != other.my_queue_representation) { + clear(); + my_allocator = other.my_allocator; + my_queue_representation->assign(*other.my_queue_representation, my_allocator, copy_construct_item); + } + return *this; + } + + concurrent_bounded_queue& operator=( concurrent_bounded_queue&& other ) { + //TODO: implement support for std::allocator_traits::propagate_on_container_move_assignment + if (my_queue_representation != other.my_queue_representation) { + clear(); + if (my_allocator == other.my_allocator) { + internal_swap(other); + } else { + my_queue_representation->assign(*other.my_queue_representation, other.my_allocator, move_construct_item); + other.clear(); + my_allocator = std::move(other.my_allocator); + } + } + return *this; + } + + concurrent_bounded_queue& operator=( std::initializer_list init ) { + assign(init); + return *this; + } + + template + void assign( InputIterator first, InputIterator last ) { + concurrent_bounded_queue src(first, last); + clear(); + my_queue_representation->assign(*src.my_queue_representation, my_allocator, move_construct_item); + } + + void assign( std::initializer_list init ) { + assign(init.begin(), init.end()); + } + + void swap ( concurrent_bounded_queue& other ) { + //TODO: implement support for std::allocator_traits::propagate_on_container_swap + __TBB_ASSERT(my_allocator == other.my_allocator, "unequal allocators"); + internal_swap(other); + } + + // Enqueue an item at tail of queue. + void push( const T& value ) { + internal_push(value); + } + + void push( T&& value ) { + internal_push(std::move(value)); + } + + // Enqueue an item at tail of queue if queue is not already full. + // Does not wait for queue to become not full. + // Returns true if item is pushed; false if queue was already full. + bool try_push( const T& value ) { + return internal_push_if_not_full(value); + } + + bool try_push( T&& value ) { + return internal_push_if_not_full(std::move(value)); + } + + template + void emplace( Args&&... args ) { + internal_push(std::forward(args)...); + } + + template + bool try_emplace( Args&&... args ) { + return internal_push_if_not_full(std::forward(args)...); + } + + // Attempt to dequeue an item from head of queue. + void pop( T& result ) { + internal_pop(&result); + } + + /** Does not wait for item to become available. + Returns true if successful; false otherwise. */ + bool try_pop( T& result ) { + return internal_pop_if_present(&result); + } + + void abort() { + internal_abort(); + } + + // Return the number of items in the queue; thread unsafe + std::ptrdiff_t size() const { + return my_queue_representation->size(); + } + + void set_capacity( size_type new_capacity ) { + std::ptrdiff_t c = new_capacity < 0 ? infinite_capacity : new_capacity; + my_capacity = c; + } + + size_type capacity() const { + return my_capacity; + } + + // Equivalent to size()==0. + __TBB_nodiscard bool empty() const { + return my_queue_representation->empty(); + } + + // Clear the queue. not thread-safe. 
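+    // Editor's sketch (illustrative, not upstream): with a finite capacity,
+    // try_push reports back-pressure instead of blocking:
+    //
+    //   tbb::concurrent_bounded_queue<int> q;
+    //   q.set_capacity(1);
+    //   q.push(1);                  // fits
+    //   bool ok = q.try_push(2);    // false: the single slot is occupied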
+ void clear() { + my_queue_representation->clear(my_allocator); + } + + // Return allocator object + allocator_type get_allocator() const { return my_allocator; } + + //------------------------------------------------------------------------ + // The iterators are intended only for debugging. They are slow and not thread safe. + //------------------------------------------------------------------------ + + iterator unsafe_begin() { return concurrent_queue_iterator_provider::get(*this); } + iterator unsafe_end() { return iterator(); } + const_iterator unsafe_begin() const { return concurrent_queue_iterator_provider::get(*this); } + const_iterator unsafe_end() const { return const_iterator(); } + const_iterator unsafe_cbegin() const { return concurrent_queue_iterator_provider::get(*this); } + const_iterator unsafe_cend() const { return const_iterator(); } + +private: + void internal_swap( concurrent_bounded_queue& src ) { + std::swap(my_queue_representation, src.my_queue_representation); + std::swap(my_monitors, src.my_monitors); + } + + static constexpr std::ptrdiff_t infinite_capacity = std::ptrdiff_t(~size_type(0) / 2); + + template + void internal_push( Args&&... args ) { + unsigned old_abort_counter = my_abort_counter.load(std::memory_order_relaxed); + ticket_type ticket = my_queue_representation->tail_counter++; + std::ptrdiff_t target = ticket - my_capacity; + + if (static_cast(my_queue_representation->head_counter.load(std::memory_order_relaxed)) <= target) { // queue is full + auto pred = [&] { + if (my_abort_counter.load(std::memory_order_relaxed) != old_abort_counter) { + throw_exception(exception_id::user_abort); + } + + return static_cast(my_queue_representation->head_counter.load(std::memory_order_relaxed)) <= target; + }; + + try_call( [&] { + internal_wait(my_monitors, cbq_slots_avail_tag, target, pred); + }).on_exception( [&] { + my_queue_representation->choose(ticket).abort_push(ticket, *my_queue_representation, my_allocator); + }); + + } + __TBB_ASSERT((static_cast(my_queue_representation->head_counter.load(std::memory_order_relaxed)) > target), nullptr); + my_queue_representation->choose(ticket).push(ticket, *my_queue_representation, my_allocator, std::forward(args)...); + r1::notify_bounded_queue_monitor(my_monitors, cbq_items_avail_tag, ticket); + } + + template + bool internal_push_if_not_full( Args&&... args ) { + ticket_type ticket = my_queue_representation->tail_counter.load(std::memory_order_relaxed); + do { + if (static_cast(ticket - my_queue_representation->head_counter.load(std::memory_order_relaxed)) >= my_capacity) { + // Queue is full + return false; + } + // Queue had empty slot with ticket k when we looked. Attempt to claim that slot. + // Another thread claimed the slot, so retry. 
+ } while (!my_queue_representation->tail_counter.compare_exchange_strong(ticket, ticket + 1)); + + my_queue_representation->choose(ticket).push(ticket, *my_queue_representation, my_allocator, std::forward(args)...); + r1::notify_bounded_queue_monitor(my_monitors, cbq_items_avail_tag, ticket); + return true; + } + + void internal_pop( void* dst ) { + std::ptrdiff_t target; + // This loop is a single pop operation; abort_counter should not be re-read inside + unsigned old_abort_counter = my_abort_counter.load(std::memory_order_relaxed); + + do { + target = my_queue_representation->head_counter++; + if (static_cast(my_queue_representation->tail_counter.load(std::memory_order_relaxed)) <= target) { + auto pred = [&] { + if (my_abort_counter.load(std::memory_order_relaxed) != old_abort_counter) { + throw_exception(exception_id::user_abort); + } + + return static_cast(my_queue_representation->tail_counter.load(std::memory_order_relaxed)) <= target; + }; + + try_call( [&] { + internal_wait(my_monitors, cbq_items_avail_tag, target, pred); + }).on_exception( [&] { + my_queue_representation->head_counter--; + }); + } + __TBB_ASSERT(static_cast(my_queue_representation->tail_counter.load(std::memory_order_relaxed)) > target, nullptr); + } while (!my_queue_representation->choose(target).pop(dst, target, *my_queue_representation, my_allocator)); + + r1::notify_bounded_queue_monitor(my_monitors, cbq_slots_avail_tag, target); + } + + bool internal_pop_if_present( void* dst ) { + bool present{}; + ticket_type ticket{}; + std::tie(present, ticket) = internal_try_pop_impl(dst, *my_queue_representation, my_allocator); + + if (present) { + r1::notify_bounded_queue_monitor(my_monitors, cbq_slots_avail_tag, ticket); + } + return present; + } + + void internal_abort() { + ++my_abort_counter; + r1::abort_bounded_queue_monitors(my_monitors); + } + + static void copy_construct_item(T* location, const void* src) { + // TODO: use allocator_traits for copy construction + new (location) value_type(*static_cast(src)); + } + + static void move_construct_item(T* location, const void* src) { + // TODO: use allocator_traits for move construction + new (location) value_type(std::move(*static_cast(const_cast(src)))); + } + + template + friend class concurrent_queue_iterator; + + queue_allocator_type my_allocator; + std::ptrdiff_t my_capacity; + std::atomic my_abort_counter; + queue_representation_type* my_queue_representation; + + r1::concurrent_monitor* my_monitors; + + friend void swap( concurrent_bounded_queue& lhs, concurrent_bounded_queue& rhs ) { + lhs.swap(rhs); + } + + friend bool operator==( const concurrent_bounded_queue& lhs, const concurrent_bounded_queue& rhs ) { + return lhs.size() == rhs.size() && std::equal(lhs.unsafe_begin(), lhs.unsafe_end(), rhs.unsafe_begin()); + } + +#if !__TBB_CPP20_COMPARISONS_PRESENT + friend bool operator!=( const concurrent_bounded_queue& lhs, const concurrent_bounded_queue& rhs ) { + return !(lhs == rhs); + } +#endif // __TBB_CPP20_COMPARISONS_PRESENT +}; // class concurrent_bounded_queue + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template >> +concurrent_bounded_queue( It, It, Alloc = Alloc() ) +-> concurrent_bounded_queue, Alloc>; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +} //namespace d2 +} // namespace detail + +inline namespace v1 { + +using detail::d2::concurrent_queue; +using detail::d2::concurrent_bounded_queue; +using detail::r1::user_abort; +using detail::r1::bad_last_alloc; + +} // inline namespace 
v1 +} // namespace tbb + +#endif // __TBB_concurrent_queue_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_set.h b/src/tbb/include/oneapi/tbb/concurrent_set.h new file mode 100644 index 000000000..dd143311b --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_set.h @@ -0,0 +1,267 @@ +/* + Copyright (c) 2019-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_set_H +#define __TBB_concurrent_set_H + +#include "detail/_namespace_injection.h" +#include "detail/_concurrent_skip_list.h" +#include "tbb_allocator.h" +#include +#include + +namespace tbb { +namespace detail { +namespace d2 { + +template +struct set_traits { + static constexpr std::size_t max_level = RandomGenerator::max_level; + using random_level_generator_type = RandomGenerator; + using key_type = Key; + using value_type = key_type; + using compare_type = KeyCompare; + using value_compare = compare_type; + using reference = value_type&; + using const_reference = const value_type&; + using allocator_type = Allocator; + + static constexpr bool allow_multimapping = AllowMultimapping; + + static const key_type& get_key(const_reference val) { + return val; + } + + static value_compare value_comp(compare_type comp) { return comp; } +}; // struct set_traits + +template +class concurrent_multiset; + +template , typename Allocator = tbb::tbb_allocator> +class concurrent_set : public concurrent_skip_list, Allocator, false>> { + using base_type = concurrent_skip_list, Allocator, false>>; +public: + using key_type = Key; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Compare; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + + using node_type = typename base_type::node_type; + + // Include constructors of base_type + using base_type::base_type; + + // Required for implicit deduction guides + concurrent_set() = default; + concurrent_set( const concurrent_set& ) = default; + concurrent_set( const concurrent_set& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_set( concurrent_set&& ) = default; + concurrent_set( concurrent_set&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_set& operator=( const concurrent_set& ) = default; + concurrent_set& operator=( concurrent_set&& ) = default; + + concurrent_set& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + template + void merge(concurrent_set& source) { + this->internal_merge(source); + } + 
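+    // Editor's sketch (illustrative, not upstream): merge() transfers nodes
+    // whose keys are not already present and leaves the rest in `source`:
+    //
+    //   tbb::concurrent_set<int> a{1, 2};
+    //   tbb::concurrent_set<int> b{2, 3};
+    //   a.merge(b);   // a == {1, 2, 3}; b == {2}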
+ template + void merge(concurrent_set&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multiset& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multiset&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_set + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template >, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_set( It, It, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_set, Comp, Alloc>; + +template , + typename Alloc = tbb::tbb_allocator, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_set( std::initializer_list, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_set; + +template >, + typename = std::enable_if_t>> +concurrent_set( It, It, Alloc ) +-> concurrent_set, + std::less>, Alloc>; + +template >> +concurrent_set( std::initializer_list, Alloc ) +-> concurrent_set, Alloc>; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_set& lhs, + concurrent_set& rhs ) +{ + lhs.swap(rhs); +} + +template , typename Allocator = tbb::tbb_allocator> +class concurrent_multiset : public concurrent_skip_list, Allocator, true>> { + using base_type = concurrent_skip_list, Allocator, true>>; +public: + using key_type = Key; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Compare; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + + using node_type = typename base_type::node_type; + + // Include constructors of base_type; + using base_type::base_type; + + // Required for implicit deduction guides + concurrent_multiset() = default; + concurrent_multiset( const concurrent_multiset& ) = default; + concurrent_multiset( const concurrent_multiset& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_multiset( concurrent_multiset&& ) = default; + concurrent_multiset( concurrent_multiset&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_multiset& operator=( const concurrent_multiset& ) = default; + concurrent_multiset& operator=( concurrent_multiset&& ) = default; + + concurrent_multiset& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + template + void merge(concurrent_set& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_set&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multiset& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multiset&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_multiset + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template >, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_multiset( It, It, Comp = Comp(), 
Alloc = Alloc() ) +-> concurrent_multiset, Comp, Alloc>; + +template , + typename Alloc = tbb::tbb_allocator, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_multiset( std::initializer_list, Comp = Comp(), Alloc = Alloc() ) +-> concurrent_multiset; + +template >, + typename = std::enable_if_t>> +concurrent_multiset( It, It, Alloc ) +-> concurrent_multiset, std::less>, Alloc>; + +template >> +concurrent_multiset( std::initializer_list, Alloc ) +-> concurrent_multiset, Alloc>; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_multiset& lhs, + concurrent_multiset& rhs ) +{ + lhs.swap(rhs); +} + +} // namespace d2 +} // namespace detail + +inline namespace v1 { + +using detail::d2::concurrent_set; +using detail::d2::concurrent_multiset; +using detail::split; + +} // inline namespace v1 +} // namespace tbb + +#endif // __TBB_concurrent_set_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_unordered_map.h b/src/tbb/include/oneapi/tbb/concurrent_unordered_map.h new file mode 100644 index 000000000..9cade0a94 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_unordered_map.h @@ -0,0 +1,414 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_unordered_map_H +#define __TBB_concurrent_unordered_map_H + +#include "detail/_namespace_injection.h" +#include "detail/_concurrent_unordered_base.h" +#include "tbb_allocator.h" +#include + +namespace tbb { +namespace detail { +namespace d2 { + +template +struct concurrent_unordered_map_traits { + using value_type = std::pair; + using key_type = Key; + using allocator_type = Allocator; + using hash_compare_type = d1::hash_compare; + static constexpr bool allow_multimapping = AllowMultimapping; + + static constexpr const key_type& get_key( const value_type& value ) { + return value.first; + } +}; // struct concurrent_unordered_map_traits + +template +class concurrent_unordered_multimap; + +template , typename KeyEqual = std::equal_to, + typename Allocator = tbb::tbb_allocator> > +class concurrent_unordered_map + : public concurrent_unordered_base> +{ + using traits_type = concurrent_unordered_map_traits; + using base_type = concurrent_unordered_base; +public: + using key_type = typename base_type::key_type; + using mapped_type = T; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using hasher = typename base_type::hasher; + using key_equal = typename base_type::key_equal; + using allocator_type = typename base_type::allocator_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using local_iterator = typename 
base_type::local_iterator; + using const_local_iterator = typename base_type::const_local_iterator; + using node_type = typename base_type::node_type; + + // Include constructors of base type + using base_type::base_type; + + // Required for implicit deduction guides + concurrent_unordered_map() = default; + concurrent_unordered_map( const concurrent_unordered_map& ) = default; + concurrent_unordered_map( const concurrent_unordered_map& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_unordered_map( concurrent_unordered_map&& ) = default; + concurrent_unordered_map( concurrent_unordered_map&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_unordered_map& operator=( const concurrent_unordered_map& ) = default; + concurrent_unordered_map& operator=( concurrent_unordered_map&& ) = default; + + concurrent_unordered_map& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + // Observers + mapped_type& operator[]( const key_type& key ) { + iterator where = this->find(key); + + if (where == this->end()) { + where = this->emplace(std::piecewise_construct, std::forward_as_tuple(key), std::tuple<>()).first; + } + return where->second; + } + + mapped_type& operator[]( key_type&& key ) { + iterator where = this->find(key); + + if (where == this->end()) { + where = this->emplace(std::piecewise_construct, std::forward_as_tuple(std::move(key)), std::tuple<>()).first; + } + return where->second; + } + + mapped_type& at( const key_type& key ) { + iterator where = this->find(key); + + if (where == this->end()) { + throw_exception(exception_id::invalid_key); + } + return where->second; + } + + const mapped_type& at( const key_type& key ) const { + const_iterator where = this->find(key); + + if (where == this->end()) { + throw_exception(exception_id::out_of_range); + } + return where->second; + } + + using base_type::insert; + + template + typename std::enable_if::value, + std::pair>::type insert( P&& value ) { + return this->emplace(std::forward
<P>
(value)); + } + + template + typename std::enable_if::value, + iterator>::type insert( const_iterator hint, P&& value ) { + return this->emplace_hint(hint, std::forward
<P>
(value)); + } + + template + void merge( concurrent_unordered_map& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_map&& source ) { + this->internal_merge(std::move(source)); + } + + template + void merge( concurrent_unordered_multimap& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_multimap&& source ) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_unordered_map + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template >, + typename KeyEq = std::equal_to>, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_map( It, It, std::size_t = {}, + Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_map, iterator_mapped_t, Hash, KeyEq, Alloc>; + +template >, + typename KeyEq = std::equal_to>, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_map( std::initializer_list>, std::size_t = {}, + Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_map, T, Hash, KeyEq, Alloc>; + +template >, + typename = std::enable_if_t>> +concurrent_unordered_map( It, It, std::size_t, Alloc ) +-> concurrent_unordered_map, iterator_mapped_t, + std::hash>, + std::equal_to>, Alloc>; + +// TODO: investigate if a deduction guide for concurrent_unordered_map(It, It, Alloc) is needed + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_map( It, It, std::size_t, Hash, Alloc ) +-> concurrent_unordered_map, iterator_mapped_t, + Hash, std::equal_to>, Alloc>; + +template >> +concurrent_unordered_map( std::initializer_list>, std::size_t, Alloc ) +-> concurrent_unordered_map, T, std::hash>, + std::equal_to>, Alloc>; + +template >> +concurrent_unordered_map( std::initializer_list>, Alloc ) +-> concurrent_unordered_map, T, std::hash>, + std::equal_to>, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_map( std::initializer_list>, std::size_t, Hash, Alloc ) +-> concurrent_unordered_map, T, Hash, + std::equal_to>, Alloc>; + +#if __APPLE__ && __TBB_CLANG_VERSION == 100000 +// An explicit deduction guide is required for copy/move constructor with allocator for APPLE LLVM 10.0.0 +// due to an issue with generating an implicit deduction guide for these constructors under several strange surcumstances. 
+// Currently the issue takes place because the last template parameter for Traits is boolean, it should not affect the deduction guides +// The issue reproduces only on this version of the compiler +template +concurrent_unordered_map( concurrent_unordered_map, Alloc ) +-> concurrent_unordered_map; +#endif + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_unordered_map& lhs, + concurrent_unordered_map& rhs ) { + lhs.swap(rhs); +} + +template , typename KeyEqual = std::equal_to, + typename Allocator = tbb::tbb_allocator> > +class concurrent_unordered_multimap + : public concurrent_unordered_base> +{ + using traits_type = concurrent_unordered_map_traits; + using base_type = concurrent_unordered_base; +public: + using key_type = typename base_type::key_type; + using mapped_type = T; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using hasher = typename base_type::hasher; + using key_equal = typename base_type::key_equal; + using allocator_type = typename base_type::allocator_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using local_iterator = typename base_type::local_iterator; + using const_local_iterator = typename base_type::const_local_iterator; + using node_type = typename base_type::node_type; + + // Include constructors of base type + using base_type::base_type; + using base_type::insert; + + // Required for implicit deduction guides + concurrent_unordered_multimap() = default; + concurrent_unordered_multimap( const concurrent_unordered_multimap& ) = default; + concurrent_unordered_multimap( const concurrent_unordered_multimap& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_unordered_multimap( concurrent_unordered_multimap&& ) = default; + concurrent_unordered_multimap( concurrent_unordered_multimap&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_unordered_multimap& operator=( const concurrent_unordered_multimap& ) = default; + concurrent_unordered_multimap& operator=( concurrent_unordered_multimap&& ) = default; + + concurrent_unordered_multimap& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + template + typename std::enable_if::value, + std::pair>::type insert( P&& value ) { + return this->emplace(std::forward
<P>
(value)); + } + + template + typename std::enable_if::value, + iterator>::type insert( const_iterator hint, P&& value ) { + return this->emplace_hint(hint, std::forward(value)); + } + + template + void merge( concurrent_unordered_map& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_map&& source ) { + this->internal_merge(std::move(source)); + } + + template + void merge( concurrent_unordered_multimap& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_multimap&& source ) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_unordered_multimap + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template >, + typename KeyEq = std::equal_to>, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multimap( It, It, std::size_t = {}, Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_multimap, iterator_mapped_t, Hash, KeyEq, Alloc>; + +template >, + typename KeyEq = std::equal_to>, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multimap( std::initializer_list>, std::size_t = {}, + Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_multimap, T, Hash, KeyEq, Alloc>; + +template >, + typename = std::enable_if_t>> +concurrent_unordered_multimap( It, It, std::size_t, Alloc ) +-> concurrent_unordered_multimap, iterator_mapped_t, + std::hash>, + std::equal_to>, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multimap( It, It, std::size_t, Hash, Alloc ) +-> concurrent_unordered_multimap, iterator_mapped_t, Hash, + std::equal_to>, Alloc>; + +template >> +concurrent_unordered_multimap( std::initializer_list>, std::size_t, Alloc ) +-> concurrent_unordered_multimap, T, std::hash>, + std::equal_to>, Alloc>; + +template >> +concurrent_unordered_multimap( std::initializer_list>, Alloc ) +-> concurrent_unordered_multimap, T, std::hash>, + std::equal_to>, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multimap( std::initializer_list>, std::size_t, Hash, Alloc ) +-> concurrent_unordered_multimap, T, Hash, + std::equal_to>, Alloc>; + +#if __APPLE__ && __TBB_CLANG_VERSION == 100000 +// An explicit deduction guide is required for copy/move constructor with allocator for APPLE LLVM 10.0.0 +// due to an issue with generating an implicit deduction guide for these constructors under several strange surcumstances. 
+// Currently the issue takes place because the last template parameter for Traits is boolean, it should not affect the deduction guides +// The issue reproduces only on this version of the compiler +template +concurrent_unordered_multimap( concurrent_unordered_multimap, Alloc ) +-> concurrent_unordered_multimap; +#endif +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_unordered_multimap& lhs, + concurrent_unordered_multimap& rhs ) { + lhs.swap(rhs); +} + +} // namespace d2 +} // namespace detail + +inline namespace v1 { + +using detail::d2::concurrent_unordered_map; +using detail::d2::concurrent_unordered_multimap; +using detail::split; + +} // inline namespace v1 +} // namespace tbb + +#endif // __TBB_concurrent_unordered_map_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_unordered_set.h b/src/tbb/include/oneapi/tbb/concurrent_unordered_set.h new file mode 100644 index 000000000..b7e4b4caf --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_unordered_set.h @@ -0,0 +1,333 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_unordered_set_H +#define __TBB_concurrent_unordered_set_H + +#include "detail/_namespace_injection.h" +#include "detail/_concurrent_unordered_base.h" +#include "tbb_allocator.h" + +namespace tbb { +namespace detail { +namespace d2 { + +template +struct concurrent_unordered_set_traits { + using key_type = Key; + using value_type = key_type; + using allocator_type = Allocator; + using hash_compare_type = d1::hash_compare; + static constexpr bool allow_multimapping = AllowMultimapping; + + static constexpr const key_type& get_key( const value_type& value ) { + return value; + } +}; // class concurrent_unordered_set_traits + +template +class concurrent_unordered_multiset; + +template , typename KeyEqual = std::equal_to, + typename Allocator = tbb::tbb_allocator> +class concurrent_unordered_set + : public concurrent_unordered_base> +{ + using traits_type = concurrent_unordered_set_traits; + using base_type = concurrent_unordered_base; +public: + using key_type = typename base_type::key_type; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using hasher = typename base_type::hasher; + using key_equal = typename base_type::key_equal; + using allocator_type = typename base_type::allocator_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using local_iterator = typename base_type::local_iterator; + using const_local_iterator = typename base_type::const_local_iterator; + using node_type = typename base_type::node_type; + + // Include constructors of base_type; + using 
base_type::base_type; + + // Required for implicit deduction guides + concurrent_unordered_set() = default; + concurrent_unordered_set( const concurrent_unordered_set& ) = default; + concurrent_unordered_set( const concurrent_unordered_set& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_unordered_set( concurrent_unordered_set&& ) = default; + concurrent_unordered_set( concurrent_unordered_set&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_unordered_set& operator=( const concurrent_unordered_set& ) = default; + concurrent_unordered_set& operator=( concurrent_unordered_set&& ) = default; + + concurrent_unordered_set& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + template + void merge( concurrent_unordered_set& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_set&& source ) { + this->internal_merge(std::move(source)); + } + + template + void merge( concurrent_unordered_multiset& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_multiset&& source ) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_unordered_set + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template >, + typename KeyEq = std::equal_to>, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_set( It, It, std::size_t = {}, Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_set, Hash, KeyEq, Alloc>; + +template , + typename KeyEq = std::equal_to, + typename Alloc = tbb::tbb_allocator, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_set( std::initializer_list, std::size_t = {}, + Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_set; + +template >, + typename = std::enable_if_t>> +concurrent_unordered_set( It, It, std::size_t, Alloc ) +-> concurrent_unordered_set, std::hash>, + std::equal_to>, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_set( It, It, std::size_t, Hash, Alloc ) +-> concurrent_unordered_set, Hash, std::equal_to>, Alloc>; + +template >> +concurrent_unordered_set( std::initializer_list, std::size_t, Alloc ) +-> concurrent_unordered_set, std::equal_to, Alloc>; + +template >> +concurrent_unordered_set( std::initializer_list, Alloc ) +-> concurrent_unordered_set, std::equal_to, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_set( std::initializer_list, std::size_t, Hash, Alloc ) +-> concurrent_unordered_set, Alloc>; + +#if __APPLE__ && __TBB_CLANG_VERSION == 100000 +// An explicit deduction guide is required for copy/move constructor with allocator for APPLE LLVM 10.0.0 +// due to an issue with generating an implicit deduction guide for these constructors under several strange surcumstances. 
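`concurrent_unordered_set` supports concurrent insertion and traversal (erasure is not thread-safe in TBB's unordered containers), so it can be populated directly from parallel code. A small sketch, again assuming a standalone oneTBB build:

```cpp
#include <oneapi/tbb/concurrent_unordered_set.h>
#include <oneapi/tbb/parallel_for.h>
#include <cassert>

int main() {
    tbb::concurrent_unordered_set<int> s;
    // Many tasks may call insert() on the same container concurrently.
    tbb::parallel_for(0, 1000, [&s](int i) { s.insert(i % 100); });
    assert(s.size() == 100);
    return 0;
}
```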
+// Currently the issue takes place because the last template parameter for Traits is boolean, it should not affect the deduction guides +// The issue reproduces only on this version of the compiler +template +concurrent_unordered_set( concurrent_unordered_set, Alloc ) +-> concurrent_unordered_set; +#endif +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_unordered_set& lhs, + concurrent_unordered_set& rhs ) { + lhs.swap(rhs); +} + +template , typename KeyEqual = std::equal_to, + typename Allocator = tbb::tbb_allocator> +class concurrent_unordered_multiset + : public concurrent_unordered_base> +{ + using traits_type = concurrent_unordered_set_traits; + using base_type = concurrent_unordered_base; +public: + using key_type = typename base_type::key_type; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using hasher = typename base_type::hasher; + using key_equal = typename base_type::key_equal; + using allocator_type = typename base_type::allocator_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using local_iterator = typename base_type::local_iterator; + using const_local_iterator = typename base_type::const_local_iterator; + using node_type = typename base_type::node_type; + + // Include constructors of base_type; + using base_type::base_type; + + // Required for implicit deduction guides + concurrent_unordered_multiset() = default; + concurrent_unordered_multiset( const concurrent_unordered_multiset& ) = default; + concurrent_unordered_multiset( const concurrent_unordered_multiset& other, const allocator_type& alloc ) : base_type(other, alloc) {} + concurrent_unordered_multiset( concurrent_unordered_multiset&& ) = default; + concurrent_unordered_multiset( concurrent_unordered_multiset&& other, const allocator_type& alloc ) : base_type(std::move(other), alloc) {} + // Required to respect the rule of 5 + concurrent_unordered_multiset& operator=( const concurrent_unordered_multiset& ) = default; + concurrent_unordered_multiset& operator=( concurrent_unordered_multiset&& ) = default; + + concurrent_unordered_multiset& operator=( std::initializer_list il ) { + base_type::operator= (il); + return *this; + } + + template + void merge( concurrent_unordered_set& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_set&& source ) { + this->internal_merge(std::move(source)); + } + + template + void merge( concurrent_unordered_multiset& source ) { + this->internal_merge(source); + } + + template + void merge( concurrent_unordered_multiset&& source ) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_unordered_multiset + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template >, + typename KeyEq = std::equal_to>, + typename Alloc = tbb::tbb_allocator>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multiset( It, It, std::size_t = {}, Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_multiset, Hash, KeyEq, Alloc>; + +template , + typename KeyEq = 
std::equal_to, + typename Alloc = tbb::tbb_allocator, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multiset( std::initializer_list, std::size_t = {}, + Hash = Hash(), KeyEq = KeyEq(), Alloc = Alloc() ) +-> concurrent_unordered_multiset; + +template >, + typename = std::enable_if_t>> +concurrent_unordered_multiset( It, It, std::size_t, Alloc ) +-> concurrent_unordered_multiset, std::hash>, + std::equal_to>, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multiset( It, It, std::size_t, Hash, Alloc ) +-> concurrent_unordered_multiset, Hash, std::equal_to>, Alloc>; + +template >> +concurrent_unordered_multiset( std::initializer_list, std::size_t, Alloc ) +-> concurrent_unordered_multiset, std::equal_to, Alloc>; + +template >> +concurrent_unordered_multiset( std::initializer_list, Alloc ) +-> concurrent_unordered_multiset, std::equal_to, Alloc>; + +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_unordered_multiset( std::initializer_list, std::size_t, Hash, Alloc ) +-> concurrent_unordered_multiset, Alloc>; + +#if __APPLE__ && __TBB_CLANG_VERSION == 100000 +// An explicit deduction guide is required for copy/move constructor with allocator for APPLE LLVM 10.0.0 +// due to an issue with generating an implicit deduction guide for these constructors under several strange surcumstances. +// Currently the issue takes place because the last template parameter for Traits is boolean, it should not affect the deduction guides +// The issue reproduces only on this version of the compiler +template +concurrent_unordered_multiset( concurrent_unordered_multiset, Alloc ) +-> concurrent_unordered_multiset; +#endif +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +void swap( concurrent_unordered_multiset& lhs, + concurrent_unordered_multiset& rhs ) { + lhs.swap(rhs); +} + +} // namespace d2 +} // namespace detail + +inline namespace v1 { + +using detail::d2::concurrent_unordered_set; +using detail::d2::concurrent_unordered_multiset; +using detail::split; + +} // inline namespace v1 +} // namespace tbb + +#endif // __TBB_concurrent_unordered_set_H diff --git a/src/tbb/include/oneapi/tbb/concurrent_vector.h b/src/tbb/include/oneapi/tbb/concurrent_vector.h new file mode 100644 index 000000000..2a2cb1e4b --- /dev/null +++ b/src/tbb/include/oneapi/tbb/concurrent_vector.h @@ -0,0 +1,1129 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
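For C++17 callers, the deduction guides above allow the template arguments to be omitted when constructing from an iterator range or initializer list; the hash, equality, and allocator parameters fall back to their defaults. An illustrative sketch:

```cpp
#include <oneapi/tbb/concurrent_unordered_set.h>
#include <vector>

int main() {
    std::vector<int> v{1, 2, 2, 3};
    // Deduced as concurrent_unordered_multiset<int, std::hash<int>,
    // std::equal_to<int>, tbb::tbb_allocator<int>>.
    tbb::concurrent_unordered_multiset ms(v.begin(), v.end());
    return static_cast<int>(ms.count(2));  // 2
}
```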
+*/ + +#ifndef __TBB_concurrent_vector_H +#define __TBB_concurrent_vector_H + +#include "detail/_namespace_injection.h" +#include "detail/_utils.h" +#include "detail/_assert.h" +#include "detail/_allocator_traits.h" +#include "detail/_segment_table.h" +#include "detail/_containers_helpers.h" +#include "blocked_range.h" +#include "cache_aligned_allocator.h" + +#include +#include // std::move_if_noexcept +#include +#if __TBB_CPP20_COMPARISONS_PRESENT +#include +#endif + +namespace tbb { +namespace detail { +namespace d1 { + +template +class vector_iterator { + using vector_type = Vector; + +public: + using value_type = Value; + using size_type = typename vector_type::size_type; + using difference_type = typename vector_type::difference_type; + using pointer = value_type*; + using reference = value_type&; + using iterator_category = std::random_access_iterator_tag; + + template + friend vector_iterator operator+( typename vector_iterator::difference_type, const vector_iterator& ); + + template + friend typename vector_iterator::difference_type operator-( const vector_iterator&, const vector_iterator& ); + + template + friend bool operator==( const vector_iterator&, const vector_iterator& ); + + template + friend bool operator<( const vector_iterator&, const vector_iterator& ); + + template + friend class vector_iterator; + + template + friend class concurrent_vector; + +private: + vector_iterator( const vector_type& vector, size_type index, value_type* item = nullptr ) + : my_vector(const_cast(&vector)), my_index(index), my_item(item) + {} + +public: + vector_iterator() : my_vector(nullptr), my_index(~size_type(0)), my_item(nullptr) + {} + + vector_iterator( const vector_iterator& other ) + : my_vector(other.my_vector), my_index(other.my_index), my_item(other.my_item) + {} + + vector_iterator& operator=( const vector_iterator& other ) { + my_vector = other.my_vector; + my_index = other.my_index; + my_item = other.my_item; + return *this; + } + + vector_iterator operator+( difference_type offset ) const { + return vector_iterator(*my_vector, my_index + offset); + } + + vector_iterator& operator+=( difference_type offset ) { + my_index += offset; + my_item = nullptr; + return *this; + } + + vector_iterator operator-( difference_type offset ) const { + return vector_iterator(*my_vector, my_index - offset); + } + + vector_iterator& operator-=( difference_type offset ) { + my_index -= offset; + my_item = nullptr; + return *this; + } + + reference operator*() const { + value_type *item = my_item; + if (item == nullptr) { + item = &my_vector->internal_subscript(my_index); + } else { + __TBB_ASSERT(item == &my_vector->internal_subscript(my_index), "corrupt cache"); + } + return *item; + } + + pointer operator->() const { return &(operator*()); } + + reference operator[]( difference_type k ) const { + return my_vector->internal_subscript(my_index + k); + } + + vector_iterator& operator++() { + ++my_index; + if (my_item != nullptr) { + if (vector_type::is_first_element_in_segment(my_index)) { + // If the iterator crosses a segment boundary, the pointer become invalid + // as possibly next segment is in another memory location + my_item = nullptr; + } else { + ++my_item; + } + } + return *this; + } + + vector_iterator operator++(int) { + vector_iterator result = *this; + ++(*this); + return result; + } + + vector_iterator& operator--() { + __TBB_ASSERT(my_index > 0, "operator--() applied to iterator already at beginning of concurrent_vector"); + --my_index; + if (my_item != nullptr) { + if 
(vector_type::is_first_element_in_segment(my_index)) { + // If the iterator crosses a segment boundary, the pointer become invalid + // as possibly next segment is in another memory location + my_item = nullptr; + } else { + --my_item; + } + } + return *this; + } + + vector_iterator operator--(int) { + vector_iterator result = *this; + --(*this); + return result; + } + +private: + // concurrent_vector over which we are iterating. + vector_type* my_vector; + + // Index into the vector + size_type my_index; + + // Caches my_vector *it; + // If my_item == nullptr cached value is not available use internal_subscript(my_index) + mutable value_type* my_item; +}; // class vector_iterator + +template +vector_iterator operator+( typename vector_iterator::difference_type offset, + const vector_iterator& v ) +{ + return vector_iterator(*v.my_vector, v.my_index + offset); +} + +template +typename vector_iterator::difference_type operator-( const vector_iterator& i, + const vector_iterator& j ) +{ + using difference_type = typename vector_iterator::difference_type; + return static_cast(i.my_index) - static_cast(j.my_index); +} + +template +bool operator==( const vector_iterator& i, const vector_iterator& j ) { + return i.my_vector == j.my_vector && i.my_index == j.my_index; +} + +template +bool operator!=( const vector_iterator& i, const vector_iterator& j ) { + return !(i == j); +} + +template +bool operator<( const vector_iterator& i, const vector_iterator& j ) { + return i.my_index < j.my_index; +} + +template +bool operator>( const vector_iterator& i, const vector_iterator& j ) { + return j < i; +} + +template +bool operator>=( const vector_iterator& i, const vector_iterator& j ) { + return !(i < j); +} + +template +bool operator<=( const vector_iterator& i, const vector_iterator& j ) { + return !(j < i); +} + +static constexpr std::size_t embedded_table_num_segments = 3; + +template > +class concurrent_vector + : private segment_table, embedded_table_num_segments> +{ + using self_type = concurrent_vector; + using base_type = segment_table; + + friend class segment_table; + + template + class generic_range_type : public tbb::blocked_range { + using base_type = tbb::blocked_range; + public: + using value_type = T; + using reference = T&; + using const_reference = const T&; + using iterator = Iterator; + using difference_type = std::ptrdiff_t; + + using base_type::base_type; + + template + generic_range_type( const generic_range_type& r) : blocked_range(r.begin(), r.end(), r.grainsize()) {} + generic_range_type( generic_range_type& r, split ) : blocked_range(r, split()) {} + }; // class generic_range_type + + static_assert(std::is_same::value, + "value_type of the container must be the same as its allocator's"); + using allocator_traits_type = tbb::detail::allocator_traits; + // Segment table for concurrent_vector can be extended + static constexpr bool allow_table_extending = true; + static constexpr bool is_noexcept_assignment = allocator_traits_type::propagate_on_container_move_assignment::value || + allocator_traits_type::is_always_equal::value; + static constexpr bool is_noexcept_swap = allocator_traits_type::propagate_on_container_swap::value || + allocator_traits_type::is_always_equal::value; + +public: + using value_type = T; + using allocator_type = Allocator; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using reference = value_type&; + using const_reference = const value_type&; + + using pointer = typename allocator_traits_type::pointer; + using const_pointer 
= typename allocator_traits_type::const_pointer; + + using iterator = vector_iterator; + using const_iterator = vector_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + using range_type = generic_range_type; + using const_range_type = generic_range_type; + + concurrent_vector() : concurrent_vector(allocator_type()) {} + + explicit concurrent_vector( const allocator_type& alloc ) noexcept + : base_type(alloc) + {} + + explicit concurrent_vector( size_type count, const value_type& value, + const allocator_type& alloc = allocator_type() ) + : concurrent_vector(alloc) + { + try_call( [&] { + grow_by(count, value); + } ).on_exception( [&] { + base_type::clear(); + }); + } + + explicit concurrent_vector( size_type count, const allocator_type& alloc = allocator_type() ) + : concurrent_vector(alloc) + { + try_call( [&] { + grow_by(count); + } ).on_exception( [&] { + base_type::clear(); + }); + } + + template + concurrent_vector( InputIterator first, InputIterator last, const allocator_type& alloc = allocator_type() ) + : concurrent_vector(alloc) + { + try_call( [&] { + grow_by(first, last); + } ).on_exception( [&] { + base_type::clear(); + }); + } + + concurrent_vector( const concurrent_vector& other ) + : base_type(segment_table_allocator_traits::select_on_container_copy_construction(other.get_allocator())) + { + try_call( [&] { + grow_by(other.begin(), other.end()); + } ).on_exception( [&] { + base_type::clear(); + }); + } + + concurrent_vector( const concurrent_vector& other, const allocator_type& alloc ) + : base_type(other, alloc) {} + + concurrent_vector(concurrent_vector&& other) noexcept + : base_type(std::move(other)) + {} + + concurrent_vector( concurrent_vector&& other, const allocator_type& alloc ) + : base_type(std::move(other), alloc) + {} + + concurrent_vector( std::initializer_list init, + const allocator_type& alloc = allocator_type() ) + : concurrent_vector(init.begin(), init.end(), alloc) + {} + + ~concurrent_vector() {} + + // Assignment + concurrent_vector& operator=( const concurrent_vector& other ) { + base_type::operator=(other); + return *this; + } + + concurrent_vector& operator=( concurrent_vector&& other ) noexcept(is_noexcept_assignment) { + base_type::operator=(std::move(other)); + return *this; + } + + concurrent_vector& operator=( std::initializer_list init ) { + assign(init); + return *this; + } + + void assign( size_type count, const value_type& value ) { + destroy_elements(); + grow_by(count, value); + } + + template + typename std::enable_if::value, void>::type + assign( InputIterator first, InputIterator last ) { + destroy_elements(); + grow_by(first, last); + } + + void assign( std::initializer_list init ) { + destroy_elements(); + assign(init.begin(), init.end()); + } + + // Concurrent growth + iterator grow_by( size_type delta ) { + return internal_grow_by_delta(delta); + } + + iterator grow_by( size_type delta, const value_type& value ) { + return internal_grow_by_delta(delta, value); + } + + template + typename std::enable_if::value, iterator>::type + grow_by( ForwardIterator first, ForwardIterator last ) { + auto delta = std::distance(first, last); + return internal_grow_by_delta(delta, first, last); + } + + iterator grow_by( std::initializer_list init ) { + return grow_by(init.begin(), init.end()); + } + + iterator grow_to_at_least( size_type n ) { + return internal_grow_to_at_least(n); + } + iterator grow_to_at_least( size_type n, const value_type& value ) { + return 
internal_grow_to_at_least(n, value); + } + + iterator push_back( const value_type& item ) { + return internal_emplace_back(item); + } + + iterator push_back( value_type&& item ) { + return internal_emplace_back(std::move(item)); + } + + template + iterator emplace_back( Args&&... args ) { + return internal_emplace_back(std::forward(args)...); + } + + // Items access + reference operator[]( size_type index ) { + return internal_subscript(index); + } + const_reference operator[]( size_type index ) const { + return internal_subscript(index); + } + + reference at( size_type index ) { + return internal_subscript_with_exceptions(index); + } + const_reference at( size_type index ) const { + return internal_subscript_with_exceptions(index); + } + + // Get range for iterating with parallel algorithms + range_type range( size_t grainsize = 1 ) { + return range_type(begin(), end(), grainsize); + } + + // Get const range for iterating with parallel algorithms + const_range_type range( size_t grainsize = 1 ) const { + return const_range_type(begin(), end(), grainsize); + } + + reference front() { + return internal_subscript(0); + } + + const_reference front() const { + return internal_subscript(0); + } + + reference back() { + return internal_subscript(size() - 1); + } + + const_reference back() const { + return internal_subscript(size() - 1); + } + + // Iterators + iterator begin() { return iterator(*this, 0); } + const_iterator begin() const { return const_iterator(*this, 0); } + const_iterator cbegin() const { return const_iterator(*this, 0); } + + iterator end() { return iterator(*this, size()); } + const_iterator end() const { return const_iterator(*this, size()); } + const_iterator cend() const { return const_iterator(*this, size()); } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); } + + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); } + + allocator_type get_allocator() const { + return base_type::get_allocator(); + } + + // Storage + bool empty() const noexcept { + return 0 == size(); + } + + size_type size() const noexcept { + return std::min(this->my_size.load(std::memory_order_acquire), capacity()); + } + + size_type max_size() const noexcept { + return allocator_traits_type::max_size(base_type::get_allocator()); + } + + size_type capacity() const noexcept { + return base_type::capacity(); + } + + void reserve( size_type n ) { + if (n == 0) return; + + if (n > max_size()) { + tbb::detail::throw_exception(exception_id::reservation_length_error); + } + + this->assign_first_block_if_necessary(this->segment_index_of(n - 1) + 1); + base_type::reserve(n); + } + + void resize( size_type n ) { + internal_resize(n); + } + + void resize( size_type n, const value_type& val ) { + internal_resize(n, val); + } + + void shrink_to_fit() { + internal_compact(); + } + + void swap(concurrent_vector& other) noexcept(is_noexcept_swap) { + base_type::swap(other); + } + + void clear() { + destroy_elements(); + } + +private: + using segment_type = typename base_type::segment_type; + using segment_table_type = typename base_type::segment_table_type; + using segment_table_allocator_traits = typename base_type::segment_table_allocator_traits; + using 
segment_index_type = typename base_type::segment_index_type; + + using segment_element_type = typename base_type::value_type; + using segment_element_allocator_type = typename allocator_traits_type::template rebind_alloc; + using segment_element_allocator_traits = tbb::detail::allocator_traits; + + segment_table_type allocate_long_table( const typename base_type::atomic_segment* embedded_table, size_type start_index ) { + __TBB_ASSERT(start_index <= this->embedded_table_size, "Start index out of embedded table"); + + // If other threads are trying to set pointers in the short segment, wait for them to finish their + // assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it + for (segment_index_type i = 0; this->segment_base(i) < start_index; ++i) { + spin_wait_while_eq(embedded_table[i], segment_type(nullptr)); + } + + // It is possible that the table was extend by a thread allocating first_block, need to check this. + if (this->get_table() != embedded_table) { + return nullptr; + } + + // Allocate long segment table and fill with null pointers + segment_table_type new_segment_table = segment_table_allocator_traits::allocate(base_type::get_allocator(), this->pointers_per_long_table); + // Copy segment pointers from the embedded table + for (size_type segment_index = 0; segment_index < this->pointers_per_embedded_table; ++segment_index) { + segment_table_allocator_traits::construct(base_type::get_allocator(), &new_segment_table[segment_index], + embedded_table[segment_index].load(std::memory_order_relaxed)); + } + for (size_type segment_index = this->pointers_per_embedded_table; segment_index < this->pointers_per_long_table; ++segment_index) { + segment_table_allocator_traits::construct(base_type::get_allocator(), &new_segment_table[segment_index], nullptr); + } + + return new_segment_table; + } + + // create_segment function is required by the segment_table base class + segment_type create_segment( segment_table_type table, segment_index_type seg_index, size_type index ) { + size_type first_block = this->my_first_block.load(std::memory_order_relaxed); + // First block allocation + if (seg_index < first_block) { + // If 0 segment is already allocated, then it remains to wait until the segments are filled to requested + if (table[0].load(std::memory_order_acquire) != nullptr) { + spin_wait_while_eq(table[seg_index], segment_type(nullptr)); + return nullptr; + } + + segment_element_allocator_type segment_allocator(base_type::get_allocator()); + segment_type new_segment = nullptr; + size_type first_block_size = this->segment_size(first_block); + try_call( [&] { + new_segment = segment_element_allocator_traits::allocate(segment_allocator, first_block_size); + } ).on_exception( [&] { + segment_type disabled_segment = nullptr; + if (table[0].compare_exchange_strong(disabled_segment, this->segment_allocation_failure_tag)) { + size_type end_segment = table == this->my_embedded_table ? this->pointers_per_embedded_table : first_block; + for (size_type i = 1; i < end_segment; ++i) { + table[i].store(this->segment_allocation_failure_tag, std::memory_order_release); + } + } + }); + + segment_type disabled_segment = nullptr; + if (table[0].compare_exchange_strong(disabled_segment, new_segment)) { + this->extend_table_if_necessary(table, 0, first_block_size); + for (size_type i = 1; i < first_block; ++i) { + table[i].store(new_segment, std::memory_order_release); + } + + // Other threads can wait on a snapshot of an embedded table, need to fill it. 
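All of this segment machinery backs a small user-facing growth API: `push_back()`, `emplace_back()`, `grow_by()`, and `grow_to_at_least()` are safe to call concurrently, and each returns an iterator to the first element it appended. A usage sketch (standalone oneTBB assumed):

```cpp
#include <oneapi/tbb/concurrent_vector.h>
#include <oneapi/tbb/parallel_for.h>

int main() {
    tbb::concurrent_vector<int> v;
    // Concurrent appends from many tasks; no external locking required.
    tbb::parallel_for(0, 100, [&v](int i) { v.push_back(i); });

    auto it = v.grow_by(3, -1);          // appends three copies of -1 atomically
    (void)it;
    return static_cast<int>(v.size());   // 103
}
```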
+ for (size_type i = 1; i < first_block && i < this->pointers_per_embedded_table; ++i) { + this->my_embedded_table[i].store(new_segment, std::memory_order_release); + } + } else if (new_segment != this->segment_allocation_failure_tag) { + // Deallocate the memory + segment_element_allocator_traits::deallocate(segment_allocator, new_segment, first_block_size); + // 0 segment is already allocated, then it remains to wait until the segments are filled to requested + spin_wait_while_eq(table[seg_index], segment_type(nullptr)); + } + } else { + size_type offset = this->segment_base(seg_index); + if (index == offset) { + __TBB_ASSERT(table[seg_index].load(std::memory_order_relaxed) == nullptr, "Only this thread can enable this segment"); + segment_element_allocator_type segment_allocator(base_type::get_allocator()); + segment_type new_segment = this->segment_allocation_failure_tag; + try_call( [&] { + new_segment = segment_element_allocator_traits::allocate(segment_allocator,this->segment_size(seg_index)); + // Shift base address to simplify access by index + new_segment -= this->segment_base(seg_index); + } ).on_completion( [&] { + table[seg_index].store(new_segment, std::memory_order_release); + }); + } else { + spin_wait_while_eq(table[seg_index], segment_type(nullptr)); + } + } + return nullptr; + } + + // Returns the number of elements in the segment to be destroy + size_type number_of_elements_in_segment( segment_index_type seg_index ) { + size_type curr_vector_size = this->my_size.load(std::memory_order_relaxed); + size_type curr_segment_base = this->segment_base(seg_index); + + if (seg_index == 0) { + return std::min(curr_vector_size, this->segment_size(seg_index)); + } else { + // Perhaps the segment is allocated, but there are no elements in it. + if (curr_vector_size < curr_segment_base) { + return 0; + } + return curr_segment_base * 2 > curr_vector_size ? curr_vector_size - curr_segment_base : curr_segment_base; + } + } + + segment_type nullify_segment( segment_table_type table, size_type segment_index ) { + segment_type target_segment = table[segment_index].load(std::memory_order_relaxed); + if (segment_index >= this->my_first_block) { + table[segment_index].store(nullptr, std::memory_order_relaxed); + } else { + if (segment_index == 0) { + for (size_type i = 0; i < this->my_first_block; ++i) { + table[i].store(nullptr, std::memory_order_relaxed); + } + } + } + + return target_segment; + } + + void deallocate_segment( segment_type address, segment_index_type seg_index ) { + segment_element_allocator_type segment_allocator(base_type::get_allocator()); + size_type first_block = this->my_first_block.load(std::memory_order_relaxed); + if (seg_index >= first_block) { + segment_element_allocator_traits::deallocate(segment_allocator, address, this->segment_size(seg_index)); + } + else if (seg_index == 0) { + size_type elements_to_deallocate = first_block > 0 ? 
this->segment_size(first_block) : this->segment_size(0); + segment_element_allocator_traits::deallocate(segment_allocator, address, elements_to_deallocate); + } + } + + // destroy_segment function is required by the segment_table base class + void destroy_segment( segment_type address, segment_index_type seg_index ) { + size_type elements_to_destroy = number_of_elements_in_segment(seg_index); + segment_element_allocator_type segment_allocator(base_type::get_allocator()); + + for (size_type i = 0; i < elements_to_destroy; ++i) { + segment_element_allocator_traits::destroy(segment_allocator, address + i); + } + + deallocate_segment(address, seg_index); + } + + // copy_segment function is required by the segment_table base class + void copy_segment( segment_index_type seg_index, segment_type from, segment_type to ) { + size_type i = 0; + try_call( [&] { + for (; i != number_of_elements_in_segment(seg_index); ++i) { + segment_table_allocator_traits::construct(base_type::get_allocator(), to + i, from[i]); + } + } ).on_exception( [&] { + // Zero-initialize items left not constructed after the exception + zero_unconstructed_elements(this->get_segment(seg_index) + i, this->segment_size(seg_index) - i); + + segment_index_type last_segment = this->segment_index_of(this->my_size.load(std::memory_order_relaxed)); + auto table = this->get_table(); + for (segment_index_type j = seg_index + 1; j != last_segment; ++j) { + auto curr_segment = table[j].load(std::memory_order_relaxed); + if (curr_segment) { + zero_unconstructed_elements(curr_segment + this->segment_base(j), this->segment_size(j)); + } + } + this->my_size.store(this->segment_size(seg_index) + i, std::memory_order_relaxed); + }); + } + + // move_segment function is required by the segment_table base class + void move_segment( segment_index_type seg_index, segment_type from, segment_type to ) { + size_type i = 0; + try_call( [&] { + for (; i != number_of_elements_in_segment(seg_index); ++i) { + segment_table_allocator_traits::construct(base_type::get_allocator(), to + i, std::move(from[i])); + } + } ).on_exception( [&] { + // Zero-initialize items left not constructed after the exception + zero_unconstructed_elements(this->get_segment(seg_index) + i, this->segment_size(seg_index) - i); + + segment_index_type last_segment = this->segment_index_of(this->my_size.load(std::memory_order_relaxed)); + auto table = this->get_table(); + for (segment_index_type j = seg_index + 1; j != last_segment; ++j) { + auto curr_segment = table[j].load(std::memory_order_relaxed); + if (curr_segment) { + zero_unconstructed_elements(curr_segment + this->segment_base(j), this->segment_size(j)); + } + } + this->my_size.store(this->segment_size(seg_index) + i, std::memory_order_relaxed); + }); + } + + static constexpr bool is_first_element_in_segment( size_type index ) { + // An element is the first in a segment if its index is equal to a power of two + return is_power_of_two_at_least(index, 2); + } + + const_reference internal_subscript( size_type index ) const { + return const_cast(this)->internal_subscript(index); + } + + reference internal_subscript( size_type index ) { + __TBB_ASSERT(index < this->my_size.load(std::memory_order_relaxed), "Invalid subscript index"); + return base_type::template internal_subscript(index); + } + + const_reference internal_subscript_with_exceptions( size_type index ) const { + return const_cast(this)->internal_subscript_with_exceptions(index); + } + + reference internal_subscript_with_exceptions( size_type index ) { + if (index >= 
this->my_size.load(std::memory_order_acquire)) { + tbb::detail::throw_exception(exception_id::out_of_range); + } + + segment_table_type table = this->my_segment_table.load(std::memory_order_acquire); + + size_type seg_index = this->segment_index_of(index); + if (base_type::number_of_segments(table) < seg_index) { + tbb::detail::throw_exception(exception_id::out_of_range); + } + + if (table[seg_index] <= this->segment_allocation_failure_tag) { + tbb::detail::throw_exception(exception_id::out_of_range); + } + + return base_type::template internal_subscript(index); + } + + static void zero_unconstructed_elements( pointer start, size_type count ) { + std::memset(static_cast(start), 0, count * sizeof(value_type)); + } + + template + iterator internal_emplace_back( Args&&... args ) { + size_type old_size = this->my_size++; + this->assign_first_block_if_necessary(default_first_block_size); + auto element_address = &base_type::template internal_subscript(old_size); + + // try_call API is not convenient here due to broken + // variadic capture on GCC 4.8.5 + auto value_guard = make_raii_guard([&] { + zero_unconstructed_elements(element_address, /*count =*/1); + }); + + segment_table_allocator_traits::construct(base_type::get_allocator(), element_address, std::forward(args)...); + value_guard.dismiss(); + return iterator(*this, old_size, element_address); + } + + template + void internal_loop_construct( segment_table_type table, size_type start_idx, size_type end_idx, const Args&... args ) { + static_assert(sizeof...(Args) < 2, "Too many parameters"); + for (size_type idx = start_idx; idx < end_idx; ++idx) { + auto element_address = &base_type::template internal_subscript(idx); + // try_call API is not convenient here due to broken + // variadic capture on GCC 4.8.5 + auto value_guard = make_raii_guard( [&] { + segment_index_type last_allocated_segment = this->find_last_allocated_segment(table); + size_type segment_size = this->segment_size(last_allocated_segment); + end_idx = end_idx < segment_size ? end_idx : segment_size; + for (size_type i = idx; i < end_idx; ++i) { + zero_unconstructed_elements(&this->internal_subscript(i), /*count =*/1); + } + }); + segment_table_allocator_traits::construct(base_type::get_allocator(), element_address, args...); + value_guard.dismiss(); + } + } + + template + void internal_loop_construct( segment_table_type table, size_type start_idx, size_type end_idx, ForwardIterator first, ForwardIterator ) { + for (size_type idx = start_idx; idx < end_idx; ++idx) { + auto element_address = &base_type::template internal_subscript(idx); + try_call( [&] { + segment_table_allocator_traits::construct(base_type::get_allocator(), element_address, *first++); + } ).on_exception( [&] { + segment_index_type last_allocated_segment = this->find_last_allocated_segment(table); + size_type segment_size = this->segment_size(last_allocated_segment); + end_idx = end_idx < segment_size ? end_idx : segment_size; + for (size_type i = idx; i < end_idx; ++i) { + zero_unconstructed_elements(&this->internal_subscript(i), /*count =*/1); + } + }); + } + } + + template + iterator internal_grow( size_type start_idx, size_type end_idx, const Args&... 
args ) { + this->assign_first_block_if_necessary(this->segment_index_of(end_idx - 1) + 1); + size_type seg_index = this->segment_index_of(end_idx - 1); + segment_table_type table = this->get_table(); + this->extend_table_if_necessary(table, start_idx, end_idx); + + if (seg_index > this->my_first_block.load(std::memory_order_relaxed)) { + // So that other threads be able to work with the last segment of grow_by, allocate it immediately. + // If the last segment is not less than the first block + if (table[seg_index].load(std::memory_order_relaxed) == nullptr) { + size_type first_element = this->segment_base(seg_index); + if (first_element >= start_idx && first_element < end_idx) { + segment_type segment = table[seg_index].load(std::memory_order_relaxed); + base_type::enable_segment(segment, table, seg_index, first_element); + } + } + } + + internal_loop_construct(table, start_idx, end_idx, args...); + + return iterator(*this, start_idx, &base_type::template internal_subscript(start_idx)); + } + + + template + iterator internal_grow_by_delta( size_type delta, const Args&... args ) { + if (delta == size_type(0)) { + return end(); + } + size_type start_idx = this->my_size.fetch_add(delta); + size_type end_idx = start_idx + delta; + + return internal_grow(start_idx, end_idx, args...); + } + + template + iterator internal_grow_to_at_least( size_type new_size, const Args&... args ) { + size_type old_size = this->my_size.load(std::memory_order_relaxed); + if (new_size == size_type(0)) return iterator(*this, 0); + while (old_size < new_size && !this->my_size.compare_exchange_weak(old_size, new_size)) + {} + + int delta = static_cast(new_size) - static_cast(old_size); + if (delta > 0) { + return internal_grow(old_size, new_size, args...); + } + + size_type end_segment = this->segment_index_of(new_size - 1); + + // Check/wait for segments allocation completes + if (end_segment >= this->pointers_per_embedded_table && + this->get_table() == this->my_embedded_table) + { + spin_wait_while_eq(this->my_segment_table, this->my_embedded_table); + } + + for (segment_index_type seg_idx = 0; seg_idx <= end_segment; ++seg_idx) { + if (this->get_table()[seg_idx].load(std::memory_order_relaxed) == nullptr) { + atomic_backoff backoff(true); + while (this->get_table()[seg_idx].load(std::memory_order_relaxed) == nullptr) { + backoff.pause(); + } + } + } + + #if TBB_USE_DEBUG + size_type cap = capacity(); + __TBB_ASSERT( cap >= new_size, nullptr); + #endif + return iterator(*this, size()); + } + + template + void internal_resize( size_type n, const Args&... 
args ) { + if (n == 0) { + clear(); + return; + } + + size_type old_size = this->my_size.load(std::memory_order_acquire); + if (n > old_size) { + reserve(n); + grow_to_at_least(n, args...); + } else { + if (old_size == n) { + return; + } + size_type last_segment = this->segment_index_of(old_size - 1); + // Delete segments + for (size_type seg_idx = this->segment_index_of(n - 1) + 1; seg_idx <= last_segment; ++seg_idx) { + this->delete_segment(seg_idx); + } + + // If n > segment_size(n) => we need to destroy all of the items in the first segment + // Otherwise, we need to destroy only items with the index < n + size_type n_segment = this->segment_index_of(n - 1); + size_type last_index_to_destroy = std::min(this->segment_base(n_segment) + this->segment_size(n_segment), old_size); + // Destroy elements in curr segment + for (size_type idx = n; idx < last_index_to_destroy; ++idx) { + segment_table_allocator_traits::destroy(base_type::get_allocator(), &base_type::template internal_subscript(idx)); + } + this->my_size.store(n, std::memory_order_release); + } + } + + void destroy_elements() { + allocator_type alloc(base_type::get_allocator()); + for (size_type i = 0; i < this->my_size.load(std::memory_order_relaxed); ++i) { + allocator_traits_type::destroy(alloc, &base_type::template internal_subscript(i)); + } + this->my_size.store(0, std::memory_order_relaxed); + } + + static bool incompact_predicate( size_type size ) { + // memory page size + const size_type page_size = 4096; + return size < page_size || ((size - 1) % page_size < page_size / 2 && size < page_size * 128); + } + + void internal_compact() { + const size_type curr_size = this->my_size.load(std::memory_order_relaxed); + segment_table_type table = this->get_table(); + const segment_index_type k_end = this->find_last_allocated_segment(table); // allocated segments + const segment_index_type k_stop = curr_size ? this->segment_index_of(curr_size - 1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;.. 
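The `k_stop` comment above encodes the container's power-of-two segment layout: segment 0 holds indices [0, 2) and segment k >= 1 holds [2^k, 2^(k+1)), so sizes 1-2 need one segment, 3-4 need two, 5-8 need three, and so on. A distilled sketch of that index arithmetic (illustrative only, not the internal `segment_table` API):

```cpp
#include <cassert>
#include <cstddef>

// Index of the segment holding 'index' under the layout described above
// (segment 0 covers [0,2); segment k >= 1 covers [2^k, 2^(k+1))).
std::size_t segment_index_of(std::size_t index) {
    std::size_t k = 0;
    while ((std::size_t(1) << (k + 1)) <= (index | 1)) ++k;
    return k;
}

int main() {
    assert(segment_index_of(0) == 0 && segment_index_of(1) == 0);
    assert(segment_index_of(2) == 1 && segment_index_of(3) == 1);
    assert(segment_index_of(4) == 2 && segment_index_of(7) == 2);
    assert(segment_index_of(8) == 3);
    return 0;
}
```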
+ const segment_index_type first_block = this->my_first_block; // number of merged segments, getting values from atomics + + segment_index_type k = first_block; + if (k_stop < first_block) { + k = k_stop; + } + else { + while (k < k_stop && incompact_predicate(this->segment_size(k) * sizeof(value_type))) k++; + } + + if (k_stop == k_end && k == first_block) { + return; + } + + // First segment optimization + if (k != first_block && k) { + size_type max_block = std::max(first_block, k); + + auto buffer_table = segment_table_allocator_traits::allocate(base_type::get_allocator(), max_block); + + for (size_type seg_idx = 0; seg_idx < max_block; ++seg_idx) { + segment_table_allocator_traits::construct(base_type::get_allocator(), &buffer_table[seg_idx], + table[seg_idx].load(std::memory_order_relaxed)); + table[seg_idx].store(nullptr, std::memory_order_relaxed); + } + + this->my_first_block.store(k, std::memory_order_relaxed); + size_type index = 0; + try_call( [&] { + for (; index < std::min(this->segment_size(max_block), curr_size); ++index) { + auto element_address = &static_cast(this)->operator[](index); + segment_index_type seg_idx = this->segment_index_of(index); + segment_table_allocator_traits::construct(base_type::get_allocator(), element_address, + std::move_if_noexcept(buffer_table[seg_idx].load(std::memory_order_relaxed)[index])); + } + } ).on_exception( [&] { + segment_element_allocator_type allocator(base_type::get_allocator()); + for (size_type i = 0; i < index; ++i) { + auto element_adress = &this->operator[](i); + segment_element_allocator_traits::destroy(allocator, element_adress); + } + segment_element_allocator_traits::deallocate(allocator, + table[0].load(std::memory_order_relaxed), this->segment_size(max_block)); + + for (size_type seg_idx = 0; seg_idx < max_block; ++seg_idx) { + table[seg_idx].store(buffer_table[seg_idx].load(std::memory_order_relaxed), + std::memory_order_relaxed); + buffer_table[seg_idx].store(nullptr, std::memory_order_relaxed); + } + segment_table_allocator_traits::deallocate(base_type::get_allocator(), + buffer_table, max_block); + this->my_first_block.store(first_block, std::memory_order_relaxed); + }); + + // Need to correct deallocate old segments + // Method destroy_segment respect active first_block, therefore, + // in order for the segment deletion to work correctly, set the first_block size that was earlier, + // destroy the unnecessary segments. 
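Note that `internal_compact()`, reached via `shrink_to_fit()`, is the one maintenance operation here that must not run concurrently with other accesses: as the code above shows, it temporarily rewires the segment table through a buffer. A usage sketch:

```cpp
#include <oneapi/tbb/concurrent_vector.h>

int main() {
    tbb::concurrent_vector<int> v;
    v.reserve(1024);                     // pre-allocates segments up front
    for (int i = 0; i < 10; ++i) v.push_back(i);
    v.shrink_to_fit();                   // single-threaded only: merges small leading
                                         // segments and frees reserve() leftovers
    return static_cast<int>(v.size());   // 10
}
```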
+ this->my_first_block.store(first_block, std::memory_order_relaxed); + for (size_type seg_idx = max_block; seg_idx > 0 ; --seg_idx) { + auto curr_segment = buffer_table[seg_idx - 1].load(std::memory_order_relaxed); + if (curr_segment != nullptr) { + destroy_segment(buffer_table[seg_idx - 1].load(std::memory_order_relaxed) + this->segment_base(seg_idx - 1), + seg_idx - 1); + } + } + + this->my_first_block.store(k, std::memory_order_relaxed); + + for (size_type seg_idx = 0; seg_idx < max_block; ++seg_idx) { + segment_table_allocator_traits::destroy(base_type::get_allocator(), &buffer_table[seg_idx]); + } + + segment_table_allocator_traits::deallocate(base_type::get_allocator(), buffer_table, max_block); + } + // free unnecessary segments allocated by reserve() call + if (k_stop < k_end) { + for (size_type seg_idx = k_end; seg_idx != k_stop; --seg_idx) { + if (table[seg_idx - 1].load(std::memory_order_relaxed) != nullptr) { + this->delete_segment(seg_idx - 1); + } + } + if (!k) this->my_first_block.store(0, std::memory_order_relaxed); + } + } + + // Lever for adjusting the size of first_block at the very first insertion. + // TODO: consider >1 value, check performance + static constexpr size_type default_first_block_size = 1; + + template + friend class vector_iterator; +}; // class concurrent_vector + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template >, + typename = std::enable_if_t>, + typename = std::enable_if_t>> +concurrent_vector( It, It, Alloc = Alloc() ) +-> concurrent_vector, Alloc>; +#endif + +template +void swap(concurrent_vector &lhs, + concurrent_vector &rhs) +{ + lhs.swap(rhs); +} + +template +bool operator==(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); +} + +#if !__TBB_CPP20_COMPARISONS_PRESENT +template +bool operator!=(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return !(lhs == rhs); +} +#endif // !__TBB_CPP20_COMPARISONS_PRESENT + +#if __TBB_CPP20_COMPARISONS_PRESENT && __TBB_CPP20_CONCEPTS_PRESENT +template +tbb::detail::synthesized_three_way_result::value_type> +operator<=>(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return std::lexicographical_compare_three_way(lhs.begin(), lhs.end(), + rhs.begin(), rhs.end(), + tbb::detail::synthesized_three_way_comparator{}); +} + +#else + +template +bool operator<(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); +} + +template +bool operator<=(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return !(rhs < lhs); +} + +template +bool operator>(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return rhs < lhs; +} + +template +bool operator>=(const concurrent_vector &lhs, + const concurrent_vector &rhs) +{ + return !(lhs < rhs); +} +#endif // __TBB_CPP20_COMPARISONS_PRESENT && __TBB_CPP20_CONCEPTS_PRESENT + +} // namespace d1 +} // namespace detail + +inline namespace v1 { + using detail::d1::concurrent_vector; +} // namespace v1 + +} // namespace tbb + +#endif // __TBB_concurrent_vector_H diff --git a/src/tbb/include/oneapi/tbb/detail/_aggregator.h b/src/tbb/include/oneapi/tbb/detail/_aggregator.h new file mode 100644 index 000000000..2e3e5cc3d --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_aggregator.h @@ -0,0 +1,176 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed 
under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + + +#ifndef __TBB_detail__aggregator_H +#define __TBB_detail__aggregator_H + +#include "_assert.h" +#include "_utils.h" +#include +#if !__TBBMALLOC_BUILD // TODO: check this macro with TBB Malloc +#include "../profiling.h" +#endif + +namespace tbb { +namespace detail { +namespace d1 { + +// Base class for aggregated operation +template +class aggregated_operation { +public: + // Zero value means "wait" status, all other values are "user" specified values and + // are defined into the scope of a class which uses "status" + std::atomic status; + + std::atomic next; + aggregated_operation() : status{}, next(nullptr) {} +}; // class aggregated_operation + +// Aggregator base class +/* An aggregator for collecting operations coming from multiple sources and executing + them serially on a single thread. OperationType must be derived from + aggregated_operation. The parameter HandlerType is a functor that will be passed the + list of operations and is expected to handle each operation appropriately, setting the + status of each operation to non-zero. */ +template +class aggregator_generic { +public: + aggregator_generic() : pending_operations(nullptr), handler_busy(false) {} + + // Execute an operation + /* Places an operation into the waitlist (pending_operations), and either handles the list, + or waits for the operation to complete, or returns. + The long_life_time parameter specifies the life time of the given operation object. + Operations with long_life_time == true may be accessed after execution. + A "short" life time operation (long_life_time == false) can be destroyed + during execution, and so any access to it after it was put into the waitlist, + including status check, is invalid. As a consequence, waiting for completion + of such operation causes undefined behavior. */ + template + void execute( OperationType* op, HandlerType& handle_operations, bool long_life_time = true ) { + // op->status should be read before inserting the operation into the + // aggregator waitlist since it can become invalid after executing a + // handler (if the operation has 'short' life time.) + const uintptr_t status = op->status.load(std::memory_order_relaxed); + + // ITT note: &(op->status) tag is used to cover accesses to this op node. This + // thread has created the operation, and now releases it so that the handler + // thread may handle the associated operation w/o triggering a race condition; + // thus this tag will be acquired just before the operation is handled in the + // handle_operations functor. + call_itt_notify(releasing, &(op->status)); + // insert the operation in the queue. 
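The class comment above states the contract: operations derive from `aggregated_operation`, and a handler functor walks the collected list, marking each operation's `status` non-zero when done (reading `next` first, since a waiter may destroy a completed operation). A minimal sketch of a conforming operation/handler pair; the names are hypothetical, and this leans on the internal `d1` API declared above, which is not part of TBB's public interface:

```cpp
#include <oneapi/tbb/detail/_aggregator.h>
#include <atomic>

// Hypothetical counter protected by an aggregator.
struct count_op : tbb::detail::d1::aggregated_operation<count_op> {
    int delta{0};
};

struct count_handler {
    long total = 0;
    void operator()(count_op* list) {
        for (count_op* op = list; op != nullptr; ) {
            count_op* next = op->next.load(std::memory_order_relaxed);
            total += op->delta;
            op->status.store(1, std::memory_order_release);  // mark done last
            op = next;
        }
    }
};

int main() {
    tbb::detail::d1::aggregator_generic<count_op> agg;
    count_handler handler;
    count_op op;
    op.delta = 5;
    agg.execute(&op, handler);  // uncontended: this thread becomes the handler
    return static_cast<int>(handler.total);  // 5
}
```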
+ OperationType* res = pending_operations.load(std::memory_order_relaxed); + do { + op->next.store(res, std::memory_order_relaxed); + } while (!pending_operations.compare_exchange_strong(res, op)); + if (!res) { // first in the list; handle the operations + // ITT note: &pending_operations tag covers access to the handler_busy flag, + // which this waiting handler thread will try to set before entering + // handle_operations. + call_itt_notify(acquired, &pending_operations); + start_handle_operations(handle_operations); + // The operation with 'short' life time can already be destroyed + if (long_life_time) + __TBB_ASSERT(op->status.load(std::memory_order_relaxed), nullptr); + } + // Not first; wait for op to be ready + else if (!status) { // operation is blocking here. + __TBB_ASSERT(long_life_time, "Waiting for an operation object that might be destroyed during processing"); + call_itt_notify(prepare, &(op->status)); + spin_wait_while_eq(op->status, uintptr_t(0)); + } + } + +private: + // Trigger the handling of operations when the handler is free + template + void start_handle_operations( HandlerType& handle_operations ) { + OperationType* op_list; + + // ITT note: &handler_busy tag covers access to pending_operations as it is passed + // between active and waiting handlers. Below, the waiting handler waits until + // the active handler releases, and the waiting handler acquires &handler_busy as + // it becomes the active_handler. The release point is at the end of this + // function, when all operations in pending_operations have been handled by the + // owner of this aggregator. + call_itt_notify(prepare, &handler_busy); + // get the handler_busy: + // only one thread can possibly spin here at a time + spin_wait_until_eq(handler_busy, uintptr_t(0)); + call_itt_notify(acquired, &handler_busy); + // acquire fence not necessary here due to causality rule and surrounding atomics + handler_busy.store(1, std::memory_order_relaxed); + + // ITT note: &pending_operations tag covers access to the handler_busy flag + // itself. Capturing the state of the pending_operations signifies that + // handler_busy has been set and a new active handler will now process that list's + // operations. 
+ call_itt_notify(releasing, &pending_operations); + // grab pending_operations + op_list = pending_operations.exchange(nullptr); + + // handle all the operations + handle_operations(op_list); + + // release the handler + handler_busy.store(0, std::memory_order_release); + } + + // An atomically updated list (aka mailbox) of pending operations + std::atomic pending_operations; + // Controls threads access to handle_operations + std::atomic handler_busy; +}; // class aggregator_generic + +template +class aggregator : public aggregator_generic { + HandlerType handle_operations; +public: + aggregator() = default; + + void initialize_handler( HandlerType h ) { handle_operations = h; } + + void execute(OperationType* op) { + aggregator_generic::execute(op, handle_operations); + } +}; // class aggregator + +// the most-compatible friend declaration (vs, gcc, icc) is +// template friend class aggregating_functor; +template +class aggregating_functor { + AggregatingClass* my_object{nullptr}; +public: + aggregating_functor() = default; + aggregating_functor( AggregatingClass* object ) : my_object(object) { + __TBB_ASSERT(my_object, nullptr); + } + + void operator()( OperationList* op_list ) { + __TBB_ASSERT(my_object, nullptr); + my_object->handle_operations(op_list); + } +}; // class aggregating_functor + + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail__aggregator_H diff --git a/src/tbb/include/oneapi/tbb/detail/_aligned_space.h b/src/tbb/include/oneapi/tbb/detail/_aligned_space.h new file mode 100644 index 000000000..13857c47c --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_aligned_space.h @@ -0,0 +1,46 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +#ifndef __TBB_aligned_space_H +#define __TBB_aligned_space_H + +#include + +#include "_template_helpers.h" + +namespace tbb { +namespace detail { +inline namespace d0 { + +//! Block of space aligned sufficiently to construct an array T with N elements. +/** The elements are not constructed or destroyed by this class. + @ingroup memory_allocation */ +template +class aligned_space { + alignas(alignof(T)) std::uint8_t aligned_array[N * sizeof(T)]; + +public: + //! Pointer to beginning of array + T* begin() const { return punned_cast(&aligned_array); } + + //! Pointer to one past last element in array. + T* end() const { return begin() + N; } +}; + +} // namespace d0 +} // namespace detail +} // namespace tbb + +#endif /* __TBB_aligned_space_H */ diff --git a/src/tbb/include/oneapi/tbb/detail/_allocator_traits.h b/src/tbb/include/oneapi/tbb/detail/_allocator_traits.h new file mode 100644 index 000000000..8c60e25e7 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_allocator_traits.h @@ -0,0 +1,107 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
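`aligned_space` above is a building block, not a container: it reserves suitably aligned raw storage for N elements and never constructs or destroys them, so the caller pairs placement-new with manual destruction. A sketch (the detail header path is shown for illustration; this is an internal API):

```cpp
#include <oneapi/tbb/detail/_aligned_space.h>
#include <memory>   // std::destroy_at (C++17)
#include <new>      // placement new
#include <string>

int main() {
    tbb::detail::aligned_space<std::string, 2> storage;  // raw storage only
    std::string* s = new (storage.begin()) std::string("hello");
    std::destroy_at(s);  // aligned_space never runs destructors itself
    return 0;
}
```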
+
+} // inline namespace d0
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_detail__allocator_traits_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_assert.h b/src/tbb/include/oneapi/tbb/detail/_assert.h
new file mode 100644
index 000000000..fce714ffe
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_assert.h
@@ -0,0 +1,64 @@
+/*
+    Copyright (c) 2005-2022 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in
compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__assert_H +#define __TBB_detail__assert_H + +#include "_config.h" + +#if __TBBMALLOC_BUILD +namespace rml { namespace internal { +#else +namespace tbb { +namespace detail { +namespace r1 { +#endif +//! Process an assertion failure. +/** Normally called from __TBB_ASSERT macro. + If assertion handler is null, print message for assertion failure and abort. + Otherwise call the assertion handler. */ +TBB_EXPORT void __TBB_EXPORTED_FUNC assertion_failure(const char* location, int line, const char* expression, const char* comment); +#if __TBBMALLOC_BUILD +}} // namespaces rml::internal +#else +} // namespace r1 +} // namespace detail +} // namespace tbb +#endif + +#if __TBBMALLOC_BUILD +//! Release version of assertions +#define __TBB_ASSERT_RELEASE(predicate,message) ((predicate)?((void)0) : rml::internal::assertion_failure(__func__,__LINE__,#predicate,message)) +#else +#define __TBB_ASSERT_RELEASE(predicate,message) ((predicate)?((void)0) : tbb::detail::r1::assertion_failure(__func__,__LINE__,#predicate,message)) +#endif + +#if TBB_USE_ASSERT + //! Assert that predicate is true. + /** If predicate is false, print assertion failure message. + If the comment argument is not nullptr, it is printed as part of the failure message. + The comment argument has no other effect. */ + #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_RELEASE(predicate,message) + //! "Extended" version + #define __TBB_ASSERT_EX __TBB_ASSERT +#else + //! No-op version of __TBB_ASSERT. + #define __TBB_ASSERT(predicate,comment) ((void)0) + //! "Extended" version is useful to suppress warnings if a variable is only used with an assert + #define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate))) +#endif // TBB_USE_ASSERT + +#endif // __TBB_detail__assert_H diff --git a/src/tbb/include/oneapi/tbb/detail/_attach.h b/src/tbb/include/oneapi/tbb/detail/_attach.h new file mode 100644 index 000000000..45f29727a --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_attach.h @@ -0,0 +1,32 @@ +/* + Copyright (c) 2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#ifndef __TBB_detail__attach_H
+#define __TBB_detail__attach_H
+
+#include "_config.h"
+
+namespace tbb {
+namespace detail {
+namespace d1 {
+
+    struct attach {};
+
+} // namespace d1
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_detail__attach_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_concurrent_queue_base.h b/src/tbb/include/oneapi/tbb/detail/_concurrent_queue_base.h
new file mode 100644
index 000000000..b195cc4ab
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_concurrent_queue_base.h
@@ -0,0 +1,650 @@
+/*
+    Copyright (c) 2005-2022 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_detail__concurrent_queue_base_H
+#define __TBB_detail__concurrent_queue_base_H
+
+#include "_utils.h"
+#include "_exception.h"
+#include "_machine.h"
+#include "_allocator_traits.h"
+
+#include "../profiling.h"
+#include "../spin_mutex.h"
+#include "../cache_aligned_allocator.h"
+
+#include <atomic>
+
+namespace tbb {
+namespace detail {
+namespace d2 {
+
+using ticket_type = std::size_t;
+
+template <typename Page>
+inline bool is_valid_page(const Page p) {
+    return reinterpret_cast<std::uintptr_t>(p) > 1;
+}
+
+template <typename T, typename Allocator>
+struct concurrent_queue_rep;
+
+template <typename Container, typename T, typename Allocator>
+class micro_queue_pop_finalizer;
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+// unary minus operator applied to unsigned type, result still unsigned
+// #pragma warning( push )
+// #pragma warning( disable: 4146 )
+#endif
+
+// A queue using simple locking.
+// For efficiency, this class has no constructor.
+// The caller is expected to zero-initialize it.
+template <typename T, typename Allocator>
+class micro_queue {
+private:
+    using queue_rep_type = concurrent_queue_rep<T, Allocator>;
+    using self_type = micro_queue<T, Allocator>;
+public:
+    using size_type = std::size_t;
+    using value_type = T;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+
+    using allocator_type = Allocator;
+    using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
+    using queue_allocator_type = typename allocator_traits_type::template rebind_alloc<queue_rep_type>;
+
+    static constexpr size_type item_size = sizeof(T);
+    static constexpr size_type items_per_page = item_size <= 8 ? 32 :
+        item_size <= 16 ? 16 :
+        item_size <= 32 ? 8 :
+        item_size <= 64 ? 4 :
+        item_size <= 128 ? 2 : 1;
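+
+    // NOTE: illustrative commentary, not part of the upstream oneTBB sources.
+    // items_per_page trades page size against allocation frequency while keeping
+    // each page's validity bitmask within a single machine word, e.g.:
+    //   sizeof(T) == 8   -> 32 items per page (a queue of doubles or pointers)
+    //   sizeof(T) == 48  -> 4 items per page
+    //   sizeof(T) >  128 -> 1 item per page
+    // Because a page never holds more than 32 items, bit i of padded_page::mask
+    // (declared below) can record whether slot i holds a constructed element.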
+
+    struct padded_page {
+        padded_page() {}
+        ~padded_page() {}
+
+        reference operator[] (std::size_t index) {
+            __TBB_ASSERT(index < items_per_page, "Index out of range");
+            return items[index];
+        }
+
+        const_reference operator[] (std::size_t index) const {
+            __TBB_ASSERT(index < items_per_page, "Index out of range");
+            return items[index];
+        }
+
+        padded_page* next{ nullptr };
+        std::atomic<std::uintptr_t> mask{};
+
+        union {
+            value_type items[items_per_page];
+        };
+    }; // struct padded_page
+
+    using page_allocator_type = typename allocator_traits_type::template rebind_alloc<padded_page>;
+protected:
+    using page_allocator_traits = tbb::detail::allocator_traits<page_allocator_type>;
+
+public:
+    using item_constructor_type = void (*)(value_type* location, const void* src);
+    micro_queue() = default;
+    micro_queue( const micro_queue& ) = delete;
+    micro_queue& operator=( const micro_queue& ) = delete;
+
+    size_type prepare_page( ticket_type k, queue_rep_type& base, page_allocator_type page_allocator,
+                            padded_page*& p ) {
+        __TBB_ASSERT(p == nullptr, "Invalid page argument for prepare_page");
+        k &= -queue_rep_type::n_queue;
+        size_type index = modulo_power_of_two(k / queue_rep_type::n_queue, items_per_page);
+        if (!index) {
+            try_call( [&] {
+                p = page_allocator_traits::allocate(page_allocator, 1);
+            }).on_exception( [&] {
+                ++base.n_invalid_entries;
+                invalidate_page( k );
+            });
+            page_allocator_traits::construct(page_allocator, p);
+        }
+
+        spin_wait_until_my_turn(tail_counter, k, base);
+        d1::call_itt_notify(d1::acquired, &tail_counter);
+
+        if (p) {
+            spin_mutex::scoped_lock lock( page_mutex );
+            padded_page* q = tail_page.load(std::memory_order_relaxed);
+            if (is_valid_page(q)) {
+                q->next = p;
+            } else {
+                head_page.store(p, std::memory_order_relaxed);
+            }
+            tail_page.store(p, std::memory_order_relaxed);
+        } else {
+            p = tail_page.load(std::memory_order_relaxed);
+        }
+        return index;
+    }
+
+    template <typename... Args>
+    void push( ticket_type k, queue_rep_type& base, queue_allocator_type& allocator, Args&&... args )
+    {
+        padded_page* p = nullptr;
+        page_allocator_type page_allocator(allocator);
+        size_type index = prepare_page(k, base, page_allocator, p);
+        __TBB_ASSERT(p != nullptr, "Page was not prepared");
+
+        // try_call API is not convenient here due to broken
+        // variadic capture on GCC 4.8.5
+        auto value_guard = make_raii_guard([&] {
+            ++base.n_invalid_entries;
+            d1::call_itt_notify(d1::releasing, &tail_counter);
+            tail_counter.fetch_add(queue_rep_type::n_queue);
+        });
+
+        page_allocator_traits::construct(page_allocator, &(*p)[index], std::forward<Args>(args)...);
+        // If no exception was thrown, mark item as present.
+ p->mask.store(p->mask.load(std::memory_order_relaxed) | uintptr_t(1) << index, std::memory_order_relaxed); + d1::call_itt_notify(d1::releasing, &tail_counter); + + value_guard.dismiss(); + tail_counter.fetch_add(queue_rep_type::n_queue); + } + + void abort_push( ticket_type k, queue_rep_type& base, queue_allocator_type& allocator ) { + padded_page* p = nullptr; + prepare_page(k, base, allocator, p); + ++base.n_invalid_entries; + tail_counter.fetch_add(queue_rep_type::n_queue); + } + + bool pop( void* dst, ticket_type k, queue_rep_type& base, queue_allocator_type& allocator ) { + k &= -queue_rep_type::n_queue; + spin_wait_until_eq(head_counter, k); + d1::call_itt_notify(d1::acquired, &head_counter); + spin_wait_while_eq(tail_counter, k); + d1::call_itt_notify(d1::acquired, &tail_counter); + padded_page *p = head_page.load(std::memory_order_relaxed); + __TBB_ASSERT( p, nullptr ); + size_type index = modulo_power_of_two( k/queue_rep_type::n_queue, items_per_page ); + bool success = false; + { + page_allocator_type page_allocator(allocator); + micro_queue_pop_finalizer finalizer(*this, page_allocator, + k + queue_rep_type::n_queue, index == items_per_page - 1 ? p : nullptr ); + if (p->mask.load(std::memory_order_relaxed) & (std::uintptr_t(1) << index)) { + success = true; + assign_and_destroy_item(dst, *p, index); + } else { + --base.n_invalid_entries; + } + } + return success; + } + + micro_queue& assign( const micro_queue& src, queue_allocator_type& allocator, + item_constructor_type construct_item ) + { + head_counter.store(src.head_counter.load(std::memory_order_relaxed), std::memory_order_relaxed); + tail_counter.store(src.tail_counter.load(std::memory_order_relaxed), std::memory_order_relaxed); + + const padded_page* srcp = src.head_page.load(std::memory_order_relaxed); + if( is_valid_page(srcp) ) { + ticket_type g_index = head_counter.load(std::memory_order_relaxed); + size_type n_items = (tail_counter.load(std::memory_order_relaxed) - head_counter.load(std::memory_order_relaxed)) + / queue_rep_type::n_queue; + size_type index = modulo_power_of_two(head_counter.load(std::memory_order_relaxed) / queue_rep_type::n_queue, items_per_page); + size_type end_in_first_page = (index+n_items < items_per_page) ? 
(index + n_items) : items_per_page; + + try_call( [&] { + head_page.store(make_copy(allocator, srcp, index, end_in_first_page, g_index, construct_item), std::memory_order_relaxed); + }).on_exception( [&] { + head_counter.store(0, std::memory_order_relaxed); + tail_counter.store(0, std::memory_order_relaxed); + }); + padded_page* cur_page = head_page.load(std::memory_order_relaxed); + + try_call( [&] { + if (srcp != src.tail_page.load(std::memory_order_relaxed)) { + for (srcp = srcp->next; srcp != src.tail_page.load(std::memory_order_relaxed); srcp=srcp->next ) { + cur_page->next = make_copy( allocator, srcp, 0, items_per_page, g_index, construct_item ); + cur_page = cur_page->next; + } + + __TBB_ASSERT(srcp == src.tail_page.load(std::memory_order_relaxed), nullptr ); + size_type last_index = modulo_power_of_two(tail_counter.load(std::memory_order_relaxed) / queue_rep_type::n_queue, items_per_page); + if( last_index==0 ) last_index = items_per_page; + + cur_page->next = make_copy( allocator, srcp, 0, last_index, g_index, construct_item ); + cur_page = cur_page->next; + } + tail_page.store(cur_page, std::memory_order_relaxed); + }).on_exception( [&] { + padded_page* invalid_page = reinterpret_cast(std::uintptr_t(1)); + tail_page.store(invalid_page, std::memory_order_relaxed); + }); + } else { + head_page.store(nullptr, std::memory_order_relaxed); + tail_page.store(nullptr, std::memory_order_relaxed); + } + return *this; + } + + padded_page* make_copy( queue_allocator_type& allocator, const padded_page* src_page, size_type begin_in_page, + size_type end_in_page, ticket_type& g_index, item_constructor_type construct_item ) + { + page_allocator_type page_allocator(allocator); + padded_page* new_page = page_allocator_traits::allocate(page_allocator, 1); + new_page->next = nullptr; + new_page->mask.store(src_page->mask.load(std::memory_order_relaxed), std::memory_order_relaxed); + for (; begin_in_page!=end_in_page; ++begin_in_page, ++g_index) { + if (new_page->mask.load(std::memory_order_relaxed) & uintptr_t(1) << begin_in_page) { + copy_item(*new_page, begin_in_page, *src_page, begin_in_page, construct_item); + } + } + return new_page; + } + + void invalidate_page( ticket_type k ) { + // Append an invalid page at address 1 so that no more pushes are allowed. 
+ padded_page* invalid_page = reinterpret_cast(std::uintptr_t(1)); + { + spin_mutex::scoped_lock lock( page_mutex ); + tail_counter.store(k + queue_rep_type::n_queue + 1, std::memory_order_relaxed); + padded_page* q = tail_page.load(std::memory_order_relaxed); + if (is_valid_page(q)) { + q->next = invalid_page; + } else { + head_page.store(invalid_page, std::memory_order_relaxed); + } + tail_page.store(invalid_page, std::memory_order_relaxed); + } + } + + padded_page* get_head_page() { + return head_page.load(std::memory_order_relaxed); + } + + void clear(queue_allocator_type& allocator, padded_page* new_head = nullptr, padded_page* new_tail = nullptr) { + padded_page* curr_page = get_head_page(); + size_type index = (head_counter.load(std::memory_order_relaxed) / queue_rep_type::n_queue) % items_per_page; + page_allocator_type page_allocator(allocator); + + while (curr_page && is_valid_page(curr_page)) { + while (index != items_per_page) { + if (curr_page->mask.load(std::memory_order_relaxed) & (std::uintptr_t(1) << index)) { + page_allocator_traits::destroy(page_allocator, &curr_page->operator[](index)); + } + ++index; + } + + index = 0; + padded_page* next_page = curr_page->next; + page_allocator_traits::destroy(page_allocator, curr_page); + page_allocator_traits::deallocate(page_allocator, curr_page, 1); + curr_page = next_page; + } + head_counter.store(0, std::memory_order_relaxed); + tail_counter.store(0, std::memory_order_relaxed); + head_page.store(new_head, std::memory_order_relaxed); + tail_page.store(new_tail, std::memory_order_relaxed); + } + + void clear_and_invalidate(queue_allocator_type& allocator) { + padded_page* invalid_page = reinterpret_cast(std::uintptr_t(1)); + clear(allocator, invalid_page, invalid_page); + } + +private: + // template + friend class micro_queue_pop_finalizer; + + // Class used to ensure exception-safety of method "pop" + class destroyer { + value_type& my_value; + public: + destroyer( reference value ) : my_value(value) {} + destroyer( const destroyer& ) = delete; + destroyer& operator=( const destroyer& ) = delete; + ~destroyer() {my_value.~T();} + }; // class destroyer + + void copy_item( padded_page& dst, size_type dindex, const padded_page& src, size_type sindex, + item_constructor_type construct_item ) + { + auto& src_item = src[sindex]; + construct_item( &dst[dindex], static_cast(&src_item) ); + } + + void assign_and_destroy_item( void* dst, padded_page& src, size_type index ) { + auto& from = src[index]; + destroyer d(from); + *static_cast(dst) = std::move(from); + } + + void spin_wait_until_my_turn( std::atomic& counter, ticket_type k, queue_rep_type& rb ) const { + for (atomic_backoff b{};; b.pause()) { + ticket_type c = counter.load(std::memory_order_acquire); + if (c == k) return; + else if (c & 1) { + ++rb.n_invalid_entries; + throw_exception( exception_id::bad_last_alloc); + } + } + } + + std::atomic head_page{}; + std::atomic head_counter{}; + + std::atomic tail_page{}; + std::atomic tail_counter{}; + + spin_mutex page_mutex{}; +}; // class micro_queue + +#if _MSC_VER && !defined(__INTEL_COMPILER) +// #pragma warning( pop ) +#endif // warning 4146 is back + +template +class micro_queue_pop_finalizer { +public: + using padded_page = typename Container::padded_page; + using allocator_type = Allocator; + using allocator_traits_type = tbb::detail::allocator_traits; + + micro_queue_pop_finalizer( Container& queue, Allocator& alloc, ticket_type k, padded_page* p ) : + my_ticket_type(k), my_queue(queue), my_page(p), allocator(alloc) + {} + + 
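+    // NOTE: illustrative commentary, not part of the upstream oneTBB sources.
+    // This finalizer makes micro_queue::pop() exception-safe via RAII: whether or
+    // not the element move in assign_and_destroy_item() throws, the destructor
+    // below still publishes the new head ticket (a release store to head_counter)
+    // and, when the pop consumed the last slot of a page, unlinks that page under
+    // page_mutex and deallocates it.
+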
micro_queue_pop_finalizer( const micro_queue_pop_finalizer& ) = delete; + micro_queue_pop_finalizer& operator=( const micro_queue_pop_finalizer& ) = delete; + + ~micro_queue_pop_finalizer() { + padded_page* p = my_page; + if( is_valid_page(p) ) { + spin_mutex::scoped_lock lock( my_queue.page_mutex ); + padded_page* q = p->next; + my_queue.head_page.store(q, std::memory_order_relaxed); + if( !is_valid_page(q) ) { + my_queue.tail_page.store(nullptr, std::memory_order_relaxed); + } + } + my_queue.head_counter.store(my_ticket_type, std::memory_order_release); + if ( is_valid_page(p) ) { + allocator_traits_type::destroy(allocator, static_cast(p)); + allocator_traits_type::deallocate(allocator, static_cast(p), 1); + } + } +private: + ticket_type my_ticket_type; + Container& my_queue; + padded_page* my_page; + Allocator& allocator; +}; // class micro_queue_pop_finalizer + +#if _MSC_VER && !defined(__INTEL_COMPILER) +// structure was padded due to alignment specifier +// #pragma warning( push ) +// #pragma warning( disable: 4324 ) +#endif + +template +struct concurrent_queue_rep { + using self_type = concurrent_queue_rep; + using size_type = std::size_t; + using micro_queue_type = micro_queue; + using allocator_type = Allocator; + using allocator_traits_type = tbb::detail::allocator_traits; + using padded_page = typename micro_queue_type::padded_page; + using page_allocator_type = typename micro_queue_type::page_allocator_type; + using item_constructor_type = typename micro_queue_type::item_constructor_type; +private: + using page_allocator_traits = tbb::detail::allocator_traits; + using queue_allocator_type = typename allocator_traits_type::template rebind_alloc; + +public: + // must be power of 2 + static constexpr size_type n_queue = 8; + // Approximately n_queue/golden ratio + static constexpr size_type phi = 3; + static constexpr size_type item_size = micro_queue_type::item_size; + static constexpr size_type items_per_page = micro_queue_type::items_per_page; + + concurrent_queue_rep() {} + + concurrent_queue_rep( const concurrent_queue_rep& ) = delete; + concurrent_queue_rep& operator=( const concurrent_queue_rep& ) = delete; + + void clear( queue_allocator_type& alloc ) { + for (size_type index = 0; index < n_queue; ++index) { + array[index].clear(alloc); + } + head_counter.store(0, std::memory_order_relaxed); + tail_counter.store(0, std::memory_order_relaxed); + n_invalid_entries.store(0, std::memory_order_relaxed); + } + + void assign( const concurrent_queue_rep& src, queue_allocator_type& alloc, item_constructor_type construct_item ) { + head_counter.store(src.head_counter.load(std::memory_order_relaxed), std::memory_order_relaxed); + tail_counter.store(src.tail_counter.load(std::memory_order_relaxed), std::memory_order_relaxed); + n_invalid_entries.store(src.n_invalid_entries.load(std::memory_order_relaxed), std::memory_order_relaxed); + + // copy or move micro_queues + size_type queue_idx = 0; + try_call( [&] { + for (; queue_idx < n_queue; ++queue_idx) { + array[queue_idx].assign(src.array[queue_idx], alloc, construct_item); + } + }).on_exception( [&] { + for (size_type i = 0; i < queue_idx + 1; ++i) { + array[i].clear_and_invalidate(alloc); + } + head_counter.store(0, std::memory_order_relaxed); + tail_counter.store(0, std::memory_order_relaxed); + n_invalid_entries.store(0, std::memory_order_relaxed); + }); + + __TBB_ASSERT(head_counter.load(std::memory_order_relaxed) == src.head_counter.load(std::memory_order_relaxed) && + tail_counter.load(std::memory_order_relaxed) == 
src.tail_counter.load(std::memory_order_relaxed), + "the source concurrent queue should not be concurrently modified." ); + } + + bool empty() const { + ticket_type tc = tail_counter.load(std::memory_order_acquire); + ticket_type hc = head_counter.load(std::memory_order_relaxed); + // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. + return tc == tail_counter.load(std::memory_order_relaxed) && + std::ptrdiff_t(tc - hc - n_invalid_entries.load(std::memory_order_relaxed)) <= 0; + } + + std::ptrdiff_t size() const { + __TBB_ASSERT(sizeof(std::ptrdiff_t) <= sizeof(size_type), nullptr); + std::ptrdiff_t hc = head_counter.load(std::memory_order_acquire); + std::ptrdiff_t tc = tail_counter.load(std::memory_order_relaxed); + std::ptrdiff_t nie = n_invalid_entries.load(std::memory_order_relaxed); + + return tc - hc - nie; + } + + friend class micro_queue; + + // Map ticket_type to an array index + static size_type index( ticket_type k ) { + return k * phi % n_queue; + } + + micro_queue_type& choose( ticket_type k ) { + // The formula here approximates LRU in a cache-oblivious way. + return array[index(k)]; + } + + alignas(max_nfs_size) micro_queue_type array[n_queue]; + + alignas(max_nfs_size) std::atomic head_counter{}; + alignas(max_nfs_size) std::atomic tail_counter{}; + alignas(max_nfs_size) std::atomic n_invalid_entries{}; +}; // class concurrent_queue_rep + +#if _MSC_VER && !defined(__INTEL_COMPILER) +// #pragma warning( pop ) +#endif + +template +class concurrent_queue_iterator_base { + using queue_rep_type = concurrent_queue_rep; + using padded_page = typename queue_rep_type::padded_page; +protected: + concurrent_queue_iterator_base() = default; + + concurrent_queue_iterator_base( const concurrent_queue_iterator_base& other ) { + assign(other); + } + + concurrent_queue_iterator_base( queue_rep_type* queue_rep ) + : my_queue_rep(queue_rep), + my_head_counter(my_queue_rep->head_counter.load(std::memory_order_relaxed)) + { + for (std::size_t i = 0; i < queue_rep_type::n_queue; ++i) { + my_array[i] = my_queue_rep->array[i].get_head_page(); + } + + if (!get_item(my_item, my_head_counter)) advance(); + } + + void assign( const concurrent_queue_iterator_base& other ) { + my_item = other.my_item; + my_queue_rep = other.my_queue_rep; + + if (my_queue_rep != nullptr) { + my_head_counter = other.my_head_counter; + + for (std::size_t i = 0; i < queue_rep_type::n_queue; ++i) { + my_array[i] = other.my_array[i]; + } + } + } + + void advance() { + __TBB_ASSERT(my_item, "Attempt to increment iterator past end of the queue"); + std::size_t k = my_head_counter; +#if TBB_USE_ASSERT + Value* tmp; + get_item(tmp, k); + __TBB_ASSERT(my_item == tmp, nullptr); +#endif + std::size_t i = modulo_power_of_two(k / queue_rep_type::n_queue, my_queue_rep->items_per_page); + if (i == my_queue_rep->items_per_page - 1) { + padded_page*& root = my_array[queue_rep_type::index(k)]; + root = root->next; + } + // Advance k + my_head_counter = ++k; + if (!get_item(my_item, k)) advance(); + } + + concurrent_queue_iterator_base& operator=( const concurrent_queue_iterator_base& other ) { + this->assign(other); + return *this; + } + + bool get_item( Value*& item, std::size_t k ) { + if (k == my_queue_rep->tail_counter.load(std::memory_order_relaxed)) { + item = nullptr; + return true; + } else { + padded_page* p = my_array[queue_rep_type::index(k)]; + __TBB_ASSERT(p, nullptr); + std::size_t i = modulo_power_of_two(k / queue_rep_type::n_queue, my_queue_rep->items_per_page); + item = &(*p)[i]; + 
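+            // NOTE: illustrative commentary, not part of the upstream oneTBB
+            // sources. Bit i of the page mask is set only after the element in
+            // slot i was successfully constructed (see micro_queue::push), so a
+            // clear bit denotes an invalid entry, and callers respond to the
+            // 'false' result by skipping ahead via advance().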
return (p->mask & uintptr_t(1) << i) != 0; + } + } + + Value* my_item{ nullptr }; + queue_rep_type* my_queue_rep{ nullptr }; + ticket_type my_head_counter{}; + padded_page* my_array[queue_rep_type::n_queue]{}; +}; // class concurrent_queue_iterator_base + +struct concurrent_queue_iterator_provider { + template + static Iterator get( const Container& container ) { + return Iterator(container); + } +}; // struct concurrent_queue_iterator_provider + +template +class concurrent_queue_iterator : public concurrent_queue_iterator_base::type, Allocator> { + using base_type = concurrent_queue_iterator_base::type, Allocator>; +public: + using value_type = Value; + using pointer = value_type*; + using reference = value_type&; + using difference_type = std::ptrdiff_t; + using iterator_category = std::forward_iterator_tag; + + concurrent_queue_iterator() = default; + + /** If Value==Container::value_type, then this routine is the copy constructor. + If Value==const Container::value_type, then this routine is a conversion constructor. */ + concurrent_queue_iterator( const concurrent_queue_iterator& other ) + : base_type(other) {} + +private: + concurrent_queue_iterator( const Container& container ) + : base_type(container.my_queue_representation) {} +public: + concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { + this->assign(other); + return *this; + } + + reference operator*() const { + return *static_cast(this->my_item); + } + + pointer operator->() const { return &operator*(); } + + concurrent_queue_iterator& operator++() { + this->advance(); + return *this; + } + + concurrent_queue_iterator operator++(int) { + concurrent_queue_iterator tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==( const concurrent_queue_iterator& lhs, const concurrent_queue_iterator& rhs ) { + return lhs.my_item == rhs.my_item; + } + + friend bool operator!=( const concurrent_queue_iterator& lhs, const concurrent_queue_iterator& rhs ) { + return lhs.my_item != rhs.my_item; + } +private: + friend struct concurrent_queue_iterator_provider; +}; // class concurrent_queue_iterator + +} // namespace d2 +} // namespace detail +} // tbb + +#endif // __TBB_detail__concurrent_queue_base_H diff --git a/src/tbb/include/oneapi/tbb/detail/_concurrent_skip_list.h b/src/tbb/include/oneapi/tbb/detail/_concurrent_skip_list.h new file mode 100644 index 000000000..096a44df9 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_concurrent_skip_list.h @@ -0,0 +1,1290 @@ +/* + Copyright (c) 2019-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__concurrent_skip_list_H +#define __TBB_detail__concurrent_skip_list_H + +#if !defined(__TBB_concurrent_map_H) && !defined(__TBB_concurrent_set_H) +#error Do not #include this internal file directly; use public TBB headers instead. 
+#endif + +#include "_config.h" +#include "_range_common.h" +#include "_allocator_traits.h" +#include "_template_helpers.h" +#include "_node_handle.h" +#include "_containers_helpers.h" +#include "_assert.h" +#include "_exception.h" +#include "../enumerable_thread_specific.h" +#include +#include +#include +#include +#include +#include // Need std::geometric_distribution +#include // Need std::equal and std::lexicographical_compare +#include +#if __TBB_CPP20_COMPARISONS_PRESENT +#include +#endif + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +// #pragma warning(push) +// #pragma warning(disable: 4127) // warning C4127: conditional expression is constant +#endif + +namespace tbb { +namespace detail { +namespace d2 { + +template +class skip_list_node { + using node_ptr = skip_list_node*; +public: + using value_type = Value; + using atomic_node_ptr = std::atomic; + using size_type = std::size_t; + using container_allocator_type = Allocator; + + using reference = value_type&; + using const_reference = const value_type&; +private: + using allocator_traits = tbb::detail::allocator_traits; + + // Allocator is the same as the container allocator=> allocates unitptr_t + // It is required to rebind it to value_type to get the correct pointer and const_pointer + using value_allocator_traits = typename allocator_traits::template rebind_traits; +public: + using pointer = typename value_allocator_traits::pointer; + using const_pointer = typename value_allocator_traits::const_pointer; + + //In perfect world these constructor and destructor would have been private, + //however this seems technically impractical due to use of allocator_traits. + + //Should not be called directly, instead use create method + skip_list_node( size_type levels ) + : my_height(levels), my_index_number(0) + {} + + //Should not be called directly, instead use destroy method + ~skip_list_node() {} + + skip_list_node( const skip_list_node& ) = delete; + skip_list_node( skip_list_node&& ) = delete; + skip_list_node& operator=( const skip_list_node& ) = delete; + skip_list_node& operator=( skip_list_node&& ) = delete; + + static skip_list_node* create( container_allocator_type& alloc, size_type height ) { + size_type sz = calc_node_size(height); + static_assert(std::is_same::value, "skip_list_node assumes that passed in allocator operates on bytes"); + auto* node = reinterpret_cast(allocator_traits::allocate(alloc, sz)); + + //Construct the node itself + allocator_traits::construct(alloc, node, height); + + //Construct the level pointers + for (size_type l = 0; l < height; ++l) { + allocator_traits::construct(alloc, &node->get_atomic_next(l), nullptr); + } + + return node; + } + + static void destroy( container_allocator_type& alloc, skip_list_node* node ) { + //Destroy the level pointers + for (size_type l = 0; l < node->height(); ++l) { + allocator_traits::destroy(alloc, &node->atomic_next(l)); + } + size_type sz = calc_node_size(node->height()); + // Destroy the node itself + allocator_traits::destroy(alloc, node); + + // Deallocate the node + allocator_traits::deallocate(alloc, reinterpret_cast(node), sz); + } + + + pointer storage() { + return &my_value; + } + + reference value() { + return *storage(); + } + + node_ptr next( size_type level ) const { + node_ptr res = get_atomic_next(level).load(std::memory_order_acquire); + __TBB_ASSERT(res == nullptr || res->height() > level, "Broken internal structure"); + return res; + } + + atomic_node_ptr& atomic_next( size_type level ) { + atomic_node_ptr& res = 
get_atomic_next(level); +#if TBB_USE_DEBUG + node_ptr node = res.load(std::memory_order_acquire); + __TBB_ASSERT(node == nullptr || node->height() > level, "Broken internal structure"); +#endif + return res; + } + + void set_next( size_type level, node_ptr n ) { + __TBB_ASSERT(n == nullptr || n->height() > level, "Broken internal structure"); + get_atomic_next(level).store(n, std::memory_order_relaxed); + } + + size_type height() const { + return my_height; + } + + void set_index_number( size_type index_num ) { + my_index_number = index_num; + } + + size_type index_number() const { + return my_index_number; + } + +private: + static size_type calc_node_size( size_type height ) { + static_assert(alignof(skip_list_node) >= alignof(atomic_node_ptr), "Incorrect alignment"); + return sizeof(skip_list_node) + height * sizeof(atomic_node_ptr); + } + + atomic_node_ptr& get_atomic_next( size_type level ) { + atomic_node_ptr* arr = reinterpret_cast(this + 1); + return arr[level]; + } + + const atomic_node_ptr& get_atomic_next( size_type level ) const { + const atomic_node_ptr* arr = reinterpret_cast(this + 1); + return arr[level]; + } + + union { + value_type my_value; + }; + size_type my_height; + size_type my_index_number; +}; // class skip_list_node + +template +class skip_list_iterator { + using node_type = NodeType; + using node_ptr = node_type*; +public: + using iterator_category = std::forward_iterator_tag; + using value_type = ValueType; + + using difference_type = std::ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + skip_list_iterator() : skip_list_iterator(nullptr) {} + + skip_list_iterator( const skip_list_iterator& other ) + : my_node_ptr(other.my_node_ptr) {} + + skip_list_iterator& operator=( const skip_list_iterator& other ) { + my_node_ptr = other.my_node_ptr; + return *this; + } + + reference operator*() const { return my_node_ptr->value(); } + pointer operator->() const { return my_node_ptr->storage(); } + + skip_list_iterator& operator++() { + __TBB_ASSERT(my_node_ptr != nullptr, nullptr); + my_node_ptr = my_node_ptr->next(0); + return *this; + } + + skip_list_iterator operator++(int) { + skip_list_iterator tmp = *this; + ++*this; + return tmp; + } + +private: + skip_list_iterator(node_type* n) : my_node_ptr(n) {} + + node_ptr my_node_ptr; + + template + friend class concurrent_skip_list; + + template + friend class skip_list_iterator; + + friend class const_range; + friend class range; + + friend bool operator==( const skip_list_iterator& lhs, const skip_list_iterator& rhs ) { + return lhs.my_node_ptr == rhs.my_node_ptr; + } + + friend bool operator!=( const skip_list_iterator& lhs, const skip_list_iterator& rhs ) { + return lhs.my_node_ptr != rhs.my_node_ptr; + } +}; // class skip_list_iterator + +template +class concurrent_skip_list { +protected: + using container_traits = Traits; + using self_type = concurrent_skip_list; + using allocator_type = typename container_traits::allocator_type; + using allocator_traits_type = tbb::detail::allocator_traits; + using key_compare = typename container_traits::compare_type; + using value_compare = typename container_traits::value_compare; + using key_type = typename container_traits::key_type; + using value_type = typename container_traits::value_type; + static_assert(std::is_same::value, + "value_type of the container should be the same as its allocator"); + + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + static constexpr size_type max_level = container_traits::max_level; + + 
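+    // NOTE: illustrative commentary, not part of the upstream oneTBB sources.
+    // skip_list_node above uses an over-allocation layout: the per-level atomic
+    // next pointers live immediately after the node object, which is why
+    // skip_list_node::create() sizes the allocation as
+    //   calc_node_size(height) == sizeof(skip_list_node) + height * sizeof(atomic_node_ptr)
+    // (e.g. sizeof(skip_list_node) + 3 * 8 bytes for a height-3 node on a typical
+    // 64-bit target) and get_atomic_next() indexes that trailing array at (this + 1).
+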
using node_allocator_type = typename allocator_traits_type::template rebind_alloc; + using node_allocator_traits = tbb::detail::allocator_traits; + + using list_node_type = skip_list_node; + using node_type = d1::node_handle; + + using iterator = skip_list_iterator; + using const_iterator = skip_list_iterator; + + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + using random_level_generator_type = typename container_traits::random_level_generator_type; + + using node_ptr = list_node_type*; + + using array_type = std::array; +private: + template + using is_transparent = dependent_bool, T>; +public: + static constexpr bool allow_multimapping = container_traits::allow_multimapping; + + concurrent_skip_list() : my_head_ptr(nullptr), my_size(0), my_max_height(0) {} + + explicit concurrent_skip_list( const key_compare& comp, const allocator_type& alloc = allocator_type() ) + : my_node_allocator(alloc), my_compare(comp), my_head_ptr(nullptr), my_size(0), my_max_height(0) {} + + explicit concurrent_skip_list( const allocator_type& alloc ) + : concurrent_skip_list(key_compare(), alloc) {} + + template + concurrent_skip_list( InputIterator first, InputIterator last, const key_compare& comp = key_compare(), + const allocator_type& alloc = allocator_type() ) + : concurrent_skip_list(comp, alloc) + { + internal_copy(first, last); + } + + template + concurrent_skip_list( InputIterator first, InputIterator last, const allocator_type& alloc ) + : concurrent_skip_list(first, last, key_compare(), alloc) {} + + concurrent_skip_list( std::initializer_list init, const key_compare& comp = key_compare(), + const allocator_type& alloc = allocator_type() ) + : concurrent_skip_list(init.begin(), init.end(), comp, alloc) {} + + concurrent_skip_list( std::initializer_list init, const allocator_type& alloc ) + : concurrent_skip_list(init, key_compare(), alloc) {} + + concurrent_skip_list( const concurrent_skip_list& other ) + : my_node_allocator(node_allocator_traits::select_on_container_copy_construction(other.get_allocator())), + my_compare(other.my_compare), my_rng(other.my_rng), my_head_ptr(nullptr), + my_size(0), my_max_height(0) + { + internal_copy(other); + __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container"); + } + + concurrent_skip_list( const concurrent_skip_list& other, const allocator_type& alloc ) + : my_node_allocator(alloc), my_compare(other.my_compare), my_rng(other.my_rng), my_head_ptr(nullptr), + my_size(0), my_max_height(0) + { + internal_copy(other); + __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container"); + } + + concurrent_skip_list( concurrent_skip_list&& other ) + : my_node_allocator(std::move(other.my_node_allocator)), my_compare(other.my_compare), + my_rng(std::move(other.my_rng)), my_head_ptr(nullptr) // my_head_ptr would be stored in internal_move + { + internal_move(std::move(other)); + } + + concurrent_skip_list( concurrent_skip_list&& other, const allocator_type& alloc ) + : my_node_allocator(alloc), my_compare(other.my_compare), + my_rng(std::move(other.my_rng)), my_head_ptr(nullptr) + { + using is_always_equal = typename allocator_traits_type::is_always_equal; + internal_move_construct_with_allocator(std::move(other), is_always_equal()); + } + + ~concurrent_skip_list() { + clear(); + delete_head(); + } + + concurrent_skip_list& operator=( const concurrent_skip_list& 
other ) { + if (this != &other) { + clear(); + copy_assign_allocators(my_node_allocator, other.my_node_allocator); + my_compare = other.my_compare; + my_rng = other.my_rng; + internal_copy(other); + } + return *this; + } + + concurrent_skip_list& operator=( concurrent_skip_list&& other ) { + if (this != &other) { + clear(); + delete_head(); + + my_compare = std::move(other.my_compare); + my_rng = std::move(other.my_rng); + + move_assign_allocators(my_node_allocator, other.my_node_allocator); + using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment; + using is_always_equal = typename node_allocator_traits::is_always_equal; + internal_move_assign(std::move(other), tbb::detail::disjunction()); + } + return *this; + } + + concurrent_skip_list& operator=( std::initializer_list il ) + { + clear(); + insert(il.begin(),il.end()); + return *this; + } + + std::pair insert( const value_type& value ) { + return internal_insert(value); + } + + std::pair insert( value_type&& value ) { + return internal_insert(std::move(value)); + } + + iterator insert( const_iterator, const_reference value ) { + // Ignore hint + return insert(value).first; + } + + iterator insert( const_iterator, value_type&& value ) { + // Ignore hint + return insert(std::move(value)).first; + } + + template + void insert( InputIterator first, InputIterator last ) { + while (first != last) { + insert(*first); + ++first; + } + } + + void insert( std::initializer_list init ) { + insert(init.begin(), init.end()); + } + + std::pair insert( node_type&& nh ) { + if (!nh.empty()) { + auto insert_node = d1::node_handle_accessor::get_node_ptr(nh); + std::pair insert_result = internal_insert_node(insert_node); + if (insert_result.second) { + d1::node_handle_accessor::deactivate(nh); + } + return insert_result; + } + return std::pair(end(), false); + } + + iterator insert( const_iterator, node_type&& nh ) { + // Ignore hint + return insert(std::move(nh)).first; + } + + template + std::pair emplace( Args&&... args ) { + return internal_insert(std::forward(args)...); + } + + template + iterator emplace_hint( const_iterator, Args&&... args ) { + // Ignore hint + return emplace(std::forward(args)...).first; + } + + iterator unsafe_erase( iterator pos ) { + std::pair extract_result = internal_extract(pos); + if (extract_result.first) { // node was extracted + delete_value_node(extract_result.first); + return extract_result.second; + } + return end(); + } + + iterator unsafe_erase( const_iterator pos ) { + return unsafe_erase(get_iterator(pos)); + } + + iterator unsafe_erase( const_iterator first, const_iterator last ) { + while (first != last) { + // Unsafe erase returns the iterator which follows the erased one + first = unsafe_erase(first); + } + return get_iterator(first); + } + + size_type unsafe_erase( const key_type& key ) { + return internal_erase(key); + } + + template + typename std::enable_if::value + && !std::is_convertible::value + && !std::is_convertible::value, + size_type>::type unsafe_erase( const K& key ) + { + return internal_erase(key); + } + + node_type unsafe_extract( const_iterator pos ) { + std::pair extract_result = internal_extract(pos); + return extract_result.first ? 
d1::node_handle_accessor::construct(extract_result.first) : node_type(); + } + + node_type unsafe_extract( iterator pos ) { + return unsafe_extract(const_iterator(pos)); + } + + node_type unsafe_extract( const key_type& key ) { + return unsafe_extract(find(key)); + } + + template + typename std::enable_if::value + && !std::is_convertible::value + && !std::is_convertible::value, + node_type>::type unsafe_extract( const K& key ) + { + return unsafe_extract(find(key)); + } + + iterator lower_bound( const key_type& key ) { + return iterator(internal_get_bound(key, my_compare)); + } + + const_iterator lower_bound( const key_type& key ) const { + return const_iterator(internal_get_bound(key, my_compare)); + } + + template + typename std::enable_if::value, iterator>::type lower_bound( const K& key ) { + return iterator(internal_get_bound(key, my_compare)); + } + + template + typename std::enable_if::value, const_iterator>::type lower_bound( const K& key ) const { + return const_iterator(internal_get_bound(key, my_compare)); + } + + iterator upper_bound( const key_type& key ) { + return iterator(internal_get_bound(key, not_greater_compare(my_compare))); + } + + const_iterator upper_bound( const key_type& key ) const { + return const_iterator(internal_get_bound(key, not_greater_compare(my_compare))); + } + + template + typename std::enable_if::value, iterator>::type upper_bound( const K& key ) { + return iterator(internal_get_bound(key, not_greater_compare(my_compare))); + } + + template + typename std::enable_if::value, const_iterator>::type upper_bound( const K& key ) const { + return const_iterator(internal_get_bound(key, not_greater_compare(my_compare))); + } + + iterator find( const key_type& key ) { + return iterator(internal_find(key)); + } + + const_iterator find( const key_type& key ) const { + return const_iterator(internal_find(key)); + } + + template + typename std::enable_if::value, iterator>::type find( const K& key ) { + return iterator(internal_find(key)); + } + + template + typename std::enable_if::value, const_iterator>::type find( const K& key ) const { + return const_iterator(internal_find(key)); + } + + size_type count( const key_type& key ) const { + return internal_count(key); + } + + template + typename std::enable_if::value, size_type>::type count( const K& key ) const { + return internal_count(key); + } + + bool contains( const key_type& key ) const { + return find(key) != end(); + } + + template + typename std::enable_if::value, bool>::type contains( const K& key ) const { + return find(key) != end(); + } + + void clear() noexcept { + // clear is not thread safe - load can be relaxed + node_ptr head = my_head_ptr.load(std::memory_order_relaxed); + + if (head == nullptr) return; // Head is not allocated => container is empty + + node_ptr current = head->next(0); + + // Delete all value nodes in the container + while (current) { + node_ptr next = current->next(0); + delete_value_node(current); + current = next; + } + + for (size_type level = 0; level < head->height(); ++level) { + head->set_next(level, nullptr); + } + + my_size.store(0, std::memory_order_relaxed); + my_max_height.store(0, std::memory_order_relaxed); + } + + iterator begin() { + return iterator(internal_begin()); + } + + const_iterator begin() const { + return const_iterator(internal_begin()); + } + + const_iterator cbegin() const { + return const_iterator(internal_begin()); + } + + iterator end() { + return iterator(nullptr); + } + + const_iterator end() const { + return const_iterator(nullptr); + } + + 
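+    // NOTE: illustrative usage sketch, not part of the upstream oneTBB sources.
+    // Lookup (find/count/contains) may run concurrently with insertion; only the
+    // unsafe_* operations require external synchronization, as their names
+    // indicate. Via the public tbb::concurrent_map wrapper built on this class
+    // template:
+    //
+    //     tbb::concurrent_map<int, std::string> m;
+    //     m.emplace(1, "one");              // thread A
+    //     auto it = m.find(1);              // thread B, concurrently
+    //     if (it != m.end()) { /* safe to read *it */ }
+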
const_iterator cend() const { + return const_iterator(nullptr); + } + + size_type size() const { + return my_size.load(std::memory_order_relaxed); + } + + size_type max_size() const { + return node_allocator_traits::max_size(my_node_allocator); + } + + __TBB_nodiscard bool empty() const { + return 0 == size(); + } + + allocator_type get_allocator() const { + return my_node_allocator; + } + + void swap(concurrent_skip_list& other) { + if (this != &other) { + using pocs_type = typename node_allocator_traits::propagate_on_container_swap; + using is_always_equal = typename node_allocator_traits::is_always_equal; + internal_swap(other, tbb::detail::disjunction()); + } + } + + std::pair equal_range(const key_type& key) { + return internal_equal_range(key); + } + + std::pair equal_range(const key_type& key) const { + return internal_equal_range(key); + } + + template + typename std::enable_if::value, std::pair>::type equal_range( const K& key ) { + return internal_equal_range(key); + } + + template + typename std::enable_if::value, std::pair>::type equal_range( const K& key ) const { + return internal_equal_range(key); + } + + key_compare key_comp() const { return my_compare; } + + value_compare value_comp() const { return container_traits::value_comp(my_compare); } + + class const_range_type { + public: + using size_type = typename concurrent_skip_list::size_type; + using difference_type = typename concurrent_skip_list::difference_type; + using iterator = typename concurrent_skip_list::const_iterator; + using value_type = typename iterator::value_type; + using reference = typename iterator::reference; + + bool empty() const { + return my_begin.my_node_ptr ? (my_begin.my_node_ptr->next(0) == my_end.my_node_ptr) + : true; + } + + bool is_divisible() const { + return my_begin.my_node_ptr && my_level != 0 + ? my_begin.my_node_ptr->next(my_level - 1) != my_end.my_node_ptr + : false; + } + + size_type size() const { return std::distance(my_begin, my_end); } + + const_range_type( const_range_type& r, split) + : my_end(r.my_end) { + if (r.empty()) { + __TBB_ASSERT(my_end.my_node_ptr == nullptr, nullptr); + my_begin = my_end; + my_level = 0; + } else { + my_begin = iterator(r.my_begin.my_node_ptr->next(r.my_level - 1)); + my_level = my_begin.my_node_ptr->height(); + } + r.my_end = my_begin; + } + + const_range_type( const concurrent_skip_list& l) + : my_end(l.end()), my_begin(l.begin()), + my_level(my_begin.my_node_ptr ? 
my_begin.my_node_ptr->height() : 0) {} + + iterator begin() const { return my_begin; } + iterator end() const { return my_end; } + size_type grainsize() const { return 1; } + + private: + const_iterator my_end; + const_iterator my_begin; + size_type my_level; + }; // class const_range_type + + class range_type : public const_range_type { + public: + using iterator = typename concurrent_skip_list::iterator; + using value_type = typename iterator::value_type; + using reference = typename iterator::reference; + + range_type(range_type& r, split) : const_range_type(r, split()) {} + range_type(const concurrent_skip_list& l) : const_range_type(l) {} + + iterator begin() const { + node_ptr node = const_range_type::begin().my_node_ptr; + return iterator(node); + } + + iterator end() const { + node_ptr node = const_range_type::end().my_node_ptr; + return iterator(node); + } + }; // class range_type + + range_type range() { return range_type(*this); } + const_range_type range() const { return const_range_type(*this); } + +private: + node_ptr internal_begin() const { + node_ptr head = get_head(); + return head == nullptr ? head : head->next(0); + } + + void internal_move(concurrent_skip_list&& other) { + my_head_ptr.store(other.my_head_ptr.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_head_ptr.store(nullptr, std::memory_order_relaxed); + + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_size.store(0, std::memory_order_relaxed); + + my_max_height.store(other.my_max_height.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_max_height.store(0, std::memory_order_relaxed); + } + + void internal_move_construct_with_allocator(concurrent_skip_list&& other, + /*is_always_equal = */std::true_type) { + internal_move(std::move(other)); + } + + void internal_move_construct_with_allocator(concurrent_skip_list&& other, + /*is_always_equal = */std::false_type) { + if (my_node_allocator == other.get_allocator()) { + internal_move(std::move(other)); + } else { + my_size.store(0, std::memory_order_relaxed); + my_max_height.store(other.my_max_height.load(std::memory_order_relaxed), std::memory_order_relaxed); + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end())); + } + } + + static const key_type& get_key( node_ptr n ) { + __TBB_ASSERT(n, nullptr); + return container_traits::get_key(static_cast(n)->value()); + } + + template + bool found( node_ptr node, const K& key ) const { + return node != nullptr && !my_compare(key, get_key(node)); + } + + template + node_ptr internal_find(const K& key) const { + return allow_multimapping ? internal_find_multi(key) : internal_find_unique(key); + } + + template + node_ptr internal_find_multi( const K& key ) const { + node_ptr prev = get_head(); + if (prev == nullptr) return nullptr; // If the head node is not allocated - exit + + node_ptr curr = nullptr; + node_ptr old_curr = curr; + + for (size_type h = my_max_height.load(std::memory_order_acquire); h > 0; --h) { + curr = internal_find_position(h - 1, prev, key, my_compare); + + if (curr != old_curr && found(curr, key)) { + return curr; + } + old_curr = curr; + } + return nullptr; + } + + template + node_ptr internal_find_unique( const K& key ) const { + const_iterator it = lower_bound(key); + return (it == end() || my_compare(key, container_traits::get_key(*it))) ? 
nullptr : it.my_node_ptr; + } + + template + size_type internal_count( const K& key ) const { + if (allow_multimapping) { + // TODO: reimplement without double traversal + std::pair r = equal_range(key); + return std::distance(r.first, r.second); + } + return size_type(contains(key) ? 1 : 0); + } + + template + std::pair internal_equal_range(const K& key) const { + iterator lb = get_iterator(lower_bound(key)); + auto result = std::make_pair(lb, lb); + + // If the lower bound points to the node with the requested key + if (found(lb.my_node_ptr, key)) { + + if (!allow_multimapping) { + // For unique containers - move the second iterator forward and exit + ++result.second; + } else { + // For multi containers - find the upper bound starting from the lower bound + node_ptr prev = lb.my_node_ptr; + node_ptr curr = nullptr; + not_greater_compare cmp(my_compare); + + // Start from the lower bound of the range + for (size_type h = prev->height(); h > 0; --h) { + curr = prev->next(h - 1); + while (curr && cmp(get_key(curr), key)) { + prev = curr; + // If the height of the next node is greater than the current one - jump to its height + if (h < curr->height()) { + h = curr->height(); + } + curr = prev->next(h - 1); + } + } + result.second = iterator(curr); + } + } + + return result; + } + + // Finds position on the level using comparator cmp starting from the node prev + template + node_ptr internal_find_position( size_type level, node_ptr& prev, const K& key, + const Comparator& cmp ) const { + __TBB_ASSERT(level < prev->height(), "Wrong level to find position"); + node_ptr curr = prev->next(level); + + while (curr && cmp(get_key(curr), key)) { + prev = curr; + __TBB_ASSERT(level < prev->height(), nullptr); + curr = prev->next(level); + } + + return curr; + } + + // The same as previous overload, but allows index_number comparison + template + node_ptr internal_find_position( size_type level, node_ptr& prev, node_ptr node, + const Comparator& cmp ) const { + __TBB_ASSERT(level < prev->height(), "Wrong level to find position"); + node_ptr curr = prev->next(level); + + while (curr && cmp(get_key(curr), get_key(node))) { + if (allow_multimapping && cmp(get_key(node), get_key(curr)) && curr->index_number() > node->index_number()) { + break; + } + + prev = curr; + __TBB_ASSERT(level < prev->height(), nullptr); + curr = prev->next(level); + } + return curr; + } + + template + void fill_prev_curr_arrays(array_type& prev_nodes, array_type& curr_nodes, node_ptr node, const key_type& key, + const Comparator& cmp, node_ptr head ) { + + size_type curr_max_height = my_max_height.load(std::memory_order_acquire); + size_type node_height = node->height(); + if (curr_max_height < node_height) { + std::fill(prev_nodes.begin() + curr_max_height, prev_nodes.begin() + node_height, head); + std::fill(curr_nodes.begin() + curr_max_height, curr_nodes.begin() + node_height, nullptr); + } + + node_ptr prev = head; + for (size_type level = curr_max_height; level > 0; --level) { + node_ptr curr = internal_find_position(level - 1, prev, key, cmp); + prev_nodes[level - 1] = prev; + curr_nodes[level - 1] = curr; + } + } + + void fill_prev_array_for_existing_node( array_type& prev_nodes, node_ptr node ) { + node_ptr head = create_head_if_necessary(); + prev_nodes.fill(head); + + node_ptr prev = head; + for (size_type level = node->height(); level > 0; --level) { + while (prev->next(level - 1) != node) { + prev = prev->next(level - 1); + } + prev_nodes[level - 1] = prev; + } + } + + struct not_greater_compare { + const 
+    struct not_greater_compare {
+        const key_compare& my_less_compare;
+
+        not_greater_compare( const key_compare& less_compare ) : my_less_compare(less_compare) {}
+
+        template <typename K1, typename K2>
+        bool operator()( const K1& first, const K2& second ) const {
+            return !my_less_compare(second, first);
+        }
+    };
+
+    not_greater_compare select_comparator( /*allow_multimapping = */ std::true_type ) {
+        return not_greater_compare(my_compare);
+    }
+
+    key_compare select_comparator( /*allow_multimapping = */ std::false_type ) {
+        return my_compare;
+    }
+
+    template <typename... Args>
+    std::pair<iterator, bool> internal_insert( Args&&... args ) {
+        node_ptr new_node = create_value_node(std::forward<Args>(args)...);
+        std::pair<iterator, bool> insert_result = internal_insert_node(new_node);
+        if (!insert_result.second) {
+            delete_value_node(new_node);
+        }
+        return insert_result;
+    }
+
+    std::pair<iterator, bool> internal_insert_node( node_ptr new_node ) {
+        array_type prev_nodes;
+        array_type curr_nodes;
+        size_type new_height = new_node->height();
+        auto compare = select_comparator(std::integral_constant<bool, allow_multimapping>{});
+
+        node_ptr head_node = create_head_if_necessary();
+
+        for (;;) {
+            fill_prev_curr_arrays(prev_nodes, curr_nodes, new_node, get_key(new_node), compare, head_node);
+
+            node_ptr prev = prev_nodes[0];
+            node_ptr next = curr_nodes[0];
+
+            if (allow_multimapping) {
+                new_node->set_index_number(prev->index_number() + 1);
+            } else {
+                if (found(next, get_key(new_node))) {
+                    return std::pair<iterator, bool>(iterator(next), false);
+                }
+            }
+
+            new_node->set_next(0, next);
+            if (!prev->atomic_next(0).compare_exchange_strong(next, new_node)) {
+                continue;
+            }
+
+            // If the node was successfully linked on the first level, it will be linked
+            // on the other levels as well - the insertion cannot fail from this point on
+
+            // If the height of the inserted node is greater than the current maximum - increase the maximum
+            size_type max_height = my_max_height.load(std::memory_order_acquire);
+            for (;;) {
+                if (new_height <= max_height || my_max_height.compare_exchange_strong(max_height, new_height)) {
+                    // The maximum was successfully updated either by the current thread
+                    // or by another thread, with a value greater than or equal to new_height
+                    break;
+                }
+            }
+
+            for (std::size_t level = 1; level < new_height; ++level) {
+                // Link the node on the upper levels
+                for (;;) {
+                    prev = prev_nodes[level];
+                    next = static_cast<node_ptr>(curr_nodes[level]);
+
+                    new_node->set_next(level, next);
+                    __TBB_ASSERT(new_node->height() > level, "Internal structure break");
+                    if (prev->atomic_next(level).compare_exchange_strong(next, new_node)) {
+                        break;
+                    }
+
+                    for (size_type lev = level; lev != new_height; ++lev) {
+                        curr_nodes[lev] = internal_find_position(lev, prev_nodes[lev], new_node, compare);
+                    }
+                }
+            }
+            ++my_size;
+            return std::pair<iterator, bool>(iterator(new_node), true);
+        }
+    }
+
+    template <typename K, typename Comparator>
+    node_ptr internal_get_bound( const K& key, const Comparator& cmp ) const {
+        node_ptr prev = get_head();
+        if (prev == nullptr) return nullptr; // If the head node is not allocated - exit
+
+        node_ptr curr = nullptr;
+
+        for (size_type h = my_max_height.load(std::memory_order_acquire); h > 0; --h) {
+            curr = internal_find_position(h - 1, prev, key, cmp);
+        }
+
+        return curr;
+    }
+
+    template <typename K>
+    size_type internal_erase( const K& key ) {
+        auto eq = equal_range(key);
+        size_type old_size = size();
+        unsafe_erase(eq.first, eq.second);
+        return old_size - size();
+    }
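+    // Editor's note: internal_insert_node() above is the classic lock-free
+    // "snapshot, prepare, compare_exchange, retry" pattern. The standalone
+    // sketch below shows the same idiom on a minimal singly linked list
+    // (illustrative only; Node and push_front are hypothetical, not TBB APIs):
+#if 0
+    #include <atomic>
+
+    struct Node { int value; Node* next; };
+    std::atomic<Node*> list_head{nullptr};
+
+    void push_front( int value ) {
+        Node* new_node = new Node{value, nullptr};
+        Node* expected = list_head.load(std::memory_order_acquire);
+        do {
+            // Point the new node at the current head snapshot; if another thread
+            // changed the head meanwhile, the CAS fails, refreshes `expected`,
+            // and the loop retries - exactly like the level-0 linking above.
+            new_node->next = expected;
+        } while (!list_head.compare_exchange_strong(expected, new_node));
+    }
+#endif
+
+    // Returns a node_ptr to the extracted node and a node_ptr to the next node after the extracted one
+    std::pair<node_ptr, node_ptr> internal_extract( const_iterator it ) {
+        std::pair<node_ptr, node_ptr> result(nullptr, nullptr);
+        if ( it != end() ) {
+            array_type prev_nodes;
+
+            node_ptr erase_node =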
it.my_node_ptr; + node_ptr next_node = erase_node->next(0); + fill_prev_array_for_existing_node(prev_nodes, erase_node); + + for (size_type level = 0; level < erase_node->height(); ++level) { + prev_nodes[level]->set_next(level, erase_node->next(level)); + erase_node->set_next(level, nullptr); + } + my_size.fetch_sub(1, std::memory_order_relaxed); + + result.first = erase_node; + result.second = next_node; + } + return result; + } + +protected: + template + void internal_merge( SourceType&& source ) { + using source_type = typename std::decay::type; + using source_iterator = typename source_type::iterator; + static_assert((std::is_same::value), "Incompatible containers cannot be merged"); + + for (source_iterator it = source.begin(); it != source.end();) { + source_iterator where = it++; + if (allow_multimapping || !contains(container_traits::get_key(*where))) { + node_type handle = source.unsafe_extract(where); + __TBB_ASSERT(!handle.empty(), "Extracted handle in merge is empty"); + + if (!insert(std::move(handle)).second) { + __TBB_ASSERT(!handle.empty(), "Handle should not be empty if insert fails"); + //If the insertion fails - return the node into source + source.insert(std::move(handle)); + } + __TBB_ASSERT(handle.empty(), "Node handle should be empty after the insertion"); + } + } + } + +private: + void internal_copy( const concurrent_skip_list& other ) { + internal_copy(other.begin(), other.end()); + } + + template + void internal_copy( Iterator first, Iterator last ) { + try_call([&] { + for (auto it = first; it != last; ++it) { + insert(*it); + } + }).on_exception([&] { + clear(); + delete_head(); + }); + } + + node_ptr create_node( size_type height ) { + return list_node_type::create(my_node_allocator, height); + } + + template + node_ptr create_value_node( Args&&... 
args ) {
+        node_ptr node = create_node(my_rng());
+
+        // try_call API is not convenient here due to broken
+        // variadic capture on GCC 4.8.5
+        auto value_guard = make_raii_guard([&] {
+            delete_node(node);
+        });
+
+        // Construct the value inside the node
+        node_allocator_traits::construct(my_node_allocator, node->storage(), std::forward<Args>(args)...);
+        value_guard.dismiss();
+        return node;
+    }
+
+    node_ptr create_head_node() {
+        return create_node(max_level);
+    }
+
+    void delete_head() {
+        node_ptr head = my_head_ptr.load(std::memory_order_relaxed);
+        if (head != nullptr) {
+            delete_node(head);
+            my_head_ptr.store(nullptr, std::memory_order_relaxed);
+        }
+    }
+
+    void delete_node( node_ptr node ) {
+        list_node_type::destroy(my_node_allocator, node);
+    }
+
+    void delete_value_node( node_ptr node ) {
+        // Destroy the value inside the node
+        node_allocator_traits::destroy(my_node_allocator, node->storage());
+        delete_node(node);
+    }
+
+    node_ptr get_head() const {
+        return my_head_ptr.load(std::memory_order_acquire);
+    }
+
+    node_ptr create_head_if_necessary() {
+        node_ptr current_head = get_head();
+        if (current_head == nullptr) {
+            // The head node was not created yet - create it
+            node_ptr new_head = create_head_node();
+            if (my_head_ptr.compare_exchange_strong(current_head, new_head)) {
+                current_head = new_head;
+            } else {
+                // If another thread has already created the head node - destroy new_head;
+                // current_head now points to the actual head node
+                delete_node(new_head);
+            }
+        }
+        __TBB_ASSERT(my_head_ptr.load(std::memory_order_relaxed) != nullptr, nullptr);
+        __TBB_ASSERT(current_head != nullptr, nullptr);
+        return current_head;
+    }
+
+    static iterator get_iterator( const_iterator it ) {
+        return iterator(it.my_node_ptr);
+    }
+
+    void internal_move_assign( concurrent_skip_list&& other, /*POCMA || is_always_equal =*/std::true_type ) {
+        internal_move(std::move(other));
+    }
+
+    void internal_move_assign( concurrent_skip_list&& other, /*POCMA || is_always_equal =*/std::false_type ) {
+        if (my_node_allocator == other.my_node_allocator) {
+            internal_move(std::move(other));
+        } else {
+            internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()));
+        }
+    }
+
+    void internal_swap_fields( concurrent_skip_list& other ) {
+        using std::swap;
+        swap_allocators(my_node_allocator, other.my_node_allocator);
+        swap(my_compare, other.my_compare);
+        swap(my_rng, other.my_rng);
+
+        swap_atomics_relaxed(my_head_ptr, other.my_head_ptr);
+        swap_atomics_relaxed(my_size, other.my_size);
+        swap_atomics_relaxed(my_max_height, other.my_max_height);
+    }
+
+    void internal_swap( concurrent_skip_list& other, /*POCS || is_always_equal =*/std::true_type ) {
+        internal_swap_fields(other);
+    }
+
+    void internal_swap( concurrent_skip_list& other, /*POCS || is_always_equal =*/std::false_type ) {
+        __TBB_ASSERT(my_node_allocator == other.my_node_allocator, "Swapping with unequal allocators is not allowed");
+        internal_swap_fields(other);
+    }
+
+    node_allocator_type my_node_allocator;
+    key_compare my_compare;
+    random_level_generator_type my_rng;
+    std::atomic<node_ptr> my_head_ptr;
+    std::atomic<size_type> my_size;
+    std::atomic<size_type> my_max_height;
+
+    template <typename OtherTraits>
+    friend class concurrent_skip_list;
+}; // class concurrent_skip_list
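+// Editor's note: oneTBB exposes this skip list through tbb::concurrent_set,
+// tbb::concurrent_multiset, tbb::concurrent_map and tbb::concurrent_multimap.
+// A small usage sketch (illustrative only, assuming a linked TBB installation):
+#if 0
+#include <thread>
+#include <iostream>
+#include <oneapi/tbb/concurrent_set.h>
+
+int main() {
+    tbb::concurrent_set<int> s;
+    // insert() is safe to call concurrently from several threads.
+    std::thread t1([&] { for (int i = 0;  i < 100; ++i) s.insert(i); });
+    std::thread t2([&] { for (int i = 50; i < 150; ++i) s.insert(i); });
+    t1.join();
+    t2.join();
+    // Duplicates are rejected and iteration observes the keys in sorted order.
+    std::cout << s.size() << '\n'; // prints 150
+}
+#endif
+
+template <typename Traits>
+bool operator==( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
+    if (lhs.size() != rhs.size()) return false;
+#if _MSC_VER
+    // Passing "unchecked" iterators to std::equal with 3 parameters
+    // causes compiler warnings.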
+ // The workaround is to use overload with 4 parameters, which is + // available since C++14 - minimally supported version on MSVC + return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); +#else + return std::equal(lhs.begin(), lhs.end(), rhs.begin()); +#endif +} + +#if !__TBB_CPP20_COMPARISONS_PRESENT +template +bool operator!=( const concurrent_skip_list& lhs, const concurrent_skip_list& rhs ) { + return !(lhs == rhs); +} +#endif + +#if __TBB_CPP20_COMPARISONS_PRESENT && __TBB_CPP20_CONCEPTS_PRESENT +template +tbb::detail::synthesized_three_way_result +operator<=>( const concurrent_skip_list& lhs, const concurrent_skip_list& rhs ) { + return std::lexicographical_compare_three_way(lhs.begin(), lhs.end(), + rhs.begin(), rhs.end(), + tbb::detail::synthesized_three_way_comparator{}); +} +#else +template +bool operator<( const concurrent_skip_list& lhs, const concurrent_skip_list& rhs ) { + return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); +} + +template +bool operator>( const concurrent_skip_list& lhs, const concurrent_skip_list& rhs ) { + return rhs < lhs; +} + +template +bool operator<=( const concurrent_skip_list& lhs, const concurrent_skip_list& rhs ) { + return !(rhs < lhs); +} + +template +bool operator>=( const concurrent_skip_list& lhs, const concurrent_skip_list& rhs ) { + return !(lhs < rhs); +} +#endif // __TBB_CPP20_COMPARISONS_PRESENT && __TBB_CPP20_CONCEPTS_PRESENT + +// Generates a number from the interval [0, MaxLevel). +template +class concurrent_geometric_level_generator { +public: + static constexpr std::size_t max_level = MaxLevel; + // TODO: modify the algorithm to accept other values of max_level + static_assert(max_level == 32, "Incompatible max_level for rng"); + + concurrent_geometric_level_generator() : engines(std::minstd_rand::result_type(time(nullptr))) {} + + std::size_t operator()() { + // +1 is required to pass at least 1 into log2 (log2(0) is undefined) + // -1 is required to have an ability to return 0 from the generator (max_level - log2(2^31) - 1) + std::size_t result = max_level - std::size_t(tbb::detail::log2(engines.local()() + 1)) - 1; + __TBB_ASSERT(result <= max_level, nullptr); + return result; + } + +private: + tbb::enumerable_thread_specific engines; +}; + +} // namespace d2 + +} // namespace detail +} // namespace tbb + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +// #pragma warning(pop) // warning 4127 is back +#endif + +#endif // __TBB_detail__concurrent_skip_list_H diff --git a/src/tbb/include/oneapi/tbb/detail/_concurrent_unordered_base.h b/src/tbb/include/oneapi/tbb/detail/_concurrent_unordered_base.h new file mode 100644 index 000000000..5e34d997c --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_concurrent_unordered_base.h @@ -0,0 +1,1515 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_detail__concurrent_unordered_base_H +#define __TBB_detail__concurrent_unordered_base_H + +#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include "_range_common.h" +#include "_containers_helpers.h" +#include "_segment_table.h" +#include "_hash_compare.h" +#include "_allocator_traits.h" +#include "_node_handle.h" +#include "_assert.h" +#include "_utils.h" +#include "_exception.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +// #pragma warning(push) +// #pragma warning(disable: 4127) // warning C4127: conditional expression is constant +#endif + +namespace tbb { +namespace detail { +namespace d2 { + +template +class concurrent_unordered_base; + +template +class solist_iterator { +private: + using node_ptr = typename Container::value_node_ptr; + template + friend class split_ordered_list; + template + friend class solist_iterator; + template + friend class concurrent_unordered_base; + template + friend bool operator==( const solist_iterator& i, const solist_iterator& j ); + template + friend bool operator!=( const solist_iterator& i, const solist_iterator& j ); +public: + using value_type = Value; + using difference_type = typename Container::difference_type; + using pointer = value_type*; + using reference = value_type&; + using iterator_category = std::forward_iterator_tag; + + solist_iterator() : my_node_ptr(nullptr) {} + solist_iterator( const solist_iterator& other ) + : my_node_ptr(other.my_node_ptr) {} + + solist_iterator& operator=( const solist_iterator& other ) { + my_node_ptr = other.my_node_ptr; + return *this; + } + + reference operator*() const { + return my_node_ptr->value(); + } + + pointer operator->() const { + return my_node_ptr->storage(); + } + + solist_iterator& operator++() { + auto next_node = my_node_ptr->next(); + while(next_node && next_node->is_dummy()) { + next_node = next_node->next(); + } + my_node_ptr = static_cast(next_node); + return *this; + } + + solist_iterator operator++(int) { + solist_iterator tmp = *this; + ++*this; + return tmp; + } + +private: + solist_iterator( node_ptr pnode ) : my_node_ptr(pnode) {} + + node_ptr get_node_ptr() const { return my_node_ptr; } + + node_ptr my_node_ptr; +}; + +template +bool operator==( const solist_iterator& i, const solist_iterator& j ) { + return i.my_node_ptr == j.my_node_ptr; +} + +template +bool operator!=( const solist_iterator& i, const solist_iterator& j ) { + return i.my_node_ptr != j.my_node_ptr; +} + +template +class list_node { +public: + using node_ptr = list_node*; + using sokey_type = SokeyType; + + list_node(sokey_type key) : my_next(nullptr), my_order_key(key) {} + + void init( sokey_type key ) { + my_order_key = key; + } + + sokey_type order_key() const { + return my_order_key; + } + + bool is_dummy() { + // The last bit of order key is unset for dummy nodes + return (my_order_key & 0x1) == 0; + } + + node_ptr next() const { + return my_next.load(std::memory_order_acquire); + } + + void set_next( node_ptr next_node ) { + my_next.store(next_node, std::memory_order_release); + } + + bool try_set_next( node_ptr expected_next, node_ptr new_next ) { + return my_next.compare_exchange_strong(expected_next, new_next); + } + +private: + std::atomic my_next; + sokey_type my_order_key; +}; // class list_node + +template +class value_node : public list_node +{ 
+public: + using base_type = list_node; + using sokey_type = typename base_type::sokey_type; + using value_type = ValueType; + + value_node( sokey_type ord_key ) : base_type(ord_key) {} + ~value_node() {} + value_type* storage() { + return &my_value; + } + + value_type& value() { + return *storage(); + } + +private: + union { + value_type my_value; + }; +}; // class value_node + +template +class concurrent_unordered_base { + using self_type = concurrent_unordered_base; + using traits_type = Traits; + using hash_compare_type = typename traits_type::hash_compare_type; + class unordered_segment_table; +public: + using value_type = typename traits_type::value_type; + using key_type = typename traits_type::key_type; + using allocator_type = typename traits_type::allocator_type; + +private: + using allocator_traits_type = tbb::detail::allocator_traits; + // TODO: check assert conditions for different C++ standards + static_assert(std::is_same::value, + "value_type of the container must be the same as its allocator"); + using sokey_type = std::size_t; + +public: + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + using iterator = solist_iterator; + using const_iterator = solist_iterator; + using local_iterator = iterator; + using const_local_iterator = const_iterator; + + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + using hasher = typename hash_compare_type::hasher; + using key_equal = typename hash_compare_type::key_equal; + +private: + using list_node_type = list_node; + using value_node_type = value_node; + using node_ptr = list_node_type*; + using value_node_ptr = value_node_type*; + + using value_node_allocator_type = typename allocator_traits_type::template rebind_alloc; + using node_allocator_type = typename allocator_traits_type::template rebind_alloc; + + using node_allocator_traits = tbb::detail::allocator_traits; + using value_node_allocator_traits = tbb::detail::allocator_traits; + + static constexpr size_type round_up_to_power_of_two( size_type bucket_count ) { + return size_type(1) << size_type(tbb::detail::log2(uintptr_t(bucket_count == 0 ? 
1 : bucket_count) * 2 - 1)); + } + + template + using is_transparent = dependent_bool, T>; +public: + using node_type = d1::node_handle; + + explicit concurrent_unordered_base( size_type bucket_count, const hasher& hash = hasher(), + const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() ) + : my_size(0), + my_bucket_count(round_up_to_power_of_two(bucket_count)), + my_max_load_factor(float(initial_max_load_factor)), + my_hash_compare(hash, equal), + my_head(sokey_type(0)), + my_segments(alloc) {} + + concurrent_unordered_base() : concurrent_unordered_base(initial_bucket_count) {} + + concurrent_unordered_base( size_type bucket_count, const allocator_type& alloc ) + : concurrent_unordered_base(bucket_count, hasher(), key_equal(), alloc) {} + + concurrent_unordered_base( size_type bucket_count, const hasher& hash, const allocator_type& alloc ) + : concurrent_unordered_base(bucket_count, hash, key_equal(), alloc) {} + + explicit concurrent_unordered_base( const allocator_type& alloc ) + : concurrent_unordered_base(initial_bucket_count, hasher(), key_equal(), alloc) {} + + template + concurrent_unordered_base( InputIterator first, InputIterator last, + size_type bucket_count = initial_bucket_count, const hasher& hash = hasher(), + const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() ) + : concurrent_unordered_base(bucket_count, hash, equal, alloc) + { + insert(first, last); + } + + template + concurrent_unordered_base( InputIterator first, InputIterator last, + size_type bucket_count, const allocator_type& alloc ) + : concurrent_unordered_base(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + concurrent_unordered_base( InputIterator first, InputIterator last, + size_type bucket_count, const hasher& hash, const allocator_type& alloc ) + : concurrent_unordered_base(first, last, bucket_count, hash, key_equal(), alloc) {} + + concurrent_unordered_base( const concurrent_unordered_base& other ) + : my_size(other.my_size.load(std::memory_order_relaxed)), + my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)), + my_max_load_factor(other.my_max_load_factor), + my_hash_compare(other.my_hash_compare), + my_head(other.my_head.order_key()), + my_segments(other.my_segments) + { + try_call( [&] { + internal_copy(other); + } ).on_exception( [&] { + clear(); + }); + } + + concurrent_unordered_base( const concurrent_unordered_base& other, const allocator_type& alloc ) + : my_size(other.my_size.load(std::memory_order_relaxed)), + my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)), + my_max_load_factor(other.my_max_load_factor), + my_hash_compare(other.my_hash_compare), + my_head(other.my_head.order_key()), + my_segments(other.my_segments, alloc) + { + try_call( [&] { + internal_copy(other); + } ).on_exception( [&] { + clear(); + }); + } + + concurrent_unordered_base( concurrent_unordered_base&& other ) + : my_size(other.my_size.load(std::memory_order_relaxed)), + my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)), + my_max_load_factor(std::move(other.my_max_load_factor)), + my_hash_compare(std::move(other.my_hash_compare)), + my_head(other.my_head.order_key()), + my_segments(std::move(other.my_segments)) + { + move_content(std::move(other)); + } + + concurrent_unordered_base( concurrent_unordered_base&& other, const allocator_type& alloc ) + : my_size(other.my_size.load(std::memory_order_relaxed)), + 
my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)), + my_max_load_factor(std::move(other.my_max_load_factor)), + my_hash_compare(std::move(other.my_hash_compare)), + my_head(other.my_head.order_key()), + my_segments(std::move(other.my_segments), alloc) + { + using is_always_equal = typename allocator_traits_type::is_always_equal; + internal_move_construct_with_allocator(std::move(other), alloc, is_always_equal()); + } + + concurrent_unordered_base( std::initializer_list init, + size_type bucket_count = initial_bucket_count, + const hasher& hash = hasher(), const key_equal& equal = key_equal(), + const allocator_type& alloc = allocator_type() ) + : concurrent_unordered_base(init.begin(), init.end(), bucket_count, hash, equal, alloc) {} + + concurrent_unordered_base( std::initializer_list init, + size_type bucket_count, const allocator_type& alloc ) + : concurrent_unordered_base(init, bucket_count, hasher(), key_equal(), alloc) {} + + concurrent_unordered_base( std::initializer_list init, + size_type bucket_count, const hasher& hash, const allocator_type& alloc ) + : concurrent_unordered_base(init, bucket_count, hash, key_equal(), alloc) {} + + ~concurrent_unordered_base() { + internal_clear(); + } + + concurrent_unordered_base& operator=( const concurrent_unordered_base& other ) { + if (this != &other) { + clear(); + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed); + my_max_load_factor = other.my_max_load_factor; + my_hash_compare = other.my_hash_compare; + my_segments = other.my_segments; + internal_copy(other); // TODO: guards for exceptions? + } + return *this; + } + + concurrent_unordered_base& operator=( concurrent_unordered_base&& other ) noexcept(unordered_segment_table::is_noexcept_assignment) { + if (this != &other) { + clear(); + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed); + my_max_load_factor = std::move(other.my_max_load_factor); + my_hash_compare = std::move(other.my_hash_compare); + my_segments = std::move(other.my_segments); + + using pocma_type = typename allocator_traits_type::propagate_on_container_move_assignment; + using is_always_equal = typename allocator_traits_type::is_always_equal; + internal_move_assign(std::move(other), tbb::detail::disjunction()); + } + return *this; + } + + concurrent_unordered_base& operator=( std::initializer_list init ) { + clear(); + insert(init); + return *this; + } + + void swap( concurrent_unordered_base& other ) noexcept(unordered_segment_table::is_noexcept_swap) { + if (this != &other) { + using pocs_type = typename allocator_traits_type::propagate_on_container_swap; + using is_always_equal = typename allocator_traits_type::is_always_equal; + internal_swap(other, tbb::detail::disjunction()); + } + } + + allocator_type get_allocator() const noexcept { return my_segments.get_allocator(); } + + iterator begin() noexcept { return iterator(first_value_node(&my_head)); } + const_iterator begin() const noexcept { return const_iterator(first_value_node(const_cast(&my_head))); } + const_iterator cbegin() const noexcept { return const_iterator(first_value_node(const_cast(&my_head))); } + + iterator end() noexcept { return iterator(nullptr); } + const_iterator end() const noexcept { return const_iterator(nullptr); } + const_iterator 
cend() const noexcept { return const_iterator(nullptr); } + + __TBB_nodiscard bool empty() const noexcept { return size() == 0; } + size_type size() const noexcept { return my_size.load(std::memory_order_relaxed); } + size_type max_size() const noexcept { return allocator_traits_type::max_size(get_allocator()); } + + void clear() noexcept { + internal_clear(); + } + + std::pair insert( const value_type& value ) { + return internal_insert_value(value); + } + + std::pair insert( value_type&& value ) { + return internal_insert_value(std::move(value)); + } + + iterator insert( const_iterator, const value_type& value ) { + // Ignore hint + return insert(value).first; + } + + iterator insert( const_iterator, value_type&& value ) { + // Ignore hint + return insert(std::move(value)).first; + } + + template + void insert( InputIterator first, InputIterator last ) { + for (; first != last; ++first) { + insert(*first); + } + } + + void insert( std::initializer_list init ) { + insert(init.begin(), init.end()); + } + + std::pair insert( node_type&& nh ) { + if (!nh.empty()) { + value_node_ptr insert_node = d1::node_handle_accessor::get_node_ptr(nh); + auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr { + insert_node->init(order_key); + return insert_node; + }; + auto insert_result = internal_insert(insert_node->value(), init_node); + if (insert_result.inserted) { + // If the insertion succeeded - set node handle to the empty state + __TBB_ASSERT(insert_result.remaining_node == nullptr, + "internal_insert_node should not return the remaining node if the insertion succeeded"); + d1::node_handle_accessor::deactivate(nh); + } + return { iterator(insert_result.node_with_equal_key), insert_result.inserted }; + } + return {end(), false}; + } + + iterator insert( const_iterator, node_type&& nh ) { + // Ignore hint + return insert(std::move(nh)).first; + } + + template + std::pair emplace( Args&&... args ) { + // Create a node with temporary order_key 0, which will be reinitialize + // in internal_insert after the hash calculation + value_node_ptr insert_node = create_node(0, std::forward(args)...); + + auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr { + insert_node->init(order_key); + return insert_node; + }; + + auto insert_result = internal_insert(insert_node->value(), init_node); + + if (!insert_result.inserted) { + // If the insertion failed - destroy the node which was created + insert_node->init(split_order_key_regular(1)); + destroy_node(insert_node); + } + + return { iterator(insert_result.node_with_equal_key), insert_result.inserted }; + } + + template + iterator emplace_hint( const_iterator, Args&&... 
args ) { + // Ignore hint + return emplace(std::forward(args)...).first; + } + + iterator unsafe_erase( const_iterator pos ) { + return iterator(first_value_node(internal_erase(pos.get_node_ptr()))); + } + + iterator unsafe_erase( iterator pos ) { + return iterator(first_value_node(internal_erase(pos.get_node_ptr()))); + } + + iterator unsafe_erase( const_iterator first, const_iterator last ) { + while(first != last) { + first = unsafe_erase(first); + } + return iterator(first.get_node_ptr()); + } + + size_type unsafe_erase( const key_type& key ) { + return internal_erase_by_key(key); + } + + template + typename std::enable_if::value + && !std::is_convertible::value + && !std::is_convertible::value, + size_type>::type unsafe_erase( const K& key ) + { + return internal_erase_by_key(key); + } + + node_type unsafe_extract( const_iterator pos ) { + internal_extract(pos.get_node_ptr()); + return d1::node_handle_accessor::construct(pos.get_node_ptr()); + } + + node_type unsafe_extract( iterator pos ) { + internal_extract(pos.get_node_ptr()); + return d1::node_handle_accessor::construct(pos.get_node_ptr()); + } + + node_type unsafe_extract( const key_type& key ) { + iterator item = find(key); + return item == end() ? node_type() : unsafe_extract(item); + } + + template + typename std::enable_if::value + && !std::is_convertible::value + && !std::is_convertible::value, + node_type>::type unsafe_extract( const K& key ) + { + iterator item = find(key); + return item == end() ? node_type() : unsafe_extract(item); + } + + // Lookup functions + iterator find( const key_type& key ) { + value_node_ptr result = internal_find(key); + return result == nullptr ? end() : iterator(result); + } + + const_iterator find( const key_type& key ) const { + value_node_ptr result = const_cast(this)->internal_find(key); + return result == nullptr ? end() : const_iterator(result); + } + + template + typename std::enable_if::value, iterator>::type find( const K& key ) { + value_node_ptr result = internal_find(key); + return result == nullptr ? end() : iterator(result); + } + + template + typename std::enable_if::value, const_iterator>::type find( const K& key ) const { + value_node_ptr result = const_cast(this)->internal_find(key); + return result == nullptr ? 
end() : const_iterator(result); + } + + std::pair equal_range( const key_type& key ) { + auto result = internal_equal_range(key); + return std::make_pair(iterator(result.first), iterator(result.second)); + } + + std::pair equal_range( const key_type& key ) const { + auto result = const_cast(this)->internal_equal_range(key); + return std::make_pair(const_iterator(result.first), const_iterator(result.second)); + } + + template + typename std::enable_if::value, std::pair>::type equal_range( const K& key ) { + auto result = internal_equal_range(key); + return std::make_pair(iterator(result.first), iterator(result.second)); + } + + template + typename std::enable_if::value, std::pair>::type equal_range( const K& key ) const { + auto result = const_cast(this)->internal_equal_range(key); + return std::make_pair(iterator(result.first), iterator(result.second)); + } + + size_type count( const key_type& key ) const { + return internal_count(key); + } + + template + typename std::enable_if::value, size_type>::type count( const K& key ) const { + return internal_count(key); + } + + bool contains( const key_type& key ) const { + return find(key) != end(); + } + + template + typename std::enable_if::value, bool>::type contains( const K& key ) const { + return find(key) != end(); + } + + // Bucket interface + local_iterator unsafe_begin( size_type n ) { + return local_iterator(first_value_node(get_bucket(n))); + } + + const_local_iterator unsafe_begin( size_type n ) const { + auto bucket_begin = first_value_node(const_cast(this)->get_bucket(n)); + return const_local_iterator(bucket_begin); + } + + const_local_iterator unsafe_cbegin( size_type n ) const { + auto bucket_begin = first_value_node(const_cast(this)->get_bucket(n)); + return const_local_iterator(bucket_begin); + } + + local_iterator unsafe_end( size_type n ) { + size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed); + return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : local_iterator(nullptr); + } + + const_local_iterator unsafe_end( size_type n ) const { + size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed); + return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr); + } + + const_local_iterator unsafe_cend( size_type n ) const { + size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed); + return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr); + } + + size_type unsafe_bucket_count() const { return my_bucket_count.load(std::memory_order_relaxed); } + + size_type unsafe_max_bucket_count() const { + return max_size(); + } + + size_type unsafe_bucket_size( size_type n ) const { + return size_type(std::distance(unsafe_begin(n), unsafe_end(n))); + } + + size_type unsafe_bucket( const key_type& key ) const { + return my_hash_compare(key) % my_bucket_count.load(std::memory_order_relaxed); + } + + // Hash policy + float load_factor() const { + return float(size() / float(my_bucket_count.load(std::memory_order_acquire))); + } + + float max_load_factor() const { return my_max_load_factor; } + + void max_load_factor( float mlf ) { + if (mlf != mlf || mlf < 0) { + tbb::detail::throw_exception(exception_id::invalid_load_factor); + } + my_max_load_factor = mlf; + } // TODO: unsafe? + + void rehash( size_type bucket_count ) { + size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire); + if (current_bucket_count < bucket_count) { + // TODO: do we need do-while here? 
+ my_bucket_count.compare_exchange_strong(current_bucket_count, round_up_to_power_of_two(bucket_count)); + } + } + + void reserve( size_type elements_count ) { + size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire); + size_type necessary_bucket_count = current_bucket_count; + + // max_load_factor() is currently unsafe, so we can assume that my_max_load_factor + // would not be changed during the calculation + // TODO: Log2 seems useful here + while (necessary_bucket_count * max_load_factor() < elements_count) { + necessary_bucket_count <<= 1; + } + + while (!my_bucket_count.compare_exchange_strong(current_bucket_count, necessary_bucket_count)) { + if (current_bucket_count >= necessary_bucket_count) + break; + } + } + + // Observers + hasher hash_function() const { return my_hash_compare.hash_function(); } + key_equal key_eq() const { return my_hash_compare.key_eq(); } + + class const_range_type { + private: + const concurrent_unordered_base& my_instance; + node_ptr my_begin_node; // may be node* const + node_ptr my_end_node; + mutable node_ptr my_midpoint_node; + public: + using size_type = typename concurrent_unordered_base::size_type; + using value_type = typename concurrent_unordered_base::value_type; + using reference = typename concurrent_unordered_base::reference; + using difference_type = typename concurrent_unordered_base::difference_type; + using iterator = typename concurrent_unordered_base::const_iterator; + + bool empty() const { return my_begin_node == my_end_node; } + + bool is_divisible() const { + return my_midpoint_node != my_end_node; + } + + size_type grainsize() const { return 1; } + + const_range_type( const_range_type& range, split ) + : my_instance(range.my_instance), + my_begin_node(range.my_midpoint_node), + my_end_node(range.my_end_node) + { + range.my_end_node = my_begin_node; + __TBB_ASSERT(!empty(), "Splitting despite the range is not divisible"); + __TBB_ASSERT(!range.empty(), "Splitting despite the range is not divisible"); + set_midpoint(); + range.set_midpoint(); + } + + iterator begin() const { return iterator(my_instance.first_value_node(my_begin_node)); } + iterator end() const { return iterator(my_instance.first_value_node(my_end_node)); } + + const_range_type( const concurrent_unordered_base& table ) + : my_instance(table), my_begin_node(my_instance.first_value_node(const_cast(&table.my_head))), my_end_node(nullptr) + { + set_midpoint(); + } + private: + void set_midpoint() const { + if (empty()) { + my_midpoint_node = my_end_node; + } else { + sokey_type invalid_key = ~sokey_type(0); + sokey_type begin_key = my_begin_node != nullptr ? my_begin_node->order_key() : invalid_key; + sokey_type end_key = my_end_node != nullptr ? 
my_end_node->order_key() : invalid_key; + + size_type mid_bucket = reverse_bits(begin_key + (end_key - begin_key) / 2) % + my_instance.my_bucket_count.load(std::memory_order_relaxed); + while( my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed) == nullptr) { + mid_bucket = my_instance.get_parent(mid_bucket); + } + if (reverse_bits(mid_bucket) > begin_key) { + // Found a dummy node between begin and end + my_midpoint_node = my_instance.first_value_node( + my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed)); + } else { + // Didn't find a dummy node between begin and end + my_midpoint_node = my_end_node; + } + } + } + }; // class const_range_type + + class range_type : public const_range_type { + public: + using iterator = typename concurrent_unordered_base::iterator; + using const_range_type::const_range_type; + + iterator begin() const { return iterator(const_range_type::begin().get_node_ptr()); } + iterator end() const { return iterator(const_range_type::end().get_node_ptr()); } + }; // class range_type + + // Parallel iteration + range_type range() { + return range_type(*this); + } + + const_range_type range() const { + return const_range_type(*this); + } +protected: + static constexpr bool allow_multimapping = traits_type::allow_multimapping; + +private: + static constexpr size_type initial_bucket_count = 8; + static constexpr float initial_max_load_factor = 4; // TODO: consider 1? + static constexpr size_type pointers_per_embedded_table = sizeof(size_type) * 8 - 1; + + class unordered_segment_table + : public d1::segment_table, allocator_type, unordered_segment_table, pointers_per_embedded_table> + { + using self_type = unordered_segment_table; + using atomic_node_ptr = std::atomic; + using base_type = d1::segment_table, allocator_type, unordered_segment_table, pointers_per_embedded_table>; + using segment_type = typename base_type::segment_type; + using base_allocator_type = typename base_type::allocator_type; + + using segment_allocator_type = typename allocator_traits_type::template rebind_alloc; + using segment_allocator_traits = tbb::detail::allocator_traits; + public: + // Segment table for unordered containers should not be extended in the wait- free implementation + static constexpr bool allow_table_extending = false; + static constexpr bool is_noexcept_assignment = std::is_nothrow_move_assignable::value && + std::is_nothrow_move_assignable::value && + segment_allocator_traits::is_always_equal::value; + static constexpr bool is_noexcept_swap = tbb::detail::is_nothrow_swappable::value && + tbb::detail::is_nothrow_swappable::value && + segment_allocator_traits::is_always_equal::value; + + // TODO: using base_type::base_type is not compiling on Windows and Intel Compiler - investigate + unordered_segment_table( const base_allocator_type& alloc = base_allocator_type() ) + : base_type(alloc) {} + + unordered_segment_table( const unordered_segment_table& ) = default; + + unordered_segment_table( const unordered_segment_table& other, const base_allocator_type& alloc ) + : base_type(other, alloc) {} + + unordered_segment_table( unordered_segment_table&& ) = default; + + unordered_segment_table( unordered_segment_table&& other, const base_allocator_type& alloc ) + : base_type(std::move(other), alloc) {} + + unordered_segment_table& operator=( const unordered_segment_table& ) = default; + + unordered_segment_table& operator=( unordered_segment_table&& ) = default; + + segment_type create_segment( typename base_type::segment_table_type, typename 
base_type::segment_index_type segment_index, size_type ) {
+            segment_allocator_type alloc(this->get_allocator());
+            size_type seg_size = this->segment_size(segment_index);
+            segment_type new_segment = segment_allocator_traits::allocate(alloc, seg_size);
+            for (size_type i = 0; i != seg_size; ++i) {
+                segment_allocator_traits::construct(alloc, new_segment + i, nullptr);
+            }
+            return new_segment;
+        }
+
+        segment_type nullify_segment( typename base_type::segment_table_type table, size_type segment_index ) {
+            segment_type target_segment = table[segment_index].load(std::memory_order_relaxed);
+            table[segment_index].store(nullptr, std::memory_order_relaxed);
+            return target_segment;
+        }
+
+        // deallocate_segment is required by the segment_table base class, but for the unordered
+        // containers it is also necessary to run the destructors during deallocation
+        void deallocate_segment( segment_type address, size_type index ) {
+            destroy_segment(address, index);
+        }
+
+        void destroy_segment( segment_type address, size_type index ) {
+            segment_allocator_type alloc(this->get_allocator());
+            for (size_type i = 0; i != this->segment_size(index); ++i) {
+                segment_allocator_traits::destroy(alloc, address + i);
+            }
+            segment_allocator_traits::deallocate(alloc, address, this->segment_size(index));
+        }
+
+        void copy_segment( size_type index, segment_type, segment_type to ) {
+            if (index == 0) {
+                // The first element in the first segment is embedded into the table (my_head),
+                // so the first pointer should not be stored here.
+                // It will be stored during the move ctor/assignment operation.
+                to[1].store(nullptr, std::memory_order_relaxed);
+            } else {
+                for (size_type i = 0; i != this->segment_size(index); ++i) {
+                    to[i].store(nullptr, std::memory_order_relaxed);
+                }
+            }
+        }
+
+        void move_segment( size_type index, segment_type from, segment_type to ) {
+            if (index == 0) {
+                // The first element in the first segment is embedded into the table (my_head),
+                // so the first pointer should not be stored here.
+                // It will be stored during the move ctor/assignment operation.
+                to[1].store(from[1].load(std::memory_order_relaxed), std::memory_order_relaxed);
+            } else {
+                for (size_type i = 0; i != this->segment_size(index); ++i) {
+                    to[i].store(from[i].load(std::memory_order_relaxed), std::memory_order_relaxed);
+                    from[i].store(nullptr, std::memory_order_relaxed);
+                }
+            }
+        }
+
+        // allocate_long_table is required by the segment_table base class, but unused for the unordered containers
+        typename base_type::segment_table_type allocate_long_table( const typename base_type::atomic_segment*, size_type ) {
+            __TBB_ASSERT(false, "This method should never be called");
+            // TableType is a pointer
+            return nullptr;
+        }
+
+        // destroy_elements is required by the segment_table base class, but unused for the
+        // unordered containers; it is called, but does nothing
+        void destroy_elements() {}
+    }; // class unordered_segment_table
+
+    void internal_clear() {
+        // TODO: consider the usefulness of two versions of clear() - with and without deallocation of the dummy nodes
+        node_ptr next = my_head.next();
+        node_ptr curr = next;
+
+        my_head.set_next(nullptr);
+
+        while (curr != nullptr) {
+            next = curr->next();
+            destroy_node(curr);
+            curr = next;
+        }
+
+        my_size.store(0, std::memory_order_relaxed);
+        my_segments.clear();
+    }
+
+    void destroy_node( node_ptr node ) {
+        if (node->is_dummy()) {
+            node_allocator_type dummy_node_allocator(my_segments.get_allocator());
+            // Destroy the node
+            node_allocator_traits::destroy(dummy_node_allocator, node);
+            // Deallocate the memory
+            node_allocator_traits::deallocate(dummy_node_allocator, node, 1);
+        } else {
+            // GCC 11.1 issues a warning here that an incorrect destructor might be called for the dummy nodes
+#if (__TBB_GCC_VERSION >= 110100 && __TBB_GCC_VERSION < 150000) && !__clang__ && !__INTEL_COMPILER
+            volatile
+#endif
+            value_node_ptr val_node = static_cast<value_node_ptr>(node);
+            value_node_allocator_type value_node_allocator(my_segments.get_allocator());
+            // Destroy the value
+            value_node_allocator_traits::destroy(value_node_allocator, val_node->storage());
+            // Destroy the node
+            value_node_allocator_traits::destroy(value_node_allocator, val_node);
+            // Deallocate the memory
+            value_node_allocator_traits::deallocate(value_node_allocator, val_node, 1);
+        }
+    }
+
+    struct internal_insert_return_type {
+        // If the insertion failed, remaining_node points to the node that failed to be inserted.
+        // This node may have been allocated during the insertion process.
+        value_node_ptr remaining_node;
+        // If the insertion failed, node_with_equal_key points to the node in the list with a
+        // key equivalent to the inserted one; otherwise it points to the node that was inserted.
+        value_node_ptr node_with_equal_key;
+        // Insertion status
+        // NOTE: if it is true, remaining_node should be nullptr
+        bool inserted;
+    }; // struct internal_insert_return_type
+
+    // Inserts the value into the split-ordered list
+    template <typename ValueType>
+    std::pair<iterator, bool> internal_insert_value( ValueType&& value ) {
+        auto create_value_node = [&value, this]( sokey_type order_key ) -> value_node_ptr {
+            return create_node(order_key, std::forward<ValueType>(value));
+        };
+
+        auto insert_result = internal_insert(value, create_value_node);
+
+        if (insert_result.remaining_node != nullptr) {
+            // If the insertion failed - destroy the node that failed to be inserted, if it exists
+            __TBB_ASSERT(!insert_result.inserted,
+                         "remaining_node should be nullptr if the node was successfully inserted");
+            destroy_node(insert_result.remaining_node);
+        }
+
+        return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
+    }
+    // Inserts a node into the split-ordered list.
+    // Creates the node using the specified callback after the place for insertion has been found.
+    // Returns an internal_insert_return_type object, where:
+    //     - If the insertion succeeded:
+    //         - remaining_node is nullptr
+    //         - node_with_equal_key points to the inserted node
+    //         - inserted is true
+    //     - If the insertion failed:
+    //         - remaining_node points to the node that failed to be inserted, if that node
+    //           was created; nullptr if the node was not created because the requested key
+    //           was already present in the list
+    //         - node_with_equal_key points to the element in the list with a key equivalent
+    //           to the requested one
+    //         - inserted is false
+    template <typename ValueType, typename CreateInsertNode>
+    internal_insert_return_type internal_insert( ValueType&& value, CreateInsertNode create_insert_node ) {
+        static_assert(std::is_same<typename std::decay<ValueType>::type, value_type>::value,
+                      "Incorrect type in internal_insert");
+        const key_type& key = traits_type::get_key(value);
+        sokey_type hash_key = sokey_type(my_hash_compare(key));
+
+        sokey_type order_key = split_order_key_regular(hash_key);
+        node_ptr prev = prepare_bucket(hash_key);
+        __TBB_ASSERT(prev != nullptr, "Invalid head node");
+
+        auto search_result = search_after(prev, order_key, key);
+
+        if (search_result.second) {
+            return internal_insert_return_type{ nullptr, search_result.first, false };
+        }
+
+        value_node_ptr new_node = create_insert_node(order_key);
+        node_ptr curr = search_result.first;
+
+        while (!try_insert(prev, new_node, curr)) {
+            search_result = search_after(prev, order_key, key);
+            if (search_result.second) {
+                return internal_insert_return_type{ new_node, search_result.first, false };
+            }
+            curr = search_result.first;
+        }
+
+        auto sz = my_size.fetch_add(1);
+        adjust_table_size(sz + 1, my_bucket_count.load(std::memory_order_acquire));
+        return internal_insert_return_type{ nullptr, static_cast<value_node_ptr>(new_node), true };
+    }
+
+    // Searches, after the node prev, for a node with the requested order key and a key equivalent to `key`.
+    // Returns the existing node and true if the node is already in the list.
+    // Returns the first node with an order key greater than the requested one, and false, if the node is not present in the list.
+    std::pair<value_node_ptr, bool> search_after( node_ptr& prev, sokey_type order_key, const key_type& key ) {
+        // NOTE: static_cast<value_node_ptr>(curr) should be done only after we have ensured
+        // that the node is not a dummy node
+
+        node_ptr curr = prev->next();
+
+        while (curr != nullptr && (curr->order_key() < order_key ||
+               (curr->order_key() == order_key && !my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key))))
+        {
+            prev = curr;
+            curr = curr->next();
+        }
+
+        if (curr != nullptr && curr->order_key() == order_key && !allow_multimapping) {
+            return { static_cast<value_node_ptr>(curr), true };
+        }
+        return { static_cast<value_node_ptr>(curr), false };
+    }
+
+    void adjust_table_size( size_type total_elements, size_type current_size ) {
+        // Grow the table by a factor of 2 if possible and needed
+        if ( (float(total_elements) / float(current_size)) > my_max_load_factor ) {
+            // Double the bucket count only if it has not changed between the loads
+            my_bucket_count.compare_exchange_strong(current_size, 2u * current_size);
+        }
+    }
+
+    node_ptr insert_dummy_node( node_ptr parent_dummy_node, sokey_type order_key ) {
+        node_ptr prev_node = parent_dummy_node;
+
+        node_ptr dummy_node = create_dummy_node(order_key);
+        node_ptr next_node;
+
+        do {
+            next_node = prev_node->next();
+            // Move forward through the list while the order key is less than the requested one
+            while (next_node != nullptr && next_node->order_key() < order_key) {
+                prev_node = next_node;
+                next_node = next_node->next();
+            }
+
+            if (next_node != nullptr && next_node->order_key() == order_key) {
+                // Another dummy node with the same order key was inserted by another thread
+                // Destroy the node and exit
+                destroy_node(dummy_node);
+                return next_node;
+            }
+        } while (!try_insert(prev_node, dummy_node, next_node));
+
+        return dummy_node;
+    }
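+    // Editor's note: adjust_table_size() above doubles the bucket count whenever the
+    // load factor (elements / buckets) exceeds my_max_load_factor. A standalone sketch
+    // of the same arithmetic (illustrative only; this header's defaults are
+    // initial_bucket_count = 8 and initial_max_load_factor = 4):
+#if 0
+    #include <cstddef>
+    #include <cstdio>
+
+    int main() {
+        std::size_t buckets = 8;
+        const float max_load_factor = 4.0f;
+        for (std::size_t elements = 1; elements <= 200; ++elements) {
+            if (float(elements) / float(buckets) > max_load_factor) {
+                buckets *= 2; // doubles at 33, 65, 129, ... elements
+                std::printf("%zu elements -> %zu buckets\n", elements, buckets);
+            }
+        }
+    }
+#endif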
+    // Tries to insert a node between prev_node and the expected next node.
+    // If the actual next node is not equal to the expected one - returns false.
+    static bool try_insert( node_ptr prev_node, node_ptr new_node, node_ptr current_next_node ) {
+        new_node->set_next(current_next_node);
+        return prev_node->try_set_next(current_next_node, new_node);
+    }
+
+    // Returns the bucket associated with the hash_key
+    node_ptr prepare_bucket( sokey_type hash_key ) {
+        size_type bucket = hash_key % my_bucket_count.load(std::memory_order_acquire);
+        return get_bucket(bucket);
+    }
+
+    // Initializes the corresponding bucket if it is not initialized yet
+    node_ptr get_bucket( size_type bucket_index ) {
+        if (my_segments[bucket_index].load(std::memory_order_acquire) == nullptr) {
+            init_bucket(bucket_index);
+        }
+        return my_segments[bucket_index].load(std::memory_order_acquire);
+    }
+
+    void init_bucket( size_type bucket ) {
+        if (bucket == 0) {
+            // Atomically store the first bucket into my_head
+            node_ptr disabled = nullptr;
+            my_segments[0].compare_exchange_strong(disabled, &my_head);
+            return;
+        }
+
+        size_type parent_bucket = get_parent(bucket);
+
+        while (my_segments[parent_bucket].load(std::memory_order_acquire) == nullptr) {
+            // Initialize all of the parent buckets
+            init_bucket(parent_bucket);
+        }
+
+        __TBB_ASSERT(my_segments[parent_bucket].load(std::memory_order_acquire) != nullptr, "Parent bucket should be initialized");
+        node_ptr parent = my_segments[parent_bucket].load(std::memory_order_acquire);
+
+        // Insert a dummy node into the list
+        node_ptr dummy_node = insert_dummy_node(parent, split_order_key_dummy(bucket));
+        // TODO: consider returning a pair to avoid the store operation if the bucket was stored by another thread,
+        // or move the store into insert_dummy_node
+        // Add dummy_node into the segment table
+        my_segments[bucket].store(dummy_node, std::memory_order_release);
+    }
+
+    node_ptr create_dummy_node( sokey_type order_key ) {
+        node_allocator_type dummy_node_allocator(my_segments.get_allocator());
+        node_ptr dummy_node = node_allocator_traits::allocate(dummy_node_allocator, 1);
+        node_allocator_traits::construct(dummy_node_allocator, dummy_node, order_key);
+        return dummy_node;
+    }
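+    // Editor's note: split-ordered hashing keeps the list sorted by bit-reversed hash
+    // values, so bucket b's parent is b with its most significant set bit cleared (see
+    // get_parent() later in this header), and every dummy node sorts just before the
+    // regular nodes of its bucket. A standalone sketch with 8-bit keys (illustrative
+    // only; reverse8 is a hypothetical helper, not a TBB API):
+#if 0
+    #include <cassert>
+    #include <cstdint>
+
+    std::uint8_t reverse8( std::uint8_t v ) {
+        std::uint8_t r = 0;
+        for (int i = 0; i < 8; ++i) r = std::uint8_t((r << 1) | ((v >> i) & 1));
+        return r;
+    }
+
+    int main() {
+        // A dummy key reverses the bucket index and clears the low bit; a regular key
+        // reverses the hash and sets the low bit, so within a bucket every regular
+        // key sorts after the bucket's dummy key.
+        std::uint8_t dummy_key   = std::uint8_t(reverse8(6) & ~1u); // bucket 6
+        std::uint8_t regular_key = std::uint8_t(reverse8(6) | 1u);  // hash mapping to bucket 6
+        assert(dummy_key < regular_key);
+
+        // Parent of bucket 6 (0b110) is 2 (0b010): clear the top set bit.
+        unsigned bucket = 6, msb = 2; // log2(6) == 2
+        assert((bucket & ~(1u << msb)) == 2);
+    }
+#endif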
+    template <typename... Args>
+    value_node_ptr create_node( sokey_type order_key, Args&&... args ) {
+        value_node_allocator_type value_node_allocator(my_segments.get_allocator());
+        // Allocate memory for the value_node
+        value_node_ptr new_node = value_node_allocator_traits::allocate(value_node_allocator, 1);
+        // Construct the node
+        value_node_allocator_traits::construct(value_node_allocator, new_node, order_key);
+
+        // try_call API is not convenient here due to broken
+        // variadic capture on GCC 4.8.5
+        auto value_guard = make_raii_guard([&] {
+            value_node_allocator_traits::destroy(value_node_allocator, new_node);
+            value_node_allocator_traits::deallocate(value_node_allocator, new_node, 1);
+        });
+
+        // Construct the value in the node
+        value_node_allocator_traits::construct(value_node_allocator, new_node->storage(), std::forward<Args>(args)...);
+        value_guard.dismiss();
+        return new_node;
+    }
+
+    value_node_ptr first_value_node( node_ptr first_node ) const {
+        while (first_node != nullptr && first_node->is_dummy()) {
+            first_node = first_node->next();
+        }
+        return static_cast<value_node_ptr>(first_node);
+    }
+
+    // Unsafe method, which removes the node from the list and returns the next node
+    node_ptr internal_erase( value_node_ptr node_to_erase ) {
+        __TBB_ASSERT(node_to_erase != nullptr, "Invalid iterator for erase");
+        node_ptr next_node = node_to_erase->next();
+        internal_extract(node_to_erase);
+        destroy_node(node_to_erase);
+        return next_node;
+    }
+
+    template <typename K>
+    size_type internal_erase_by_key( const K& key ) {
+        // TODO: consider a reimplementation without equal_range - it is not efficient to perform
+        // a full bucket lookup for each unsafe_erase call
+        auto eq_range = equal_range(key);
+        size_type erased_count = 0;
+
+        for (auto it = eq_range.first; it != eq_range.second;) {
+            it = unsafe_erase(it);
+            ++erased_count;
+        }
+        return erased_count;
+    }
+
+    // Unsafe method, which extracts the node from the list
+    void internal_extract( value_node_ptr node_to_extract ) {
+        const key_type& key = traits_type::get_key(node_to_extract->value());
+        sokey_type hash_key = sokey_type(my_hash_compare(key));
+
+        node_ptr prev_node = prepare_bucket(hash_key);
+
+        for (node_ptr node = prev_node->next(); node != nullptr; prev_node = node, node = node->next()) {
+            if (node == node_to_extract) {
+                unlink_node(prev_node, node, node_to_extract->next());
+                my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
+                return;
+            }
+            __TBB_ASSERT(node->order_key() <= node_to_extract->order_key(),
+                         "The node being extracted should be present in the list");
+        }
+    }
+
+protected:
+    template <typename SourceType>
+    void internal_merge( SourceType&& source ) {
+        static_assert(std::is_same<node_type, typename std::decay<SourceType>::type::node_type>::value,
+                      "Incompatible containers cannot be merged");
+
+        for (node_ptr source_prev = &source.my_head; source_prev->next() != nullptr;) {
+            if (!source_prev->next()->is_dummy()) {
+                value_node_ptr curr = static_cast<value_node_ptr>(source_prev->next());
+                // If multimapping is allowed, or the key is not already present
+                // in *this container - extract the node from the source list
+                if (allow_multimapping || !contains(traits_type::get_key(curr->value()))) {
+                    node_ptr next_node = curr->next();
+                    source.unlink_node(source_prev, curr, next_node);
+
+                    // Remember the old order key
+                    sokey_type old_order_key = curr->order_key();
+
+                    // A node handle holding curr cannot be used directly in the insert call,
+                    // because the destructor of node_type would destroy curr
+                    node_type curr_node = d1::node_handle_accessor::construct<node_type>(curr);
+
+                    // If the insertion fails - return ownership of the node to the source
+                    if
(!insert(std::move(curr_node)).second) { + __TBB_ASSERT(!allow_multimapping, "Insertion should succeed for multicontainer"); + __TBB_ASSERT(source_prev->next() == next_node, + "Concurrent operations with the source container in merge are prohibited"); + + // Initialize the node with the old order key, because the order key + // can change during the insertion + curr->init(old_order_key); + __TBB_ASSERT(old_order_key >= source_prev->order_key() && + (next_node == nullptr || old_order_key <= next_node->order_key()), + "Wrong nodes order in the source container"); + // Merge is unsafe for source container, so the insertion back can be done without compare_exchange + curr->set_next(next_node); + source_prev->set_next(curr); + source_prev = curr; + d1::node_handle_accessor::deactivate(curr_node); + } else { + source.my_size.fetch_sub(1, std::memory_order_relaxed); + } + } else { + source_prev = curr; + } + } else { + source_prev = source_prev->next(); + } + } + } + +private: + // Unsafe method, which unlinks the node between prev and next + void unlink_node( node_ptr prev_node, node_ptr node_to_unlink, node_ptr next_node ) { + __TBB_ASSERT(prev_node->next() == node_to_unlink && + node_to_unlink->next() == next_node, + "erasing and extracting nodes from the containers are unsafe in concurrent mode"); + prev_node->set_next(next_node); + node_to_unlink->set_next(nullptr); + } + + template + value_node_ptr internal_find( const K& key ) { + sokey_type hash_key = sokey_type(my_hash_compare(key)); + sokey_type order_key = split_order_key_regular(hash_key); + + node_ptr curr = prepare_bucket(hash_key); + + while (curr != nullptr) { + if (curr->order_key() > order_key) { + // If the order key is greater than the requested order key, + // the element is not in the hash table + return nullptr; + } else if (curr->order_key() == order_key && + my_hash_compare(traits_type::get_key(static_cast(curr)->value()), key)) { + // The fact that order keys match does not mean that the element is found. + // Key function comparison has to be performed to check whether this is the + // right element. If not, keep searching while order key is the same. + return static_cast(curr); + } + curr = curr->next(); + } + + return nullptr; + } + + template + std::pair internal_equal_range( const K& key ) { + sokey_type hash_key = sokey_type(my_hash_compare(key)); + sokey_type order_key = split_order_key_regular(hash_key); + + node_ptr curr = prepare_bucket(hash_key); + + while (curr != nullptr) { + if (curr->order_key() > order_key) { + // If the order key is greater than the requested order key, + // the element is not in the hash table + return std::make_pair(nullptr, nullptr); + } else if (curr->order_key() == order_key && + my_hash_compare(traits_type::get_key(static_cast(curr)->value()), key)) { + value_node_ptr first = static_cast(curr); + node_ptr last = first; + do { + last = last->next(); + } while (allow_multimapping && last != nullptr && !last->is_dummy() && + my_hash_compare(traits_type::get_key(static_cast(last)->value()), key)); + return std::make_pair(first, first_value_node(last)); + } + curr = curr->next(); + } + return {nullptr, nullptr}; + } + + template + size_type internal_count( const K& key ) const { + if (allow_multimapping) { + // TODO: consider reimplementing the internal_equal_range with elements counting to avoid std::distance + auto eq_range = equal_range(key); + return std::distance(eq_range.first, eq_range.second); + } else { + return contains(key) ? 
1 : 0; + } + } + + void internal_copy( const concurrent_unordered_base& other ) { + node_ptr last_node = &my_head; + my_segments[0].store(&my_head, std::memory_order_relaxed); + + for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) { + node_ptr new_node; + if (!node->is_dummy()) { + // The node in the right table contains a value + new_node = create_node(node->order_key(), static_cast(node)->value()); + } else { + // The node in the right table is a dummy node + new_node = create_dummy_node(node->order_key()); + my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed); + } + + last_node->set_next(new_node); + last_node = new_node; + } + } + + void internal_move( concurrent_unordered_base&& other ) { + node_ptr last_node = &my_head; + my_segments[0].store(&my_head, std::memory_order_relaxed); + + for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) { + node_ptr new_node; + if (!node->is_dummy()) { + // The node in the right table contains a value + new_node = create_node(node->order_key(), std::move(static_cast(node)->value())); + } else { + // TODO: do we need to destroy a dummy node in the right container? + // The node in the right table is a dummy_node + new_node = create_dummy_node(node->order_key()); + my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed); + } + + last_node->set_next(new_node); + last_node = new_node; + } + } + + void move_content( concurrent_unordered_base&& other ) { + // NOTE: allocators should be equal + my_head.set_next(other.my_head.next()); + other.my_head.set_next(nullptr); + my_segments[0].store(&my_head, std::memory_order_relaxed); + + other.my_bucket_count.store(initial_bucket_count, std::memory_order_relaxed); + other.my_max_load_factor = initial_max_load_factor; + other.my_size.store(0, std::memory_order_relaxed); + } + + void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type&, + /*is_always_equal = */std::true_type ) { + // Allocators are always equal - no need to compare for equality + move_content(std::move(other)); + } + + void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type& alloc, + /*is_always_equal = */std::false_type ) { + // Allocators are not always equal + if (alloc == other.my_segments.get_allocator()) { + move_content(std::move(other)); + } else { + try_call( [&] { + internal_move(std::move(other)); + } ).on_exception( [&] { + clear(); + }); + } + } + + // Move assigns the hash table to other is any instances of allocator_type are always equal + // or propagate_on_container_move_assignment is true + void internal_move_assign( concurrent_unordered_base&& other, /*is_always_equal || POCMA = */std::true_type ) { + move_content(std::move(other)); + } + + // Move assigns the hash table to other is any instances of allocator_type are not always equal + // and propagate_on_container_move_assignment is false + void internal_move_assign( concurrent_unordered_base&& other, /*is_always_equal || POCMA = */std::false_type ) { + if (my_segments.get_allocator() == other.my_segments.get_allocator()) { + move_content(std::move(other)); + } else { + // TODO: guards for exceptions + internal_move(std::move(other)); + } + } + + void internal_swap( concurrent_unordered_base& other, /*is_always_equal || POCS = */std::true_type ) { + internal_swap_fields(other); + } + + void internal_swap( concurrent_unordered_base& other, /*is_always_equal || POCS = 
*/std::false_type ) { + __TBB_ASSERT(my_segments.get_allocator() == other.my_segments.get_allocator(), + "Swapping with unequal allocators is not allowed"); + internal_swap_fields(other); + } + + void internal_swap_fields( concurrent_unordered_base& other ) { + node_ptr first_node = my_head.next(); + my_head.set_next(other.my_head.next()); + other.my_head.set_next(first_node); + + size_type current_size = my_size.load(std::memory_order_relaxed); + my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_size.store(current_size, std::memory_order_relaxed); + + size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed); + my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed); + other.my_bucket_count.store(bucket_count, std::memory_order_relaxed); + + using std::swap; + swap(my_max_load_factor, other.my_max_load_factor); + swap(my_hash_compare, other.my_hash_compare); + my_segments.swap(other.my_segments); + + // swap() method from segment table swaps all of the segments including the first segment + // We should restore it to my_head. Without it the first segment of the container will point + // to other.my_head. + my_segments[0].store(&my_head, std::memory_order_relaxed); + other.my_segments[0].store(&other.my_head, std::memory_order_relaxed); + } + + // A regular order key has its original hash value reversed and the last bit set + static constexpr sokey_type split_order_key_regular( sokey_type hash ) { + return reverse_bits(hash) | 0x1; + } + + // A dummy order key has its original hash value reversed and the last bit unset + static constexpr sokey_type split_order_key_dummy( sokey_type hash ) { + return reverse_bits(hash) & ~sokey_type(0x1); + } + + size_type get_parent( size_type bucket ) const { + // Unset bucket's most significant turned-on bit + __TBB_ASSERT(bucket != 0, "Unable to get_parent of the bucket 0"); + size_type msb = tbb::detail::log2(bucket); + return bucket & ~(size_type(1) << msb); + } + + size_type get_next_bucket_index( size_type bucket ) const { + size_type bits = tbb::detail::log2(my_bucket_count.load(std::memory_order_relaxed)); + size_type reversed_next = reverse_n_bits(bucket, bits) + 1; + return reverse_n_bits(reversed_next, bits); + } + + std::atomic my_size; + std::atomic my_bucket_count; + float my_max_load_factor; + hash_compare_type my_hash_compare; + + list_node_type my_head; // Head node for split ordered list + unordered_segment_table my_segments; // Segment table of pointers to nodes + + template + friend class solist_iterator; + + template + friend class concurrent_unordered_base; +}; // class concurrent_unordered_base + +template +bool operator==( const concurrent_unordered_base& lhs, + const concurrent_unordered_base& rhs ) { + if (&lhs == &rhs) { return true; } + if (lhs.size() != rhs.size()) { return false; } + +#if _MSC_VER + // Passing "unchecked" iterators to std::permutation with 3 parameters + // causes compiler warnings. 
+    // The workaround is to use the overload with 4 parameters, which is
+    // available since C++14 - the minimally supported version on MSVC
+    return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+#else
+    return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
+#endif
+}
+
+#if !__TBB_CPP20_COMPARISONS_PRESENT
+template <typename Traits>
+bool operator!=( const concurrent_unordered_base<Traits>& lhs,
+                 const concurrent_unordered_base<Traits>& rhs ) {
+    return !(lhs == rhs);
+}
+#endif
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// #pragma warning(pop) // warning 4127 is back
+#endif
+
+} // namespace d2
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_detail__concurrent_unordered_base_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_config.h b/src/tbb/include/oneapi/tbb/detail/_config.h
new file mode 100644
index 000000000..e676b1558
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_config.h
@@ -0,0 +1,537 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_detail__config_H
+#define __TBB_detail__config_H
+
+/** This header is supposed to contain macro definitions only.
+    The macros defined here are intended to control such aspects of TBB build as
+    - presence of compiler features
+    - compilation modes
+    - feature sets
+    - known compiler/platform issues
+**/
+
+/* Check which standard library we use. */
+#include <cstddef>
+
+#ifdef __has_include
+#if __has_include(<version>)
+#include <version>
+#endif
+#endif
+
+#include "_export.h"
+
+#if _MSC_VER
+    #define __TBB_EXPORTED_FUNC __cdecl
+    #define __TBB_EXPORTED_METHOD __thiscall
+#else
+    #define __TBB_EXPORTED_FUNC
+    #define __TBB_EXPORTED_METHOD
+#endif
+
+#if defined(_MSVC_LANG)
+    #define __TBB_LANG _MSVC_LANG
+#else
+    #define __TBB_LANG __cplusplus
+#endif // _MSVC_LANG
+
+#define __TBB_CPP14_PRESENT (__TBB_LANG >= 201402L)
+#define __TBB_CPP17_PRESENT (__TBB_LANG >= 201703L)
+#define __TBB_CPP20_PRESENT (__TBB_LANG >= 202002L)
+
+#if __INTEL_COMPILER || _MSC_VER
+    #define __TBB_NOINLINE(decl) __declspec(noinline) decl
+#elif __GNUC__
+    #define __TBB_NOINLINE(decl) decl __attribute__ ((noinline))
+#else
+    #define __TBB_NOINLINE(decl) decl
+#endif
+
+#define __TBB_STRING_AUX(x) #x
+#define __TBB_STRING(x) __TBB_STRING_AUX(x)
+
+// Note that when ICC or Clang is in use, __TBB_GCC_VERSION might not fully match
+// the actual GCC version on the system.
+#define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+
+/* Check which standard library we use. */
+
+// Prior to GCC 7, GNU libstdc++ did not have a convenient version macro.
+// Therefore we use different ways to detect its version.
+#ifdef TBB_USE_GLIBCXX_VERSION
+    // The version is explicitly specified in our public TBB_USE_GLIBCXX_VERSION macro.
+    // Its format should match the __TBB_GCC_VERSION above, e.g. 70301 for libstdc++ coming with GCC 7.3.1.
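+    // For example (hypothetical invocation, not part of this header), a build
+    // against the libstdc++ shipped with GCC 7.3.1 could pass the version explicitly:
+    //
+    //   g++ -DTBB_USE_GLIBCXX_VERSION=70301 -c example.cpp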
+ #define __TBB_GLIBCXX_VERSION TBB_USE_GLIBCXX_VERSION +#elif _GLIBCXX_RELEASE && _GLIBCXX_RELEASE != __GNUC__ + // Reported versions of GCC and libstdc++ do not match; trust the latter + #define __TBB_GLIBCXX_VERSION (_GLIBCXX_RELEASE*10000) +#elif __GLIBCPP__ || __GLIBCXX__ + // The version macro is not defined or matches the GCC version; use __TBB_GCC_VERSION + #define __TBB_GLIBCXX_VERSION __TBB_GCC_VERSION +#endif + +#if __clang__ + // according to clang documentation, version can be vendor specific + #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) +#endif + +/** Macro helpers **/ + +#define __TBB_CONCAT_AUX(A,B) A##B +// The additional level of indirection is needed to expand macros A and B (not to get the AB macro). +// See [cpp.subst] and [cpp.concat] for more details. +#define __TBB_CONCAT(A,B) __TBB_CONCAT_AUX(A,B) +// The IGNORED argument and comma are needed to always have 2 arguments (even when A is empty). +#define __TBB_IS_MACRO_EMPTY(A,IGNORED) __TBB_CONCAT_AUX(__TBB_MACRO_EMPTY,A) +#define __TBB_MACRO_EMPTY 1 + +#if _M_X64 || _M_ARM64 + #define __TBB_W(name) name##64 +#else + #define __TBB_W(name) name +#endif + +/** User controlled TBB features & modes **/ + +#ifndef TBB_USE_DEBUG + /* + There are four cases that are supported: + 1. "_DEBUG is undefined" means "no debug"; + 2. "_DEBUG defined to something that is evaluated to 0" (including "garbage", as per [cpp.cond]) means "no debug"; + 3. "_DEBUG defined to something that is evaluated to a non-zero value" means "debug"; + 4. "_DEBUG defined to nothing (empty)" means "debug". + */ + #ifdef _DEBUG + // Check if _DEBUG is empty. + #define __TBB_IS__DEBUG_EMPTY (__TBB_IS_MACRO_EMPTY(_DEBUG,IGNORED)==__TBB_MACRO_EMPTY) + #if __TBB_IS__DEBUG_EMPTY + #define TBB_USE_DEBUG 1 + #else + #define TBB_USE_DEBUG _DEBUG + #endif // __TBB_IS__DEBUG_EMPTY + #else + #define TBB_USE_DEBUG 0 + #endif // _DEBUG +#endif // TBB_USE_DEBUG + +#ifndef TBB_USE_ASSERT + #define TBB_USE_ASSERT TBB_USE_DEBUG +#endif // TBB_USE_ASSERT + +#ifndef TBB_USE_PROFILING_TOOLS +#if TBB_USE_DEBUG + #define TBB_USE_PROFILING_TOOLS 2 +#else // TBB_USE_DEBUG + #define TBB_USE_PROFILING_TOOLS 0 +#endif // TBB_USE_DEBUG +#endif // TBB_USE_PROFILING_TOOLS + +// Exceptions support cases +#if !(__EXCEPTIONS || defined(_CPPUNWIND) || __SUNPRO_CC) + #if TBB_USE_EXCEPTIONS + #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. 
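+        // (Illustrative scenario, not taken from this header: compiling with
+        //  g++ -fno-exceptions -DTBB_USE_EXCEPTIONS=1 would trigger this #error.)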
+ #elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 0 + #endif +#elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 1 +#endif + +/** Preprocessor symbols to determine HW architecture **/ + +#if _WIN32 || _WIN64 + #if defined(_M_X64) || defined(__x86_64__) // the latter for MinGW support + #define __TBB_x86_64 1 + #elif defined(_M_IA64) + #define __TBB_ipf 1 + #elif defined(_M_IX86) || defined(__i386__) // the latter for MinGW support + #define __TBB_x86_32 1 + #else + #define __TBB_generic_arch 1 + #endif +#else /* Assume generic Unix */ + #if __x86_64__ + #define __TBB_x86_64 1 + #elif __ia64__ + #define __TBB_ipf 1 + #elif __i386__||__i386 // __i386 is for Sun OS + #define __TBB_x86_32 1 + #else + #define __TBB_generic_arch 1 + #endif +#endif + +/** Windows API or POSIX API **/ + +#if _WIN32 || _WIN64 + #define __TBB_USE_WINAPI 1 +#else + #define __TBB_USE_POSIX 1 +#endif + +/** Internal TBB features & modes **/ + +/** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load shared libraries at run time **/ +#ifndef __TBB_DYNAMIC_LOAD_ENABLED + #define __TBB_DYNAMIC_LOAD_ENABLED (!__EMSCRIPTEN__) +#endif + +/** __TBB_WIN8UI_SUPPORT enables support of Windows* Store Apps and limit a possibility to load + shared libraries at run time only from application container **/ +#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP + #define __TBB_WIN8UI_SUPPORT 1 +#else + #define __TBB_WIN8UI_SUPPORT 0 +#endif + +/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/ +#ifndef __TBB_WEAK_SYMBOLS_PRESENT + #define __TBB_WEAK_SYMBOLS_PRESENT ( !__EMSCRIPTEN__ && !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) ) +#endif + +/** Presence of compiler features **/ + +#if __clang__ && !__INTEL_COMPILER + #define __TBB_USE_OPTIONAL_RTTI __has_feature(cxx_rtti) +#elif defined(_CPPRTTI) + #define __TBB_USE_OPTIONAL_RTTI 1 +#else + #define __TBB_USE_OPTIONAL_RTTI (__GXX_RTTI || __RTTI || __INTEL_RTTI__) +#endif + +/** Address sanitizer detection **/ +#ifdef __SANITIZE_ADDRESS__ + #define __TBB_USE_ADDRESS_SANITIZER 1 +#elif defined(__has_feature) +#if __has_feature(address_sanitizer) + #define __TBB_USE_ADDRESS_SANITIZER 1 +#endif +#endif + +/** Library features presence macros **/ + +#define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__TBB_LANG >= 201402L) +#define __TBB_CPP17_INVOKE_PRESENT (__TBB_LANG >= 201703L) + +// TODO: Remove the condition(__INTEL_COMPILER > 2021) from the __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// macro when this feature start working correctly on this compiler. 
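+// A hedged sketch of what this feature macro gates for client code (the
+// container and values below are illustrative examples, not part of this header):
+//
+//   #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+//       tbb::concurrent_vector v{1, 2, 3};      // CTAD deduces concurrent_vector<int>
+//   #else
+//       tbb::concurrent_vector<int> v{1, 2, 3}; // element type spelled out
+//   #endif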
+#if __INTEL_COMPILER && (!_MSC_VER || __INTEL_CXX11_MOVE__) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__TBB_LANG >= 201402L) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 2021 && __TBB_LANG >= 201703L) + #define __TBB_CPP20_CONCEPTS_PRESENT 0 // TODO: add a mechanism for future addition +#elif __clang__ + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__has_feature(cxx_variable_templates)) + #define __TBB_CPP20_CONCEPTS_PRESENT 0 // TODO: add a mechanism for future addition + #ifdef __cpp_deduction_guides + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cpp_deduction_guides >= 201611L) + #else + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT 0 + #endif +#elif __GNUC__ + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__TBB_LANG >= 201402L && __TBB_GCC_VERSION >= 50000) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cpp_deduction_guides >= 201606L) + #define __TBB_CPP20_CONCEPTS_PRESENT (__TBB_LANG >= 201709L && __TBB_GCC_VERSION >= 100201) +#elif _MSC_VER + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (_MSC_FULL_VER >= 190023918 && (!__INTEL_COMPILER || __INTEL_COMPILER >= 1700)) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (_MSC_VER >= 1914 && __TBB_LANG >= 201703L && (!__INTEL_COMPILER || __INTEL_COMPILER > 2021)) + #define __TBB_CPP20_CONCEPTS_PRESENT (_MSC_VER >= 1923 && __TBB_LANG >= 202002L) // TODO: INTEL_COMPILER? +#else + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__TBB_LANG >= 201402L) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__TBB_LANG >= 201703L) + #define __TBB_CPP20_CONCEPTS_PRESENT (__TBB_LANG >= 202002L) +#endif + +// GCC4.8 on RHEL7 does not support std::get_new_handler +#define __TBB_CPP11_GET_NEW_HANDLER_PRESENT (_MSC_VER >= 1900 || __TBB_GLIBCXX_VERSION >= 40900 && __GXX_EXPERIMENTAL_CXX0X__ || _LIBCPP_VERSION) +// GCC4.8 on RHEL7 does not support std::is_trivially_copyable +#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700 || (__TBB_GLIBCXX_VERSION >= 50000 && __GXX_EXPERIMENTAL_CXX0X__)) + +#define __TBB_CPP17_MEMORY_RESOURCE_PRESENT (_MSC_VER >= 1913 && (__TBB_LANG > 201402L) || \ + __TBB_GLIBCXX_VERSION >= 90000 && __TBB_LANG >= 201703L) +#define __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT (_MSC_VER >= 1911) +#define __TBB_CPP17_LOGICAL_OPERATIONS_PRESENT (__TBB_LANG >= 201703L) +#define __TBB_CPP17_ALLOCATOR_IS_ALWAYS_EQUAL_PRESENT (__TBB_LANG >= 201703L) +#define __TBB_CPP17_IS_SWAPPABLE_PRESENT (__TBB_LANG >= 201703L) + +#if defined(__cpp_impl_three_way_comparison) && defined(__cpp_lib_three_way_comparison) + #define __TBB_CPP20_COMPARISONS_PRESENT ((__cpp_impl_three_way_comparison >= 201907L) && (__cpp_lib_three_way_comparison >= 201907L)) +#else + #define __TBB_CPP20_COMPARISONS_PRESENT 0 +#endif + +#define __TBB_RESUMABLE_TASKS (!__TBB_WIN8UI_SUPPORT && !__ANDROID__ && !__QNXNTO__ && (!__linux__ || __GLIBC__)) + +/* This macro marks incomplete code or comments describing ideas which are considered for the future. + * See also for plain comment with TODO and FIXME marks for small improvement opportunities. + */ +#define __TBB_TODO 0 + +/* Check which standard library we use. */ +/* __TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed. 
*/ +#if !defined(__TBB_SYMBOL) && !__TBB_CONFIG_PREPROC_ONLY + #include +#endif + +/** Target OS is either iOS* or iOS* simulator **/ +#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ + #define __TBB_IOS 1 +#endif + +#if __APPLE__ + #if __INTEL_COMPILER && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1099 \ + && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000 + // ICC does not correctly set the macro if -mmacosx-min-version is not specified + #define __TBB_MACOS_TARGET_VERSION (100000 + 10*(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 1000)) + #else + #define __TBB_MACOS_TARGET_VERSION __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ + #endif +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) + #define __TBB_GCC_WARNING_IGNORED_ATTRIBUTES_PRESENT (__TBB_GCC_VERSION >= 60100) +#endif + +#if __GNUC__ && !__INTEL_COMPILER && !__clang__ + #define __TBB_GCC_PARAMETER_PACK_IN_LAMBDAS_BROKEN (__TBB_GCC_VERSION <= 40805) +#endif + +#define __TBB_CPP17_FALLTHROUGH_PRESENT (__TBB_LANG >= 201703L) +#define __TBB_CPP17_NODISCARD_PRESENT (__TBB_LANG >= 201703L) +#define __TBB_FALLTHROUGH_PRESENT (__TBB_GCC_VERSION >= 70000 && !__INTEL_COMPILER) + +#if __TBB_CPP17_FALLTHROUGH_PRESENT + #define __TBB_fallthrough [[fallthrough]] +#elif __TBB_FALLTHROUGH_PRESENT + #define __TBB_fallthrough __attribute__ ((fallthrough)) +#else + #define __TBB_fallthrough +#endif + +#if __TBB_CPP17_NODISCARD_PRESENT + #define __TBB_nodiscard [[nodiscard]] +#elif __clang__ || __GNUC__ + #define __TBB_nodiscard __attribute__((warn_unused_result)) +#else + #define __TBB_nodiscard +#endif + +#define __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT (_MSC_VER >= 1900 || __GLIBCXX__ && __cpp_lib_uncaught_exceptions \ + || _LIBCPP_VERSION >= 3700 && (!__TBB_MACOS_TARGET_VERSION || __TBB_MACOS_TARGET_VERSION >= 101200)) + +#define __TBB_TSX_INTRINSICS_PRESENT (__RTM__ || __INTEL_COMPILER || (_MSC_VER>=1700 && (__TBB_x86_64 || __TBB_x86_32))) + +#define __TBB_WAITPKG_INTRINSICS_PRESENT ((__INTEL_COMPILER >= 1900 || (__TBB_GCC_VERSION >= 110000 && __TBB_GNU_ASM_VERSION >= 2032) || __TBB_CLANG_VERSION >= 120000) \ + && (_WIN32 || _WIN64 || __unix__ || __APPLE__) && (__TBB_x86_32 || __TBB_x86_64) && !__ANDROID__) + +/** Internal TBB features & modes **/ + +/** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when + it's necessary to test internal functions not exported from TBB DLLs +**/ +#if (_WIN32||_WIN64) && (__TBB_SOURCE_DIRECTLY_INCLUDED || TBB_USE_PREVIEW_BINARY) + #define __TBB_NO_IMPLICIT_LINKAGE 1 + #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 +#endif + +#if (__TBB_BUILD || __TBBMALLOC_BUILD || __TBBMALLOCPROXY_BUILD || __TBBBIND_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) + #define __TBB_NO_IMPLICIT_LINKAGE 1 +#endif + +#if _MSC_VER + #if !__TBB_NO_IMPLICIT_LINKAGE + #ifdef _DEBUG + #pragma comment(lib, "tbb12_debug.lib") + #else + #pragma comment(lib, "tbb12.lib") + #endif + #endif +#endif + +#ifndef __TBB_SCHEDULER_OBSERVER + #define __TBB_SCHEDULER_OBSERVER 1 +#endif /* __TBB_SCHEDULER_OBSERVER */ + +#ifndef __TBB_FP_CONTEXT + #define __TBB_FP_CONTEXT 1 +#endif /* __TBB_FP_CONTEXT */ + +#define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official + +#ifndef __TBB_ARENA_OBSERVER + #define __TBB_ARENA_OBSERVER __TBB_SCHEDULER_OBSERVER +#endif /* __TBB_ARENA_OBSERVER */ + +#ifndef __TBB_ARENA_BINDING + #define __TBB_ARENA_BINDING 1 +#endif + +// Thread pinning is not available on macOS* +#define __TBB_CPUBIND_PRESENT (__TBB_ARENA_BINDING && !__APPLE__) + +#ifndef 
__TBB_ENQUEUE_ENFORCED_CONCURRENCY + #define __TBB_ENQUEUE_ENFORCED_CONCURRENCY 1 +#endif + +#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && \ + (_WIN32 || _WIN64 || __APPLE__ || (defined(__unix__) && !__ANDROID__)) + #define __TBB_SURVIVE_THREAD_SWITCH 1 +#endif /* __TBB_SURVIVE_THREAD_SWITCH */ + +#ifndef TBB_PREVIEW_FLOW_GRAPH_FEATURES + #define TBB_PREVIEW_FLOW_GRAPH_FEATURES __TBB_CPF_BUILD +#endif + +#ifndef __TBB_DEFAULT_PARTITIONER + #define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner +#endif + +#ifndef __TBB_FLOW_TRACE_CODEPTR + #define __TBB_FLOW_TRACE_CODEPTR __TBB_CPF_BUILD +#endif + +// Intel(R) C++ Compiler starts analyzing usages of the deprecated content at the template +// instantiation site, which is too late for suppression of the corresponding messages for internal +// stuff. +#if !defined(__INTEL_COMPILER) && (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) + #if (__TBB_LANG >= 201402L && (!defined(_MSC_VER) || _MSC_VER >= 1920)) + #define __TBB_DEPRECATED [[deprecated]] + #define __TBB_DEPRECATED_MSG(msg) [[deprecated(msg)]] + #elif _MSC_VER + #define __TBB_DEPRECATED __declspec(deprecated) + #define __TBB_DEPRECATED_MSG(msg) __declspec(deprecated(msg)) + #elif (__GNUC__ && __TBB_GCC_VERSION >= 40805) || __clang__ + #define __TBB_DEPRECATED __attribute__((deprecated)) + #define __TBB_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) + #endif +#endif // !defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + +#if !defined(__TBB_DEPRECATED) + #define __TBB_DEPRECATED + #define __TBB_DEPRECATED_MSG(msg) +#elif !defined(__TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES) + // Suppress deprecated messages from self + #define __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES 1 +#endif + +#if defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) && (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + #define __TBB_DEPRECATED_VERBOSE __TBB_DEPRECATED + #define __TBB_DEPRECATED_VERBOSE_MSG(msg) __TBB_DEPRECATED_MSG(msg) +#else + #define __TBB_DEPRECATED_VERBOSE + #define __TBB_DEPRECATED_VERBOSE_MSG(msg) +#endif // (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + +#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !(__TBB_LANG >= 201103L || _MSC_VER >= 1900) + #pragma message("TBB Warning: Support for C++98/03 is deprecated. Please use the compiler that supports C++11 features at least.") +#endif + +#ifdef _VARIADIC_MAX + #define __TBB_VARIADIC_MAX _VARIADIC_MAX +#else + #if _MSC_VER == 1700 + #define __TBB_VARIADIC_MAX 5 // VS11 setting, issue resolved in VS12 + #elif _MSC_VER == 1600 + #define __TBB_VARIADIC_MAX 10 // VS10 setting + #else + #define __TBB_VARIADIC_MAX 15 + #endif +#endif + +#if __SANITIZE_THREAD__ + #define __TBB_USE_THREAD_SANITIZER 1 +#elif defined(__has_feature) +#if __has_feature(thread_sanitizer) + #define __TBB_USE_THREAD_SANITIZER 1 +#endif +#endif + +#ifndef __TBB_USE_SANITIZERS +#define __TBB_USE_SANITIZERS (__TBB_USE_THREAD_SANITIZER || __TBB_USE_ADDRESS_SANITIZER) +#endif + +#ifndef __TBB_RESUMABLE_TASKS_USE_THREADS +#define __TBB_RESUMABLE_TASKS_USE_THREADS __TBB_USE_SANITIZERS +#endif + +#ifndef __TBB_USE_CONSTRAINTS +#define __TBB_USE_CONSTRAINTS 1 +#endif + +#ifndef __TBB_STRICT_CONSTRAINTS +#define __TBB_STRICT_CONSTRAINTS 1 +#endif + +#if __TBB_CPP20_CONCEPTS_PRESENT && __TBB_USE_CONSTRAINTS + #define __TBB_requires(...) requires __VA_ARGS__ +#else // __TBB_CPP20_CONCEPTS_PRESENT + #define __TBB_requires(...) 
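+// Hedged usage sketch (the function name and concept below are illustrative,
+// not part of this header): with concepts enabled, the macro emits a
+// requires-clause; otherwise it expands to nothing:
+//
+//   template <typename Body>
+//       __TBB_requires(std::invocable<Body&>)
+//   void run_body( Body& body );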
+#endif // __TBB_CPP20_CONCEPTS_PRESENT + +/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by + the bugs in compilers, standard or OS specific libraries. They should be + removed as soon as the corresponding bugs are fixed or the buggy OS/compiler + versions go out of the support list. +**/ + +// Some STL containers not support allocator traits in old GCC versions +#if __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION <= 50301 + #define TBB_ALLOCATOR_TRAITS_BROKEN 1 +#endif + +// GCC 4.8 C++ standard library implements std::this_thread::yield as no-op. +#if __TBB_GLIBCXX_VERSION >= 40800 && __TBB_GLIBCXX_VERSION < 40900 + #define __TBB_GLIBCXX_THIS_THREAD_YIELD_BROKEN 1 +#endif + +/** End of __TBB_XXX_BROKEN macro section **/ + +#if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER) + // A macro to suppress erroneous or benign "unreachable code" MSVC warning (4702) + #define __TBB_MSVC_UNREACHABLE_CODE_IGNORED 1 +#endif + +// Many OS versions (Android 4.0.[0-3] for example) need workaround for dlopen to avoid non-recursive loader lock hang +// Setting the workaround for all compile targets ($APP_PLATFORM) below Android 4.4 (android-19) +#if __ANDROID__ + #include +#endif + +#define __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING (TBB_PREVIEW_FLOW_GRAPH_FEATURES) + +#ifndef __TBB_PREVIEW_CRITICAL_TASKS +#define __TBB_PREVIEW_CRITICAL_TASKS 1 +#endif + +#ifndef __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +#define __TBB_PREVIEW_FLOW_GRAPH_NODE_SET (TBB_PREVIEW_FLOW_GRAPH_FEATURES) +#endif + +#ifndef __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +#define __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT (TBB_PREVIEW_FLOW_GRAPH_FEATURES \ + || TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT) +#endif + +#if TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS +#define __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS 1 +#endif + +#if TBB_PREVIEW_TASK_GROUP_EXTENSIONS || __TBB_BUILD +#define __TBB_PREVIEW_TASK_GROUP_EXTENSIONS 1 +#endif + +#endif // __TBB_detail__config_H diff --git a/src/tbb/include/oneapi/tbb/detail/_containers_helpers.h b/src/tbb/include/oneapi/tbb/detail/_containers_helpers.h new file mode 100644 index 000000000..4dca07fa1 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_containers_helpers.h @@ -0,0 +1,67 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_detail__containers_helpers_H +#define __TBB_detail__containers_helpers_H + +#include "_template_helpers.h" +#include "_allocator_traits.h" +#include +#include +#include + +namespace tbb { +namespace detail { +inline namespace d0 { + +template +struct comp_is_transparent : std::false_type {}; + +template +struct comp_is_transparent> : std::true_type {}; + +template +struct has_transparent_key_equal : std::false_type { using type = KeyEqual; }; + +template +struct has_transparent_key_equal> : std::true_type { + using type = typename Hasher::transparent_key_equal; + static_assert(comp_is_transparent::value, "Hash::transparent_key_equal::is_transparent is not valid or does not denote a type."); + static_assert((std::is_same>::value || + std::is_same::value), "KeyEqual is a different type than equal_to or Hash::transparent_key_equal."); + }; + +struct is_iterator_impl { +template +using iter_traits_category = typename std::iterator_traits::iterator_category; + +template +using input_iter_category = typename std::enable_if>::value>::type; +}; // struct is_iterator_impl + +template +using is_input_iterator = supports; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template +inline constexpr bool is_input_iterator_v = is_input_iterator::value; +#endif + +} // inline namespace d0 +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail__containers_helpers_H diff --git a/src/tbb/include/oneapi/tbb/detail/_exception.h b/src/tbb/include/oneapi/tbb/detail/_exception.h new file mode 100644 index 000000000..d1a8b18c4 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_exception.h @@ -0,0 +1,88 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__exception_H +#define __TBB__exception_H + +#include "_config.h" + +#include // std::bad_alloc +#include // std::exception +#include // std::runtime_error + +namespace tbb { +namespace detail { +inline namespace d0 { +enum class exception_id { + bad_alloc = 1, + bad_last_alloc, + user_abort, + nonpositive_step, + out_of_range, + reservation_length_error, + missing_wait, + invalid_load_factor, + invalid_key, + bad_tagged_msg_cast, + unsafe_wait, + last_entry +}; +} // namespace d0 + +#if _MSC_VER + // #pragma warning(disable: 4275) +#endif + +namespace r1 { +//! Exception for concurrent containers +class TBB_EXPORT bad_last_alloc : public std::bad_alloc { +public: + const char* __TBB_EXPORTED_METHOD what() const noexcept(true) override; +}; + +//! Exception for user-initiated abort +class TBB_EXPORT user_abort : public std::exception { +public: + const char* __TBB_EXPORTED_METHOD what() const noexcept(true) override; +}; + +//! Exception for missing wait on structured_task_group +class TBB_EXPORT missing_wait : public std::exception { +public: + const char* __TBB_EXPORTED_METHOD what() const noexcept(true) override; +}; + +//! 
Exception for impossible finalization of task_sheduler_handle +class TBB_EXPORT unsafe_wait : public std::runtime_error { +public: + unsafe_wait(const char* msg) : std::runtime_error(msg) {} +}; + +//! Gathers all throw operators in one place. +/** Its purpose is to minimize code bloat that can be caused by throw operators + scattered in multiple places, especially in templates. **/ +TBB_EXPORT void __TBB_EXPORTED_FUNC throw_exception ( exception_id ); +} // namespace r1 + +inline namespace d0 { +using r1::throw_exception; +} // namespace d0 + +} // namespace detail +} // namespace tbb + +#endif // __TBB__exception_H + diff --git a/src/tbb/include/oneapi/tbb/detail/_export.h b/src/tbb/include/oneapi/tbb/detail/_export.h new file mode 100644 index 000000000..4c015223b --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_export.h @@ -0,0 +1,46 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__export_H +#define __TBB_detail__export_H + +#if defined(__MINGW32__) + #define _EXPORT __declspec(dllexport) +#elif defined(_WIN32) || defined(__unix__) || defined(__APPLE__) // Use .def files for these + #define _EXPORT +#else + #error "Unknown platform/compiler" +#endif + +#if __TBB_BUILD + #define TBB_EXPORT _EXPORT +#else + #define TBB_EXPORT +#endif + +#if __TBBMALLOC_BUILD + #define TBBMALLOC_EXPORT _EXPORT +#else + #define TBBMALLOC_EXPORT +#endif + +#if __TBBBIND_BUILD + #define TBBBIND_EXPORT _EXPORT +#else + #define TBBBIND_EXPORT +#endif + +#endif diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_body_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_body_impl.h new file mode 100644 index 000000000..21da06ce0 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_body_impl.h @@ -0,0 +1,429 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_body_impl_H +#define __TBB__flow_graph_body_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::detail::d2 (in flow_graph.h) + +typedef std::uint64_t tag_value; + + +// TODO revamp: find out if there is already helper for has_policy. 
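+// Illustrative expectations (commentary only; the tags are the policies
+// declared below in graph_policy_namespace):
+//
+//   static_assert(has_policy<lightweight, Policy<queueing, lightweight>>::value, "");
+//   static_assert(!has_policy<reserving, Policy<queueing>>::value, "");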
+template struct Policy {}; + +template struct has_policy; + +template +struct has_policy : + std::integral_constant::value || + has_policy::value> {}; + +template +struct has_policy : + std::integral_constant::value> {}; + +template +struct has_policy > : has_policy {}; + +namespace graph_policy_namespace { + + struct rejecting { }; + struct reserving { }; + struct queueing { }; + struct lightweight { }; + + // K == type of field used for key-matching. Each tag-matching port will be provided + // functor that, given an object accepted by the port, will return the + /// field of type K being used for matching. + template::type > > + __TBB_requires(tbb::detail::hash_compare) + struct key_matching { + typedef K key_type; + typedef typename std::decay::type base_key_type; + typedef KHash hash_compare_type; + }; + + // old tag_matching join's new specifier + typedef key_matching tag_matching; + + // Aliases for Policy combinations + typedef Policy queueing_lightweight; + typedef Policy rejecting_lightweight; + +} // namespace graph_policy_namespace + +// -------------- function_body containers ---------------------- + +//! A functor that takes no input and generates a value of type Output +template< typename Output > +class input_body : no_assign { +public: + virtual ~input_body() {} + virtual Output operator()(d1::flow_control& fc) = 0; + virtual input_body* clone() = 0; +}; + +//! The leaf for input_body +template< typename Output, typename Body> +class input_body_leaf : public input_body { +public: + input_body_leaf( const Body &_body ) : body(_body) { } + Output operator()(d1::flow_control& fc) override { return body(fc); } + input_body_leaf* clone() override { + return new input_body_leaf< Output, Body >(body); + } + Body get_body() { return body; } +private: + Body body; +}; + +//! A functor that takes an Input and generates an Output +template< typename Input, typename Output > +class function_body : no_assign { +public: + virtual ~function_body() {} + virtual Output operator()(const Input &input) = 0; + virtual function_body* clone() = 0; +}; + +//! the leaf for function_body +template +class function_body_leaf : public function_body< Input, Output > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + Output operator()(const Input &i) override { return tbb::detail::invoke(body,i); } + B get_body() { return body; } + function_body_leaf* clone() override { + return new function_body_leaf< Input, Output, B >(body); + } +private: + B body; +}; + +//! the leaf for function_body specialized for Input and output of continue_msg +template +class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + continue_msg operator()( const continue_msg &i ) override { + body(i); + return i; + } + B get_body() { return body; } + function_body_leaf* clone() override { + return new function_body_leaf< continue_msg, continue_msg, B >(body); + } +private: + B body; +}; + +//! the leaf for function_body specialized for Output of continue_msg +template +class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + continue_msg operator()(const Input &i) override { + body(i); + return continue_msg(); + } + B get_body() { return body; } + function_body_leaf* clone() override { + return new function_body_leaf< Input, continue_msg, B >(body); + } +private: + B body; +}; + +//! 
the leaf for function_body specialized for Input of continue_msg +template +class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + Output operator()(const continue_msg &i) override { + return body(i); + } + B get_body() { return body; } + function_body_leaf* clone() override { + return new function_body_leaf< continue_msg, Output, B >(body); + } +private: + B body; +}; + +//! function_body that takes an Input and a set of output ports +template +class multifunction_body : no_assign { +public: + virtual ~multifunction_body () {} + virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0; + virtual multifunction_body* clone() = 0; + virtual void* get_body_ptr() = 0; +}; + +//! leaf for multifunction. OutputSet can be a std::tuple or a vector. +template +class multifunction_body_leaf : public multifunction_body { +public: + multifunction_body_leaf(const B &_body) : body(_body) { } + void operator()(const Input &input, OutputSet &oset) override { + tbb::detail::invoke(body, input, oset); // body may explicitly put() to one or more of oset. + } + void* get_body_ptr() override { return &body; } + multifunction_body_leaf* clone() override { + return new multifunction_body_leaf(body); + } + +private: + B body; +}; + +// ------ function bodies for hash_buffers and key-matching joins. + +template +class type_to_key_function_body : no_assign { + public: + virtual ~type_to_key_function_body() {} + virtual Output operator()(const Input &input) = 0; // returns an Output + virtual type_to_key_function_body* clone() = 0; +}; + +// specialization for ref output +template +class type_to_key_function_body : no_assign { + public: + virtual ~type_to_key_function_body() {} + virtual const Output & operator()(const Input &input) = 0; // returns a const Output& + virtual type_to_key_function_body* clone() = 0; +}; + +template +class type_to_key_function_body_leaf : public type_to_key_function_body { +public: + type_to_key_function_body_leaf( const B &_body ) : body(_body) { } + Output operator()(const Input &i) override { return tbb::detail::invoke(body, i); } + type_to_key_function_body_leaf* clone() override { + return new type_to_key_function_body_leaf< Input, Output, B>(body); + } +private: + B body; +}; + +template +class type_to_key_function_body_leaf : public type_to_key_function_body< Input, Output&> { +public: + type_to_key_function_body_leaf( const B &_body ) : body(_body) { } + const Output& operator()(const Input &i) override { + return tbb::detail::invoke(body, i); + } + type_to_key_function_body_leaf* clone() override { + return new type_to_key_function_body_leaf< Input, Output&, B>(body); + } +private: + B body; +}; + +// --------------------------- end of function_body containers ------------------------ + +// --------------------------- node task bodies --------------------------------------- + +//! 
A task that calls a node's forward_task function +template< typename NodeType > +class forward_task_bypass : public graph_task { + NodeType &my_node; +public: + forward_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType &n + , node_priority_t node_priority = no_priority + ) : graph_task(g, allocator, node_priority), + my_node(n) {} + + d1::task* execute(d1::execution_data& ed) override { + graph_task* next_task = my_node.forward_task(); + if (SUCCESSFULLY_ENQUEUED == next_task) + next_task = nullptr; + else if (next_task) + next_task = prioritize_task(my_node.graph_reference(), *next_task); + finalize(ed); + return next_task; + } + + d1::task* cancel(d1::execution_data& ed) override { + finalize(ed); + return nullptr; + } +}; + +//! A task that calls a node's apply_body_bypass function, passing in an input of type Input +// return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return nullptr +template< typename NodeType, typename Input, typename BaseTaskType = graph_task> +class apply_body_task_bypass + : public BaseTaskType +{ + NodeType &my_node; + Input my_input; + + using check_metainfo = std::is_same; + using without_metainfo = std::true_type; + using with_metainfo = std::false_type; + + graph_task* call_apply_body_bypass_impl(without_metainfo) { + return my_node.apply_body_bypass(my_input + __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* call_apply_body_bypass_impl(with_metainfo) { + return my_node.apply_body_bypass(my_input, message_metainfo{this->get_msg_wait_context_vertices()}); + } +#endif + + graph_task* call_apply_body_bypass() { + return call_apply_body_bypass_impl(check_metainfo{}); + } + +public: +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + apply_body_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType &n, const Input &i, + node_priority_t node_priority, Metainfo&& metainfo ) + : BaseTaskType(g, allocator, node_priority, std::forward(metainfo).waiters()) + , my_node(n), my_input(i) {} +#endif + + apply_body_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType& n, const Input& i, + node_priority_t node_priority = no_priority ) + : BaseTaskType(g, allocator, node_priority), my_node(n), my_input(i) {} + + d1::task* execute(d1::execution_data& ed) override { + graph_task* next_task = call_apply_body_bypass(); + if (SUCCESSFULLY_ENQUEUED == next_task) + next_task = nullptr; + else if (next_task) + next_task = prioritize_task(my_node.graph_reference(), *next_task); + BaseTaskType::template finalize(ed); + return next_task; + } + + d1::task* cancel(d1::execution_data& ed) override { + BaseTaskType::template finalize(ed); + return nullptr; + } +}; + +//! 
A task that calls a node's apply_body_bypass function with no input +template< typename NodeType > +class input_node_task_bypass : public graph_task { + NodeType &my_node; +public: + input_node_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType &n ) + : graph_task(g, allocator), my_node(n) {} + + d1::task* execute(d1::execution_data& ed) override { + graph_task* next_task = my_node.apply_body_bypass( ); + if (SUCCESSFULLY_ENQUEUED == next_task) + next_task = nullptr; + else if (next_task) + next_task = prioritize_task(my_node.graph_reference(), *next_task); + finalize(ed); + return next_task; + } + + d1::task* cancel(d1::execution_data& ed) override { + finalize(ed); + return nullptr; + } +}; + +// ------------------------ end of node task bodies ----------------------------------- + +template +class threshold_regulator; + +template +class threshold_regulator::value>::type> + : public receiver, no_copy +{ + T* my_node; +protected: + + graph_task* try_put_task( const DecrementType& value ) override { + graph_task* result = my_node->decrement_counter( value ); + if( !result ) + result = SUCCESSFULLY_ENQUEUED; + return result; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + // Intentionally ignore the metainformation + // If there are more items associated with passed metainfo to be processed + // They should be stored in the buffer before the limiter_node + graph_task* try_put_task(const DecrementType& value, const message_metainfo&) override { + return try_put_task(value); + } +#endif + + graph& graph_reference() const override { + return my_node->my_graph; + } + + template friend class limiter_node; + void reset_receiver( reset_flags ) {} + +public: + threshold_regulator(T* owner) : my_node(owner) { + // Do not work with the passed pointer here as it may not be fully initialized yet + } +}; + +template +class threshold_regulator : public continue_receiver, no_copy { + + T *my_node; + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + // Intentionally ignore the metainformation + // If there are more items associated with passed metainfo to be processed + // They should be stored in the buffer before the limiter_node + graph_task* execute(const message_metainfo&) override { +#else + graph_task* execute() override { +#endif + return my_node->decrement_counter( 1 ); + } + +protected: + + graph& graph_reference() const override { + return my_node->my_graph; + } + +public: + + typedef continue_msg input_type; + typedef continue_msg output_type; + threshold_regulator(T* owner) + : continue_receiver( /*number_of_predecessors=*/0, no_priority ), my_node(owner) + { + // Do not work with the passed pointer here as it may not be fully initialized yet + } +}; + +#endif // __TBB__flow_graph_body_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_cache_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_cache_impl.h new file mode 100644 index 000000000..647f3dc1b --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_cache_impl.h @@ -0,0 +1,501 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_cache_impl_H +#define __TBB__flow_graph_cache_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::detail::d2 (in flow_graph.h) + +//! A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock. +template< typename T, typename M=spin_mutex > +class node_cache { + public: + + typedef size_t size_type; + + bool empty() { + typename mutex_type::scoped_lock lock( my_mutex ); + return internal_empty(); + } + + void add( T &n ) { + typename mutex_type::scoped_lock lock( my_mutex ); + internal_push(n); + } + + void remove( T &n ) { + typename mutex_type::scoped_lock lock( my_mutex ); + for ( size_t i = internal_size(); i != 0; --i ) { + T &s = internal_pop(); + if ( &s == &n ) + break; // only remove one predecessor per request + internal_push(s); + } + } + + void clear() { + while( !my_q.empty()) (void)my_q.pop(); + } + +protected: + + typedef M mutex_type; + mutex_type my_mutex; + std::queue< T * > my_q; + + // Assumes lock is held + inline bool internal_empty( ) { + return my_q.empty(); + } + + // Assumes lock is held + inline size_type internal_size( ) { + return my_q.size(); + } + + // Assumes lock is held + inline void internal_push( T &n ) { + my_q.push(&n); + } + + // Assumes lock is held + inline T &internal_pop() { + T *v = my_q.front(); + my_q.pop(); + return *v; + } + +}; + +//! A cache of predecessors that only supports try_get +template< typename T, typename M=spin_mutex > +class predecessor_cache : public node_cache< sender, M > { +public: + typedef M mutex_type; + typedef T output_type; + typedef sender predecessor_type; + typedef receiver successor_type; + + predecessor_cache( successor_type* owner ) : my_owner( owner ) { + __TBB_ASSERT( my_owner, "predecessor_cache should have an owner." ); + // Do not work with the passed pointer here as it may not be fully initialized yet + } + +private: + bool get_item_impl( output_type& v + __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo* metainfo_ptr = nullptr) ) + { + + bool successful_get = false; + + do { + predecessor_type *src; + { + typename mutex_type::scoped_lock lock(this->my_mutex); + if ( this->internal_empty() ) { + break; + } + src = &this->internal_pop(); + } + + // Try to get from this sender +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (metainfo_ptr) { + successful_get = src->try_get( v, *metainfo_ptr ); + } else +#endif + { + successful_get = src->try_get( v ); + } + + if (successful_get == false) { + // Relinquish ownership of the edge + register_successor(*src, *my_owner); + } else { + // Retain ownership of the edge + this->add(*src); + } + } while ( successful_get == false ); + return successful_get; + } +public: + bool get_item( output_type& v ) { + return get_item_impl(v); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool get_item( output_type& v, message_metainfo& metainfo ) { + return get_item_impl(v, &metainfo); + } +#endif + + // If we are removing arcs (rf_clear_edges), call clear() rather than reset(). + void reset() { + for(;;) { + predecessor_type *src; + { + if (this->internal_empty()) break; + src = &this->internal_pop(); + } + register_successor(*src, *my_owner); + } + } + +protected: + successor_type* my_owner; +}; + +//! 
An cache of predecessors that supports requests and reservations +template< typename T, typename M=spin_mutex > +class reservable_predecessor_cache : public predecessor_cache< T, M > { +public: + typedef M mutex_type; + typedef T output_type; + typedef sender predecessor_type; + typedef receiver successor_type; + + reservable_predecessor_cache( successor_type* owner ) + : predecessor_cache(owner), reserved_src(nullptr) + { + // Do not work with the passed pointer here as it may not be fully initialized yet + } + +private: + bool try_reserve_impl( output_type &v __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo* metainfo) ) { + bool successful_reserve = false; + + do { + predecessor_type* pred = nullptr; + { + typename mutex_type::scoped_lock lock(this->my_mutex); + if ( reserved_src.load(std::memory_order_relaxed) || this->internal_empty() ) + return false; + + pred = &this->internal_pop(); + reserved_src.store(pred, std::memory_order_relaxed); + } + + // Try to get from this sender +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (metainfo) { + successful_reserve = pred->try_reserve( v, *metainfo ); + } else +#endif + { + successful_reserve = pred->try_reserve( v ); + } + + if (successful_reserve == false) { + typename mutex_type::scoped_lock lock(this->my_mutex); + // Relinquish ownership of the edge + register_successor( *pred, *this->my_owner ); + reserved_src.store(nullptr, std::memory_order_relaxed); + } else { + // Retain ownership of the edge + this->add( *pred); + } + } while ( successful_reserve == false ); + + return successful_reserve; + } +public: + bool try_reserve( output_type& v ) { + return try_reserve_impl(v __TBB_FLOW_GRAPH_METAINFO_ARG(nullptr)); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_reserve( output_type& v, message_metainfo& metainfo ) { + return try_reserve_impl(v, &metainfo); + } +#endif + + bool try_release() { + reserved_src.load(std::memory_order_relaxed)->try_release(); + reserved_src.store(nullptr, std::memory_order_relaxed); + return true; + } + + bool try_consume() { + reserved_src.load(std::memory_order_relaxed)->try_consume(); + reserved_src.store(nullptr, std::memory_order_relaxed); + return true; + } + + void reset() { + reserved_src.store(nullptr, std::memory_order_relaxed); + predecessor_cache::reset(); + } + + void clear() { + reserved_src.store(nullptr, std::memory_order_relaxed); + predecessor_cache::clear(); + } + +private: + std::atomic reserved_src; +}; + + +//! 
An abstract cache of successors +template +class successor_cache : no_copy { +protected: + + typedef M mutex_type; + mutex_type my_mutex; + + typedef receiver successor_type; + typedef receiver* pointer_type; + typedef sender owner_type; + // TODO revamp: introduce heapified collection of successors for strict priorities + typedef std::list< pointer_type > successors_type; + successors_type my_successors; + + owner_type* my_owner; + +public: + successor_cache( owner_type* owner ) : my_owner(owner) { + // Do not work with the passed pointer here as it may not be fully initialized yet + } + + virtual ~successor_cache() {} + + void register_successor( successor_type& r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + if( r.priority() != no_priority ) + my_successors.push_front( &r ); + else + my_successors.push_back( &r ); + } + + void remove_successor( successor_type& r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + for ( typename successors_type::iterator i = my_successors.begin(); + i != my_successors.end(); ++i ) { + if ( *i == & r ) { + my_successors.erase(i); + break; + } + } + } + + bool empty() { + typename mutex_type::scoped_lock l(my_mutex, false); + return my_successors.empty(); + } + + void clear() { + my_successors.clear(); + } + + virtual graph_task* try_put_task( const T& t ) = 0; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + virtual graph_task* try_put_task( const T& t, const message_metainfo& metainfo ) = 0; +#endif +}; // successor_cache + +//! An abstract cache of successors, specialized to continue_msg +template +class successor_cache< continue_msg, M > : no_copy { +protected: + + typedef M mutex_type; + mutex_type my_mutex; + + typedef receiver successor_type; + typedef receiver* pointer_type; + typedef sender owner_type; + typedef std::list< pointer_type > successors_type; + successors_type my_successors; + owner_type* my_owner; + +public: + successor_cache( sender* owner ) : my_owner(owner) { + // Do not work with the passed pointer here as it may not be fully initialized yet + } + + virtual ~successor_cache() {} + + void register_successor( successor_type& r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + if( r.priority() != no_priority ) + my_successors.push_front( &r ); + else + my_successors.push_back( &r ); + __TBB_ASSERT( my_owner, "Cache of successors must have an owner." ); + if ( r.is_continue_receiver() ) { + r.register_predecessor( *my_owner ); + } + } + + void remove_successor( successor_type& r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + for ( successors_type::iterator i = my_successors.begin(); i != my_successors.end(); ++i ) { + if ( *i == &r ) { + __TBB_ASSERT(my_owner, "Cache of successors must have an owner."); + // TODO: check if we need to test for continue_receiver before removing from r. + r.remove_predecessor( *my_owner ); + my_successors.erase(i); + break; + } + } + } + + bool empty() { + typename mutex_type::scoped_lock l(my_mutex, false); + return my_successors.empty(); + } + + void clear() { + my_successors.clear(); + } + + virtual graph_task* try_put_task( const continue_msg& t ) = 0; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + virtual graph_task* try_put_task( const continue_msg& t, const message_metainfo& metainfo ) = 0; +#endif +}; // successor_cache< continue_msg > + +//! 
A cache of successors that are broadcast to +template +class broadcast_cache : public successor_cache { + typedef successor_cache base_type; + typedef M mutex_type; + typedef typename successor_cache::successors_type successors_type; + + graph_task* try_put_task_impl( const T& t __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo) ) { + graph_task * last_task = nullptr; + typename mutex_type::scoped_lock l(this->my_mutex, /*write=*/true); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + graph_task *new_task = (*i)->try_put_task(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + // workaround for icc bug + graph& graph_ref = (*i)->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); // enqueue if necessary + if(new_task) { + ++i; + } + else { // failed + if ( (*i)->register_predecessor(*this->my_owner) ) { + i = this->my_successors.erase(i); + } else { + ++i; + } + } + } + return last_task; + } +public: + + broadcast_cache( typename base_type::owner_type* owner ): base_type(owner) { + // Do not work with the passed pointer here as it may not be fully initialized yet + } + + graph_task* try_put_task( const T &t ) override { + return try_put_task_impl(t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task( const T &t, const message_metainfo& metainfo ) override { + return try_put_task_impl(t, metainfo); + } +#endif + + // call try_put_task and return list of received tasks + bool gather_successful_try_puts( const T &t, graph_task_list& tasks ) { + bool is_at_least_one_put_successful = false; + typename mutex_type::scoped_lock l(this->my_mutex, /*write=*/true); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + graph_task * new_task = (*i)->try_put_task(t); + if(new_task) { + ++i; + if(new_task != SUCCESSFULLY_ENQUEUED) { + tasks.push_back(*new_task); + } + is_at_least_one_put_successful = true; + } + else { // failed + if ( (*i)->register_predecessor(*this->my_owner) ) { + i = this->my_successors.erase(i); + } else { + ++i; + } + } + } + return is_at_least_one_put_successful; + } +}; + +//! 
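[Editor's note] The loop above implements TBB's push-to-pull edge reversal: every successor is offered the message, and a successor that rejects it is asked to register this node as its predecessor and is dropped from the push list. A simplified sketch of that control flow (Succ and switch_to_pull are hypothetical stand-ins for the receiver API):

    #include <list>

    struct Succ {
        bool accepts = true;
        bool try_put(int) { return accepts; }   // a successor may reject
        bool switch_to_pull() { return true; }  // stand-in for register_predecessor
    };

    void broadcast(std::list<Succ*>& succs, int msg) {
        for (auto it = succs.begin(); it != succs.end(); ) {
            if ((*it)->try_put(msg)) {
                ++it;                     // accepted; keep the push edge
            } else if ((*it)->switch_to_pull()) {
                it = succs.erase(it);     // edge reversed to pull; stop pushing
            } else {
                ++it;                     // could not reverse; retry next put
            }
        }
    }

    int main() {
        Succ a, b; b.accepts = false;
        std::list<Succ*> succs{&a, &b};
        broadcast(succs, 7);              // b is flipped to pull mode
        return succs.size() == 1 ? 0 : 1;
    }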
A cache of successors that are put in a round-robin fashion +template +class round_robin_cache : public successor_cache { + typedef successor_cache base_type; + typedef size_t size_type; + typedef M mutex_type; + typedef typename successor_cache::successors_type successors_type; + +public: + + round_robin_cache( typename base_type::owner_type* owner ): base_type(owner) { + // Do not work with the passed pointer here as it may not be fully initialized yet + } + + size_type size() { + typename mutex_type::scoped_lock l(this->my_mutex, false); + return this->my_successors.size(); + } + +private: + + graph_task* try_put_task_impl( const T &t + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo) ) + { + typename mutex_type::scoped_lock l(this->my_mutex, /*write=*/true); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + graph_task* new_task = (*i)->try_put_task(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + if ( new_task ) { + return new_task; + } else { + if ( (*i)->register_predecessor(*this->my_owner) ) { + i = this->my_successors.erase(i); + } + else { + ++i; + } + } + } + return nullptr; + } + +public: + graph_task* try_put_task(const T& t) override { + return try_put_task_impl(t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task( const T& t, const message_metainfo& metainfo ) override { + return try_put_task_impl(t, metainfo); + } +#endif +}; + +#endif // __TBB__flow_graph_cache_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_impl.h new file mode 100644 index 000000000..19e00a8ef --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_impl.h @@ -0,0 +1,570 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_flow_graph_impl_H +#define __TBB_flow_graph_impl_H + +// #include "../config.h" +#include "_task.h" +#include "../task_group.h" +#include "../task_arena.h" +#include "../flow_graph_abstractions.h" + +#include "../concurrent_priority_queue.h" + +#include + +namespace tbb { +namespace detail { + +namespace d2 { + +class graph_task; +static graph_task* const SUCCESSFULLY_ENQUEUED = (graph_task*)-1; +typedef unsigned int node_priority_t; +static const node_priority_t no_priority = node_priority_t(0); + +class graph; +class graph_node; + +template +class graph_iterator { + friend class graph; + friend class graph_node; +public: + typedef size_t size_type; + typedef GraphNodeType value_type; + typedef GraphNodeType* pointer; + typedef GraphNodeType& reference; + typedef const GraphNodeType& const_reference; + typedef std::forward_iterator_tag iterator_category; + + //! Copy constructor + graph_iterator(const graph_iterator& other) : + my_graph(other.my_graph), current_node(other.current_node) + {} + + //! 
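[Editor's note] round_robin_cache differs from broadcast_cache in one respect: delivery stops at the first successor that accepts the message. A minimal sketch under the same hypothetical Succ type as before:

    #include <list>

    struct Succ {
        bool accepts = true;
        bool try_put(int) { return accepts; }
        bool switch_to_pull() { return true; }
    };

    // Unlike broadcast, the message is handed to exactly one taker.
    bool round_robin_put(std::list<Succ*>& succs, int msg) {
        for (auto it = succs.begin(); it != succs.end(); ) {
            if ((*it)->try_put(msg)) return true;        // delivered once
            if ((*it)->switch_to_pull()) it = succs.erase(it);
            else ++it;
        }
        return false;                                    // nobody took it
    }

    int main() {
        Succ a, b; a.accepts = false;
        std::list<Succ*> succs{&a, &b};
        return round_robin_put(succs, 7) ? 0 : 1;        // b receives the message
    }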
Assignment + graph_iterator& operator=(const graph_iterator& other) { + if (this != &other) { + my_graph = other.my_graph; + current_node = other.current_node; + } + return *this; + } + + //! Dereference + reference operator*() const; + + //! Dereference + pointer operator->() const; + + //! Equality + bool operator==(const graph_iterator& other) const { + return ((my_graph == other.my_graph) && (current_node == other.current_node)); + } + +#if !__TBB_CPP20_COMPARISONS_PRESENT + //! Inequality + bool operator!=(const graph_iterator& other) const { return !(operator==(other)); } +#endif + + //! Pre-increment + graph_iterator& operator++() { + internal_forward(); + return *this; + } + + //! Post-increment + graph_iterator operator++(int) { + graph_iterator result = *this; + operator++(); + return result; + } + +private: + // the graph over which we are iterating + GraphContainerType *my_graph; + // pointer into my_graph's my_nodes list + pointer current_node; + + //! Private initializing constructor for begin() and end() iterators + graph_iterator(GraphContainerType *g, bool begin); + void internal_forward(); +}; // class graph_iterator + +// flags to modify the behavior of the graph reset(). Can be combined. +enum reset_flags { + rf_reset_protocol = 0, + rf_reset_bodies = 1 << 0, // delete the current node body, reset to a copy of the initial node body. + rf_clear_edges = 1 << 1 // delete edges +}; + +void activate_graph(graph& g); +void deactivate_graph(graph& g); +bool is_graph_active(graph& g); +graph_task* prioritize_task(graph& g, graph_task& arena_task); +void spawn_in_graph_arena(graph& g, graph_task& arena_task); +void enqueue_in_graph_arena(graph &g, graph_task& arena_task); + +class graph; + +//! Base class for tasks generated by graph nodes. +class graph_task : public d1::task { +public: + graph_task(graph& g, d1::small_object_allocator& allocator, + node_priority_t node_priority = no_priority); + + graph& my_graph; // graph instance the task belongs to + // TODO revamp: rename to my_priority + node_priority_t priority; + template + void destruct_and_deallocate(const d1::execution_data& ed); +protected: + template + void finalize(const d1::execution_data& ed); +private: + // To organize task_list + graph_task* my_next{ nullptr }; + d1::small_object_allocator my_allocator; + d1::wait_tree_vertex_interface* my_reference_vertex; + // TODO revamp: elaborate internal interfaces to avoid friends declarations + friend class graph_task_list; + friend graph_task* prioritize_task(graph& g, graph_task& gt); +}; + +inline bool is_this_thread_in_graph_arena(graph& g); + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +class trackable_messages_graph_task : public graph_task { +public: + trackable_messages_graph_task(graph& g, d1::small_object_allocator& allocator, + node_priority_t node_priority, + const std::forward_list& msg_waiters) + : graph_task(g, allocator, node_priority) + , my_msg_wait_context_vertices(msg_waiters) + { + auto last_iterator = my_msg_reference_vertices.cbefore_begin(); + + for (auto& msg_waiter : my_msg_wait_context_vertices) { + // If the task is created by the thread outside the graph arena, the lifetime of the thread reference vertex + // may be shorter that the lifetime of the task, so thread reference vertex approach cannot be used + // and the task should be associated with the msg wait context itself + d1::wait_tree_vertex_interface* ref_vertex = is_this_thread_in_graph_arena(g) ? 
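[Editor's note] The reset_flags enum above is a bitmask, so flags combine with `|`. A short usage sketch, assuming graph::reset() and the rf_* constants are reachable through tbb::flow as in released TBB versions:

    #include <oneapi/tbb/flow_graph.h>

    int main() {
        using namespace tbb::flow;
        graph g;
        function_node<int, int> n(g, unlimited, [](int v) { return v + 1; });
        n.try_put(41);
        g.wait_for_all();

        // The flags are bits (0, 1<<0, 1<<1): drop every edge and restore
        // the initial copies of node bodies in one call.
        g.reset(reset_flags(rf_clear_edges | rf_reset_bodies));
        return 0;
    }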
+ r1::get_thread_reference_vertex(msg_waiter) : + msg_waiter; + last_iterator = my_msg_reference_vertices.emplace_after(last_iterator, + ref_vertex); + ref_vertex->reserve(1); + } + } + + trackable_messages_graph_task(graph& g, d1::small_object_allocator& allocator, + node_priority_t node_priority, + std::forward_list&& msg_waiters) + : graph_task(g, allocator, node_priority) + , my_msg_wait_context_vertices(std::move(msg_waiters)) + { + } + + const std::forward_list get_msg_wait_context_vertices() const { + return my_msg_wait_context_vertices; + } + +protected: + template + void finalize(const d1::execution_data& ed) { + auto wait_context_vertices = std::move(my_msg_wait_context_vertices); + auto msg_reference_vertices = std::move(my_msg_reference_vertices); + graph_task::finalize(ed); + + // If there is no thread reference vertices associated with the task + // then this task was created by transferring the ownership from other metainfo + // instance (e.g. while taking from the buffer) + if (msg_reference_vertices.empty()) { + for (auto& msg_waiter : wait_context_vertices) { + msg_waiter->release(1); + } + } else { + for (auto& msg_waiter : msg_reference_vertices) { + msg_waiter->release(1); + } + } + } +private: + // Each task that holds information about single message wait_contexts should hold two lists + // The first one is wait_contexts associated with the message itself. They are needed + // to be able to broadcast the list of wait_contexts to the node successors while executing the task. + // The second list is a list of reference vertices for each wait_context_vertex in the first list + // to support the distributed reference counting schema + std::forward_list my_msg_wait_context_vertices; + std::forward_list my_msg_reference_vertices; +}; // class trackable_messages_graph_task +#endif // __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + +struct graph_task_comparator { + bool operator()(const graph_task* left, const graph_task* right) { + return left->priority < right->priority; + } +}; + +typedef tbb::concurrent_priority_queue graph_task_priority_queue_t; + +class priority_task_selector : public d1::task { +public: + priority_task_selector(graph_task_priority_queue_t& priority_queue, d1::small_object_allocator& allocator) + : my_priority_queue(priority_queue), my_allocator(allocator), my_task() {} + task* execute(d1::execution_data& ed) override { + next_task(); + __TBB_ASSERT(my_task, nullptr); + task* t_next = my_task->execute(ed); + my_allocator.delete_object(this, ed); + return t_next; + } + task* cancel(d1::execution_data& ed) override { + if (!my_task) { + next_task(); + } + __TBB_ASSERT(my_task, nullptr); + task* t_next = my_task->cancel(ed); + my_allocator.delete_object(this, ed); + return t_next; + } +private: + void next_task() { + // TODO revamp: hold functors in priority queue instead of real tasks + bool result = my_priority_queue.try_pop(my_task); + __TBB_ASSERT_EX(result, "Number of critical tasks for scheduler and tasks" + " in graph's priority queue mismatched"); + __TBB_ASSERT(my_task && my_task != SUCCESSFULLY_ENQUEUED, + "Incorrect task submitted to graph priority queue"); + __TBB_ASSERT(my_task->priority != no_priority, + "Tasks from graph's priority queue must have priority"); + } + + graph_task_priority_queue_t& my_priority_queue; + d1::small_object_allocator my_allocator; + graph_task* my_task; +}; + +template class run_and_put_task; +template class run_task; + +//******************************************************************************** +// graph tasks 
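[Editor's note] graph_task_comparator orders tasks by their priority field, which makes the priority queue a max-heap: try_pop() yields the highest-priority task first. A self-contained sketch of the same comparator shape against the public tbb::concurrent_priority_queue:

    #include <oneapi/tbb/concurrent_priority_queue.h>

    struct Task { unsigned priority; };

    // Same shape as graph_task_comparator: "less by priority" turns the
    // queue into a max-heap on priority.
    struct ByPriority {
        bool operator()(const Task* a, const Task* b) const {
            return a->priority < b->priority;
        }
    };

    int main() {
        tbb::concurrent_priority_queue<Task*, ByPriority> q;
        Task lo{1}, hi{7};
        q.push(&lo);
        q.push(&hi);
        Task* t = nullptr;
        bool ok = q.try_pop(t);   // yields &hi first
        return (ok && t == &hi) ? 0 : 1;
    }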
helpers +//******************************************************************************** + +//! The list of graph tasks +class graph_task_list : no_copy { +private: + graph_task* my_first; + graph_task** my_next_ptr; +public: + //! Construct empty list + graph_task_list() : my_first(nullptr), my_next_ptr(&my_first) {} + + //! True if list is empty; false otherwise. + bool empty() const { return !my_first; } + + //! Push task onto back of list. + void push_back(graph_task& task) { + task.my_next = nullptr; + *my_next_ptr = &task; + my_next_ptr = &task.my_next; + } + + //! Pop the front task from the list. + graph_task& pop_front() { + __TBB_ASSERT(!empty(), "attempt to pop item from empty task_list"); + graph_task* result = my_first; + my_first = result->my_next; + if (!my_first) { + my_next_ptr = &my_first; + } + return *result; + } +}; + +//! The graph class +/** This class serves as a handle to the graph */ +class graph : no_copy, public graph_proxy { + friend class graph_node; + + void prepare_task_arena(bool reinit = false) { + if (reinit) { + __TBB_ASSERT(my_task_arena, "task arena is nullptr"); + my_task_arena->terminate(); + my_task_arena->initialize(task_arena::attach()); + } + else { + __TBB_ASSERT(my_task_arena == nullptr, "task arena is not nullptr"); + my_task_arena = new task_arena(task_arena::attach()); + } + if (!my_task_arena->is_active()) // failed to attach + my_task_arena->initialize(); // create a new, default-initialized arena + __TBB_ASSERT(my_task_arena->is_active(), "task arena is not active"); + } + +public: + //! Constructs a graph with isolated task_group_context + graph(); + + //! Constructs a graph with use_this_context as context + explicit graph(task_group_context& use_this_context); + + //! Destroys the graph. + /** Calls wait_for_all, then destroys the root task and context. */ + ~graph(); + + //! Used to register that an external entity may still interact with the graph. + /** The graph will not return from wait_for_all until a matching number of release_wait calls is + made. */ + void reserve_wait() override; + + //! Deregisters an external entity that may have interacted with the graph. + /** The graph will not return from wait_for_all until all the number of reserve_wait calls + matches the number of release_wait calls. */ + void release_wait() override; + + //! Wait until graph is idle and the number of release_wait calls equals to the number of + //! reserve_wait calls. + /** The waiting thread will go off and steal work while it is blocked in the wait_for_all. */ + void wait_for_all() { + cancelled = false; + caught_exception = false; + try_call([this] { + my_task_arena->execute([this] { + wait(my_wait_context_vertex.get_context(), *my_context); + }); + cancelled = my_context->is_group_execution_cancelled(); + }).on_exception([this] { + my_context->reset(); + caught_exception = true; + cancelled = true; + }); + // TODO: the "if" condition below is just a work-around to support the concurrent wait + // mode. The cancellation and exception mechanisms are still broken in this mode. + // Consider using task group not to re-implement the same functionality. + if (!(my_context->traits() & task_group_context::concurrent_wait)) { + my_context->reset(); // consistent with behavior in catch() + } + } + + // TODO revamp: consider adding getter for task_group_context. 
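[Editor's note] For orientation, this class is what backs the public tbb::flow::graph. A minimal usage sketch of the wait_for_all() semantics described above (the waiting thread joins in and steals work rather than blocking idle):

    #include <oneapi/tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        using namespace tbb::flow;
        graph g;
        function_node<int, int> doubler(g, unlimited, [](int v) { return 2 * v; });
        function_node<int> printer(g, serial, [](int v) {
            std::printf("%d\n", v);
            return continue_msg{};
        });

        make_edge(doubler, printer);
        for (int i = 0; i < 3; ++i)
            doubler.try_put(i);

        // Returns once the graph is idle; the caller steals work meanwhile.
        g.wait_for_all();
        return 0;
    }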
+ + // ITERATORS + template + friend class graph_iterator; + + // Graph iterator typedefs + typedef graph_iterator iterator; + typedef graph_iterator const_iterator; + + // Graph iterator constructors + //! start iterator + iterator begin(); + //! end iterator + iterator end(); + //! start const iterator + const_iterator begin() const; + //! end const iterator + const_iterator end() const; + //! start const iterator + const_iterator cbegin() const; + //! end const iterator + const_iterator cend() const; + + // thread-unsafe state reset. + void reset(reset_flags f = rf_reset_protocol); + + //! cancels execution of the associated task_group_context + void cancel(); + + //! return status of graph execution + bool is_cancelled() { return cancelled; } + bool exception_thrown() { return caught_exception; } + +private: + d1::wait_context_vertex my_wait_context_vertex; + task_group_context *my_context; + bool own_context; + bool cancelled; + bool caught_exception; + bool my_is_active; + + graph_node *my_nodes, *my_nodes_last; + + tbb::spin_mutex nodelist_mutex; + void register_node(graph_node *n); + void remove_node(graph_node *n); + + task_arena* my_task_arena; + + graph_task_priority_queue_t my_priority_queue; + + d1::wait_context_vertex& get_wait_context_vertex() { return my_wait_context_vertex; } + + friend void activate_graph(graph& g); + friend void deactivate_graph(graph& g); + friend bool is_graph_active(graph& g); + friend bool is_this_thread_in_graph_arena(graph& g); + friend graph_task* prioritize_task(graph& g, graph_task& arena_task); + friend void spawn_in_graph_arena(graph& g, graph_task& arena_task); + friend void enqueue_in_graph_arena(graph &g, graph_task& arena_task); + + friend class d1::task_arena_base; + friend class graph_task; + + template + friend class receiver; +}; // class graph + +template +inline void graph_task::destruct_and_deallocate(const d1::execution_data& ed) { + auto allocator = my_allocator; + // TODO: investigate if direct call of derived destructor gives any benefits. + this->~graph_task(); + allocator.deallocate(static_cast(this), ed); +} + +template +inline void graph_task::finalize(const d1::execution_data& ed) { + d1::wait_tree_vertex_interface* reference_vertex = my_reference_vertex; + destruct_and_deallocate(ed); + reference_vertex->release(); +} + +inline graph_task::graph_task(graph& g, d1::small_object_allocator& allocator, + node_priority_t node_priority) + : my_graph(g) + , priority(node_priority) + , my_allocator(allocator) +{ + // If the task is created by the thread outside the graph arena, the lifetime of the thread reference vertex + // may be shorter that the lifetime of the task, so thread reference vertex approach cannot be used + // and the task should be associated with the graph wait context itself + // TODO: consider how reference counting can be improved for such a use case. Most common example is the async_node + d1::wait_context_vertex* graph_wait_context_vertex = &my_graph.get_wait_context_vertex(); + my_reference_vertex = is_this_thread_in_graph_arena(g) ? r1::get_thread_reference_vertex(graph_wait_context_vertex) + : graph_wait_context_vertex; + __TBB_ASSERT(my_reference_vertex, nullptr); + my_reference_vertex->reserve(); +} + +//******************************************************************************** +// end of graph tasks helpers +//******************************************************************************** + + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +class get_graph_helper; +#endif + +//! 
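[Editor's note] reserve_wait()/release_wait(), declared above, let an activity outside the graph keep wait_for_all() from returning until a matching release arrives. A small sketch with a hypothetical external producer thread:

    #include <oneapi/tbb/flow_graph.h>
    #include <thread>

    int main() {
        tbb::flow::graph g;
        tbb::flow::function_node<int, int> sink(
            g, tbb::flow::serial, [](int v) { return v; });

        // Tell the graph an external activity is still pending, so
        // wait_for_all() does not return early.
        g.reserve_wait();
        std::thread producer([&g, &sink] {
            sink.try_put(42);
            g.release_wait();   // matching release lets wait_for_all() finish
        });

        g.wait_for_all();
        producer.join();
        return 0;
    }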
The base of all graph nodes. +class graph_node : no_copy { + friend class graph; + template + friend class graph_iterator; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + +protected: + graph& my_graph; + graph& graph_reference() const { + // TODO revamp: propagate graph_reference() method to all the reference places. + return my_graph; + } + graph_node* next = nullptr; + graph_node* prev = nullptr; +public: + explicit graph_node(graph& g); + + virtual ~graph_node(); + +protected: + // performs the reset on an individual node. + virtual void reset_node(reset_flags f = rf_reset_protocol) = 0; +}; // class graph_node + +inline void activate_graph(graph& g) { + g.my_is_active = true; +} + +inline void deactivate_graph(graph& g) { + g.my_is_active = false; +} + +inline bool is_graph_active(graph& g) { + return g.my_is_active; +} + +inline bool is_this_thread_in_graph_arena(graph& g) { + __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), nullptr); + return r1::execution_slot(*g.my_task_arena) != d1::slot_id(-1); +} + +inline graph_task* prioritize_task(graph& g, graph_task& gt) { + if( no_priority == gt.priority ) + return > + + //! Non-preemptive priority pattern. The original task is submitted as a work item to the + //! priority queue, and a new critical task is created to take and execute a work item with + //! the highest known priority. The reference counting responsibility is transferred to + //! the new task. + d1::task* critical_task = gt.my_allocator.new_object(g.my_priority_queue, gt.my_allocator); + __TBB_ASSERT( critical_task, "bad_alloc?" ); + g.my_priority_queue.push(>); + using tbb::detail::d1::submit; + submit( *critical_task, *g.my_task_arena, *g.my_context, /*as_critical=*/true ); + return nullptr; +} + +//! Spawns a task inside graph arena +inline void spawn_in_graph_arena(graph& g, graph_task& arena_task) { + if (is_graph_active(g)) { + d1::task* gt = prioritize_task(g, arena_task); + if( !gt ) + return; + + __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), nullptr); + submit( *gt, *g.my_task_arena, *g.my_context +#if __TBB_PREVIEW_CRITICAL_TASKS + , /*as_critical=*/false +#endif + ); + } +} + +// TODO revamp: unify *_in_graph_arena functions + +//! Enqueues a task inside graph arena +inline void enqueue_in_graph_arena(graph &g, graph_task& arena_task) { + if (is_graph_active(g)) { + __TBB_ASSERT( g.my_task_arena && g.my_task_arena->is_active(), "Is graph's arena initialized and active?" ); + + // TODO revamp: decide on the approach that does not postpone critical task + if( d1::task* gt = prioritize_task(g, arena_task) ) + submit( *gt, *g.my_task_arena, *g.my_context, /*as_critical=*/false); + } +} + +} // namespace d2 +} // namespace detail +} // namespace tbb + +#endif // __TBB_flow_graph_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h new file mode 100644 index 000000000..a74331007 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h @@ -0,0 +1,370 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_indexer_impl_H +#define __TBB__flow_graph_indexer_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::detail::d2 + +#include "_flow_graph_types_impl.h" + + // Output of the indexer_node is a tbb::flow::tagged_msg, and will be of + // the form tagged_msg + // where the value of tag will indicate which result was put to the + // successor. + + template + graph_task* do_try_put(const T &v, void *p __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + typename IndexerNodeBaseType::output_type o(K, v); + return reinterpret_cast(p)->try_put_task(&o __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } + + template + struct indexer_helper { + template + static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p, graph& g) { + typedef typename std::tuple_element::type T; + auto indexer_node_put_task = do_try_put; + std::get(my_input).set_up(p, indexer_node_put_task, g); + indexer_helper::template set_indexer_node_pointer(my_input, p, g); + } + }; + + template + struct indexer_helper { + template + static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p, graph& g) { + typedef typename std::tuple_element<0, TupleTypes>::type T; + auto indexer_node_put_task = do_try_put; + std::get<0>(my_input).set_up(p, indexer_node_put_task, g); + } + }; + + template + class indexer_input_port : public receiver { + private: + void* my_indexer_ptr; + typedef graph_task* (* forward_function_ptr)(T const &, void* + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo&)); + forward_function_ptr my_try_put_task; + graph* my_graph; + public: + void set_up(void* p, forward_function_ptr f, graph& g) { + my_indexer_ptr = p; + my_try_put_task = f; + my_graph = &g; + } + + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + graph_task* try_put_task(const T &v) override { + return my_try_put_task(v, my_indexer_ptr __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T& v, const message_metainfo& metainfo) override { + return my_try_put_task(v, my_indexer_ptr, metainfo); + } +#endif + + graph& graph_reference() const override { + return *my_graph; + } + }; + + template + class indexer_node_FE { + public: + static const int N = std::tuple_size::value; + typedef OutputType output_type; + typedef InputTuple input_type; + + // Some versions of Intel(R) C++ Compiler fail to generate an implicit constructor for the class which has std::tuple as a member. + indexer_node_FE() : my_inputs() {} + + input_type &input_ports() { return my_inputs; } + protected: + input_type my_inputs; + }; + + //! 
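[Editor's note] As the comment above says, the indexer's output is a tagged_msg whose tag records which input port the value arrived on. A usage sketch with the public indexer_node API:

    #include <oneapi/tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        using namespace tbb::flow;
        graph g;

        indexer_node<int, float> merge(g);
        using msg_t = indexer_node<int, float>::output_type;  // a tagged_msg

        function_node<msg_t> dump(g, serial, [](const msg_t& m) {
            // tag() is the index of the originating port; cast_to<T>
            // retrieves the payload.
            if (m.tag() == 0) std::printf("port 0: %d\n", cast_to<int>(m));
            else              std::printf("port 1: %f\n", cast_to<float>(m));
            return continue_msg{};
        });

        make_edge(merge, dump);
        input_port<0>(merge).try_put(3);
        input_port<1>(merge).try_put(2.5f);
        g.wait_for_all();
        return 0;
    }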
indexer_node_base + template + class indexer_node_base : public graph_node, public indexer_node_FE, + public sender { + protected: + using graph_node::my_graph; + public: + static const size_t N = std::tuple_size::value; + typedef OutputType output_type; + typedef StructTypes tuple_types; + typedef typename sender::successor_type successor_type; + typedef indexer_node_FE input_ports_type; + + private: + // ----------- Aggregator ------------ + enum op_type { reg_succ, rem_succ, try__put_task + }; + typedef indexer_node_base class_type; + + class indexer_node_base_operation : public d1::aggregated_operation { + public: + char type; + union { + output_type const *my_arg; + successor_type *my_succ; + graph_task* bypass_t; + }; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo const* metainfo; +#endif + indexer_node_base_operation(const output_type* e, op_type t) : + type(char(t)), my_arg(e) __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(nullptr)) + {} +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + indexer_node_base_operation(const output_type* e, op_type t, const message_metainfo& info) + : type(char(t)), my_arg(e), metainfo(&info) {} +#endif + indexer_node_base_operation(const successor_type &s, op_type t) : type(char(t)), + my_succ(const_cast(&s)) {} + }; + + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; + + void handle_operations(indexer_node_base_operation* op_list) { + indexer_node_base_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + + case reg_succ: + my_successors.register_successor(*(current->my_succ)); + current->status.store( SUCCEEDED, std::memory_order_release); + break; + + case rem_succ: + my_successors.remove_successor(*(current->my_succ)); + current->status.store( SUCCEEDED, std::memory_order_release); + break; + case try__put_task: { + current->bypass_t = my_successors.try_put_task(*(current->my_arg) + __TBB_FLOW_GRAPH_METAINFO_ARG(*(current->metainfo))); + current->status.store( SUCCEEDED, std::memory_order_release); // return of try_put_task actual return value + } + break; + } + } + } + // ---------- end aggregator ----------- + public: + indexer_node_base(graph& g) : graph_node(g), input_ports_type(), my_successors(this) { + indexer_helper::set_indexer_node_pointer(this->my_inputs, this, g); + my_aggregator.initialize_handler(handler_type(this)); + } + + indexer_node_base(const indexer_node_base& other) + : graph_node(other.my_graph), input_ports_type(), sender(), my_successors(this) + { + indexer_helper::set_indexer_node_pointer(this->my_inputs, this, other.my_graph); + my_aggregator.initialize_handler(handler_type(this)); + } + + bool register_successor(successor_type &r) override { + indexer_node_base_operation op_data(r, reg_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + bool remove_successor( successor_type &r) override { + indexer_node_base_operation op_data(r, rem_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + // not a virtual method in this class + graph_task* try_put_task(output_type const *v + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + indexer_node_base_operation op_data(v, try__put_task __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + + protected: + void reset_node(reset_flags f) override { + if(f & rf_clear_edges) { + my_successors.clear(); + } + 
} + + private: + broadcast_cache my_successors; + }; //indexer_node_base + + + template struct input_types; + + template + struct input_types<1, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef tagged_msg type; + }; + + template + struct input_types<2, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef tagged_msg type; + }; + + template + struct input_types<3, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef tagged_msg type; + }; + + template + struct input_types<4, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef tagged_msg type; + }; + + template + struct input_types<5, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef typename std::tuple_element<4, InputTuple>::type fifth_type; + typedef tagged_msg type; + }; + + template + struct input_types<6, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef typename std::tuple_element<4, InputTuple>::type fifth_type; + typedef typename std::tuple_element<5, InputTuple>::type sixth_type; + typedef tagged_msg type; + }; + + template + struct input_types<7, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef typename std::tuple_element<4, InputTuple>::type fifth_type; + typedef typename std::tuple_element<5, InputTuple>::type sixth_type; + typedef typename std::tuple_element<6, InputTuple>::type seventh_type; + typedef tagged_msg type; + }; + + + template + struct input_types<8, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef typename std::tuple_element<4, InputTuple>::type fifth_type; + typedef typename std::tuple_element<5, InputTuple>::type sixth_type; + typedef typename std::tuple_element<6, InputTuple>::type seventh_type; + typedef typename std::tuple_element<7, InputTuple>::type eighth_type; + typedef tagged_msg type; + }; + + + template + struct input_types<9, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef 
typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef typename std::tuple_element<4, InputTuple>::type fifth_type; + typedef typename std::tuple_element<5, InputTuple>::type sixth_type; + typedef typename std::tuple_element<6, InputTuple>::type seventh_type; + typedef typename std::tuple_element<7, InputTuple>::type eighth_type; + typedef typename std::tuple_element<8, InputTuple>::type nineth_type; + typedef tagged_msg type; + }; + + template + struct input_types<10, InputTuple> { + typedef typename std::tuple_element<0, InputTuple>::type first_type; + typedef typename std::tuple_element<1, InputTuple>::type second_type; + typedef typename std::tuple_element<2, InputTuple>::type third_type; + typedef typename std::tuple_element<3, InputTuple>::type fourth_type; + typedef typename std::tuple_element<4, InputTuple>::type fifth_type; + typedef typename std::tuple_element<5, InputTuple>::type sixth_type; + typedef typename std::tuple_element<6, InputTuple>::type seventh_type; + typedef typename std::tuple_element<7, InputTuple>::type eighth_type; + typedef typename std::tuple_element<8, InputTuple>::type nineth_type; + typedef typename std::tuple_element<9, InputTuple>::type tenth_type; + typedef tagged_msg type; + }; + + // type generators + template + struct indexer_types : public input_types::value, OutputTuple> { + static const int N = std::tuple_size::value; + typedef typename input_types::type output_type; + typedef typename wrap_tuple_elements::type input_ports_type; + typedef indexer_node_FE indexer_FE_type; + typedef indexer_node_base indexer_base_type; + }; + + template + class unfolded_indexer_node : public indexer_types::indexer_base_type { + public: + typedef typename indexer_types::input_ports_type input_ports_type; + typedef OutputTuple tuple_types; + typedef typename indexer_types::output_type output_type; + private: + typedef typename indexer_types::indexer_base_type base_type; + public: + unfolded_indexer_node(graph& g) : base_type(g) {} + unfolded_indexer_node(const unfolded_indexer_node &other) : base_type(other) {} + }; + +#endif /* __TBB__flow_graph_indexer_impl_H */ diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_item_buffer_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_item_buffer_impl.h new file mode 100644 index 000000000..cf7c54b85 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_item_buffer_impl.h @@ -0,0 +1,422 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_item_buffer_impl_H +#define __TBB__flow_graph_item_buffer_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include "_aligned_space.h" + +// in namespace tbb::flow::interfaceX (included in _flow_graph_node_impl.h) + +//! Expandable buffer of items. The possible operations are push, pop, +//* tests for empty and so forth. 
No mutual exclusion is built in. +//* objects are constructed into and explicitly-destroyed. get_my_item gives +// a read-only reference to the item in the buffer. set_my_item may be called +// with either an empty or occupied slot. + +template > +class item_buffer { +public: + typedef T item_type; + enum buffer_item_state { no_item=0, has_item=1, reserved_item=2 }; +protected: + struct aligned_space_item { + item_type item; + buffer_item_state state; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo metainfo; +#endif + }; + typedef size_t size_type; + typedef aligned_space buffer_item_type; + typedef typename allocator_traits::template rebind_alloc allocator_type; + buffer_item_type *my_array; + size_type my_array_size; + static const size_type initial_buffer_size = 4; + size_type my_head; + size_type my_tail; + + bool buffer_empty() const { return my_head == my_tail; } + + aligned_space_item &element(size_type i) { + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].begin()->state))%alignment_of::value), nullptr); + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].begin()->item))%alignment_of::value), nullptr); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].begin()->metainfo))%alignment_of::value), nullptr); +#endif + return *my_array[i & (my_array_size - 1) ].begin(); + } + + const aligned_space_item &element(size_type i) const { + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].begin()->state))%alignment_of::value), nullptr); + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].begin()->item))%alignment_of::value), nullptr); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].begin()->metainfo))%alignment_of::value), nullptr); +#endif + return *my_array[i & (my_array_size-1)].begin(); + } + + bool my_item_valid(size_type i) const { return (i < my_tail) && (i >= my_head) && (element(i).state != no_item); } +#if TBB_USE_ASSERT + bool my_item_reserved(size_type i) const { return element(i).state == reserved_item; } +#endif + + // object management in buffer + const item_type &get_my_item(size_t i) const { + __TBB_ASSERT(my_item_valid(i),"attempt to get invalid item"); + return element(i).item; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo& get_my_metainfo(size_t i) { + __TBB_ASSERT(my_item_valid(i), "attempt to get invalid item"); + return element(i).metainfo; + } +#endif + + // may be called with an empty slot or a slot that has already been constructed into. 
+ void set_my_item(size_t i, const item_type &o + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + if(element(i).state != no_item) { + destroy_item(i); + } + new(&(element(i).item)) item_type(o); + element(i).state = has_item; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + new(&element(i).metainfo) message_metainfo(metainfo); + + for (auto& waiter : metainfo.waiters()) { + waiter->reserve(1); + } +#endif + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + void set_my_item(size_t i, const item_type& o, message_metainfo&& metainfo) { + if(element(i).state != no_item) { + destroy_item(i); + } + + new(&(element(i).item)) item_type(o); + new(&element(i).metainfo) message_metainfo(std::move(metainfo)); + // Skipping the reservation on metainfo.waiters since the ownership + // is moving from metainfo to the cache + element(i).state = has_item; + } +#endif + + // destructively-fetch an object from the buffer +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + void fetch_item(size_t i, item_type& o, message_metainfo& metainfo) { + __TBB_ASSERT(my_item_valid(i), "Trying to fetch an empty slot"); + o = get_my_item(i); // could have std::move assign semantics + metainfo = std::move(get_my_metainfo(i)); + destroy_item(i); + } +#else + void fetch_item(size_t i, item_type &o) { + __TBB_ASSERT(my_item_valid(i), "Trying to fetch an empty slot"); + o = get_my_item(i); // could have std::move assign semantics + destroy_item(i); + } +#endif + + // move an existing item from one slot to another. The moved-to slot must be unoccupied, + // the moved-from slot must exist and not be reserved. The after, from will be empty, + // to will be occupied but not reserved + void move_item(size_t to, size_t from) { + __TBB_ASSERT(!my_item_valid(to), "Trying to move to a non-empty slot"); + __TBB_ASSERT(my_item_valid(from), "Trying to move from an empty slot"); + // could have std::move semantics + set_my_item(to, get_my_item(from) __TBB_FLOW_GRAPH_METAINFO_ARG(get_my_metainfo(from))); + destroy_item(from); + } + + // put an item in an empty slot. 
Return true if successful, else false +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + bool place_item(size_t here, const item_type &me, Metainfo&& metainfo) { +#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES + if(my_item_valid(here)) return false; +#endif + set_my_item(here, me, std::forward(metainfo)); + return true; + } +#else + bool place_item(size_t here, const item_type &me) { +#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES + if(my_item_valid(here)) return false; +#endif + set_my_item(here, me); + return true; + } +#endif + + // could be implemented with std::move semantics + void swap_items(size_t i, size_t j) { + __TBB_ASSERT(my_item_valid(i) && my_item_valid(j), "attempt to swap invalid item(s)"); + item_type temp = get_my_item(i); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo temp_metainfo = get_my_metainfo(i); + set_my_item(i, get_my_item(j), get_my_metainfo(j)); + set_my_item(j, temp, temp_metainfo); +#else + set_my_item(i, get_my_item(j)); + set_my_item(j, temp); +#endif + } + + void destroy_item(size_type i) { + __TBB_ASSERT(my_item_valid(i), "destruction of invalid item"); + + auto& e = element(i); + e.item.~item_type(); + e.state = no_item; + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (auto& msg_waiter : e.metainfo.waiters()) { + msg_waiter->release(1); + } + + e.metainfo.~message_metainfo(); +#endif + } + + // returns the front element + const item_type& front() const + { + __TBB_ASSERT(my_item_valid(my_head), "attempt to fetch head non-item"); + return get_my_item(my_head); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + const message_metainfo& front_metainfo() const + { + __TBB_ASSERT(my_item_valid(my_head), "attempt to fetch head non-item"); + return element(my_head).metainfo; + } +#endif + + // returns the back element + const item_type& back() const + { + __TBB_ASSERT(my_item_valid(my_tail - 1), "attempt to fetch head non-item"); + return get_my_item(my_tail - 1); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + const message_metainfo& back_metainfo() const { + __TBB_ASSERT(my_item_valid(my_tail - 1), "attempt to fetch head non-item"); + return element(my_tail - 1).metainfo; + } +#endif + + // following methods are for reservation of the front of a buffer. + void reserve_item(size_type i) { + __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), "item cannot be reserved"); + element(i).state = reserved_item; + } + + void release_item(size_type i) { + __TBB_ASSERT(my_item_reserved(i), "item is not reserved"); + element(i).state = has_item; + } + + void destroy_front() { destroy_item(my_head); ++my_head; } + void destroy_back() { destroy_item(my_tail-1); --my_tail; } + + // we have to be able to test against a new tail value without changing my_tail + // grow_array doesn't work if we change my_tail when the old array is too small + size_type size(size_t new_tail = 0) { return (new_tail ? new_tail : my_tail) - my_head; } + size_type capacity() { return my_array_size; } + // sequencer_node does not use this method, so we don't + // need a version that passes in the new_tail value. + bool buffer_full() { return size() >= capacity(); } + + //! Grows the internal array. + void grow_my_array( size_t minimum_size ) { + // test that we haven't made the structure inconsistent. + __TBB_ASSERT(capacity() >= my_tail - my_head, "total items exceed capacity"); + size_type new_size = my_array_size ? 
2*my_array_size : initial_buffer_size; + while( new_sizestate = no_item; } + + for( size_type i=my_head; iitem); + (void)new(new_space) item_type(get_my_item(i)); + new_array[i&(new_size-1)].begin()->state = element(i).state; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + char* meta_space = (char *)&(new_array[i&(new_size-1)].begin()->metainfo); + ::new(meta_space) message_metainfo(std::move(element(i).metainfo)); +#endif + } + } + + clean_up_buffer(/*reset_pointers*/false); + + my_array = new_array; + my_array_size = new_size; + } + + bool push_back(item_type& v + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + if (buffer_full()) { + grow_my_array(size() + 1); + } + set_my_item(my_tail, v __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + ++my_tail; + return true; + } + + bool pop_back(item_type& v + __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo& metainfo)) + { + if (!my_item_valid(my_tail - 1)) { + return false; + } + auto& e = element(my_tail - 1); + v = e.item; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + metainfo = std::move(e.metainfo); +#endif + + destroy_back(); + return true; + } + + bool pop_front(item_type& v + __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo& metainfo)) + { + if (!my_item_valid(my_head)) { + return false; + } + auto& e = element(my_head); + v = e.item; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + metainfo = std::move(e.metainfo); +#endif + + destroy_front(); + return true; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool pop_back(item_type& v) { + message_metainfo metainfo; + return pop_back(v, metainfo); + } + + bool pop_front(item_type& v) { + message_metainfo metainfo; + return pop_front(v, metainfo); + } +#endif + + // This is used both for reset and for grow_my_array. In the case of grow_my_array + // we want to retain the values of the head and tail. 
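[Editor's note] grow_my_array() above relies on the buffer's invariant: the capacity is a power of two, head and tail grow monotonically, and the slot for logical index i is i & (capacity - 1), so live items can be re-placed under the new mask without renumbering. A standalone sketch of that indexing scheme (RingBuffer is illustrative, not the TBB type):

    #include <cstddef>
    #include <vector>

    template <typename T>
    class RingBuffer {
        std::vector<T> slots_{4};          // cf. initial_buffer_size
        std::size_t head_ = 0, tail_ = 0;  // grow without bound

        void grow(std::size_t min_size) {
            std::size_t n = slots_.size();
            while (n < min_size) n *= 2;   // keep capacity a power of two
            std::vector<T> next(n);
            // Re-place live items under the new mask; indices are unchanged.
            for (std::size_t i = head_; i < tail_; ++i)
                next[i & (n - 1)] = slots_[i & (slots_.size() - 1)];
            slots_.swap(next);
        }
    public:
        void push_back(const T& v) {
            if (tail_ - head_ == slots_.size()) grow(slots_.size() + 1);
            slots_[tail_ & (slots_.size() - 1)] = v;
            ++tail_;
        }
        bool pop_front(T& v) {
            if (head_ == tail_) return false;
            v = slots_[head_ & (slots_.size() - 1)];
            ++head_;
            return true;
        }
    };

    int main() {
        RingBuffer<int> rb;
        for (int i = 0; i < 10; ++i) rb.push_back(i);  // forces one growth
        int v = -1;
        rb.pop_front(v);
        return v == 0 ? 0 : 1;
    }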
+ void clean_up_buffer(bool reset_pointers) { + if (my_array) { + for( size_type i=my_head; i > +class reservable_item_buffer : public item_buffer { +protected: + using item_buffer::my_item_valid; + using item_buffer::my_head; + +public: + reservable_item_buffer() : item_buffer(), my_reserved(false) {} + void reset() {my_reserved = false; item_buffer::reset(); } +protected: + + bool reserve_front(T &v) { + if(my_reserved || !my_item_valid(this->my_head)) return false; + my_reserved = true; + // reserving the head + v = this->front(); + this->reserve_item(this->my_head); + return true; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool reserve_front(T& v, message_metainfo& metainfo) { + if (my_reserved || !my_item_valid(this->my_head)) return false; + my_reserved = true; + // reserving the head + v = this->front(); + metainfo = this->front_metainfo(); + this->reserve_item(this->my_head); + return true; + } +#endif + + void consume_front() { + __TBB_ASSERT(my_reserved, "Attempt to consume a non-reserved item"); + this->destroy_front(); + my_reserved = false; + } + + void release_front() { + __TBB_ASSERT(my_reserved, "Attempt to release a non-reserved item"); + this->release_item(this->my_head); + my_reserved = false; + } + + bool my_reserved; +}; + +#endif // __TBB__flow_graph_item_buffer_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_join_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_join_impl.h new file mode 100644 index 000000000..8bca9a2c4 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_join_impl.h @@ -0,0 +1,1970 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_join_impl_H +#define __TBB__flow_graph_join_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included into namespace tbb::detail::d2 + + struct forwarding_base : no_assign { + forwarding_base(graph &g) : graph_ref(g) {} + virtual ~forwarding_base() {} + graph& graph_ref; + }; + + struct queueing_forwarding_base : forwarding_base { + using forwarding_base::forwarding_base; + // decrement_port_count may create a forwarding task. If we cannot handle the task + // ourselves, ask decrement_port_count to deal with it. + virtual graph_task* decrement_port_count(bool handle_task) = 0; + }; + + struct reserving_forwarding_base : forwarding_base { + using forwarding_base::forwarding_base; + // decrement_port_count may create a forwarding task. If we cannot handle the task + // ourselves, ask decrement_port_count to deal with it. + virtual graph_task* decrement_port_count() = 0; + virtual void increment_port_count() = 0; + }; + + // specialization that lets us keep a copy of the current_key for building results. + // KeyType can be a reference type. 
+ template + struct matching_forwarding_base : public forwarding_base { + typedef typename std::decay::type current_key_type; + matching_forwarding_base(graph &g) : forwarding_base(g) { } + virtual graph_task* increment_key_count(current_key_type const & /*t*/) = 0; + current_key_type current_key; // so ports can refer to FE's desired items + }; + + template< int N > + struct join_helper { + + template< typename TupleType, typename PortType > + static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { + std::get( my_input ).set_join_node_pointer(port); + join_helper::set_join_node_pointer( my_input, port ); + } + template< typename TupleType > + static inline void consume_reservations( TupleType &my_input ) { + std::get( my_input ).consume(); + join_helper::consume_reservations( my_input ); + } + + template< typename TupleType > + static inline void release_my_reservation( TupleType &my_input ) { + std::get( my_input ).release(); + } + + template + static inline void release_reservations( TupleType &my_input) { + join_helper::release_reservations(my_input); + release_my_reservation(my_input); + } + + template< typename InputTuple, typename OutputTuple > + static inline bool reserve( InputTuple &my_input, OutputTuple &out) { + if ( !std::get( my_input ).reserve( std::get( out ) ) ) return false; + if ( !join_helper::reserve( my_input, out ) ) { + release_my_reservation( my_input ); + return false; + } + return true; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static inline bool reserve(InputTuple& my_input, OutputTuple& out, message_metainfo& metainfo) { + message_metainfo element_metainfo; + if (!std::get(my_input).reserve(std::get(out), element_metainfo)) return false; + if (!join_helper::reserve(my_input, out, metainfo)) { + release_my_reservation(my_input); + return false; + } + metainfo.merge(element_metainfo); + return true; + + } +#endif + + template + static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { + bool res = std::get(my_input).get_item(std::get(out) ); // may fail + return join_helper::get_my_item(my_input, out) && res; // do get on other inputs before returning + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static inline bool get_my_item(InputTuple& my_input, OutputTuple& out, message_metainfo& metainfo) { + message_metainfo element_metainfo; + bool res = std::get(my_input).get_item(std::get(out), element_metainfo); + metainfo.merge(element_metainfo); + return join_helper::get_my_item(my_input, out, metainfo) && res; + } +#endif + + template + static inline bool get_items(InputTuple &my_input, OutputTuple &out) { + return get_my_item(my_input, out); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static inline bool get_items(InputTuple& my_input, OutputTuple& out, message_metainfo& metainfo) { + return get_my_item(my_input, out, metainfo); + } +#endif + + template + static inline void reset_my_port(InputTuple &my_input) { + join_helper::reset_my_port(my_input); + std::get(my_input).reset_port(); + } + + template + static inline void reset_ports(InputTuple& my_input) { + reset_my_port(my_input); + } + + template + static inline void set_key_functors(InputTuple &my_input, KeyFuncTuple &my_key_funcs) { + std::get(my_input).set_my_key_func(std::get(my_key_funcs)); + std::get(my_key_funcs) = nullptr; + join_helper::set_key_functors(my_input, my_key_funcs); + } + + template< typename KeyFuncTuple> + static inline void copy_key_functors(KeyFuncTuple &my_inputs, KeyFuncTuple 
&other_inputs) { + __TBB_ASSERT( + std::get(other_inputs).get_my_key_func(), + "key matching join node should not be instantiated without functors." + ); + std::get(my_inputs).set_my_key_func(std::get(other_inputs).get_my_key_func()->clone()); + join_helper::copy_key_functors(my_inputs, other_inputs); + } + + template + static inline void reset_inputs(InputTuple &my_input, reset_flags f) { + join_helper::reset_inputs(my_input, f); + std::get(my_input).reset_receiver(f); + } + }; // join_helper + + template< > + struct join_helper<1> { + + template< typename TupleType, typename PortType > + static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { + std::get<0>( my_input ).set_join_node_pointer(port); + } + + template< typename TupleType > + static inline void consume_reservations( TupleType &my_input ) { + std::get<0>( my_input ).consume(); + } + + template< typename TupleType > + static inline void release_my_reservation( TupleType &my_input ) { + std::get<0>( my_input ).release(); + } + + template + static inline void release_reservations( TupleType &my_input) { + release_my_reservation(my_input); + } + + template< typename InputTuple, typename OutputTuple > + static inline bool reserve( InputTuple &my_input, OutputTuple &out) { + return std::get<0>( my_input ).reserve( std::get<0>( out ) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static inline bool reserve(InputTuple& my_input, OutputTuple& out, message_metainfo& metainfo) { + message_metainfo element_metainfo; + bool result = std::get<0>(my_input).reserve(std::get<0>(out), element_metainfo); + metainfo.merge(element_metainfo); + return result; + } +#endif + + template + static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { + return std::get<0>(my_input).get_item(std::get<0>(out)); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static inline bool get_my_item(InputTuple& my_input, OutputTuple& out, message_metainfo& metainfo) { + message_metainfo element_metainfo; + bool res = std::get<0>(my_input).get_item(std::get<0>(out), element_metainfo); + metainfo.merge(element_metainfo); + return res; + } +#endif + + template + static inline bool get_items(InputTuple &my_input, OutputTuple &out) { + return get_my_item(my_input, out); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static inline bool get_items(InputTuple& my_input, OutputTuple& out, message_metainfo& metainfo) { + return get_my_item(my_input, out, metainfo); + } +#endif + + template + static inline void reset_my_port(InputTuple &my_input) { + std::get<0>(my_input).reset_port(); + } + + template + static inline void reset_ports(InputTuple& my_input) { + reset_my_port(my_input); + } + + template + static inline void set_key_functors(InputTuple &my_input, KeyFuncTuple &my_key_funcs) { + std::get<0>(my_input).set_my_key_func(std::get<0>(my_key_funcs)); + std::get<0>(my_key_funcs) = nullptr; + } + + template< typename KeyFuncTuple> + static inline void copy_key_functors(KeyFuncTuple &my_inputs, KeyFuncTuple &other_inputs) { + __TBB_ASSERT( + std::get<0>(other_inputs).get_my_key_func(), + "key matching join node should not be instantiated without functors." + ); + std::get<0>(my_inputs).set_my_key_func(std::get<0>(other_inputs).get_my_key_func()->clone()); + } + template + static inline void reset_inputs(InputTuple &my_input, reset_flags f) { + std::get<0>(my_input).reset_receiver(f); + } + }; // join_helper<1> + + //! 
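[Editor's note] join_helper<N> above is classic pre-C++17 tuple recursion: each level operates on element N-1 and delegates to join_helper<N-1>, terminating at the join_helper<1> specialization just shown. The same traversal written with an index_sequence and a fold expression, as a sketch:

    #include <cstdio>
    #include <tuple>
    #include <utility>

    // Compile-time traversal of a tuple of "ports", in the spirit of
    // join_helper<N>::set_join_node_pointer / reset_ports.
    template <typename Tuple, typename Fn, std::size_t... Is>
    void for_each_port_impl(Tuple& t, Fn fn, std::index_sequence<Is...>) {
        (fn(std::get<Is>(t)), ...);   // C++17 fold; the TBB code predates this
    }

    template <typename Tuple, typename Fn>
    void for_each_port(Tuple& t, Fn fn) {
        for_each_port_impl(
            t, fn, std::make_index_sequence<std::tuple_size<Tuple>::value>{});
    }

    int main() {
        std::tuple<int, long, short> ports{1, 2, 3};
        for_each_port(ports, [](auto& p) { std::printf("%ld\n", (long)p); });
        return 0;
    }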
The two-phase join port + template< typename T > + class reserving_port : public receiver { + public: + typedef T input_type; + typedef typename receiver::predecessor_type predecessor_type; + + private: + // ----------- Aggregator ------------ + enum op_type { reg_pred, rem_pred, res_item, rel_res, con_res + }; + typedef reserving_port class_type; + + class reserving_port_operation : public d1::aggregated_operation { + public: + char type; + union { + T *my_arg; + predecessor_type *my_pred; + }; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo* metainfo; +#endif + reserving_port_operation(const T& e, op_type t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo& info)) : + type(char(t)), my_arg(const_cast(&e)) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(&info)) {} +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + reserving_port_operation(const T& e, op_type t) + : type(char(t)), my_arg(const_cast(&e)), metainfo(nullptr) {} +#endif + reserving_port_operation(const predecessor_type &s, op_type t) : type(char(t)), + my_pred(const_cast(&s)) {} + reserving_port_operation(op_type t) : type(char(t)) {} + }; + + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; + + void handle_operations(reserving_port_operation* op_list) { + reserving_port_operation *current; + bool was_missing_predecessors = false; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case reg_pred: + was_missing_predecessors = my_predecessors.empty(); + my_predecessors.add(*(current->my_pred)); + if ( was_missing_predecessors ) { + (void) my_join->decrement_port_count(); // may try to forward + } + current->status.store( SUCCEEDED, std::memory_order_release); + break; + case rem_pred: + if ( !my_predecessors.empty() ) { + my_predecessors.remove(*(current->my_pred)); + if ( my_predecessors.empty() ) // was the last predecessor + my_join->increment_port_count(); + } + // TODO: consider returning failure if there were no predecessors to remove + current->status.store( SUCCEEDED, std::memory_order_release ); + break; + case res_item: + if ( reserved ) { + current->status.store( FAILED, std::memory_order_release); + } + else { + bool reserve_result = false; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (current->metainfo) { + reserve_result = my_predecessors.try_reserve(*(current->my_arg), + *(current->metainfo)); + } else +#endif + { + reserve_result = my_predecessors.try_reserve(*(current->my_arg)); + } + if (reserve_result) { + reserved = true; + current->status.store( SUCCEEDED, std::memory_order_release); + } else { + if ( my_predecessors.empty() ) { + my_join->increment_port_count(); + } + current->status.store( FAILED, std::memory_order_release); + } + } + break; + case rel_res: + reserved = false; + my_predecessors.try_release( ); + current->status.store( SUCCEEDED, std::memory_order_release); + break; + case con_res: + reserved = false; + my_predecessors.try_consume( ); + current->status.store( SUCCEEDED, std::memory_order_release); + break; + } + } + } + + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + graph_task* try_put_task( const T & ) override { + return nullptr; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T&, const message_metainfo&) override { return nullptr; } +#endif + + graph& graph_reference() const override { + return 
my_join->graph_ref; + } + + public: + + //! Constructor + reserving_port() : my_join(nullptr), my_predecessors(this), reserved(false) { + my_aggregator.initialize_handler(handler_type(this)); + } + + // copy constructor + reserving_port(const reserving_port& /* other */) = delete; + + void set_join_node_pointer(reserving_forwarding_base *join) { + my_join = join; + } + + //! Add a predecessor + bool register_predecessor( predecessor_type &src ) override { + reserving_port_operation op_data(src, reg_pred); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + //! Remove a predecessor + bool remove_predecessor( predecessor_type &src ) override { + reserving_port_operation op_data(src, rem_pred); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + //! Reserve an item from the port + bool reserve( T &v ) { + reserving_port_operation op_data(v, res_item); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool reserve( T& v, message_metainfo& metainfo ) { + reserving_port_operation op_data(v, res_item, metainfo); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } +#endif + + //! Release the port + void release( ) { + reserving_port_operation op_data(rel_res); + my_aggregator.execute(&op_data); + } + + //! Complete use of the port + void consume( ) { + reserving_port_operation op_data(con_res); + my_aggregator.execute(&op_data); + } + + void reset_receiver( reset_flags f) { + if(f & rf_clear_edges) my_predecessors.clear(); + else + my_predecessors.reset(); + reserved = false; + __TBB_ASSERT(!(f&rf_clear_edges) || my_predecessors.empty(), "port edges not removed"); + } + + private: +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + + reserving_forwarding_base *my_join; + reservable_predecessor_cache< T, null_mutex > my_predecessors; + bool reserved; + }; // reserving_port + + //! 
queueing join_port + template + class queueing_port : public receiver, public item_buffer { + public: + typedef T input_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef queueing_port class_type; + + // ----------- Aggregator ------------ + private: + enum op_type { get__item, res_port, try__put_task + }; + + class queueing_port_operation : public d1::aggregated_operation { + public: + char type; + T my_val; + T* my_arg; + graph_task* bypass_t; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo* metainfo; +#endif + // constructor for value parameter + queueing_port_operation(const T& e, op_type t __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& info)) + : type(char(t)), my_val(e), my_arg(nullptr) + , bypass_t(nullptr) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(const_cast(&info))) + {} + // constructor for pointer parameter + queueing_port_operation(const T* p, op_type t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo& info)) : + type(char(t)), my_arg(const_cast(p)) + , bypass_t(nullptr) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(&info)) + {} +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + queueing_port_operation(const T* p, op_type t) + : type(char(t)), my_arg(const_cast(p)), bypass_t(nullptr), metainfo(nullptr) + {} +#endif + // constructor with no parameter + queueing_port_operation(op_type t) : type(char(t)), my_arg(nullptr) + , bypass_t(nullptr) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(nullptr)) + {} + }; + + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; + + void handle_operations(queueing_port_operation* op_list) { + queueing_port_operation *current; + bool was_empty; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case try__put_task: { + graph_task* rtask = nullptr; + was_empty = this->buffer_empty(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(current->metainfo, nullptr); + this->push_back(current->my_val, *(current->metainfo)); +#else + this->push_back(current->my_val); +#endif + if (was_empty) rtask = my_join->decrement_port_count(false); + else + rtask = SUCCESSFULLY_ENQUEUED; + current->bypass_t = rtask; + current->status.store( SUCCEEDED, std::memory_order_release); + } + break; + case get__item: + if(!this->buffer_empty()) { + __TBB_ASSERT(current->my_arg, nullptr); + *(current->my_arg) = this->front(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (current->metainfo) { + *(current->metainfo) = this->front_metainfo(); + } +#endif + current->status.store( SUCCEEDED, std::memory_order_release); + } + else { + current->status.store( FAILED, std::memory_order_release); + } + break; + case res_port: + __TBB_ASSERT(this->my_item_valid(this->my_head), "No item to reset"); + this->destroy_front(); + if(this->my_item_valid(this->my_head)) { + (void)my_join->decrement_port_count(true); + } + current->status.store( SUCCEEDED, std::memory_order_release); + break; + } + } + } + // ------------ End Aggregator --------------- + + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + + private: + graph_task* try_put_task_impl(const T& v __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + queueing_port_operation op_data(v, try__put_task __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + my_aggregator.execute(&op_data); + __TBB_ASSERT(op_data.status == SUCCEEDED || !op_data.bypass_t, 
"inconsistent return from aggregator"); + if(!op_data.bypass_t) return SUCCESSFULLY_ENQUEUED; + return op_data.bypass_t; + } + + protected: + graph_task* try_put_task(const T &v) override { + return try_put_task_impl(v __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T& v, const message_metainfo& metainfo) override { + return try_put_task_impl(v, metainfo); + } +#endif + + graph& graph_reference() const override { + return my_join->graph_ref; + } + + public: + + //! Constructor + queueing_port() : item_buffer() { + my_join = nullptr; + my_aggregator.initialize_handler(handler_type(this)); + } + + //! copy constructor + queueing_port(const queueing_port& /* other */) = delete; + + //! record parent for tallying available items + void set_join_node_pointer(queueing_forwarding_base *join) { + my_join = join; + } + + bool get_item( T &v ) { + queueing_port_operation op_data(&v, get__item); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool get_item( T& v, message_metainfo& metainfo ) { + queueing_port_operation op_data(&v, get__item, metainfo); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } +#endif + + // reset_port is called when item is accepted by successor, but + // is initiated by join_node. + void reset_port() { + queueing_port_operation op_data(res_port); + my_aggregator.execute(&op_data); + return; + } + + void reset_receiver(reset_flags) { + item_buffer::reset(); + } + + private: +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + + queueing_forwarding_base *my_join; + }; // queueing_port + +#include "_flow_graph_tagged_buffer_impl.h" + + template + struct count_element { + K my_key; + size_t my_value; + }; + + // method to access the key in the counting table + // the ref has already been removed from K + template< typename K > + struct key_to_count_functor { + typedef count_element table_item_type; + const K& operator()(const table_item_type& v) { return v.my_key; } + }; + + template + struct key_matching_port_base { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + using type = metainfo_hash_buffer; +#else + using type = hash_buffer; +#endif + }; + + // the ports can have only one template parameter. 
We wrap the types needed in + // a traits type + template< class TraitsType > + class key_matching_port : + public receiver, + public key_matching_port_base< typename TraitsType::K, typename TraitsType::T, typename TraitsType::TtoK, + typename TraitsType::KHash >::type + { + public: + typedef TraitsType traits; + typedef key_matching_port class_type; + typedef typename TraitsType::T input_type; + typedef typename TraitsType::K key_type; + typedef typename std::decay::type noref_key_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename TraitsType::TtoK type_to_key_func_type; + typedef typename TraitsType::KHash hash_compare_type; + typedef typename key_matching_port_base::type buffer_type; + + private: +// ----------- Aggregator ------------ + private: + enum op_type { try__put, get__item, res_port + }; + + class key_matching_port_operation : public d1::aggregated_operation { + public: + char type; + input_type my_val; + input_type *my_arg; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo* metainfo = nullptr; +#endif + // constructor for value parameter + key_matching_port_operation(const input_type& e, op_type t + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& info)) + : type(char(t)), my_val(e), my_arg(nullptr) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(const_cast(&info))) {} + + // constructor for pointer parameter + key_matching_port_operation(const input_type* p, op_type t + __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo& info)) + : type(char(t)), my_arg(const_cast(p)) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(&info)) {} + + // constructor with no parameter + key_matching_port_operation(op_type t) : type(char(t)), my_arg(nullptr) {} + }; + + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; + + void handle_operations(key_matching_port_operation* op_list) { + key_matching_port_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case try__put: { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(current->metainfo, nullptr); + bool was_inserted = this->insert_with_key(current->my_val, *(current->metainfo)); +#else + bool was_inserted = this->insert_with_key(current->my_val); +#endif + // return failure if a duplicate insertion occurs + current->status.store( was_inserted ? 
SUCCEEDED : FAILED, std::memory_order_release); + } + break; + case get__item: { + // use current_key from FE for item + __TBB_ASSERT(current->my_arg, nullptr); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(current->metainfo, nullptr); + bool find_result = this->find_with_key(my_join->current_key, *(current->my_arg), + *(current->metainfo)); +#else + bool find_result = this->find_with_key(my_join->current_key, *(current->my_arg)); +#endif +#if TBB_USE_DEBUG + if (!find_result) { + __TBB_ASSERT(false, "Failed to find item corresponding to current_key."); + } +#else + tbb::detail::suppress_unused_warning(find_result); +#endif + current->status.store( SUCCEEDED, std::memory_order_release); + } + break; + case res_port: + // use current_key from FE for item + this->delete_with_key(my_join->current_key); + current->status.store( SUCCEEDED, std::memory_order_release); + break; + } + } + } +// ------------ End Aggregator --------------- + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + private: + graph_task* try_put_task_impl(const input_type& v __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + key_matching_port_operation op_data(v, try__put __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + graph_task* rtask = nullptr; + my_aggregator.execute(&op_data); + if(op_data.status == SUCCEEDED) { + rtask = my_join->increment_key_count((*(this->get_key_func()))(v)); // may spawn + // rtask has to reflect the return status of the try_put + if(!rtask) rtask = SUCCESSFULLY_ENQUEUED; + } + return rtask; + } + protected: + graph_task* try_put_task(const input_type& v) override { + return try_put_task_impl(v __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const input_type& v, const message_metainfo& metainfo) override { + return try_put_task_impl(v, metainfo); + } +#endif + + graph& graph_reference() const override { + return my_join->graph_ref; + } + + public: + + key_matching_port() : receiver(), buffer_type() { + my_join = nullptr; + my_aggregator.initialize_handler(handler_type(this)); + } + + // copy constructor + key_matching_port(const key_matching_port& /*other*/) = delete; +#if __INTEL_COMPILER <= 2021 + // Suppress superfluous diagnostic about virtual keyword absence in a destructor of an inherited + // class while the parent class has the virtual keyword for the destrocutor. + virtual +#endif + ~key_matching_port() { } + + void set_join_node_pointer(forwarding_base *join) { + my_join = dynamic_cast*>(join); + } + + void set_my_key_func(type_to_key_func_type *f) { this->set_key_func(f); } + + type_to_key_func_type* get_my_key_func() { return this->get_key_func(); } + + bool get_item( input_type &v ) { + // aggregator uses current_key from FE for Key + key_matching_port_operation op_data(&v, get__item); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool get_item( input_type& v, message_metainfo& metainfo ) { + // aggregator uses current_key from FE for Key + key_matching_port_operation op_data(&v, get__item, metainfo); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } +#endif + + // reset_port is called when item is accepted by successor, but + // is initiated by join_node. 
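
The three port types in this header implement the join policies a user selects: reserving_port pulls from reservable predecessors only when every port can reserve, queueing_port buffers pushed items per port, and key_matching_port buffers per key. A user-level sketch of the first two against the public tbb/flow_graph.h API (illustrative only, not code from this header):

    #include <tbb/flow_graph.h>
    #include <iostream>
    #include <tuple>

    int main() {
        using namespace tbb::flow;
        graph g;

        // queueing (the default): each port buffers try_put() items itself.
        join_node<std::tuple<int, int>> q(g);
        input_port<0>(q).try_put(1);
        input_port<1>(q).try_put(2);

        // reserving: ports hold no items of their own; they reserve from
        // reservable predecessors (e.g. buffer_node) and consume only once
        // every port's reservation succeeds.
        buffer_node<int> left(g), right(g);
        join_node<std::tuple<int, int>, reserving> r(g);
        make_edge(left,  input_port<0>(r));
        make_edge(right, input_port<1>(r));
        left.try_put(10);   // parked in the buffer: right has nothing to reserve yet
        right.try_put(20);  // now both reservations can succeed

        g.wait_for_all();
        std::tuple<int, int> t;
        if (q.try_get(t)) std::cout << "queueing:  " << std::get<0>(t) << "," << std::get<1>(t) << "\n";
        if (r.try_get(t)) std::cout << "reserving: " << std::get<0>(t) << "," << std::get<1>(t) << "\n";
    }
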
+ void reset_port() { + key_matching_port_operation op_data(res_port); + my_aggregator.execute(&op_data); + return; + } + + void reset_receiver(reset_flags ) { + buffer_type::reset(); + } + + private: + // my_join forwarding base used to count number of inputs that + // received key. + matching_forwarding_base *my_join; + }; // key_matching_port + + using namespace graph_policy_namespace; + + template + class join_node_base; + + //! join_node_FE : implements input port policy + template + class join_node_FE; + + template + class join_node_FE : public reserving_forwarding_base { + private: + static const int N = std::tuple_size::value; + typedef OutputTuple output_type; + typedef InputTuple input_type; + typedef join_node_base base_node_type; // for forwarding + public: + join_node_FE(graph &g) : reserving_forwarding_base(g), my_node(nullptr) { + ports_with_no_inputs = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + join_node_FE(const join_node_FE& other) : reserving_forwarding_base((other.reserving_forwarding_base::graph_ref)), my_node(nullptr) { + ports_with_no_inputs = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; } + + void increment_port_count() override { + ++ports_with_no_inputs; + } + + // if all input_ports have predecessors, spawn forward to try and consume tuples + graph_task* decrement_port_count() override { + if(ports_with_no_inputs.fetch_sub(1) == 1) { + if(is_graph_active(this->graph_ref)) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass task_type; + graph_task* t = allocator.new_object(graph_ref, allocator, *my_node); + spawn_in_graph_arena(this->graph_ref, *t); + } + } + return nullptr; + } + + input_type &input_ports() { return my_inputs; } + + protected: + + void reset( reset_flags f) { + // called outside of parallel contexts + ports_with_no_inputs = N; + join_helper::reset_inputs(my_inputs, f); + } + + // all methods on input ports should be called under mutual exclusion from join_node_base. 
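
That mutual exclusion is what the aggregator members woven through these classes provide: a caller packs its request into a small operation struct, execute() links it onto a shared list, and a single thread drains the list through handle_operations(), so the switch bodies run strictly one at a time without a conventional lock. A standalone sketch of the idiom under invented names (toy_aggregator, op_record) and with none of TBB's task machinery:

    #include <atomic>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct op_record {
        int arg;                     // stands in for the operation payload
        std::atomic<int> status{0};  // 0 = pending, 1 = handled
        op_record* next = nullptr;
    };

    class toy_aggregator {
        std::atomic<op_record*> pending_{nullptr};
        std::mutex handler_lock_;
        int serialized_state_ = 0;   // touched only by the current handler

        void drain() {
            op_record* batch = pending_.exchange(nullptr, std::memory_order_acquire);
            while (batch) {
                op_record* next = batch->next;    // read before publishing status:
                serialized_state_ += batch->arg;  // the owner may reclaim the record
                batch->status.store(1, std::memory_order_release);
                batch = next;
            }
        }

    public:
        // Analogue of my_aggregator.execute(&op_data): publish the record,
        // then either become the handler or wait until a handler processes it.
        void execute(op_record* op) {
            op->next = pending_.load(std::memory_order_relaxed);
            while (!pending_.compare_exchange_weak(op->next, op,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed)) { }
            while (op->status.load(std::memory_order_acquire) == 0) {
                if (handler_lock_.try_lock()) {   // at most one handler at a time
                    drain();
                    handler_lock_.unlock();
                }
            }
        }
        int state() const { return serialized_state_; }
    };

    int main() {
        toy_aggregator agg;
        std::vector<std::thread> ts;
        for (int i = 1; i <= 4; ++i)
            ts.emplace_back([&agg, i] { op_record op; op.arg = i; agg.execute(&op); });
        for (auto& t : ts) t.join();
        std::printf("combined state: %d\n", agg.state());  // 1+2+3+4 = 10
    }
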
+ + bool tuple_build_may_succeed() { + return !ports_with_no_inputs; + } + + bool try_to_make_tuple(output_type &out) { + if(ports_with_no_inputs) return false; + return join_helper::reserve(my_inputs, out); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_to_make_tuple(output_type &out, message_metainfo& metainfo) { + if (ports_with_no_inputs) return false; + return join_helper::reserve(my_inputs, out, metainfo); + } +#endif + + void tuple_accepted() { + join_helper::consume_reservations(my_inputs); + } + void tuple_rejected() { + join_helper::release_reservations(my_inputs); + } + + input_type my_inputs; + base_node_type *my_node; + std::atomic ports_with_no_inputs; + }; // join_node_FE + + template + class join_node_FE : public queueing_forwarding_base { + public: + static const int N = std::tuple_size::value; + typedef OutputTuple output_type; + typedef InputTuple input_type; + typedef join_node_base base_node_type; // for forwarding + + join_node_FE(graph &g) : queueing_forwarding_base(g), my_node(nullptr) { + ports_with_no_items = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + join_node_FE(const join_node_FE& other) : queueing_forwarding_base((other.queueing_forwarding_base::graph_ref)), my_node(nullptr) { + ports_with_no_items = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + // needed for forwarding + void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; } + + void reset_port_count() { + ports_with_no_items = N; + } + + // if all input_ports have items, spawn forward to try and consume tuples + graph_task* decrement_port_count(bool handle_task) override + { + if(ports_with_no_items.fetch_sub(1) == 1) { + if(is_graph_active(this->graph_ref)) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass task_type; + graph_task* t = allocator.new_object(graph_ref, allocator, *my_node); + if( !handle_task ) + return t; + spawn_in_graph_arena(this->graph_ref, *t); + } + } + return nullptr; + } + + input_type &input_ports() { return my_inputs; } + + protected: + + void reset( reset_flags f) { + reset_port_count(); + join_helper::reset_inputs(my_inputs, f ); + } + + // all methods on input ports should be called under mutual exclusion from join_node_base. + + bool tuple_build_may_succeed() { + return !ports_with_no_items; + } + + bool try_to_make_tuple(output_type &out) { + if(ports_with_no_items) return false; + return join_helper::get_items(my_inputs, out); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_to_make_tuple(output_type &out, message_metainfo& metainfo) { + if(ports_with_no_items) return false; + return join_helper::get_items(my_inputs, out, metainfo); + } +#endif + + void tuple_accepted() { + reset_port_count(); + join_helper::reset_ports(my_inputs); + } + void tuple_rejected() { + // nothing to do. + } + + input_type my_inputs; + base_node_type *my_node; + std::atomic ports_with_no_items; + }; // join_node_FE + + // key_matching join front-end. 
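
At the user level, the front-end below assembles a tuple from messages carrying the same key: each port gets a key-extraction body, items are buffered per key, and the tuple fires once every port has seen the key. A sketch against the public API (order and invoice are illustrative types):

    #include <tbb/flow_graph.h>
    #include <iostream>
    #include <string>
    #include <tuple>

    struct order   { int key; double amount; };
    struct invoice { int key; std::string note; };

    int main() {
        using namespace tbb::flow;
        graph g;
        join_node<std::tuple<order, invoice>, key_matching<int>> j(g,
            [](const order& o)   { return o.key; },
            [](const invoice& i) { return i.key; });
        function_node<std::tuple<order, invoice>, continue_msg> sink(
            g, serial, [](const std::tuple<order, invoice>& t) {
                std::cout << "matched key " << std::get<0>(t).key << "\n";
                return continue_msg();
            });
        make_edge(j, sink);

        input_port<0>(j).try_put(order{3, 10.0});
        input_port<1>(j).try_put(invoice{7, "seven"});
        input_port<0>(j).try_put(order{7, 70.0});      // completes key 7
        input_port<1>(j).try_put(invoice{3, "three"}); // completes key 3
        g.wait_for_all();
    }
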
+    template<typename K, typename KHash, typename InputTuple, typename OutputTuple>
+    class join_node_FE<key_matching<K,KHash>, InputTuple, OutputTuple> : public matching_forwarding_base<K>,
+             // buffer of key value counts
+             public hash_buffer<   // typedefed below to key_to_count_buffer_type
+                 typename std::decay<K>::type&,        // force ref type on K
+                 count_element<typename std::decay<K>::type>,
+                 type_to_key_function_body<
+                     count_element<typename std::decay<K>::type>,
+                     typename std::decay<K>::type& >,
+                 KHash >,
+             // buffer of output items
+             public item_buffer<OutputTuple> {
+    public:
+        static const int N = std::tuple_size<OutputTuple>::value;
+        typedef OutputTuple output_type;
+        typedef InputTuple input_type;
+        typedef K key_type;
+        typedef typename std::decay<K>::type unref_key_type;
+        typedef KHash key_hash_compare;
+        // must use K without ref.
+        typedef count_element<unref_key_type> count_element_type;
+        // method that lets us refer to the key of this type.
+        typedef key_to_count_functor<unref_key_type> key_to_count_func;
+        typedef type_to_key_function_body< count_element_type, unref_key_type&> TtoK_function_body_type;
+        typedef type_to_key_function_body_leaf<count_element_type, unref_key_type&, key_to_count_func> TtoK_function_body_leaf_type;
+        // this is the type of the special table that keeps track of the number of discrete
+        // elements corresponding to each key that we've seen.
+        typedef hash_buffer< unref_key_type&, count_element_type, TtoK_function_body_type, key_hash_compare >
+                 key_to_count_buffer_type;
+        typedef item_buffer<output_type> output_buffer_type;
+        typedef join_node_base<key_matching<K,KHash>, InputTuple, OutputTuple> base_node_type; // for forwarding
+        typedef matching_forwarding_base<K> forwarding_base_type;
+
+// ----------- Aggregator ------------
+        // the aggregator is only needed to serialize access to the hash table
+        // and the output_buffer_type base class
+    private:
+        enum op_type { res_count, inc_count, may_succeed, try_make };
+        typedef join_node_FE<key_matching<K,KHash>, InputTuple, OutputTuple> class_type;
+
+        class key_matching_FE_operation : public d1::aggregated_operation<key_matching_FE_operation> {
+        public:
+            char type;
+            unref_key_type my_val;
+            output_type* my_output;
+            graph_task* bypass_t;
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+            message_metainfo* metainfo = nullptr;
+#endif
+            // constructor for value parameter
+            key_matching_FE_operation(const unref_key_type& e, op_type t) : type(char(t)), my_val(e),
+                my_output(nullptr), bypass_t(nullptr) {}
+            key_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(nullptr) {}
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+            key_matching_FE_operation(output_type *p, op_type t, message_metainfo& info)
+                : type(char(t)), my_output(p), bypass_t(nullptr), metainfo(&info) {}
+#endif
+            // constructor with no parameter
+            key_matching_FE_operation(op_type t) : type(char(t)), my_output(nullptr), bypass_t(nullptr) {}
+        };
+
+        typedef d1::aggregating_functor<class_type, key_matching_FE_operation> handler_type;
+        friend class d1::aggregating_functor<class_type, key_matching_FE_operation>;
+        d1::aggregator<handler_type, key_matching_FE_operation> my_aggregator;
+
+        // called from aggregator, so serialized
+        // returns a task pointer if a task would have been enqueued but we asked that
+        // it be returned. Otherwise returns nullptr.
+        graph_task* fill_output_buffer(unref_key_type &t) {
+            output_type l_out;
+            graph_task* rtask = nullptr;
+            bool do_fwd = this->buffer_empty() && is_graph_active(this->graph_ref);
+            this->current_key = t;
+            this->delete_with_key(this->current_key);   // remove the key
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+            message_metainfo metainfo;
+#endif
+            if(join_helper<N>::get_items(my_inputs, l_out __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo))) {  //  <== call back
+                this->push_back(l_out __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo));
+                if(do_fwd) {  // we enqueue if receiving an item from predecessor, not if successor asks for item
+                    d1::small_object_allocator allocator{};
+                    typedef forward_task_bypass<base_node_type> task_type;
+                    rtask = allocator.new_object<task_type>(this->graph_ref, allocator, *my_node);
+                    do_fwd = false;
+                }
+                // retire the input values
+                join_helper<N>::reset_ports(my_inputs);  //  <== call back
+            }
+            else {
+                __TBB_ASSERT(false, "should have had something to push");
+            }
+            return rtask;
+        }
+
+        void handle_operations(key_matching_FE_operation* op_list) {
+            key_matching_FE_operation *current;
+            while(op_list) {
+                current = op_list;
+                op_list = op_list->next;
+                switch(current->type) {
+                case res_count:  // called from BE
+                    {
+                        this->destroy_front();
+                        current->status.store( SUCCEEDED, std::memory_order_release);
+                    }
+                    break;
+                case inc_count: {  // called from input ports
+                        count_element_type *p = nullptr;
+                        unref_key_type &t = current->my_val;
+                        if(!(this->find_ref_with_key(t,p))) {
+                            count_element_type ev;
+                            ev.my_key = t;
+                            ev.my_value = 0;
+                            this->insert_with_key(ev);
+                            bool found = this->find_ref_with_key(t, p);
+                            __TBB_ASSERT_EX(found, "should find key after inserting it");
+                        }
+                        if(++(p->my_value) == size_t(N)) {
+                            current->bypass_t = fill_output_buffer(t);
+                        }
+                    }
+                    current->status.store( SUCCEEDED, std::memory_order_release);
+                    break;
+                case may_succeed:  // called from BE
+                    current->status.store( this->buffer_empty() ?
FAILED : SUCCEEDED, std::memory_order_release); + break; + case try_make: // called from BE + if(this->buffer_empty()) { + current->status.store( FAILED, std::memory_order_release); + } + else { + *(current->my_output) = this->front(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (current->metainfo) { + *(current->metainfo) = this->front_metainfo(); + } +#endif + current->status.store( SUCCEEDED, std::memory_order_release); + } + break; + } + } + } +// ------------ End Aggregator --------------- + + public: + template + join_node_FE(graph &g, FunctionTuple &TtoK_funcs) : forwarding_base_type(g), my_node(nullptr) { + join_helper::set_join_node_pointer(my_inputs, this); + join_helper::set_key_functors(my_inputs, TtoK_funcs); + my_aggregator.initialize_handler(handler_type(this)); + TtoK_function_body_type *cfb = new TtoK_function_body_leaf_type(key_to_count_func()); + this->set_key_func(cfb); + } + + join_node_FE(const join_node_FE& other) : forwarding_base_type((other.forwarding_base_type::graph_ref)), key_to_count_buffer_type(), + output_buffer_type() { + my_node = nullptr; + join_helper::set_join_node_pointer(my_inputs, this); + join_helper::copy_key_functors(my_inputs, const_cast(other.my_inputs)); + my_aggregator.initialize_handler(handler_type(this)); + TtoK_function_body_type *cfb = new TtoK_function_body_leaf_type(key_to_count_func()); + this->set_key_func(cfb); + } + + // needed for forwarding + void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; } + + void reset_port_count() { // called from BE + key_matching_FE_operation op_data(res_count); + my_aggregator.execute(&op_data); + return; + } + + // if all input_ports have items, spawn forward to try and consume tuples + // return a task if we are asked and did create one. + graph_task *increment_key_count(unref_key_type const & t) override { // called from input_ports + key_matching_FE_operation op_data(t, inc_count); + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + + input_type &input_ports() { return my_inputs; } + + protected: + + void reset( reset_flags f ) { + // called outside of parallel contexts + join_helper::reset_inputs(my_inputs, f); + + key_to_count_buffer_type::reset(); + output_buffer_type::reset(); + } + + // all methods on input ports should be called under mutual exclusion from join_node_base. + + bool tuple_build_may_succeed() { // called from back-end + key_matching_FE_operation op_data(may_succeed); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + // cannot lock while calling back to input_ports. current_key will only be set + // and reset under the aggregator, so it will remain consistent. + bool try_to_make_tuple(output_type &out) { + key_matching_FE_operation op_data(&out,try_make); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_to_make_tuple(output_type &out, message_metainfo& metainfo) { + key_matching_FE_operation op_data(&out, try_make, metainfo); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } +#endif + + void tuple_accepted() { + reset_port_count(); // reset current_key after ports reset. + } + + void tuple_rejected() { + // nothing to do. + } + + input_type my_inputs; // input ports + base_node_type *my_node; + }; // join_node_FE, InputTuple, OutputTuple> + + //! 
join_node_base + template + class join_node_base : public graph_node, public join_node_FE, + public sender { + protected: + using graph_node::my_graph; + public: + typedef OutputTuple output_type; + + typedef typename sender::successor_type successor_type; + typedef join_node_FE input_ports_type; + using input_ports_type::tuple_build_may_succeed; + using input_ports_type::try_to_make_tuple; + using input_ports_type::tuple_accepted; + using input_ports_type::tuple_rejected; + + private: + // ----------- Aggregator ------------ + enum op_type { reg_succ, rem_succ, try__get, do_fwrd, do_fwrd_bypass + }; + typedef join_node_base class_type; + + class join_node_base_operation : public d1::aggregated_operation { + public: + char type; + union { + output_type *my_arg; + successor_type *my_succ; + }; + graph_task* bypass_t; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo* metainfo; +#endif + join_node_base_operation(const output_type& e, op_type t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo& info)) + : type(char(t)), my_arg(const_cast(&e)), bypass_t(nullptr) + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo(&info)) {} +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + join_node_base_operation(const output_type& e, op_type t) + : type(char(t)), my_arg(const_cast(&e)), bypass_t(nullptr), metainfo(nullptr) {} +#endif + join_node_base_operation(const successor_type &s, op_type t) : type(char(t)), + my_succ(const_cast(&s)), bypass_t(nullptr) {} + join_node_base_operation(op_type t) : type(char(t)), bypass_t(nullptr) {} + }; + + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + bool forwarder_busy; + d1::aggregator my_aggregator; + + void handle_operations(join_node_base_operation* op_list) { + join_node_base_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case reg_succ: { + my_successors.register_successor(*(current->my_succ)); + if(tuple_build_may_succeed() && !forwarder_busy && is_graph_active(my_graph)) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass< join_node_base > task_type; + graph_task* t = allocator.new_object(my_graph, allocator, *this); + spawn_in_graph_arena(my_graph, *t); + forwarder_busy = true; + } + current->status.store( SUCCEEDED, std::memory_order_release); + } + break; + case rem_succ: + my_successors.remove_successor(*(current->my_succ)); + current->status.store( SUCCEEDED, std::memory_order_release); + break; + case try__get: + if(tuple_build_may_succeed()) { + bool make_tuple_result = false; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (current->metainfo) { + make_tuple_result = try_to_make_tuple(*(current->my_arg), *(current->metainfo)); + } else +#endif + { + make_tuple_result = try_to_make_tuple(*(current->my_arg)); + } + if(make_tuple_result) { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (current->metainfo) { + // Since elements would be removed from queues while calling to tuple_accepted + // together with corresponding message_metainfo objects + // we need to prolong the wait until the successor would create a task for removed elements + for (auto waiter : current->metainfo->waiters()) { + waiter->reserve(1); + } + } +#endif + tuple_accepted(); + current->status.store( SUCCEEDED, std::memory_order_release); + } + else current->status.store( FAILED, std::memory_order_release); + } + else current->status.store( FAILED, std::memory_order_release); + break; + case do_fwrd_bypass: { + bool build_succeeded; + graph_task 
*last_task = nullptr; + output_type out; + // forwarding must be exclusive, because try_to_make_tuple and tuple_accepted + // are separate locked methods in the FE. We could conceivably fetch the front + // of the FE queue, then be swapped out, have someone else consume the FE's + // object, then come back, forward, and then try to remove it from the queue + // again. Without reservation of the FE, the methods accessing it must be locked. + // We could remember the keys of the objects we forwarded, and then remove + // them from the input ports after forwarding is complete? + if(tuple_build_may_succeed()) { // checks output queue of FE + do { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo metainfo; +#endif + // fetch front_end of queue + build_succeeded = try_to_make_tuple(out __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + if(build_succeeded) { + graph_task *new_task = + my_successors.try_put_task(out __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + last_task = combine_tasks(my_graph, last_task, new_task); + if(new_task) { + tuple_accepted(); + } + else { + tuple_rejected(); + build_succeeded = false; + } + } + } while(build_succeeded); + } + current->bypass_t = last_task; + current->status.store( SUCCEEDED, std::memory_order_release); + forwarder_busy = false; + } + break; + } + } + } + // ---------- end aggregator ----------- + public: + join_node_base(graph &g) + : graph_node(g), input_ports_type(g), forwarder_busy(false), my_successors(this) + { + input_ports_type::set_my_node(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + join_node_base(const join_node_base& other) : + graph_node(other.graph_node::my_graph), input_ports_type(other), + sender(), forwarder_busy(false), my_successors(this) + { + input_ports_type::set_my_node(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + template + join_node_base(graph &g, FunctionTuple f) + : graph_node(g), input_ports_type(g, f), forwarder_busy(false), my_successors(this) + { + input_ports_type::set_my_node(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + bool register_successor(successor_type &r) override { + join_node_base_operation op_data(r, reg_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + bool remove_successor( successor_type &r) override { + join_node_base_operation op_data(r, rem_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + bool try_get( output_type &v) override { + join_node_base_operation op_data(v, try__get); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_get( output_type &v, message_metainfo& metainfo) override { + join_node_base_operation op_data(v, try__get, metainfo); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } +#endif + + protected: + void reset_node(reset_flags f) override { + input_ports_type::reset(f); + if(f & rf_clear_edges) my_successors.clear(); + } + + private: + broadcast_cache my_successors; + + friend class forward_task_bypass< join_node_base >; + graph_task *forward_task() { + join_node_base_operation op_data(do_fwrd_bypass); + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + + }; // join_node_base + + // join base class type generator + template class PT, typename OutputTuple, typename JP> + struct join_base { + typedef join_node_base::type, OutputTuple> type; + }; + + template + struct join_base > { + typedef key_matching 
key_traits_type; + typedef K key_type; + typedef KHash key_hash_compare; + typedef join_node_base< key_traits_type, + // ports type + typename wrap_key_tuple_elements::type, + OutputTuple > type; + }; + + //! unfolded_join_node : passes input_ports_type to join_node_base. We build the input port type + // using tuple_element. The class PT is the port type (reserving_port, queueing_port, key_matching_port) + // and should match the typename. + + template class PT, typename OutputTuple, typename JP> + class unfolded_join_node : public join_base::type { + public: + typedef typename wrap_tuple_elements::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base base_type; + public: + unfolded_join_node(graph &g) : base_type(g) {} + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + template + struct key_from_message_body { + K operator()(const T& t) const { + return key_from_message(t); + } + }; + // Adds const to reference type + template + struct key_from_message_body { + const K& operator()(const T& t) const { + return key_from_message(t); + } + }; +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + // key_matching unfolded_join_node. This must be a separate specialization because the constructors + // differ. + + template + class unfolded_join_node<2,key_matching_port,OutputTuple,key_matching > : public + join_base<2,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + public: + typedef typename wrap_key_tuple_elements<2,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base, input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef type_to_key_function_body *f1_p; + typedef std::tuple< f0_p, f1_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf(body0), + new type_to_key_function_body_leaf(body1) + ) ) { + static_assert(std::tuple_size::value == 2, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + + template + class unfolded_join_node<3,key_matching_port,OutputTuple,key_matching > : public + join_base<3,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + typedef typename std::tuple_element<2, OutputTuple>::type T2; + public: + typedef typename wrap_key_tuple_elements<3,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base, input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef type_to_key_function_body *f1_p; + typedef type_to_key_function_body *f2_p; + typedef std::tuple< f0_p, f1_p, f2_p > func_initializer_type; + public: +#if 
__TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf(body0), + new type_to_key_function_body_leaf(body1), + new type_to_key_function_body_leaf(body2) + ) ) { + static_assert(std::tuple_size::value == 3, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + + template + class unfolded_join_node<4,key_matching_port,OutputTuple,key_matching > : public + join_base<4,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + typedef typename std::tuple_element<2, OutputTuple>::type T2; + typedef typename std::tuple_element<3, OutputTuple>::type T3; + public: + typedef typename wrap_key_tuple_elements<4,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base, input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef type_to_key_function_body *f1_p; + typedef type_to_key_function_body *f2_p; + typedef type_to_key_function_body *f3_p; + typedef std::tuple< f0_p, f1_p, f2_p, f3_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf(body0), + new type_to_key_function_body_leaf(body1), + new type_to_key_function_body_leaf(body2), + new type_to_key_function_body_leaf(body3) + ) ) { + static_assert(std::tuple_size::value == 4, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + + template + class unfolded_join_node<5,key_matching_port,OutputTuple,key_matching > : public + join_base<5,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + typedef typename std::tuple_element<2, OutputTuple>::type T2; + typedef typename std::tuple_element<3, OutputTuple>::type T3; + typedef typename std::tuple_element<4, OutputTuple>::type T4; + public: + typedef typename wrap_key_tuple_elements<5,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef type_to_key_function_body *f1_p; + typedef type_to_key_function_body *f2_p; + typedef type_to_key_function_body *f3_p; + typedef 
type_to_key_function_body *f4_p; + typedef std::tuple< f0_p, f1_p, f2_p, f3_p, f4_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf(body0), + new type_to_key_function_body_leaf(body1), + new type_to_key_function_body_leaf(body2), + new type_to_key_function_body_leaf(body3), + new type_to_key_function_body_leaf(body4) + ) ) { + static_assert(std::tuple_size::value == 5, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + +#if __TBB_VARIADIC_MAX >= 6 + template + class unfolded_join_node<6,key_matching_port,OutputTuple,key_matching > : public + join_base<6,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + typedef typename std::tuple_element<2, OutputTuple>::type T2; + typedef typename std::tuple_element<3, OutputTuple>::type T3; + typedef typename std::tuple_element<4, OutputTuple>::type T4; + typedef typename std::tuple_element<5, OutputTuple>::type T5; + public: + typedef typename wrap_key_tuple_elements<6,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef type_to_key_function_body *f1_p; + typedef type_to_key_function_body *f2_p; + typedef type_to_key_function_body *f3_p; + typedef type_to_key_function_body *f4_p; + typedef type_to_key_function_body *f5_p; + typedef std::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, Body5 body5) + : base_type(g, func_initializer_type( + new type_to_key_function_body_leaf(body0), + new type_to_key_function_body_leaf(body1), + new type_to_key_function_body_leaf(body2), + new type_to_key_function_body_leaf(body3), + new type_to_key_function_body_leaf(body4), + new type_to_key_function_body_leaf(body5) + ) ) { + static_assert(std::tuple_size::value == 6, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + +#if __TBB_VARIADIC_MAX >= 
7 + template + class unfolded_join_node<7,key_matching_port,OutputTuple,key_matching > : public + join_base<7,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + typedef typename std::tuple_element<2, OutputTuple>::type T2; + typedef typename std::tuple_element<3, OutputTuple>::type T3; + typedef typename std::tuple_element<4, OutputTuple>::type T4; + typedef typename std::tuple_element<5, OutputTuple>::type T5; + typedef typename std::tuple_element<6, OutputTuple>::type T6; + public: + typedef typename wrap_key_tuple_elements<7,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef type_to_key_function_body *f1_p; + typedef type_to_key_function_body *f2_p; + typedef type_to_key_function_body *f3_p; + typedef type_to_key_function_body *f4_p; + typedef type_to_key_function_body *f5_p; + typedef type_to_key_function_body *f6_p; + typedef std::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()), + new type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, + Body5 body5, Body6 body6) : base_type(g, func_initializer_type( + new type_to_key_function_body_leaf(body0), + new type_to_key_function_body_leaf(body1), + new type_to_key_function_body_leaf(body2), + new type_to_key_function_body_leaf(body3), + new type_to_key_function_body_leaf(body4), + new type_to_key_function_body_leaf(body5), + new type_to_key_function_body_leaf(body6) + ) ) { + static_assert(std::tuple_size::value == 7, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + +#if __TBB_VARIADIC_MAX >= 8 + template + class unfolded_join_node<8,key_matching_port,OutputTuple,key_matching > : public + join_base<8,key_matching_port,OutputTuple,key_matching >::type { + typedef typename std::tuple_element<0, OutputTuple>::type T0; + typedef typename std::tuple_element<1, OutputTuple>::type T1; + typedef typename std::tuple_element<2, OutputTuple>::type T2; + typedef typename std::tuple_element<3, OutputTuple>::type T3; + typedef typename std::tuple_element<4, OutputTuple>::type T4; + typedef typename std::tuple_element<5, OutputTuple>::type T5; + typedef typename std::tuple_element<6, OutputTuple>::type T6; + typedef typename std::tuple_element<7, OutputTuple>::type T7; + public: + typedef typename wrap_key_tuple_elements<8,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef type_to_key_function_body *f0_p; + typedef 
type_to_key_function_body<T1, K> *f1_p;
+    typedef type_to_key_function_body<T2, K> *f2_p;
+    typedef type_to_key_function_body<T3, K> *f3_p;
+    typedef type_to_key_function_body<T4, K> *f4_p;
+    typedef type_to_key_function_body<T5, K> *f5_p;
+    typedef type_to_key_function_body<T6, K> *f6_p;
+    typedef type_to_key_function_body<T7, K> *f7_p;
+    typedef std::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p > func_initializer_type;
+public:
+#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING
+    unfolded_join_node(graph &g) : base_type(g,
+            func_initializer_type(
+                new type_to_key_function_body_leaf<T0, K, key_from_message_body<K,T0> >(key_from_message_body<K,T0>()),
+                new type_to_key_function_body_leaf<T1, K, key_from_message_body<K,T1> >(key_from_message_body<K,T1>()),
+                new type_to_key_function_body_leaf<T2, K, key_from_message_body<K,T2> >(key_from_message_body<K,T2>()),
+                new type_to_key_function_body_leaf<T3, K, key_from_message_body<K,T3> >(key_from_message_body<K,T3>()),
+                new type_to_key_function_body_leaf<T4, K, key_from_message_body<K,T4> >(key_from_message_body<K,T4>()),
+                new type_to_key_function_body_leaf<T5, K, key_from_message_body<K,T5> >(key_from_message_body<K,T5>()),
+                new type_to_key_function_body_leaf<T6, K, key_from_message_body<K,T6> >(key_from_message_body<K,T6>()),
+                new type_to_key_function_body_leaf<T7, K, key_from_message_body<K,T7> >(key_from_message_body<K,T7>())
+            ) ) {
+    }
+#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */
+    template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4, typename Body5, typename Body6, typename Body7>
+    unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
+            Body5 body5, Body6 body6, Body7 body7) : base_type(g, func_initializer_type(
+                new type_to_key_function_body_leaf<T0, K, Body0>(body0),
+                new type_to_key_function_body_leaf<T1, K, Body1>(body1),
+                new type_to_key_function_body_leaf<T2, K, Body2>(body2),
+                new type_to_key_function_body_leaf<T3, K, Body3>(body3),
+                new type_to_key_function_body_leaf<T4, K, Body4>(body4),
+                new type_to_key_function_body_leaf<T5, K, Body5>(body5),
+                new type_to_key_function_body_leaf<T6, K, Body6>(body6),
+                new type_to_key_function_body_leaf<T7, K, Body7>(body7)
+            ) ) {
+        static_assert(std::tuple_size<OutputTuple>::value == 8, "wrong number of body initializers");
+    }
+    unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
+};
+#endif
+
+#if __TBB_VARIADIC_MAX >= 9
+template<typename OutputTuple, typename K, typename KHash>
+class unfolded_join_node<9,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+        join_base<9,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
+    typedef typename std::tuple_element<0, OutputTuple>::type T0;
+    typedef typename std::tuple_element<1, OutputTuple>::type T1;
+    typedef typename std::tuple_element<2, OutputTuple>::type T2;
+    typedef typename std::tuple_element<3, OutputTuple>::type T3;
+    typedef typename std::tuple_element<4, OutputTuple>::type T4;
+    typedef typename std::tuple_element<5, OutputTuple>::type T5;
+    typedef typename std::tuple_element<6, OutputTuple>::type T6;
+    typedef typename std::tuple_element<7, OutputTuple>::type T7;
+    typedef typename std::tuple_element<8, OutputTuple>::type T8;
+public:
+    typedef typename wrap_key_tuple_elements<9,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
+    typedef OutputTuple output_type;
+private:
+    typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+    typedef type_to_key_function_body<T0, K> *f0_p;
+    typedef type_to_key_function_body<T1, K> *f1_p;
+    typedef type_to_key_function_body<T2, K> *f2_p;
+    typedef type_to_key_function_body<T3, K> *f3_p;
+    typedef type_to_key_function_body<T4, K> *f4_p;
+    typedef type_to_key_function_body<T5, K> *f5_p;
+    typedef type_to_key_function_body<T6, K> *f6_p;
+    typedef type_to_key_function_body<T7, K> *f7_p;
+    typedef type_to_key_function_body<T8, K> *f8_p;
+    typedef std::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p > func_initializer_type;
+public:
+#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING
+    unfolded_join_node(graph &g) : base_type(g,
+            func_initializer_type(
+                new type_to_key_function_body_leaf<T0, K, key_from_message_body<K,T0> >(key_from_message_body<K,T0>()),
+                new type_to_key_function_body_leaf<T1, K, key_from_message_body<K,T1> >(key_from_message_body<K,T1>()),
+                new type_to_key_function_body_leaf<T2, K, key_from_message_body<K,T2> >(key_from_message_body<K,T2>()),
+                new type_to_key_function_body_leaf<T3, K, key_from_message_body<K,T3> >(key_from_message_body<K,T3>()),
+                new type_to_key_function_body_leaf<T4, K, key_from_message_body<K,T4> >(key_from_message_body<K,T4>()),
+                new type_to_key_function_body_leaf<T5, K, key_from_message_body<K,T5> >(key_from_message_body<K,T5>()),
+                new type_to_key_function_body_leaf<T6, K, key_from_message_body<K,T6> >(key_from_message_body<K,T6>()),
+                new type_to_key_function_body_leaf<T7, K, key_from_message_body<K,T7> >(key_from_message_body<K,T7>()),
+                new type_to_key_function_body_leaf<T8, K, key_from_message_body<K,T8> >(key_from_message_body<K,T8>())
+            ) ) {
+    }
+#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */
+    template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4, typename Body5, typename Body6, typename Body7, typename Body8>
+    unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
+            Body5 body5, Body6 body6, Body7 body7, Body8 body8) : base_type(g, func_initializer_type(
+                new type_to_key_function_body_leaf<T0, K, Body0>(body0),
+                new type_to_key_function_body_leaf<T1, K, Body1>(body1),
+                new type_to_key_function_body_leaf<T2, K, Body2>(body2),
+                new type_to_key_function_body_leaf<T3, K, Body3>(body3),
+                new type_to_key_function_body_leaf<T4, K, Body4>(body4),
+                new type_to_key_function_body_leaf<T5, K, Body5>(body5),
+                new type_to_key_function_body_leaf<T6, K, Body6>(body6),
+                new type_to_key_function_body_leaf<T7, K, Body7>(body7),
+                new type_to_key_function_body_leaf<T8, K, Body8>(body8)
+            ) ) {
+        static_assert(std::tuple_size<OutputTuple>::value == 9, "wrong number of body initializers");
+    }
+    unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
+};
+#endif
+
+#if __TBB_VARIADIC_MAX >= 10
+template<typename OutputTuple, typename K, typename KHash>
+class unfolded_join_node<10,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+        join_base<10,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
+    typedef typename std::tuple_element<0, OutputTuple>::type T0;
+    typedef typename std::tuple_element<1, OutputTuple>::type T1;
+    typedef typename std::tuple_element<2, OutputTuple>::type T2;
+    typedef typename std::tuple_element<3, OutputTuple>::type T3;
+    typedef typename std::tuple_element<4, OutputTuple>::type T4;
+    typedef typename std::tuple_element<5, OutputTuple>::type T5;
+    typedef typename std::tuple_element<6, OutputTuple>::type T6;
+    typedef typename std::tuple_element<7, OutputTuple>::type T7;
+    typedef typename std::tuple_element<8, OutputTuple>::type T8;
+    typedef typename std::tuple_element<9, OutputTuple>::type T9;
+public:
+    typedef typename wrap_key_tuple_elements<10,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
+    typedef OutputTuple output_type;
+private:
+    typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+    typedef type_to_key_function_body<T0, K> *f0_p;
+    typedef type_to_key_function_body<T1, K> *f1_p;
+    typedef type_to_key_function_body<T2, K> *f2_p;
+    typedef type_to_key_function_body<T3, K> *f3_p;
+    typedef type_to_key_function_body<T4, K> *f4_p;
+    typedef type_to_key_function_body<T5, K> *f5_p;
+    typedef type_to_key_function_body<T6, K> *f6_p;
+    typedef type_to_key_function_body<T7, K> *f7_p;
+    typedef type_to_key_function_body<T8, K> *f8_p;
+    typedef type_to_key_function_body<T9, K> *f9_p;
+    typedef std::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p, f9_p > func_initializer_type;
+public:
+#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING
+    unfolded_join_node(graph &g) : base_type(g,
+            func_initializer_type(
+                new type_to_key_function_body_leaf<T0, K, key_from_message_body<K,T0> >(key_from_message_body<K,T0>()),
+                new type_to_key_function_body_leaf<T1, K, key_from_message_body<K,T1> >(key_from_message_body<K,T1>()),
+                new type_to_key_function_body_leaf<T2, K, key_from_message_body<K,T2> >(key_from_message_body<K,T2>()),
+                new type_to_key_function_body_leaf<T3, K, key_from_message_body<K,T3> >(key_from_message_body<K,T3>()),
+                new type_to_key_function_body_leaf<T4, K, key_from_message_body<K,T4> >(key_from_message_body<K,T4>()),
+                new type_to_key_function_body_leaf<T5, K, key_from_message_body<K,T5> >(key_from_message_body<K,T5>()),
+                new type_to_key_function_body_leaf<T6, K, key_from_message_body<K,T6> >(key_from_message_body<K,T6>()),
+                new type_to_key_function_body_leaf<T7, K, key_from_message_body<K,T7> >(key_from_message_body<K,T7>()),
+                new type_to_key_function_body_leaf<T8, K, key_from_message_body<K,T8> >(key_from_message_body<K,T8>()),
+                new type_to_key_function_body_leaf<T9, K, key_from_message_body<K,T9> >(key_from_message_body<K,T9>())
+            ) ) {
+    }
+#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */
+    template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4, typename Body5, typename Body6, typename Body7, typename Body8, typename Body9>
+    unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
+            Body5 body5, Body6 body6, Body7 body7, Body8 body8, Body9 body9) : base_type(g, func_initializer_type(
+                new type_to_key_function_body_leaf<T0, K, Body0>(body0),
+                new type_to_key_function_body_leaf<T1, K, Body1>(body1),
+                new type_to_key_function_body_leaf<T2, K, Body2>(body2),
+                new type_to_key_function_body_leaf<T3, K, Body3>(body3),
+                new type_to_key_function_body_leaf<T4, K, Body4>(body4),
+                new type_to_key_function_body_leaf<T5, K, Body5>(body5),
+                new type_to_key_function_body_leaf<T6, K, Body6>(body6),
+                new type_to_key_function_body_leaf<T7, K, Body7>(body7),
+                new type_to_key_function_body_leaf<T8, K, Body8>(body8),
+                new type_to_key_function_body_leaf<T9, K, Body9>(body9)
+            ) ) {
+        static_assert(std::tuple_size<OutputTuple>::value == 10, "wrong number of body initializers");
+    }
+    unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
+};
+#endif
+
+//! templated function to refer to input ports of the join node
+template<size_t N, typename JNT>
+typename std::tuple_element<N, typename JNT::input_ports_type>::type &input_port(JNT &jn) {
+    return std::get<N>(jn.input_ports());
+}
+
+#endif // __TBB__flow_graph_join_impl_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_node_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_node_impl.h
new file mode 100644
index 000000000..b136ba89a
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_node_impl.h
@@ -0,0 +1,894 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB__flow_graph_node_impl_H
+#define __TBB__flow_graph_node_impl_H
+
+#ifndef __TBB_flow_graph_H
+#error Do not #include this internal file directly; use public TBB headers instead.
+#endif
+
+#include "_flow_graph_item_buffer_impl.h"
+
+template< typename T, typename A >
+class function_input_queue : public item_buffer<T, A> {
+public:
+    bool empty() const {
+        return this->buffer_empty();
+    }
+
+    const T& front() const {
+        return this->item_buffer<T, A>::front();
+    }
+
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+    const message_metainfo& front_metainfo() const {
+        return this->item_buffer<T, A>::front_metainfo();
+    }
+#endif
+
+    void pop() {
+        this->destroy_front();
+    }
+
+    bool push( T& t ) {
+        return this->push_back( t );
+    }
+
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+    bool push( T& t, const message_metainfo& metainfo ) {
+        return this->push_back(t, metainfo);
+    }
+#endif
+};
+
+//! Input and scheduling for a function node that takes a type Input as input
+// The only up-ref is apply_body_impl, which should implement the function
+// call and any handling of the result.
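[Editor's note: the class below is internal plumbing, so a usage-level illustration may help. This is a minimal sketch, not part of the patch, of the behavior that `function_input_base` ultimately provides through the public `function_node`: a concurrency cap plus buffering of messages that arrive while all slots are busy. Node names and values are invented for illustration.]

```cpp
#include <oneapi/tbb/flow_graph.h>
#include <iostream>

int main() {
    using namespace oneapi::tbb::flow;
    graph g;

    // 'serial' caps my_max_concurrency at 1; with the default queueing
    // policy, extra try_put()s land in a function_input_queue rather than
    // being rejected back to the sender.
    function_node<int, int> square(g, serial, [](int v) { return v * v; });
    function_node<int, int> report(g, serial, [](int v) {
        std::cout << v << '\n';
        return v;
    });
    make_edge(square, report);

    for (int i = 0; i < 5; ++i)
        square.try_put(i);  // at most one body per node runs at a time
    g.wait_for_all();       // drains queued and in-flight messages
    return 0;
}
```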
+template< typename Input, typename Policy, typename A, typename ImplType > +class function_input_base : public receiver, no_assign { + enum op_type {reg_pred, rem_pred, try_fwd, tryput_bypass, app_body_bypass, occupy_concurrency + }; + typedef function_input_base class_type; + +public: + + //! The input type of this receiver + typedef Input input_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef predecessor_cache predecessor_cache_type; + typedef function_input_queue input_queue_type; + typedef typename allocator_traits::template rebind_alloc allocator_type; + static_assert(!has_policy::value || !has_policy::value, ""); + + //! Constructor for function_input_base + function_input_base( graph &g, size_t max_concurrency, node_priority_t a_priority, bool is_no_throw ) + : my_graph_ref(g), my_max_concurrency(max_concurrency) + , my_concurrency(0), my_priority(a_priority), my_is_no_throw(is_no_throw) + , my_queue(!has_policy::value ? new input_queue_type() : nullptr) + , my_predecessors(this) + , forwarder_busy(false) + { + my_aggregator.initialize_handler(handler_type(this)); + } + + //! Copy constructor + function_input_base( const function_input_base& src ) + : function_input_base(src.my_graph_ref, src.my_max_concurrency, src.my_priority, src.my_is_no_throw) {} + + //! Destructor + // The queue is allocated by the constructor for {multi}function_node. + // TODO: pass the graph_buffer_policy to the base so it can allocate the queue instead. + // This would be an interface-breaking change. + virtual ~function_input_base() { + delete my_queue; + my_queue = nullptr; + } + + graph_task* try_put_task( const input_type& t) override { + return try_put_task_base(t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task( const input_type& t, const message_metainfo& metainfo ) override { + return try_put_task_base(t, metainfo); + } +#endif // __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + + //! Adds src to the list of cached predecessors. + bool register_predecessor( predecessor_type &src ) override { + operation_type op_data(reg_pred); + op_data.r = &src; + my_aggregator.execute(&op_data); + return true; + } + + //! Removes src from the list of cached predecessors. 
+ bool remove_predecessor( predecessor_type &src ) override { + operation_type op_data(rem_pred); + op_data.r = &src; + my_aggregator.execute(&op_data); + return true; + } + +protected: + + void reset_function_input_base( reset_flags f) { + my_concurrency = 0; + if(my_queue) { + my_queue->reset(); + } + reset_receiver(f); + forwarder_busy = false; + } + + graph& my_graph_ref; + const size_t my_max_concurrency; + size_t my_concurrency; + node_priority_t my_priority; + const bool my_is_no_throw; + input_queue_type *my_queue; + predecessor_cache my_predecessors; + + void reset_receiver( reset_flags f) { + if( f & rf_clear_edges) my_predecessors.clear(); + else + my_predecessors.reset(); + __TBB_ASSERT(!(f & rf_clear_edges) || my_predecessors.empty(), "function_input_base reset failed"); + } + + graph& graph_reference() const override { + return my_graph_ref; + } + + graph_task* try_get_postponed_task(const input_type& i) { + operation_type op_data(i, app_body_bypass); // tries to pop an item or get_item + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + +private: + + friend class apply_body_task_bypass< class_type, input_type >; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + friend class apply_body_task_bypass< class_type, input_type, trackable_messages_graph_task >; +#endif + friend class forward_task_bypass< class_type >; + + class operation_type : public d1::aggregated_operation< operation_type > { + public: + char type; + union { + input_type *elem; + predecessor_type *r; + }; + graph_task* bypass_t; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo* metainfo; +#endif + operation_type(const input_type& e, op_type t) : + type(char(t)), elem(const_cast(&e)), bypass_t(nullptr) +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + , metainfo(nullptr) +#endif + {} +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + operation_type(const input_type& e, op_type t, const message_metainfo& info) : + type(char(t)), elem(const_cast(&e)), bypass_t(nullptr), + metainfo(const_cast(&info)) {} +#endif + operation_type(op_type t) : type(char(t)), r(nullptr), bypass_t(nullptr) {} + }; + + bool forwarder_busy; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator< handler_type, operation_type > my_aggregator; + + graph_task* perform_queued_requests() { + graph_task* new_task = nullptr; + if(my_queue) { + if(!my_queue->empty()) { + ++my_concurrency; + // TODO: consider removing metainfo from the queue using move semantics to avoid + // ref counter increase + new_task = create_body_task(my_queue->front() + __TBB_FLOW_GRAPH_METAINFO_ARG(my_queue->front_metainfo())); + + my_queue->pop(); + } + } + else { + input_type i; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo metainfo; +#endif + if(my_predecessors.get_item(i __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo))) { + ++my_concurrency; + new_task = create_body_task(i __TBB_FLOW_GRAPH_METAINFO_ARG(std::move(metainfo))); + } + } + return new_task; + } + void handle_operations(operation_type *op_list) { + operation_type* tmp; + while (op_list) { + tmp = op_list; + op_list = op_list->next; + switch (tmp->type) { + case reg_pred: + my_predecessors.add(*(tmp->r)); + tmp->status.store(SUCCEEDED, std::memory_order_release); + if (!forwarder_busy) { + forwarder_busy = true; + spawn_forward_task(); + } + break; + case rem_pred: + my_predecessors.remove(*(tmp->r)); + tmp->status.store(SUCCEEDED, std::memory_order_release); + break; + case app_body_bypass: { + tmp->bypass_t = nullptr; + 
__TBB_ASSERT(my_max_concurrency != 0, nullptr); + --my_concurrency; + if(my_concurrencybypass_t = perform_queued_requests(); + tmp->status.store(SUCCEEDED, std::memory_order_release); + } + break; + case tryput_bypass: internal_try_put_task(tmp); break; + case try_fwd: internal_forward(tmp); break; + case occupy_concurrency: + if (my_concurrency < my_max_concurrency) { + ++my_concurrency; + tmp->status.store(SUCCEEDED, std::memory_order_release); + } else { + tmp->status.store(FAILED, std::memory_order_release); + } + break; + } + } + } + + //! Put to the node, but return the task instead of enqueueing it + void internal_try_put_task(operation_type *op) { + __TBB_ASSERT(my_max_concurrency != 0, nullptr); + if (my_concurrency < my_max_concurrency) { + ++my_concurrency; + graph_task* new_task = create_body_task(*(op->elem) + __TBB_FLOW_GRAPH_METAINFO_ARG(*(op->metainfo))); + op->bypass_t = new_task; + op->status.store(SUCCEEDED, std::memory_order_release); + } else if ( my_queue && my_queue->push(*(op->elem) + __TBB_FLOW_GRAPH_METAINFO_ARG(*(op->metainfo))) ) + { + op->bypass_t = SUCCESSFULLY_ENQUEUED; + op->status.store(SUCCEEDED, std::memory_order_release); + } else { + op->bypass_t = nullptr; + op->status.store(FAILED, std::memory_order_release); + } + } + + //! Creates tasks for postponed messages if available and if concurrency allows + void internal_forward(operation_type *op) { + op->bypass_t = nullptr; + if (my_concurrency < my_max_concurrency) + op->bypass_t = perform_queued_requests(); + if(op->bypass_t) + op->status.store(SUCCEEDED, std::memory_order_release); + else { + forwarder_busy = false; + op->status.store(FAILED, std::memory_order_release); + } + } + + graph_task* internal_try_put_bypass( const input_type& t + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + operation_type op_data(t, tryput_bypass __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + my_aggregator.execute(&op_data); + if( op_data.status == SUCCEEDED ) { + return op_data.bypass_t; + } + return nullptr; + } + + graph_task* try_put_task_base(const input_type& t + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + if ( my_is_no_throw ) + return try_put_task_impl(t, has_policy() + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + else + return try_put_task_impl(t, std::false_type() + __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } + + graph_task* try_put_task_impl( const input_type& t, /*lightweight=*/std::true_type + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + if( my_max_concurrency == 0 ) { + return apply_body_bypass(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } else { + operation_type check_op(t, occupy_concurrency); + my_aggregator.execute(&check_op); + if( check_op.status == SUCCEEDED ) { + return apply_body_bypass(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } + return internal_try_put_bypass(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } + } + + graph_task* try_put_task_impl( const input_type& t, /*lightweight=*/std::false_type + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + if( my_max_concurrency == 0 ) { + return create_body_task(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } else { + return internal_try_put_bypass(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } + } + + //! 
Applies the body to the provided input + // then decides if more work is available + graph_task* apply_body_bypass( const input_type &i + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + + { + return static_cast(this)->apply_body_impl_bypass(i __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + } + + //! allocates a task to apply a body +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + graph_task* create_body_task( const input_type &input, Metainfo&& metainfo ) +#else + graph_task* create_body_task( const input_type &input ) +#endif + { + if (!is_graph_active(my_graph_ref)) { + return nullptr; + } + // TODO revamp: extract helper for common graph task allocation part + d1::small_object_allocator allocator{}; + graph_task* t = nullptr; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (!metainfo.empty()) { + using task_type = apply_body_task_bypass; + t = allocator.new_object(my_graph_ref, allocator, *this, input, my_priority, std::forward(metainfo)); + } else +#endif + { + using task_type = apply_body_task_bypass; + t = allocator.new_object(my_graph_ref, allocator, *this, input, my_priority); + } + return t; + } + + //! This is executed by an enqueued task, the "forwarder" + graph_task* forward_task() { + operation_type op_data(try_fwd); + graph_task* rval = nullptr; + do { + op_data.status = WAIT; + my_aggregator.execute(&op_data); + if(op_data.status == SUCCEEDED) { + graph_task* ttask = op_data.bypass_t; + __TBB_ASSERT( ttask && ttask != SUCCESSFULLY_ENQUEUED, nullptr); + rval = combine_tasks(my_graph_ref, rval, ttask); + } + } while (op_data.status == SUCCEEDED); + return rval; + } + + inline graph_task* create_forward_task() { + if (!is_graph_active(my_graph_ref)) { + return nullptr; + } + d1::small_object_allocator allocator{}; + typedef forward_task_bypass task_type; + graph_task* t = allocator.new_object( graph_reference(), allocator, *this, my_priority ); + return t; + } + + //! Spawns a task that calls forward() + inline void spawn_forward_task() { + graph_task* tp = create_forward_task(); + if(tp) { + spawn_in_graph_arena(graph_reference(), *tp); + } + } + + node_priority_t priority() const override { return my_priority; } +}; // function_input_base + +//! Implements methods for a function node that takes a type Input as input and sends +// a type Output to its successors. +template< typename Input, typename Output, typename Policy, typename A> +class function_input : public function_input_base > { +public: + typedef Input input_type; + typedef Output output_type; + typedef function_body function_body_type; + typedef function_input my_class; + typedef function_input_base base_type; + typedef function_input_queue input_queue_type; + + // constructor + template + function_input( + graph &g, size_t max_concurrency, Body& body, node_priority_t a_priority ) + : base_type(g, max_concurrency, a_priority, noexcept(tbb::detail::invoke(body, input_type()))) + , my_body( new function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new function_body_leaf< input_type, output_type, Body>(body) ) { + } + + //! Copy constructor + function_input( const function_input& src ) : + base_type(src), + my_body( src.my_init_body->clone() ), + my_init_body(src.my_init_body->clone() ) { + } +#if __INTEL_COMPILER <= 2021 + // Suppress superfluous diagnostic about virtual keyword absence in a destructor of an inherited + // class while the parent class has the virtual keyword for the destrocutor. 
+ virtual +#endif + ~function_input() { + delete my_body; + delete my_init_body; + } + + template< typename Body > + Body copy_function_object() { + function_body_type &body_ref = *this->my_body; + return dynamic_cast< function_body_leaf & >(body_ref).get_body(); + } + + output_type apply_body_impl( const input_type& i) { + // There is an extra copied needed to capture the + // body execution without the try_put + fgt_begin_body( my_body ); + output_type v = tbb::detail::invoke(*my_body, i); + fgt_end_body( my_body ); + return v; + } + + //TODO: consider moving into the base class + graph_task* apply_body_impl_bypass( const input_type &i + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) + { + output_type v = apply_body_impl(i); + graph_task* postponed_task = nullptr; + if( base_type::my_max_concurrency != 0 ) { + postponed_task = base_type::try_get_postponed_task(i); + __TBB_ASSERT( !postponed_task || postponed_task != SUCCESSFULLY_ENQUEUED, nullptr); + } + if( postponed_task ) { + // make the task available for other workers since we do not know successors' + // execution policy + spawn_in_graph_arena(base_type::graph_reference(), *postponed_task); + } + graph_task* successor_task = successors().try_put_task(v __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); +#if _MSC_VER && !__INTEL_COMPILER +// #pragma warning (push) +// #pragma warning (disable: 4127) /* suppress conditional expression is constant */ +#endif + if(has_policy::value) { +#if _MSC_VER && !__INTEL_COMPILER +// #pragma warning (pop) +#endif + if(!successor_task) { + // Return confirmative status since current + // node's body has been executed anyway + successor_task = SUCCESSFULLY_ENQUEUED; + } + } + return successor_task; + } + +protected: + + void reset_function_input(reset_flags f) { + base_type::reset_function_input_base(f); + if(f & rf_reset_bodies) { + function_body_type *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + } + + function_body_type *my_body; + function_body_type *my_init_body; + virtual broadcast_cache &successors() = 0; + +}; // function_input + + +// helper templates to clear the successor edges of the output ports of an multifunction_node +template struct clear_element { + template static void clear_this(P &p) { + (void)std::get(p).successors().clear(); + clear_element::clear_this(p); + } +#if TBB_USE_ASSERT + template static bool this_empty(P &p) { + if(std::get(p).successors().empty()) + return clear_element::this_empty(p); + return false; + } +#endif +}; + +template<> struct clear_element<1> { + template static void clear_this(P &p) { + (void)std::get<0>(p).successors().clear(); + } +#if TBB_USE_ASSERT + template static bool this_empty(P &p) { + return std::get<0>(p).successors().empty(); + } +#endif +}; + +template +struct init_output_ports { + template + static OutputTuple call(graph& g, const std::tuple&) { + return OutputTuple(Args(g)...); + } +}; // struct init_output_ports + +//! Implements methods for a function node that takes a type Input as input +// and has a tuple of output ports specified. 
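[Editor's note: the class introduced by the comment above is the input side of the public `multifunction_node`, which takes one input and owns a tuple of output ports that the body writes to. A hedged sketch of that public surface, with illustrative names only:]

```cpp
#include <oneapi/tbb/flow_graph.h>
#include <iostream>

int main() {
    using namespace oneapi::tbb::flow;
    graph g;

    using splitter_t = multifunction_node<int, std::tuple<int, int>>;
    splitter_t splitter(g, unlimited,
        [](const int& v, splitter_t::output_ports_type& ports) {
            // The body may put to any subset of ports; 'ports' is the
            // OutputPortSet tuple that multifunction_input manages below.
            if (v % 2 == 0) std::get<0>(ports).try_put(v);
            else            std::get<1>(ports).try_put(v);
        });

    function_node<int, int> evens(g, serial,
        [](int v) { std::cout << "even " << v << '\n'; return v; });
    function_node<int, int> odds(g, serial,
        [](int v) { std::cout << "odd "  << v << '\n'; return v; });
    make_edge(output_port<0>(splitter), evens);
    make_edge(output_port<1>(splitter), odds);

    for (int i = 0; i < 4; ++i) splitter.try_put(i);
    g.wait_for_all();
    return 0;
}
```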
+template< typename Input, typename OutputPortSet, typename Policy, typename A> +class multifunction_input : public function_input_base > { +public: + static const int N = std::tuple_size::value; + typedef Input input_type; + typedef OutputPortSet output_ports_type; + typedef multifunction_body multifunction_body_type; + typedef multifunction_input my_class; + typedef function_input_base base_type; + typedef function_input_queue input_queue_type; + + // constructor + template + multifunction_input(graph &g, size_t max_concurrency,Body& body, node_priority_t a_priority ) + : base_type(g, max_concurrency, a_priority, noexcept(tbb::detail::invoke(body, input_type(), my_output_ports))) + , my_body( new multifunction_body_leaf(body) ) + , my_init_body( new multifunction_body_leaf(body) ) + , my_output_ports(init_output_ports::call(g, my_output_ports)){ + } + + //! Copy constructor + multifunction_input( const multifunction_input& src ) : + base_type(src), + my_body( src.my_init_body->clone() ), + my_init_body(src.my_init_body->clone() ), + my_output_ports( init_output_ports::call(src.my_graph_ref, my_output_ports) ) { + } + + ~multifunction_input() { + delete my_body; + delete my_init_body; + } + + template< typename Body > + Body copy_function_object() { + multifunction_body_type &body_ref = *this->my_body; + return *static_cast(dynamic_cast< multifunction_body_leaf & >(body_ref).get_body_ptr()); + } + + // for multifunction nodes we do not have a single successor as such. So we just tell + // the task we were successful. + //TODO: consider moving common parts with implementation in function_input into separate function + graph_task* apply_body_impl_bypass( const input_type &i + __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo&) ) + { + fgt_begin_body( my_body ); + (*my_body)(i, my_output_ports); + fgt_end_body( my_body ); + graph_task* ttask = nullptr; + if(base_type::my_max_concurrency != 0) { + ttask = base_type::try_get_postponed_task(i); + } + return ttask ? 
ttask : SUCCESSFULLY_ENQUEUED; + } + + output_ports_type &output_ports(){ return my_output_ports; } + +protected: + + void reset(reset_flags f) { + base_type::reset_function_input_base(f); + if(f & rf_clear_edges)clear_element::clear_this(my_output_ports); + if(f & rf_reset_bodies) { + multifunction_body_type* tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + __TBB_ASSERT(!(f & rf_clear_edges) || clear_element::this_empty(my_output_ports), "multifunction_node reset failed"); + } + + multifunction_body_type *my_body; + multifunction_body_type *my_init_body; + output_ports_type my_output_ports; + +}; // multifunction_input + +// template to refer to an output port of a multifunction_node +template +typename std::tuple_element::type &output_port(MOP &op) { + return std::get(op.output_ports()); +} + +inline void check_task_and_spawn(graph& g, graph_task* t) { + if (t && t != SUCCESSFULLY_ENQUEUED) { + spawn_in_graph_arena(g, *t); + } +} + +// helper structs for split_node +template +struct emit_element { + template + static graph_task* emit_this(graph& g, const T &t, P &p) { + // TODO: consider to collect all the tasks in task_list and spawn them all at once + graph_task* last_task = std::get(p).try_put_task(std::get(t)); + check_task_and_spawn(g, last_task); + return emit_element::emit_this(g,t,p); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static graph_task* emit_this(graph& g, const TupleType& t, PortsType& p, + const message_metainfo& metainfo) + { + // TODO: consider to collect all the tasks in task_list and spawn them all at once + graph_task* last_task = std::get(p).try_put_task(std::get(t), metainfo); + check_task_and_spawn(g, last_task); + return emit_element::emit_this(g, t, p, metainfo); + } +#endif +}; + +template<> +struct emit_element<1> { + template + static graph_task* emit_this(graph& g, const T &t, P &p) { + graph_task* last_task = std::get<0>(p).try_put_task(std::get<0>(t)); + check_task_and_spawn(g, last_task); + return SUCCESSFULLY_ENQUEUED; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + template + static graph_task* emit_this(graph& g, const TupleType& t, PortsType& ports, + const message_metainfo& metainfo) + { + graph_task* last_task = std::get<0>(ports).try_put_task(std::get<0>(t), metainfo); + check_task_and_spawn(g, last_task); + return SUCCESSFULLY_ENQUEUED; + } +#endif +}; + +//! Implements methods for an executable node that takes continue_msg as input +template< typename Output, typename Policy> +class continue_input : public continue_receiver { +public: + + //! The input type of this receiver + typedef continue_msg input_type; + + //! 
The output type of this receiver + typedef Output output_type; + typedef function_body function_body_type; + typedef continue_input class_type; + + template< typename Body > + continue_input( graph &g, Body& body, node_priority_t a_priority ) + : continue_receiver(/*number_of_predecessors=*/0, a_priority) + , my_graph_ref(g) + , my_body( new function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new function_body_leaf< input_type, output_type, Body>(body) ) + { } + + template< typename Body > + continue_input( graph &g, int number_of_predecessors, + Body& body, node_priority_t a_priority ) + : continue_receiver( number_of_predecessors, a_priority ) + , my_graph_ref(g) + , my_body( new function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new function_body_leaf< input_type, output_type, Body>(body) ) + { } + + continue_input( const continue_input& src ) : continue_receiver(src), + my_graph_ref(src.my_graph_ref), + my_body( src.my_init_body->clone() ), + my_init_body( src.my_init_body->clone() ) {} + + ~continue_input() { + delete my_body; + delete my_init_body; + } + + template< typename Body > + Body copy_function_object() { + function_body_type &body_ref = *my_body; + return dynamic_cast< function_body_leaf & >(body_ref).get_body(); + } + + void reset_receiver( reset_flags f) override { + continue_receiver::reset_receiver(f); + if(f & rf_reset_bodies) { + function_body_type *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + } + +protected: + + graph& my_graph_ref; + function_body_type *my_body; + function_body_type *my_init_body; + + virtual broadcast_cache &successors() = 0; + + friend class apply_body_task_bypass< class_type, continue_msg >; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + friend class apply_body_task_bypass< class_type, continue_msg, trackable_messages_graph_task >; +#endif + + //! Applies the body to the provided input + graph_task* apply_body_bypass( input_type __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo) ) { + // There is an extra copied needed to capture the + // body execution without the try_put + fgt_begin_body( my_body ); + output_type v = (*my_body)( continue_msg() ); + fgt_end_body( my_body ); + return successors().try_put_task( v __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* execute(const message_metainfo& metainfo) override { +#else + graph_task* execute() override { +#endif + if(!is_graph_active(my_graph_ref)) { + return nullptr; + } +#if _MSC_VER && !__INTEL_COMPILER +// #pragma warning (push) +// #pragma warning (disable: 4127) /* suppress conditional expression is constant */ +#endif + if(has_policy::value) { +#if _MSC_VER && !__INTEL_COMPILER +// #pragma warning (pop) +#endif + return apply_body_bypass( continue_msg() __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo) ); + } + else { + d1::small_object_allocator allocator{}; + graph_task* t = nullptr; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (!metainfo.empty()) { + using task_type = apply_body_task_bypass; + t = allocator.new_object( graph_reference(), allocator, *this, continue_msg(), my_priority, metainfo ); + } else +#endif + { + using task_type = apply_body_task_bypass; + t = allocator.new_object( graph_reference(), allocator, *this, continue_msg(), my_priority ); + } + return t; + } + } + + graph& graph_reference() const override { + return my_graph_ref; + } +}; // continue_input + +//! 
Implements methods for both executable and function nodes that puts Output to its successors +template< typename Output > +class function_output : public sender { +public: + + template friend struct clear_element; + typedef Output output_type; + typedef typename sender::successor_type successor_type; + typedef broadcast_cache broadcast_cache_type; + + function_output(graph& g) : my_successors(this), my_graph_ref(g) {} + function_output(const function_output& other) = delete; + + //! Adds a new successor to this node + bool register_successor( successor_type &r ) override { + successors().register_successor( r ); + return true; + } + + //! Removes a successor from this node + bool remove_successor( successor_type &r ) override { + successors().remove_successor( r ); + return true; + } + + broadcast_cache_type &successors() { return my_successors; } + + graph& graph_reference() const { return my_graph_ref; } +protected: + broadcast_cache_type my_successors; + graph& my_graph_ref; +}; // function_output + +template< typename Output > +class multifunction_output : public function_output { +public: + typedef Output output_type; + typedef function_output base_type; + using base_type::my_successors; + + multifunction_output(graph& g) : base_type(g) {} + multifunction_output(const multifunction_output& other) : base_type(other.my_graph_ref) {} + + bool try_put(const output_type &i) { + graph_task *res = try_put_task(i); + if( !res ) return false; + if( res != SUCCESSFULLY_ENQUEUED ) { + // wrapping in task_arena::execute() is not needed since the method is called from + // inside task::execute() + spawn_in_graph_arena(graph_reference(), *res); + } + return true; + } + + using base_type::graph_reference; + +protected: + + graph_task* try_put_task(const output_type &i) { + return my_successors.try_put_task(i); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const output_type& i, const message_metainfo& metainfo) { + return my_successors.try_put_task(i, metainfo); + } +#endif + + template friend struct emit_element; + +}; // multifunction_output + +//composite_node +template +void add_nodes_impl(CompositeType*, bool) {} + +template< typename CompositeType, typename NodeType1, typename... NodeTypes > +void add_nodes_impl(CompositeType *c_node, bool visible, const NodeType1& n1, const NodeTypes&... n) { + void *addr = const_cast(&n1); + + fgt_alias_port(c_node, addr, visible); + add_nodes_impl(c_node, visible, n...); +} + +#endif // __TBB__flow_graph_node_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h new file mode 100644 index 000000000..8440bd700 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h @@ -0,0 +1,265 @@ +/* + Copyright (c) 2020-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_flow_graph_node_set_impl_H +#define __TBB_flow_graph_node_set_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// Included in namespace tbb::detail::d2 (in flow_graph.h) + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +// Visual Studio 2019 reports an error while calling predecessor_selector::get and successor_selector::get +// Seems like the well-formed expression in trailing decltype is treated as ill-formed +// TODO: investigate problems with decltype in trailing return types or find the cross-platform solution +#define __TBB_MSVC_DISABLE_TRAILING_DECLTYPE (_MSC_VER >= 1900) + +namespace order { +struct undefined {}; +struct following {}; +struct preceding {}; +} + +class get_graph_helper { +public: + // TODO: consider making graph_reference() public and consistent interface to get a reference to the graph + // and remove get_graph_helper + template + static graph& get(const T& object) { + return get_impl(object, std::is_base_of()); + } + +private: + // Get graph from the object of type derived from graph_node + template + static graph& get_impl(const T& object, std::true_type) { + return static_cast(&object)->my_graph; + } + + template + static graph& get_impl(const T& object, std::false_type) { + return object.graph_reference(); + } +}; + +template +struct node_set { + typedef Order order_type; + + std::tuple nodes; + node_set(Nodes&... ns) : nodes(ns...) {} + + template + node_set(const node_set& set) : nodes(set.nodes) {} + + graph& graph_reference() const { + return get_graph_helper::get(std::get<0>(nodes)); + } +}; + +namespace alias_helpers { +template using output_type = typename T::output_type; +template using output_ports_type = typename T::output_ports_type; +template using input_type = typename T::input_type; +template using input_ports_type = typename T::input_ports_type; +} // namespace alias_helpers + +template +using has_output_type = supports; + +template +using has_input_type = supports; + +template +using has_input_ports_type = supports; + +template +using has_output_ports_type = supports; + +template +struct is_sender : std::is_base_of, T> {}; + +template +struct is_receiver : std::is_base_of, T> {}; + +template +struct is_async_node : std::false_type {}; + +template +struct is_async_node> : std::true_type {}; + +template +node_set +follows(FirstPredecessor& first_predecessor, Predecessors&... predecessors) { + static_assert((conjunction, + has_output_type...>::value), + "Not all node's predecessors has output_type typedef"); + static_assert((conjunction, is_sender...>::value), + "Not all node's predecessors are senders"); + return node_set(first_predecessor, predecessors...); +} + +template +node_set +follows(node_set& predecessors_set) { + static_assert((conjunction...>::value), + "Not all nodes in the set has output_type typedef"); + static_assert((conjunction...>::value), + "Not all nodes in the set are senders"); + return node_set(predecessors_set); +} + +template +node_set +precedes(FirstSuccessor& first_successor, Successors&... 
successors) { + static_assert((conjunction, + has_input_type...>::value), + "Not all node's successors has input_type typedef"); + static_assert((conjunction, is_receiver...>::value), + "Not all node's successors are receivers"); + return node_set(first_successor, successors...); +} + +template +node_set +precedes(node_set& successors_set) { + static_assert((conjunction...>::value), + "Not all nodes in the set has input_type typedef"); + static_assert((conjunction...>::value), + "Not all nodes in the set are receivers"); + return node_set(successors_set); +} + +template +node_set +make_node_set(Node& first_node, Nodes&... nodes) { + return node_set(first_node, nodes...); +} + +template +class successor_selector { + template + static auto get_impl(NodeType& node, std::true_type) -> decltype(input_port(node)) { + return input_port(node); + } + + template + static NodeType& get_impl(NodeType& node, std::false_type) { return node; } + +public: + template +#if __TBB_MSVC_DISABLE_TRAILING_DECLTYPE + static auto& get(NodeType& node) +#else + static auto get(NodeType& node) -> decltype(get_impl(node, has_input_ports_type())) +#endif + { + return get_impl(node, has_input_ports_type()); + } +}; + +template +class predecessor_selector { + template + static auto internal_get(NodeType& node, std::true_type) -> decltype(output_port(node)) { + return output_port(node); + } + + template + static NodeType& internal_get(NodeType& node, std::false_type) { return node;} + + template +#if __TBB_MSVC_DISABLE_TRAILING_DECLTYPE + static auto& get_impl(NodeType& node, std::false_type) +#else + static auto get_impl(NodeType& node, std::false_type) -> decltype(internal_get(node, has_output_ports_type())) +#endif + { + return internal_get(node, has_output_ports_type()); + } + + template + static AsyncNode& get_impl(AsyncNode& node, std::true_type) { return node; } + +public: + template +#if __TBB_MSVC_DISABLE_TRAILING_DECLTYPE + static auto& get(NodeType& node) +#else + static auto get(NodeType& node) -> decltype(get_impl(node, is_async_node())) +#endif + { + return get_impl(node, is_async_node()); + } +}; + +template +class make_edges_helper { +public: + template + static void connect_predecessors(PredecessorsTuple& predecessors, NodeType& node) { + make_edge(std::get(predecessors), successor_selector::get(node)); + make_edges_helper::connect_predecessors(predecessors, node); + } + + template + static void connect_successors(NodeType& node, SuccessorsTuple& successors) { + make_edge(predecessor_selector::get(node), std::get(successors)); + make_edges_helper::connect_successors(node, successors); + } +}; + +template<> +struct make_edges_helper<0> { + template + static void connect_predecessors(PredecessorsTuple& predecessors, NodeType& node) { + make_edge(std::get<0>(predecessors), successor_selector<0>::get(node)); + } + + template + static void connect_successors(NodeType& node, SuccessorsTuple& successors) { + make_edge(predecessor_selector<0>::get(node), std::get<0>(successors)); + } +}; + +// TODO: consider adding an overload for making edges between node sets +template +void make_edges(const node_set& s, NodeType& node) { + const std::size_t SetSize = std::tuple_size::value; + make_edges_helper::connect_predecessors(s.nodes, node); +} + +template +void make_edges(NodeType& node, const node_set& s) { + const std::size_t SetSize = std::tuple_size::value; + make_edges_helper::connect_successors(node, s.nodes); +} + +template +void make_edges_in_order(const node_set& ns, NodeType& node) { + make_edges(ns, node); +} 
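[Editor's note: a hedged usage sketch for the node-set helpers defined above; the mirrored `make_edges_in_order` overload for the opposite ordering continues immediately after this aside. `follows()`/`precedes()` are preview API, so `TBB_PREVIEW_FLOW_GRAPH_FEATURES` must be defined before including the header; node names here are illustrative.]

```cpp
#define TBB_PREVIEW_FLOW_GRAPH_FEATURES 1
#include <oneapi/tbb/flow_graph.h>
#include <iostream>

int main() {
    using namespace oneapi::tbb::flow;
    graph g;

    broadcast_node<int> src(g);

    // Passing follows(src) instead of g lets the constructor call
    // make_edges_in_order() for us; the C++17 deduction guides from
    // _flow_graph_nodes_deduction.h (later in this patch) infer the
    // node's input/output types from the body.
    function_node square(follows(src), unlimited,
                         [](const int& v) { return v * v; });
    function_node report(follows(square), serial,
                         [](const int& v) { std::cout << v << '\n'; return v; });

    src.try_put(3);
    g.wait_for_all();
    return 0;
}
```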
+ +template +void make_edges_in_order(const node_set& ns, NodeType& node) { + make_edges(node, ns); +} + +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +#endif // __TBB_flow_graph_node_set_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h new file mode 100644 index 000000000..47ecfb2a8 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h @@ -0,0 +1,277 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_flow_graph_nodes_deduction_H +#define __TBB_flow_graph_nodes_deduction_H + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace tbb { +namespace detail { +namespace d2 { + +template +struct declare_body_types { + using input_type = Input; + using output_type = Output; +}; + +struct NoInputBody {}; + +template +struct declare_body_types { + using output_type = Output; +}; + +template struct body_types; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +using input_t = typename body_types::input_type; + +template +using output_t = typename body_types::output_type; + +template +auto decide_on_operator_overload(Output (T::*name)(const Input&) const)->decltype(name); + +template +auto decide_on_operator_overload(Output (T::*name)(const Input&))->decltype(name); + +template +auto decide_on_operator_overload(Output (T::*name)(Input&) const)->decltype(name); + +template +auto decide_on_operator_overload(Output (T::*name)(Input&))->decltype(name); + +template +auto decide_on_operator_overload(Output (*name)(const Input&))->decltype(name); + +template +auto decide_on_operator_overload(Output (*name)(Input&))->decltype(name); + +template +decltype(decide_on_operator_overload(&Body::operator())) decide_on_callable_type(int); + +template +decltype(decide_on_operator_overload(std::declval())) decide_on_callable_type(...); + +// Deduction guides for Flow Graph nodes + +template +input_node(GraphOrSet&&, Body) +->input_node(0))>>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +struct decide_on_set; + +template +struct decide_on_set> { + using type = typename Node::output_type; +}; + +template +struct decide_on_set> { + using type = typename Node::input_type; +}; + +template +using decide_on_set_t = typename decide_on_set>::type; + +template +broadcast_node(const NodeSet&) +->broadcast_node>; + +template +buffer_node(const NodeSet&) +->buffer_node>; + +template +queue_node(const NodeSet&) +->queue_node>; +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template 
+sequencer_node(GraphOrProxy&&, Sequencer) +->sequencer_node(0))>>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +priority_queue_node(const NodeSet&, const Compare&) +->priority_queue_node, Compare>; + +template +priority_queue_node(const NodeSet&) +->priority_queue_node, std::less>>; +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +struct join_key { + using type = Key; +}; + +template +struct join_key { + using type = T&; +}; + +template +using join_key_t = typename join_key::type; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +join_node(const node_set&, Policy) +->join_node, + Policy>; + +template +join_node(const node_set&, Policy) +->join_node; + +template +join_node(const node_set) +->join_node, + queueing>; + +template +join_node(const node_set) +->join_node; +#endif + +template +join_node(GraphOrProxy&&, Body, Bodies...) +->join_node(0))>, + input_t(0))>...>, + key_matching(0))>>>>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +indexer_node(const node_set&) +->indexer_node; +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +limiter_node(const NodeSet&, size_t) +->limiter_node>; + +template +split_node(const node_set&) +->split_node; + +template +split_node(const node_set&) +->split_node>; + +#endif + +template +function_node(GraphOrSet&&, + size_t, Body, + Policy, node_priority_t = no_priority) +->function_node(0))>, + output_t(0))>, + Policy>; + +template +function_node(GraphOrSet&&, size_t, + Body, node_priority_t = no_priority) +->function_node(0))>, + output_t(0))>, + queueing>; + +template +struct continue_output { + using type = Output; +}; + +template <> +struct continue_output { + using type = continue_msg; +}; + +template +using continue_output_t = typename continue_output::type; + +template +continue_node(GraphOrSet&&, Body, + Policy, node_priority_t = no_priority) +->continue_node>, + Policy>; + +template +continue_node(GraphOrSet&&, + int, Body, + Policy, node_priority_t = no_priority) +->continue_node>, + Policy>; + +template +continue_node(GraphOrSet&&, + Body, node_priority_t = no_priority) +->continue_node>, Policy>; + +template +continue_node(GraphOrSet&&, int, + Body, node_priority_t = no_priority) +->continue_node>, + Policy>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +overwrite_node(const NodeSet&) +->overwrite_node>; + +template +write_once_node(const NodeSet&) +->write_once_node>; +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +} // namespace d2 +} // namespace detail +} // namespace tbb + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +#endif // __TBB_flow_graph_nodes_deduction_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_tagged_buffer_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_tagged_buffer_impl.h new file mode 100644 index 000000000..0f7c0d174 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_tagged_buffer_impl.h @@ -0,0 +1,380 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +// a hash table buffer that can expand, and can support as many deletions as +// additions, list-based, with elements of list held in array (for destruction +// management), multiplicative hashing (like ets). No synchronization built-in. +// + +#ifndef __TBB__flow_graph_hash_buffer_impl_H +#define __TBB__flow_graph_hash_buffer_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::flow::interfaceX::internal + +// elements in the table are a simple list; we need pointer to next element to +// traverse the chain + +template +struct hash_buffer_element : public aligned_pair { + using key_type = Key; + using value_type = ValueType; + + value_type* get_value_ptr() { return reinterpret_cast(this->first); } + hash_buffer_element* get_next() { return reinterpret_cast(this->second); } + void set_next(hash_buffer_element* new_next) { this->second = reinterpret_cast(new_next); } + + void create_element(const value_type& v) { + ::new(this->first) value_type(v); + } + + void create_element(hash_buffer_element&& other) { + ::new(this->first) value_type(std::move(*other.get_value_ptr())); + } + + void destroy_element() { + get_value_ptr()->~value_type(); + } +}; + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +template +struct metainfo_hash_buffer_element : public aligned_triple { + using key_type = Key; + using value_type = ValueType; + + value_type* get_value_ptr() { return reinterpret_cast(this->first); } + metainfo_hash_buffer_element* get_next() { + return reinterpret_cast(this->second); + } + void set_next(metainfo_hash_buffer_element* new_next) { this->second = reinterpret_cast(new_next); } + message_metainfo& get_metainfo() { return this->third; } + + void create_element(const value_type& v, const message_metainfo& metainfo) { + __TBB_ASSERT(this->third.empty(), nullptr); + ::new(this->first) value_type(v); + this->third = metainfo; + + for (auto waiter : metainfo.waiters()) { + waiter->reserve(1); + } + } + + void create_element(metainfo_hash_buffer_element&& other) { + __TBB_ASSERT(this->third.empty(), nullptr); + ::new(this->first) value_type(std::move(*other.get_value_ptr())); + this->third = std::move(other.get_metainfo()); + } + + void destroy_element() { + get_value_ptr()->~value_type(); + + for (auto waiter : get_metainfo().waiters()) { + waiter->release(1); + } + get_metainfo() = message_metainfo{}; + } +}; +#endif + +template + < + typename ElementType, + typename ValueToKey, // abstract method that returns "const Key" or "const Key&" given ValueType + typename HashCompare, // has hash and equal + typename Allocator=tbb::cache_aligned_allocator + > +class hash_buffer_impl : public HashCompare { +public: + static const size_t INITIAL_SIZE = 8; // initial size of the hash pointer table + typedef typename ElementType::key_type key_type; + typedef typename ElementType::value_type value_type; + typedef ElementType element_type; + typedef value_type *pointer_type; + typedef element_type *list_array_type; // array we manage manually + typedef list_array_type *pointer_array_type; + typedef typename std::allocator_traits::template rebind_alloc pointer_array_allocator_type; + typedef typename std::allocator_traits::template rebind_alloc elements_array_allocator; + typedef typename std::decay::type Knoref; + +private: + ValueToKey *my_key; + size_t my_size; + size_t nelements; + pointer_array_type pointer_array; // pointer_array[my_size] + list_array_type elements_array; // 
elements_array[my_size / 2] + element_type* free_list; + + size_t mask() { return my_size - 1; } + + void set_up_free_list( element_type **p_free_list, list_array_type la, size_t sz) { + for(size_t i=0; i < sz - 1; ++i ) { // construct free list + la[i].set_next(&(la[i + 1])); + } + la[sz - 1].set_next(nullptr); + *p_free_list = (element_type *)&(la[0]); + } + + // cleanup for exceptions + struct DoCleanup { + pointer_array_type *my_pa; + list_array_type *my_elements; + size_t my_size; + + DoCleanup(pointer_array_type &pa, list_array_type &my_els, size_t sz) : + my_pa(&pa), my_elements(&my_els), my_size(sz) { } + ~DoCleanup() { + if(my_pa) { + size_t dont_care = 0; + internal_free_buffer(*my_pa, *my_elements, my_size, dont_care); + } + } + }; + + // exception-safety requires we do all the potentially-throwing operations first + void grow_array() { + size_t new_size = my_size*2; + size_t new_nelements = nelements; // internal_free_buffer zeroes this + list_array_type new_elements_array = nullptr; + pointer_array_type new_pointer_array = nullptr; + list_array_type new_free_list = nullptr; + { + DoCleanup my_cleanup(new_pointer_array, new_elements_array, new_size); + new_elements_array = elements_array_allocator().allocate(my_size); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (std::size_t i = 0; i < my_size; ++i) { + ::new(new_elements_array + i) element_type(); + } +#endif + new_pointer_array = pointer_array_allocator_type().allocate(new_size); + for(size_t i=0; i < new_size; ++i) new_pointer_array[i] = nullptr; + set_up_free_list(&new_free_list, new_elements_array, my_size ); + + for(size_t i=0; i < my_size; ++i) { + for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->get_next())) { + internal_insert_with_key(new_pointer_array, new_size, new_free_list, std::move(*op)); + } + } + my_cleanup.my_pa = nullptr; + my_cleanup.my_elements = nullptr; + } + + internal_free_buffer(pointer_array, elements_array, my_size, nelements); + free_list = new_free_list; + pointer_array = new_pointer_array; + elements_array = new_elements_array; + my_size = new_size; + nelements = new_nelements; + } + + // v should have perfect forwarding if std::move implemented. + // we use this method to move elements in grow_array, so can't use class fields + template + const value_type& get_value_from_pack(const Value& value, const Args&...) { + return value; + } + + template + const value_type& get_value_from_pack(Element&& element) { + return *(element.get_value_ptr()); + } + + template + void internal_insert_with_key( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list, + Args&&... 
args) { + size_t l_mask = p_sz-1; + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + size_t h = this->hash(tbb::detail::invoke(*my_key, get_value_from_pack(args...))) & l_mask; + __TBB_ASSERT(p_free_list, "Error: free list not set up."); + element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->get_next()); + my_elem->create_element(std::forward(args)...); + my_elem->set_next(p_pointer_array[h]); + p_pointer_array[h] = my_elem; + } + + void internal_initialize_buffer() { + pointer_array = pointer_array_allocator_type().allocate(my_size); + for(size_t i = 0; i < my_size; ++i) pointer_array[i] = nullptr; + elements_array = elements_array_allocator().allocate(my_size / 2); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (std::size_t i = 0; i < my_size / 2; ++i) { + ::new(elements_array + i) element_type(); + } +#endif + set_up_free_list(&free_list, elements_array, my_size / 2); + } + + // made static so an enclosed class can use to properly dispose of the internals + static void internal_free_buffer( pointer_array_type &pa, list_array_type &el, size_t &sz, size_t &ne ) { + if(pa) { + for(size_t i = 0; i < sz; ++i ) { + element_type *p_next; + for( element_type *p = pa[i]; p; p = p_next) { + p_next = p->get_next(); + p->destroy_element(); + } + } + pointer_array_allocator_type().deallocate(pa, sz); + pa = nullptr; + } + // Separate test (if allocation of pa throws, el may be allocated. + // but no elements will be constructed.) + if(el) { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (std::size_t i = 0; i < sz / 2; ++i) { + (el + i)->~element_type(); + } +#endif + elements_array_allocator().deallocate(el, sz / 2); + el = nullptr; + } + sz = INITIAL_SIZE; + ne = 0; + } + +public: + hash_buffer_impl() : my_key(nullptr), my_size(INITIAL_SIZE), nelements(0) { + internal_initialize_buffer(); + } + + ~hash_buffer_impl() { + internal_free_buffer(pointer_array, elements_array, my_size, nelements); + delete my_key; + my_key = nullptr; + } + hash_buffer_impl(const hash_buffer_impl&) = delete; + hash_buffer_impl& operator=(const hash_buffer_impl&) = delete; + + void reset() { + internal_free_buffer(pointer_array, elements_array, my_size, nelements); + internal_initialize_buffer(); + } + + // Take ownership of func object allocated with new. + // This method is only used internally, so can't be misused by user. + void set_key_func(ValueToKey *vtk) { my_key = vtk; } + // pointer is used to clone() + ValueToKey* get_key_func() { return my_key; } + + template + bool insert_with_key(const value_type &v, Args&&... args) { + element_type* p = nullptr; + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + if(find_element_ref_with_key(tbb::detail::invoke(*my_key, v), p)) { + p->destroy_element(); + p->create_element(v, std::forward(args)...); + return false; + } + ++nelements; + if(nelements*2 > my_size) grow_array(); + internal_insert_with_key(pointer_array, my_size, free_list, v, std::forward(args)...); + return true; + } + + bool find_element_ref_with_key(const Knoref& k, element_type*& v) { + size_t i = this->hash(k) & mask(); + for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->get_next())) { + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + if(this->equal(tbb::detail::invoke(*my_key, *p->get_value_ptr()), k)) { + v = p; + return true; + } + } + return false; + } + + // returns true and sets v to array element if found, else returns false. 
+ bool find_ref_with_key(const Knoref& k, pointer_type &v) { + element_type* element_ptr = nullptr; + bool res = find_element_ref_with_key(k, element_ptr); + v = element_ptr->get_value_ptr(); + return res; + } + + bool find_with_key( const Knoref& k, value_type &v) { + value_type *p; + if(find_ref_with_key(k, p)) { + v = *p; + return true; + } + else + return false; + } + + void delete_with_key(const Knoref& k) { + size_t h = this->hash(k) & mask(); + element_type* prev = nullptr; + for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->get_next())) { + value_type *vp = p->get_value_ptr(); + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + if(this->equal(tbb::detail::invoke(*my_key, *vp), k)) { + p->destroy_element(); + if(prev) prev->set_next(p->get_next()); + else pointer_array[h] = (element_type *)(p->get_next()); + p->set_next(free_list); + free_list = p; + --nelements; + return; + } + } + __TBB_ASSERT(false, "key not found for delete"); + } +}; + +template + < + typename Key, // type of key within ValueType + typename ValueType, + typename ValueToKey, // abstract method that returns "const Key" or "const Key&" given ValueType + typename HashCompare, // has hash and equal + typename Allocator=tbb::cache_aligned_allocator> + > +using hash_buffer = hash_buffer_impl, + ValueToKey, HashCompare, Allocator>; + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +template + < + typename Key, // type of key within ValueType + typename ValueType, + typename ValueToKey, // abstract method that returns "const Key" or "const Key&" given ValueType + typename HashCompare, // has hash and equal + typename Allocator=tbb::cache_aligned_allocator> + > +struct metainfo_hash_buffer : public hash_buffer_impl, + ValueToKey, HashCompare, Allocator> +{ +private: + using base_type = hash_buffer_impl, + ValueToKey, HashCompare, Allocator>; +public: + bool find_with_key(const typename base_type::Knoref& k, + typename base_type::value_type& v, message_metainfo& metainfo) + { + typename base_type::element_type* p = nullptr; + bool result = this->find_element_ref_with_key(k, p); + if (result) { + v = *(p->get_value_ptr()); + metainfo = p->get_metainfo(); + } + return result; + } +}; +#endif // __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +#endif // __TBB__flow_graph_hash_buffer_impl_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_trace_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_trace_impl.h new file mode 100644 index 000000000..74ebf0845 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_trace_impl.h @@ -0,0 +1,364 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_trace_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_trace_impl.h
new file mode 100644
index 000000000..74ebf0845
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_trace_impl.h
@@ -0,0 +1,364 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef _FGT_GRAPH_TRACE_IMPL_H
+#define _FGT_GRAPH_TRACE_IMPL_H
+
+#include "../profiling.h"
+#if (_MSC_VER >= 1900)
+    #include <intrin.h>
+#endif
+
+namespace tbb {
+namespace detail {
+namespace d2 {
+
+template< typename T > class sender;
+template< typename T > class receiver;
+
+#if TBB_USE_PROFILING_TOOLS
+    #if __TBB_FLOW_TRACE_CODEPTR
+        #if (_MSC_VER >= 1900)
+            #define CODEPTR() (_ReturnAddress())
+        #elif __TBB_GCC_VERSION >= 40800
+            #define CODEPTR() ( __builtin_return_address(0))
+        #else
+            #define CODEPTR() nullptr
+        #endif
+    #else
+        #define CODEPTR() nullptr
+    #endif /* __TBB_FLOW_TRACE_CODEPTR */
+
+static inline void fgt_alias_port(void *node, void *p, bool visible) {
+    if(visible)
+        itt_relation_add( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_NODE );
+    else
+        itt_relation_add( d1::ITT_DOMAIN_FLOW, p, FLOW_NODE, __itt_relation_is_child_of, node, FLOW_NODE );
+}
+
+static inline void fgt_composite ( void* codeptr, void *node, void *graph ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_COMPOSITE_NODE );
+    suppress_unused_warning( codeptr );
+#if __TBB_FLOW_TRACE_CODEPTR
+    if (codeptr != nullptr) {
+        register_node_addr(d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr);
+    }
+#endif
+}
+
+static inline void fgt_internal_alias_input_port( void *node, void *p, string_resource_index name_index ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index );
+    itt_relation_add( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_INPUT_PORT );
+}
+
+static inline void fgt_internal_alias_output_port( void *node, void *p, string_resource_index name_index ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index );
+    itt_relation_add( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_OUTPUT_PORT );
+}
+
+template<typename T>
+void alias_input_port(void *node, receiver<T>* port, string_resource_index name_index) {
+    // TODO: Make fgt_internal_alias_input_port a function template?
+    fgt_internal_alias_input_port( node, port, name_index);
+}
+
+template < typename PortsTuple, int N >
+struct fgt_internal_input_alias_helper {
+    static void alias_port( void *node, PortsTuple &ports ) {
+        alias_input_port( node, &(std::get<N-1>(ports)), static_cast<string_resource_index>(FLOW_INPUT_PORT_0 + N - 1) );
+        fgt_internal_input_alias_helper<PortsTuple, N-1>::alias_port( node, ports );
+    }
+};
+
+template < typename PortsTuple >
+struct fgt_internal_input_alias_helper<PortsTuple, 0> {
+    static void alias_port( void * /* node */, PortsTuple & /* ports */ ) { }
+};
+
+template<typename T>
+void alias_output_port(void *node, sender<T>* port, string_resource_index name_index) {
+    // TODO: Make fgt_internal_alias_output_port a function template?
+    fgt_internal_alias_output_port( node, static_cast<void *>(port), name_index);
+}
+
+template < typename PortsTuple, int N >
+struct fgt_internal_output_alias_helper {
+    static void alias_port( void *node, PortsTuple &ports ) {
+        alias_output_port( node, &(std::get<N-1>(ports)), static_cast<string_resource_index>(FLOW_OUTPUT_PORT_0 + N - 1) );
+        fgt_internal_output_alias_helper<PortsTuple, N-1>::alias_port( node, ports );
+    }
+};
+
+template < typename PortsTuple >
+struct fgt_internal_output_alias_helper<PortsTuple, 0> {
+    static void alias_port( void * /*node*/, PortsTuple &/*ports*/ ) {
+    }
+};
+
+static inline void fgt_internal_create_input_port( void *node, void *p, string_resource_index name_index ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index );
+}
+
+static inline void fgt_internal_create_output_port( void* codeptr, void *node, void *p, string_resource_index name_index ) {
+    itt_make_task_group(d1::ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index);
+    suppress_unused_warning( codeptr );
+#if __TBB_FLOW_TRACE_CODEPTR
+    if (codeptr != nullptr) {
+        register_node_addr(d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr);
+    }
+#endif
+}
+
+template<typename T>
+void register_input_port(void *node, receiver<T>* port, string_resource_index name_index) {
+    // TODO: Make fgt_internal_create_input_port a function template?
+    fgt_internal_create_input_port(node, static_cast<void*>(port), name_index);
+}
+
+template < typename PortsTuple, int N >
+struct fgt_internal_input_helper {
+    static void register_port( void *node, PortsTuple &ports ) {
+        register_input_port( node, &(std::get<N-1>(ports)), static_cast<string_resource_index>(FLOW_INPUT_PORT_0 + N - 1) );
+        fgt_internal_input_helper<PortsTuple, N-1>::register_port( node, ports );
+    }
+};
+
+template < typename PortsTuple >
+struct fgt_internal_input_helper<PortsTuple, 1> {
+    static void register_port( void *node, PortsTuple &ports ) {
+        register_input_port( node, &(std::get<0>(ports)), FLOW_INPUT_PORT_0 );
+    }
+};
+
+template<typename T>
+void register_output_port(void* codeptr, void *node, sender<T>* port, string_resource_index name_index) {
+    // TODO: Make fgt_internal_create_output_port a function template?
+    fgt_internal_create_output_port( codeptr, node, static_cast<void *>(port), name_index);
+}
+
+template < typename PortsTuple, int N >
+struct fgt_internal_output_helper {
+    static void register_port( void* codeptr, void *node, PortsTuple &ports ) {
+        register_output_port( codeptr, node, &(std::get<N-1>(ports)), static_cast<string_resource_index>(FLOW_OUTPUT_PORT_0 + N - 1) );
+        fgt_internal_output_helper<PortsTuple, N-1>::register_port( codeptr, node, ports );
+    }
+};
+
+template < typename PortsTuple >
+struct fgt_internal_output_helper<PortsTuple, 1> {
+    static void register_port( void* codeptr, void *node, PortsTuple &ports ) {
+        register_output_port( codeptr, node, &(std::get<0>(ports)), FLOW_OUTPUT_PORT_0 );
+    }
+};
+
+template< typename NodeType >
+void fgt_multioutput_node_desc( const NodeType *node, const char *desc ) {
+    void *addr = (void *)( static_cast< receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) );
+    itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc );
+}
+
+template< typename NodeType >
+void fgt_multiinput_multioutput_node_desc( const NodeType *node, const char *desc ) {
+    void *addr = const_cast<NodeType *>(node);
+    itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc );
+}
+
+template< typename NodeType >
+static inline void fgt_node_desc( const NodeType *node, const char *desc ) {
+    void *addr = (void *)( static_cast< sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) );
+    itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc );
+}
+
+static inline void fgt_graph_desc( const void *g, const char *desc ) {
+    void *addr = const_cast< void *>(g);
+    itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_GRAPH, FLOW_OBJECT_NAME, desc );
+}
+
+static inline void fgt_body( void *node, void *body ) {
+    itt_relation_add( d1::ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE );
+}
+
+template< int N, typename PortsTuple >
+static inline void fgt_multioutput_node(void* codeptr, string_resource_index t, void *g, void *input_port, PortsTuple &ports ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t );
+    fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 );
+    fgt_internal_output_helper<PortsTuple, N>::register_port(codeptr, input_port, ports );
+}
+
+template< int N, typename PortsTuple >
+static inline void fgt_multioutput_node_with_body( void* codeptr, string_resource_index t, void *g, void *input_port, PortsTuple &ports, void *body ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t );
+    fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 );
+    fgt_internal_output_helper<PortsTuple, N>::register_port( codeptr, input_port, ports );
+    fgt_body( input_port, body );
+}
+
+template< int N, typename PortsTuple >
+static inline void fgt_multiinput_node( void* codeptr, string_resource_index t, void *g, PortsTuple &ports, void *output_port) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t );
+    fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 );
+    fgt_internal_input_helper<PortsTuple, N>::register_port( output_port, ports );
+}
+
+static inline void fgt_multiinput_multioutput_node( void* codeptr, string_resource_index t, void *n, void *g ) {
+    itt_make_task_group( d1::ITT_DOMAIN_FLOW, n, FLOW_NODE, g, FLOW_GRAPH, t );
+    suppress_unused_warning( codeptr );
+#if __TBB_FLOW_TRACE_CODEPTR
+    if (codeptr != nullptr) {
+        
register_node_addr(d1::ITT_DOMAIN_FLOW, n, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif +} + +static inline void fgt_node( void* codeptr, string_resource_index t, void *g, void *output_port ) { + itt_make_task_group( d1::ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); +} + +static void fgt_node_with_body( void* codeptr, string_resource_index t, void *g, void *output_port, void *body ) { + itt_make_task_group( d1::ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_output_port(codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); + fgt_body( output_port, body ); +} + +static inline void fgt_node( void* codeptr, string_resource_index t, void *g, void *input_port, void *output_port ) { + fgt_node( codeptr, t, g, output_port ); + fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); +} + +static inline void fgt_node_with_body( void* codeptr, string_resource_index t, void *g, void *input_port, void *output_port, void *body ) { + fgt_node_with_body( codeptr, t, g, output_port, body ); + fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); +} + + +static inline void fgt_node( void* codeptr, string_resource_index t, void *g, void *input_port, void *decrement_port, void *output_port ) { + fgt_node( codeptr, t, g, input_port, output_port ); + fgt_internal_create_input_port( output_port, decrement_port, FLOW_INPUT_PORT_1 ); +} + +static inline void fgt_make_edge( void *output_port, void *input_port ) { + itt_relation_add( d1::ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT); +} + +static inline void fgt_remove_edge( void *output_port, void *input_port ) { + itt_relation_add( d1::ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT); +} + +static inline void fgt_graph( void *g ) { + itt_make_task_group( d1::ITT_DOMAIN_FLOW, g, FLOW_GRAPH, nullptr, FLOW_NULL, FLOW_GRAPH ); +} + +static inline void fgt_begin_body( void *body ) { + itt_task_begin( d1::ITT_DOMAIN_FLOW, body, FLOW_BODY, nullptr, FLOW_NULL, FLOW_BODY ); +} + +static inline void fgt_end_body( void * ) { + itt_task_end( d1::ITT_DOMAIN_FLOW ); +} + +static inline void fgt_async_try_put_begin( void *node, void *port ) { + itt_task_begin( d1::ITT_DOMAIN_FLOW, port, FLOW_OUTPUT_PORT, node, FLOW_NODE, FLOW_OUTPUT_PORT ); +} + +static inline void fgt_async_try_put_end( void *, void * ) { + itt_task_end( d1::ITT_DOMAIN_FLOW ); +} + +static inline void fgt_async_reserve( void *node, void *graph ) { + itt_region_begin( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_NULL ); +} + +static inline void fgt_async_commit( void *node, void * /*graph*/) { + itt_region_end( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE ); +} + +static inline void fgt_reserve_wait( void *graph ) { + itt_region_begin( d1::ITT_DOMAIN_FLOW, graph, FLOW_GRAPH, nullptr, FLOW_NULL, FLOW_NULL ); +} + +static inline void fgt_release_wait( void *graph ) { + itt_region_end( d1::ITT_DOMAIN_FLOW, graph, FLOW_GRAPH ); +} + +#else // TBB_USE_PROFILING_TOOLS + +#define CODEPTR() nullptr + +static inline void fgt_alias_port(void * /*node*/, void * /*p*/, bool /*visible*/ ) { } + +static inline void fgt_composite ( void* /*codeptr*/, void * /*node*/, void * /*graph*/ ) { } + +static inline void fgt_graph( void * /*g*/ ) { } + +template< typename NodeType > +static inline void 
fgt_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +template< typename NodeType > +static inline void fgt_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +static inline void fgt_graph_desc( const void * /*g*/, const char * /*desc*/ ) { } + +template< int N, typename PortsTuple > +static inline void fgt_multioutput_node( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { } + +template< int N, typename PortsTuple > +static inline void fgt_multioutput_node_with_body( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { } + +template< int N, typename PortsTuple > +static inline void fgt_multiinput_node( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { } + +static inline void fgt_multiinput_multioutput_node( void* /*codeptr*/, string_resource_index /*t*/, void * /*node*/, void * /*graph*/ ) { } + +static inline void fgt_node( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/ ) { } +static inline void fgt_node( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { } + +static inline void fgt_node_with_body( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { } +static inline void fgt_node_with_body( void* /*codeptr*/, string_resource_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { } + +static inline void fgt_make_edge( void * /*output_port*/, void * /*input_port*/ ) { } +static inline void fgt_remove_edge( void * /*output_port*/, void * /*input_port*/ ) { } + +static inline void fgt_begin_body( void * /*body*/ ) { } +static inline void fgt_end_body( void * /*body*/) { } + +static inline void fgt_async_try_put_begin( void * /*node*/, void * /*port*/ ) { } +static inline void fgt_async_try_put_end( void * /*node*/ , void * /*port*/ ) { } +static inline void fgt_async_reserve( void * /*node*/, void * /*graph*/ ) { } +static inline void fgt_async_commit( void * /*node*/, void * /*graph*/ ) { } +static inline void fgt_reserve_wait( void * /*graph*/ ) { } +static inline void fgt_release_wait( void * /*graph*/ ) { } + +template< typename NodeType > +void fgt_multiinput_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +template < typename PortsTuple, int N > +struct fgt_internal_input_alias_helper { + static void alias_port( void * /*node*/, PortsTuple & /*ports*/ ) { } +}; + +template < typename PortsTuple, int N > +struct fgt_internal_output_alias_helper { + static void alias_port( void * /*node*/, PortsTuple & /*ports*/ ) { } +}; + +#endif // TBB_USE_PROFILING_TOOLS + +} // d2 +} // namespace detail +} // namespace tbb + +#endif // _FGT_GRAPH_TRACE_IMPL_H diff --git a/src/tbb/include/oneapi/tbb/detail/_flow_graph_types_impl.h b/src/tbb/include/oneapi/tbb/detail/_flow_graph_types_impl.h new file mode 100644 index 000000000..c5634413b --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_flow_graph_types_impl.h @@ -0,0 +1,422 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB__flow_graph_types_impl_H
+#define __TBB__flow_graph_types_impl_H
+
+#ifndef __TBB_flow_graph_H
+#error Do not #include this internal file directly; use public TBB headers instead.
+#endif
+
+// included in namespace tbb::detail::d2
+
+// the change to key_matching (adding a K and KHash template parameter, making it a class)
+// means we have to pass this data to the key_matching_port. All the ports have only one
+// template parameter, so we have to wrap the following types in a trait:
+//
+// . K == key_type
+// . KHash == hash and compare for Key
+// . TtoK == function_body that given an object of T, returns its K
+// . T == type accepted by port, and stored in the hash table
+//
+// The port will have an additional parameter on node construction, which is a function_body
+// that accepts a const T& and returns a K which is the field in T which is its K.
+template<typename Kp, typename KHashp, typename Tp>
+struct KeyTrait {
+    typedef Kp K;
+    typedef Tp T;
+    typedef type_to_key_function_body<T,K> TtoK;
+    typedef KHashp KHash;
+};
+
+// wrap each element of a tuple in a template, and make a tuple of the result.
+template<template<class> class PT, typename TypeTuple>
+struct wrap_tuple_elements;
+
+// A wrapper that generates the traits needed for each port of a key-matching join,
+// and the type of the tuple of input ports.
+template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+struct wrap_key_tuple_elements;
+
+template<template<class> class PT, typename... Args>
+struct wrap_tuple_elements<PT, std::tuple<Args...> >{
+    typedef typename std::tuple<PT<Args>... > type;
+};
+
+template<template<class> class PT, typename KeyTraits, typename... Args>
+struct wrap_key_tuple_elements<PT, KeyTraits, std::tuple<Args...> > {
+    typedef typename KeyTraits::key_type K;
+    typedef typename KeyTraits::hash_compare_type KHash;
+    typedef typename std::tuple<PT<KeyTrait<K, KHash, Args> >... > type;
+};
+
+template< int... S > class sequence {};
+
+template< int N, int... S >
+struct make_sequence : make_sequence < N - 1, N - 1, S... > {};
+
+template< int... S >
+struct make_sequence < 0, S... > {
+    typedef sequence<S...> type;
+};
+
+template<typename U> struct alignment_of {
+    typedef struct { char t; U padded; } test_alignment;
+    static const size_t value = sizeof(test_alignment) - sizeof(U);
+};
+
+template <typename... Types>
+struct max_alignment_helper;
+
+template <typename T1, typename... Types>
+struct max_alignment_helper<T1, Types...> {
+    using type = typename max_alignment_helper<T1, typename max_alignment_helper<Types...>::type>::type;
+};
+
+template <typename T1, typename T2>
+struct max_alignment_helper<T1, T2> {
+    using type = typename std::conditional<alignof(T1) < alignof(T2), T2, T1>::type;
+};
+
+template <typename... Types>
+using max_alignment_helper_t = typename max_alignment_helper<Types...>::type;
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// #pragma warning(push)
+// #pragma warning(disable: 4324) // warning C4324: structure was padded due to alignment specifier
+#endif
+
+// T1, T2 are actual types stored. The space defined for T1 in the type returned
+// is a char array of the correct size. Type T2 should be trivially-constructible,
+// T1 must be explicitly managed.
+
+template<typename T1, typename T2>
+struct alignas(alignof(max_alignment_helper_t<T1, T2>)) aligned_pair {
+    char first[sizeof(T1)];
+    T2 second;
+};
+
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+template<typename T1, typename T2, typename T3>
+struct alignas(alignof(max_alignment_helper_t<T1, T2, T3>)) aligned_triple {
+    char first[sizeof(T1)];
+    T2 second;
+    T3 third;
+};
+#endif
+
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// #pragma warning(pop) // warning 4324 is back
+#endif
+
+// support for variant type
+// type we use when we're not storing a value
+struct default_constructed { };
+
+// type which contains another type, tests for what type is contained, and references to it.
+// Wrapper<T>
+// void CopyTo( void *newSpace) : builds a Wrapper<T> copy of itself in newSpace
+
+// struct to allow us to copy and test the type of objects
+struct WrapperBase {
+    virtual ~WrapperBase() {}
+    virtual void CopyTo(void* /*newSpace*/) const = 0;
+};
+
+// Wrapper<T> contains a T, with the ability to test what T is. The Wrapper<T> can be
+// constructed from a T, can be copy-constructed from another Wrapper<T>, and can be
+// examined via value(), but not modified.
+template<typename T>
+struct Wrapper: public WrapperBase {
+    typedef T value_type;
+    typedef T* pointer_type;
+private:
+    T value_space;
+public:
+    const value_type &value() const { return value_space; }
+
+private:
+    Wrapper();
+
+    // on exception will ensure the Wrapper will contain only a trivially-constructed object
+    struct _unwind_space {
+        pointer_type space;
+        _unwind_space(pointer_type p) : space(p) {}
+        ~_unwind_space() {
+            if(space) (void) new (space) Wrapper<default_constructed>(default_constructed());
+        }
+    };
+public:
+    explicit Wrapper( const T& other ) : value_space(other) { }
+    explicit Wrapper(const Wrapper& other) = delete;
+
+    void CopyTo(void* newSpace) const override {
+        _unwind_space guard((pointer_type)newSpace);
+        (void) new(newSpace) Wrapper(value_space);
+        guard.space = nullptr;
+    }
+    ~Wrapper() { }
+};
+
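What the alignment machinery above buys can be restated compactly with standard traits. The following is illustrative only (not TBB source; `more_aligned_t`, `Wide`, and `pair_storage` are invented names):

```cpp
#include <type_traits>

// Standalone restatement of the two-type case of max_alignment_helper:
template <typename T1, typename T2>
using more_aligned_t = typename std::conditional<alignof(T1) < alignof(T2), T2, T1>::type;

struct alignas(32) Wide { char c[32]; };

static_assert(std::is_same<more_aligned_t<char, Wide>, Wide>::value,
              "the type with the stricter alignment wins");

// An aligned_pair-style buffer: raw storage for T1 plus a live T2, aligned
// for whichever of the two types is stricter; T1 is later built in 'first'
// with placement new, exactly as Wrapper does above.
template <typename T1, typename T2>
struct alignas(alignof(more_aligned_t<T1, T2>)) pair_storage {
    char first[sizeof(T1)];
    T2 second;
};

static_assert(alignof(pair_storage<Wide, char>) == 32, "storage adopts the max alignment");

int main() {}
```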
+// specialization for array objects
+template<typename T, size_t N>
+struct Wrapper<T[N]> : public WrapperBase {
+    typedef T value_type;
+    typedef T* pointer_type;
+    // space must be untyped.
+    typedef T ArrayType[N];
+private:
+    // The space is not of type T[N] because when copy-constructing, it would be
+    // default-initialized and then copied to in some fashion, resulting in two
+    // constructions and one destruction per element. If the type is char[ ], we
+    // placement new into each element, resulting in one construction per element.
+    static const size_t space_size = sizeof(ArrayType);
+    char value_space[space_size];
+
+
+    // on exception will ensure the already-built objects will be destructed
+    // (the value_space is a char array, so it is already trivially-destructible.)
+    struct _unwind_class {
+        pointer_type space;
+        int already_built;
+        _unwind_class(pointer_type p) : space(p), already_built(0) {}
+        ~_unwind_class() {
+            if(space) {
+                for(size_t i = already_built; i > 0 ; --i ) space[i-1].~value_type();
+                (void) new(space) Wrapper<default_constructed>(default_constructed());
+            }
+        }
+    };
+public:
+    const ArrayType &value() const {
+        char *vp = const_cast<char *>(value_space);
+        return reinterpret_cast<ArrayType &>(*vp);
+    }
+
+private:
+    Wrapper();
+public:
+    // have to explicitly construct because other decays to a const value_type*
+    explicit Wrapper(const ArrayType& other) {
+        _unwind_class guard((pointer_type)value_space);
+        pointer_type vp = reinterpret_cast<pointer_type>(&value_space);
+        for(size_t i = 0; i < N; ++i ) {
+            (void) new(vp++) value_type(other[i]);
+            ++(guard.already_built);
+        }
+        guard.space = nullptr;
+    }
+    explicit Wrapper(const Wrapper& other) : WrapperBase() {
+        // we have to do the heavy lifting to copy contents
+        _unwind_class guard((pointer_type)value_space);
+        pointer_type dp = reinterpret_cast<pointer_type>(value_space);
+        pointer_type sp = reinterpret_cast<pointer_type>(const_cast<char *>(other.value_space));
+        for(size_t i = 0; i < N; ++i, ++dp, ++sp) {
+            (void) new(dp) value_type(*sp);
+            ++(guard.already_built);
+        }
+        guard.space = nullptr;
+    }
+
+    void CopyTo(void* newSpace) const override {
+        (void) new(newSpace) Wrapper(*this); // exceptions handled in copy constructor
+    }
+
+    ~Wrapper() {
+        // have to destroy explicitly in reverse order
+        pointer_type vp = reinterpret_cast<pointer_type>(&value_space);
+        for(size_t i = N; i > 0 ; --i ) vp[i-1].~value_type();
+    }
+};
+
+// given a tuple, return the type of the element that has the maximum alignment requirement.
+// Given a tuple and that type, return the number of elements of the object with the max
+// alignment requirement that is at least as big as the largest object in the tuple.
+
+template<bool Z, typename T1, typename T2> struct pick_one;
+template<typename T1, typename T2> struct pick_one<true , T1, T2> { typedef T1 type; };
+template<typename T1, typename T2> struct pick_one<false, T1, T2> { typedef T2 type; };
+
+template< template<class> class Selector, typename T1, typename T2 >
+struct pick_max {
+    typedef typename pick_one< (Selector<T1>::value > Selector<T2>::value), T1, T2 >::type type;
+};
+
+template<typename T> struct size_of { static const int value = sizeof(T); };
+
+template< size_t N, class Tuple, template<class> class Selector > struct pick_tuple_max {
+    typedef typename pick_tuple_max<N-1, Tuple, Selector>::type LeftMaxType;
+    typedef typename std::tuple_element<N-1, Tuple>::type ThisType;
+    typedef typename pick_max<Selector, LeftMaxType, ThisType>::type type;
+};
+
+template< class Tuple, template<class> class Selector > struct pick_tuple_max<0, Tuple, Selector> {
+    typedef typename std::tuple_element<0, Tuple>::type type;
+};
+
+// is the specified type included in a tuple?
+template<class Q, size_t N, class Tuple>
+struct is_element_of {
+    typedef typename std::tuple_element<N, Tuple>::type T_i;
+    static const bool value = std::is_same<Q,T_i>::value || is_element_of<Q,N-1,Tuple>::value;
+};
+
+template<class Q, class Tuple>
+struct is_element_of<Q,0,Tuple> {
+    typedef typename std::tuple_element<0, Tuple>::type T_i;
+    static const bool value = std::is_same<Q,T_i>::value;
+};
+
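The recursive membership test above is easiest to see with concrete arguments. A standalone restatement (illustrative only, not TBB source; `contains` and `Allowed` are invented names) with compile-time checks:

```cpp
#include <tuple>
#include <type_traits>

// Same walk as is_element_of above: compare Q against element N, recurse toward 0.
template <class Q, std::size_t N, class Tuple>
struct contains : std::integral_constant<bool,
    std::is_same<Q, typename std::tuple_element<N, Tuple>::type>::value ||
    contains<Q, N - 1, Tuple>::value> {};

template <class Q, class Tuple>
struct contains<Q, 0, Tuple>
    : std::is_same<Q, typename std::tuple_element<0, Tuple>::type> {};

using Allowed = std::tuple<int, double, char*>;
static_assert(contains<double, std::tuple_size<Allowed>::value - 1, Allowed>::value,
              "double is an element of the tuple");
static_assert(!contains<float, std::tuple_size<Allowed>::value - 1, Allowed>::value,
              "float is not, so do_if below would select the error branch");

int main() {}
```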
+// allow the construction of types that are listed in the tuple. If a disallowed type
+// construction is written, a method involving this type is created. The
+// type has no definition, so a syntax error is generated.
+template<typename T> struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple;
+
+template<typename T, bool BUILD_IT> struct do_if;
+template<typename T>
+struct do_if<T, true> {
+    static void construct(void *mySpace, const T& x) {
+        (void) new(mySpace) Wrapper<T>(x);
+    }
+};
+template<typename T>
+struct do_if<T, false> {
+    static void construct(void * /*mySpace*/, const T& x) {
+        // This method is instantiated when the type T does not match any of the
+        // element types in the Tuple in variant<Tuple>.
+        ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple<T>::bad_type(x);
+    }
+};
+
+// Tuple tells us the allowed types that variant can hold. It determines the alignment of the space in
+// Wrapper, and how big Wrapper is.
+//
+// the object can only be tested for type, and a read-only reference can be fetched by cast_to().
+
+using tbb::detail::punned_cast;
+struct tagged_null_type {};
+template<typename TagType, typename T0, typename T1=tagged_null_type, typename T2=tagged_null_type, typename T3=tagged_null_type,
+                           typename T4=tagged_null_type, typename T5=tagged_null_type, typename T6=tagged_null_type,
+                           typename T7=tagged_null_type, typename T8=tagged_null_type, typename T9=tagged_null_type>
+class tagged_msg {
+    typedef std::tuple<T0, T1, T2, T3, T4
+    #if __TBB_VARIADIC_MAX >= 6
+        , T5
+    #endif
+    #if __TBB_VARIADIC_MAX >= 7
+        , T6
+    #endif
+    #if __TBB_VARIADIC_MAX >= 8
+        , T7
+    #endif
+    #if __TBB_VARIADIC_MAX >= 9
+        , T8
+    #endif
+    #if __TBB_VARIADIC_MAX >= 10
+        , T9
+    #endif
+    > Tuple;
+
+private:
+    class variant {
+        static const size_t N = std::tuple_size<Tuple>::value;
+        typedef typename pick_tuple_max<N, Tuple, alignment_of>::type AlignType;
+        typedef typename pick_tuple_max<N, Tuple, size_of>::type MaxSizeType;
+        static const size_t MaxNBytes = (sizeof(Wrapper<MaxSizeType>)+sizeof(AlignType)-1);
+        static const size_t MaxNElements = MaxNBytes/sizeof(AlignType);
+        typedef aligned_space<AlignType, MaxNElements> SpaceType;
+        SpaceType my_space;
+        static const size_t MaxSize = sizeof(SpaceType);
+
+    public:
+        variant() { (void) new(&my_space) Wrapper<default_constructed>(default_constructed()); }
+
+        template<typename T>
+        variant( const T& x ) {
+            do_if<T, is_element_of<T, N-1, Tuple>::value>::construct(&my_space,x);
+        }
+
+        variant(const variant& other) {
+            const WrapperBase * h = punned_cast<const WrapperBase *>(&(other.my_space));
+            h->CopyTo(&my_space);
+        }
+
+        // assignment must destroy and re-create the Wrapper type, as there is no way
+        // to create a Wrapper-to-Wrapper assign even if we find they agree in type.
+        void operator=( const variant& rhs ) {
+            if(&rhs != this) {
+                WrapperBase *h = punned_cast<WrapperBase *>(&my_space);
+                h->~WrapperBase();
+                const WrapperBase *ch = punned_cast<const WrapperBase *>(&(rhs.my_space));
+                ch->CopyTo(&my_space);
+            }
+        }
+
+        template<typename U>
+        const U& variant_cast_to() const {
+            const Wrapper<U> *h = dynamic_cast<const Wrapper<U>*>(punned_cast<const WrapperBase *>(&my_space));
+            if(!h) {
+                throw_exception(exception_id::bad_tagged_msg_cast);
+            }
+            return h->value();
+        }
+        template<typename U>
+        bool variant_is_a() const { return dynamic_cast<const Wrapper<U>*>(punned_cast<const WrapperBase *>(&my_space)) != nullptr; }
+
+        bool variant_is_default_constructed() const {return variant_is_a<default_constructed>();}
+
+        ~variant() {
+            WrapperBase *h = punned_cast<WrapperBase *>(&my_space);
+            h->~WrapperBase();
+        }
+    }; //class variant
+
+    TagType my_tag;
+    variant my_msg;
+
+public:
+    tagged_msg(): my_tag(TagType(~0)), my_msg(){}
+
+    template<typename T, typename R>
+    tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {}
+
+    template<typename T, typename R, size_t N>
+    tagged_msg(T const &index,  R (&value)[N]) : my_tag(index), my_msg(value) {}
+
+    void set_tag(TagType const &index) {my_tag = index;}
+    TagType tag() const {return my_tag;}
+
+    template<typename V>
+    const V& cast_to() const {return my_msg.template variant_cast_to<V>();}
+
+    template<typename V>
+    bool is_a() const {return my_msg.template variant_is_a<V>();}
+
+    bool is_default_constructed() const {return my_msg.variant_is_default_constructed();}
+}; //class tagged_msg
+
+// template to simplify cast and test for tagged_msg in template contexts
+template<typename V, typename T>
+const V& cast_to(T const &t) { return t.template cast_to<V>(); }
+
+template<typename V, typename T>
+bool is_a(T const &t) { return t.template is_a<V>(); }
+
+enum op_stat { WAIT = 0, SUCCEEDED, FAILED };
+
+#endif /* __TBB__flow_graph_types_impl_H */
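tagged_msg is essentially a pre-C++17 tagged union: a tag identifying the originating port plus a payload restricted to a fixed type list. Its observable behavior can be restated with C++17 `std::variant` (illustrative comparison only, not how TBB implements it):

```cpp
#include <cassert>
#include <variant>

int main() {
    // monostate plays the role of default_constructed above.
    using payload = std::variant<std::monostate, int, double>;

    int tag = 1;          // ~ tagged_msg::tag(): which port the message came from
    payload msg = 42;     // ~ tagged_msg(tag, 42)

    assert(std::holds_alternative<int>(msg)); // ~ msg.is_a<int>()
    assert(std::get<int>(msg) == 42);         // ~ cast_to<int>(msg)

    bool threw = false;
    try { (void) std::get<double>(msg); }     // wrong type requested...
    catch (const std::bad_variant_access&) { threw = true; } // ~ bad_tagged_msg_cast
    assert(threw && tag == 1);
}
```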
diff --git a/src/tbb/include/oneapi/tbb/detail/_hash_compare.h b/src/tbb/include/oneapi/tbb/detail/_hash_compare.h
new file mode 100644
index 000000000..2060a2c1b
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_hash_compare.h
@@ -0,0 +1,147 @@
+/*
+    Copyright (c) 2005-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_detail__hash_compare_H
+#define __TBB_detail__hash_compare_H
+
+#include <functional>
+
+#include "_containers_helpers.h"
+
+namespace tbb {
+namespace detail {
+namespace d1 {
+
+template <typename Key, typename Hash, typename KeyEqual>
+class hash_compare {
+    using is_transparent_hash = has_transparent_key_equal<Key, Hash, KeyEqual>;
+public:
+    using hasher = Hash;
+    using key_equal = typename is_transparent_hash::type;
+
+    hash_compare() = default;
+    hash_compare( hasher hash, key_equal equal ) : my_hasher(hash), my_equal(equal) {}
+
+    std::size_t operator()( const Key& key ) const {
+        return std::size_t(my_hasher(key));
+    }
+
+    bool operator()( const Key& key1, const Key& key2 ) const {
+        return my_equal(key1, key2);
+    }
+
+    template <typename K, typename = typename std::enable_if<is_transparent_hash::value, K>::type>
+    std::size_t operator()( const K& key ) const {
+        return std::size_t(my_hasher(key));
+    }
+
+    template <typename K1, typename K2, typename = typename std::enable_if<is_transparent_hash::value, K1>::type>
+    bool operator()( const K1& key1, const K2& key2 ) const {
+        return my_equal(key1, key2);
+    }
+
+    hasher hash_function() const {
+        return my_hasher;
+    }
+
+    key_equal key_eq() const {
+        return my_equal;
+    }
+
+
+private:
+    hasher my_hasher;
+    key_equal my_equal;
+}; // class hash_compare
+
+//! hash_compare that is default argument for concurrent_hash_map
+template <typename Key>
+class tbb_hash_compare {
+public:
+    std::size_t hash( const Key& a ) const { return my_hash_func(a); }
+#if defined(_MSC_VER) && _MSC_VER <= 1900
+// #pragma warning (push)
+// MSVC 2015 throws a strange warning: 'std::size_t': forcing value to bool 'true' or 'false'
+// #pragma warning (disable: 4800)
+#endif
+    bool equal( const Key& a, const Key& b ) const { return my_key_equal(a, b); }
+#if defined(_MSC_VER) && _MSC_VER <= 1900
+// #pragma warning (pop)
+#endif
+private:
+    std::hash<Key> my_hash_func;
+    std::equal_to<Key> my_key_equal;
+};
+
+} // namespace d1
+#if __TBB_CPP20_CONCEPTS_PRESENT
+inline namespace d0 {
+
+template <typename HashCompare, typename Key>
+concept hash_compare = std::copy_constructible<HashCompare> &&
+                       requires( const std::remove_reference_t<HashCompare>& hc, const Key& key1, const Key& key2 ) {
+                           { hc.hash(key1) } -> std::same_as<std::size_t>;
+                           { hc.equal(key1, key2) } -> std::convertible_to<bool>;
+                       };
+
+} // namespace d0
+#endif // __TBB_CPP20_CONCEPTS_PRESENT
+} // namespace detail
+} // namespace tbb
+
+#if TBB_DEFINE_STD_HASH_SPECIALIZATIONS
+
+namespace std {
+
+template <typename P1, typename P2>
+struct hash<std::pair<P1, P2>> {
+public:
+    std::size_t operator()( const std::pair<P1, P2>& p ) const {
+        return first_hash(p.first) ^ second_hash(p.second);
+    }
+
+private:
+    std::hash<P1> first_hash;
+    std::hash<P2> second_hash;
+}; // struct hash<std::pair>
+
+// Apple clang and MSVC defines their own specializations for std::hash<std::basic_string>
+#if !(_LIBCPP_VERSION) && !(_CPPLIB_VER)
+
+template <typename CharT, typename Traits, typename Allocator>
+struct hash<std::basic_string<CharT, Traits, Allocator>> {
+public:
+    std::size_t operator()( const std::basic_string<CharT, Traits, Allocator>& s ) const {
+        std::size_t h = 0;
+        for ( const CharT* c = s.c_str(); *c; ++c ) {
+            h = h * hash_multiplier ^ char_hash(*c);
+        }
+        return h;
+    }
+
+private:
+    static constexpr std::size_t hash_multiplier = tbb::detail::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value;
+
+    std::hash<CharT> char_hash;
+}; // struct hash<std::basic_string>
+
+#endif // !(_LIBCPP_VERSION || _CPPLIB_VER)
+
+} // namespace std
+
+#endif // TBB_DEFINE_STD_HASH_SPECIALIZATIONS
+
+#endif // __TBB_detail__hash_compare_H
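The string specialization above uses multiplicative (Fibonacci) hashing: 2654435769 and 11400714819323198485 are 2^32/phi and 2^64/phi, and `select_size_t_constant` picks the one matching the width of `std::size_t`. A standalone restatement of the same mixing loop (illustrative only; `fib_string_hash` is an invented name):

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

std::size_t fib_string_hash(const std::string& s) {
    constexpr std::size_t multiplier =
        sizeof(std::size_t) == 8 ? 11400714819323198485ULL : 2654435769U;
    std::hash<char> char_hash;
    std::size_t h = 0;
    for (char c : s) h = h * multiplier ^ char_hash(c); // spread bits, fold in next char
    return h;
}

int main() {
    std::cout << fib_string_hash("tbb") << '\n';
}
```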
diff --git a/src/tbb/include/oneapi/tbb/detail/_intrusive_list_node.h b/src/tbb/include/oneapi/tbb/detail/_intrusive_list_node.h
new file mode 100644
index 000000000..69286c8fa
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_intrusive_list_node.h
@@ -0,0 +1,41 @@
+/*
+    Copyright (c) 2005-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef _TBB_detail__intrusive_list_node_H
+#define _TBB_detail__intrusive_list_node_H
+
+namespace tbb {
+namespace detail {
+namespace d1 {
+
+//! Data structure to be inherited by the types that can form intrusive lists.
+/** Intrusive list is formed by means of the member_intrusive_list<T> template class.
+    Note that type T must derive from intrusive_list_node either publicly or
+    declare instantiation member_intrusive_list<T> as a friend.
+    This class implements a limited subset of std::list interface. **/
+struct intrusive_list_node {
+    intrusive_list_node* my_prev_node{};
+    intrusive_list_node* my_next_node{};
+#if TBB_USE_ASSERT
+    intrusive_list_node() { my_prev_node = my_next_node = this; }
+#endif /* TBB_USE_ASSERT */
+};
+
+} // namespace d1
+} // namespace detail
+} // namespace tbb
+
+#endif // _TBB_detail__intrusive_list_node_H
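The point of an intrusive node is that the link lives inside the element, so linking and unlinking never allocate and an element can detach itself. A minimal member-intrusive sketch in that spirit (illustrative only, not TBB source; all names are invented):

```cpp
#include <cassert>

struct list_node {
    list_node* prev;
    list_node* next;
};

struct task_item {
    int id;
    list_node node; // embedded link; the list neither owns nor allocates elements
};

// Insert n before pos in a circular doubly-linked list anchored at a sentinel.
inline void link_before(list_node* pos, list_node* n) {
    n->prev = pos->prev; n->next = pos;
    pos->prev->next = n; pos->prev = n;
}

inline void unlink(list_node* n) {
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->prev = n->next = n; // self-linked == detached, like the TBB_USE_ASSERT ctor above
}

int main() {
    list_node head{&head, &head};  // empty circular list: sentinel points at itself
    task_item a{1, {}}, b{2, {}};
    link_before(&head, &a.node);
    link_before(&head, &b.node);
    unlink(&a.node);
    assert(head.next == &b.node && b.node.next == &head);
}
```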
diff --git a/src/tbb/include/oneapi/tbb/detail/_machine.h b/src/tbb/include/oneapi/tbb/detail/_machine.h
new file mode 100644
index 000000000..ca481380c
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_machine.h
@@ -0,0 +1,373 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_detail__machine_H
+#define __TBB_detail__machine_H
+
+#include "_config.h"
+#include "_assert.h"
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <cstring>
+
+#ifdef _WIN32
+#include <intrin.h>
+#ifdef __TBBMALLOC_BUILD
+#define WIN32_LEAN_AND_MEAN
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h> // SwitchToThread()
+#endif
+#ifdef _MSC_VER
+#if __TBB_x86_64 || __TBB_x86_32
+#pragma intrinsic(__rdtsc)
+#endif
+#endif
+#endif
+#if __TBB_x86_64 || __TBB_x86_32
+#include <immintrin.h> // _mm_pause
+#endif
+#if (_WIN32)
+#include <float.h> // _control87
+#endif
+
+#if __TBB_GLIBCXX_THIS_THREAD_YIELD_BROKEN
+#include <sched.h> // sched_yield
+#else
+#include <thread> // std::this_thread::yield()
+#endif
+
+namespace tbb {
+namespace detail {
+inline namespace d0 {
+
+//--------------------------------------------------------------------------------------------------
+// Yield implementation
+//--------------------------------------------------------------------------------------------------
+
+#if __TBB_GLIBCXX_THIS_THREAD_YIELD_BROKEN
+static inline void yield() {
+    int err = sched_yield();
+    __TBB_ASSERT_EX(err == 0, "sched_yield has failed");
+}
+#elif __TBBMALLOC_BUILD && _WIN32
+// Use Windows API for yield in tbbmalloc to avoid dependency on C++ runtime with some implementations.
+static inline void yield() {
+    SwitchToThread();
+}
+#else
+using std::this_thread::yield;
+#endif
+
+//--------------------------------------------------------------------------------------------------
+// atomic_fence_seq_cst implementation
+//--------------------------------------------------------------------------------------------------
+
+static inline void atomic_fence_seq_cst() {
+#if (__TBB_x86_64 || __TBB_x86_32) && defined(__GNUC__) && __GNUC__ < 11
+    unsigned char dummy = 0u;
+    __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) :: "memory");
+#else
+    std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+//--------------------------------------------------------------------------------------------------
+// Pause implementation
+//--------------------------------------------------------------------------------------------------
+
+static inline void machine_pause(int32_t delay) {
+#if __TBB_x86_64 || __TBB_x86_32
+    while (delay-- > 0) { _mm_pause(); }
+#elif __ARM_ARCH_7A__ || __aarch64__
+    while (delay-- > 0) { __asm__ __volatile__("isb sy" ::: "memory"); }
+#else /* Generic */
+    (void)delay; // suppress without including _template_helpers.h
+    yield();
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// tbb::detail::log2() implementation
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// TODO: Use log2p1() function that will be available in C++20 standard
+
+#if defined(__GNUC__) || defined(__clang__)
+namespace gnu_builtins {
+    inline uintptr_t clz(unsigned int x) { return static_cast<uintptr_t>(__builtin_clz(x)); }
+    inline uintptr_t clz(unsigned long int x) { return static_cast<uintptr_t>(__builtin_clzl(x)); }
+    inline uintptr_t clz(unsigned long long int x) { return static_cast<uintptr_t>(__builtin_clzll(x)); }
+}
+#elif defined(_MSC_VER)
+#pragma intrinsic(__TBB_W(_BitScanReverse))
+namespace msvc_intrinsics {
+    static inline uintptr_t bit_scan_reverse(uintptr_t i) {
+        unsigned long j;
+        __TBB_W(_BitScanReverse)( &j, i );
+        return j;
+    }
+}
+#endif
+
+template <typename T>
+constexpr std::uintptr_t number_of_bits() {
+    return sizeof(T) * CHAR_BIT;
+}
+
+// logarithm is the index of the most significant non-zero bit
+static inline uintptr_t machine_log2(uintptr_t x) {
+#if defined(__GNUC__) || defined(__clang__)
+    // If P is a power of 2 and x<P, then (P-1)-x == (P-1) XOR x
+    return (number_of_bits<decltype(x)>() - 1) ^ gnu_builtins::clz(x);
+#elif defined(_MSC_VER)
+    return msvc_intrinsics::bit_scan_reverse(x);
+#elif __i386__ || __i386 /*for Sun OS*/ || __MINGW32__
+    uintptr_t j, i = x;
+    __asm__("bsr %1,%0" : "=r"(j) : "r"(i));
+    return j;
+#elif __powerpc__ || __POWERPC__
+    #if __TBB_WORDSIZE==8
+    __asm__ __volatile__ ("cntlzd %0,%0" : "+r"(x));
+    return 63 - static_cast<intptr_t>(x);
+    #else
+    __asm__ __volatile__ ("cntlzw %0,%0" : "+r"(x));
+    return 31 - static_cast<intptr_t>(x);
+    #endif /*__TBB_WORDSIZE*/
+#elif __sparc
+    uint64_t count;
+    // one hot encode
+    x |= (x >> 1);
+    x |= (x >> 2);
+    x |= (x >> 4);
+    x |= (x >> 8);
+    x |= (x >> 16);
+    x |= (x >> 32);
+    // count 1's
+    __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) );
+    return count - 1;
+#else
+    intptr_t result = 0;
+
+    if( sizeof(x) > 4 && (uintptr_t tmp = x >> 32) ) { x = tmp; result += 32; }
+    if( uintptr_t tmp = x >> 16 ) { x = tmp; result += 16; }
+    if( uintptr_t tmp = x >> 8 )  { x = tmp; result += 8; }
+    if( uintptr_t tmp = x >> 4 )  { x = tmp; result += 4; }
+    if( uintptr_t tmp = x >> 2 )  { x = tmp; result += 2; }
+
+    return (x & 2) ? result + 1 : result;
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// tbb::detail::reverse_bits() implementation
+////////////////////////////////////////////////////////////////////////////////////////////////////
+#if TBB_USE_CLANG_BITREVERSE_BUILTINS
+namespace llvm_builtins {
+    inline uint8_t  builtin_bitreverse(uint8_t  x) { return __builtin_bitreverse8 (x); }
+    inline uint16_t builtin_bitreverse(uint16_t x) { return __builtin_bitreverse16(x); }
+    inline uint32_t builtin_bitreverse(uint32_t x) { return __builtin_bitreverse32(x); }
+    inline uint64_t builtin_bitreverse(uint64_t x) { return __builtin_bitreverse64(x); }
+}
+#else // generic
+template<typename T>
+struct reverse {
+    static const T byte_table[256];
+};
+
+template<typename T>
+const T reverse<T>::byte_table[256] = {
+    0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
+    0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
+    0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
+    0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
+    0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
+    0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
+    0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
+    0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
+    0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
+    0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
+    0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
+    0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
+    0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
+    0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
+    0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
+    0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
+};
+
+inline unsigned char reverse_byte(unsigned char src) {
+    return reverse<unsigned char>::byte_table[src];
+}
+#endif // TBB_USE_CLANG_BITREVERSE_BUILTINS
+
+template<typename T>
+T machine_reverse_bits(T src) {
+#if TBB_USE_CLANG_BITREVERSE_BUILTINS
+    return builtin_bitreverse(fixed_width_cast(src));
+#else /* Generic */
+    T dst;
+    unsigned char *original = reinterpret_cast<unsigned char *>(&src);
+    unsigned char *reversed = reinterpret_cast<unsigned char *>(&dst);
+
+    for ( int i = sizeof(T) - 1; i >= 0; i-- ) {
+        reversed[i] = reverse_byte( original[sizeof(T) - i - 1] );
+    }
+
+    return dst;
+#endif // TBB_USE_CLANG_BITREVERSE_BUILTINS
+}
+
+} // inline namespace d0
+
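The GCC/clang branch of machine_log2 leans on the identity that, for x > 0, floor(log2(x)) equals (bits - 1) - clz(x), and because bits - 1 is all ones the subtraction can be done as XOR. A quick standalone check of that identity (illustrative only; requires a compiler with `__builtin_clzl`):

```cpp
#include <cassert>
#include <climits>

int main() {
    for (unsigned long x = 1; x < 100000; ++x) {
        unsigned long expected = 0;
        for (unsigned long t = x; t >>= 1; ) ++expected;  // naive floor(log2(x))
        const unsigned long bits = sizeof(x) * CHAR_BIT;
        // (bits-1) is a power-of-two minus one, so (bits-1) - clz == (bits-1) ^ clz
        assert(((bits - 1) ^ (unsigned long)__builtin_clzl(x)) == expected);
    }
}
```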
+namespace d1 {
+
+#if (_WIN32)
+// API to retrieve/update FPU control setting
+#define __TBB_CPU_CTL_ENV_PRESENT 1
+struct cpu_ctl_env {
+    unsigned int x87cw{};
+#if (__TBB_x86_64)
+    // Changing the infinity mode or the floating-point precision is not supported on x64.
+    // The attempt causes an assertion. See
+    // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/control87-controlfp-control87-2
+    static constexpr unsigned int X87CW_CONTROL_MASK = _MCW_DN | _MCW_EM | _MCW_RC;
+#else
+    static constexpr unsigned int X87CW_CONTROL_MASK = ~0U;
+#endif
+#if (__TBB_x86_32 || __TBB_x86_64)
+    unsigned int mxcsr{};
+    static constexpr unsigned int MXCSR_CONTROL_MASK = ~0x3fu; /* all except last six status bits */
+#endif
+
+    bool operator!=( const cpu_ctl_env& ctl ) const {
+        return
+#if (__TBB_x86_32 || __TBB_x86_64)
+            mxcsr != ctl.mxcsr ||
+#endif
+            x87cw != ctl.x87cw;
+    }
+    void get_env() {
+        x87cw = _control87(0, 0);
+#if (__TBB_x86_32 || __TBB_x86_64)
+        mxcsr = _mm_getcsr();
+#endif
+    }
+    void set_env() const {
+        _control87(x87cw, X87CW_CONTROL_MASK);
+#if (__TBB_x86_32 || __TBB_x86_64)
+        _mm_setcsr(mxcsr & MXCSR_CONTROL_MASK);
+#endif
+    }
+};
+#elif (__TBB_x86_32 || __TBB_x86_64)
+// API to retrieve/update FPU control setting
+#define __TBB_CPU_CTL_ENV_PRESENT 1
+struct cpu_ctl_env {
+    int mxcsr{};
+    short x87cw{};
+    static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */
+
+    bool operator!=(const cpu_ctl_env& ctl) const {
+        return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw;
+    }
+    void get_env() {
+        __asm__ __volatile__(
+            "stmxcsr %0\n\t"
+            "fstcw %1"
+            : "=m"(mxcsr), "=m"(x87cw)
+        );
+        mxcsr &= MXCSR_CONTROL_MASK;
+    }
+    void set_env() const {
+        __asm__ __volatile__(
+            "ldmxcsr %0\n\t"
+            "fldcw %1"
+            : : "m"(mxcsr), "m"(x87cw)
+        );
+    }
+};
+#endif
+
+} // namespace d1
+
+} // namespace detail
+} // namespace tbb
+
+#if !__TBB_CPU_CTL_ENV_PRESENT
+#include <fenv.h>
+
+#include <cstring>
+
+namespace tbb {
+namespace detail {
+
+namespace r1 {
+void* __TBB_EXPORTED_FUNC cache_aligned_allocate(std::size_t size);
+void __TBB_EXPORTED_FUNC cache_aligned_deallocate(void* p);
+} // namespace r1
+
+namespace d1 {
+
+class cpu_ctl_env {
+    fenv_t *my_fenv_ptr;
+public:
+    cpu_ctl_env() : my_fenv_ptr(nullptr) {}
+    ~cpu_ctl_env() {
+        if ( my_fenv_ptr )
+            r1::cache_aligned_deallocate( (void*)my_fenv_ptr );
+    }
+    // It is possible not to copy memory but just to copy pointers but the following issues should be addressed:
+    // 1. The arena lifetime and the context lifetime are independent;
+    // 2. The user is allowed to recapture different FPU settings to context so 'current FPU settings' inside
+    //    dispatch loop may become invalid.
+    // But do we really want to improve the fenv implementation? It seems to be better to replace the fenv implementation
+    // with a platform specific implementation.
+    cpu_ctl_env( const cpu_ctl_env &src ) : my_fenv_ptr(nullptr) {
+        *this = src;
+    }
+    cpu_ctl_env& operator=( const cpu_ctl_env &src ) {
+        __TBB_ASSERT( src.my_fenv_ptr, nullptr);
+        if ( !my_fenv_ptr )
+            my_fenv_ptr = (fenv_t*)r1::cache_aligned_allocate(sizeof(fenv_t));
+        *my_fenv_ptr = *src.my_fenv_ptr;
+        return *this;
+    }
+    bool operator!=( const cpu_ctl_env &ctl ) const {
+        __TBB_ASSERT( my_fenv_ptr, "cpu_ctl_env is not initialized." );
+        __TBB_ASSERT( ctl.my_fenv_ptr, "cpu_ctl_env is not initialized." );
+        return std::memcmp( (void*)my_fenv_ptr, (void*)ctl.my_fenv_ptr, sizeof(fenv_t) );
+    }
+    void get_env () {
+        if ( !my_fenv_ptr )
+            my_fenv_ptr = (fenv_t*)r1::cache_aligned_allocate(sizeof(fenv_t));
+        fegetenv( my_fenv_ptr );
+    }
+    const cpu_ctl_env& set_env () const {
+        __TBB_ASSERT( my_fenv_ptr, "cpu_ctl_env is not initialized." );
+        fesetenv( my_fenv_ptr );
+        return *this;
+    }
+};
+
+} // namespace d1
+} // namespace detail
+} // namespace tbb
+
+#endif /* !__TBB_CPU_CTL_ENV_PRESENT */
+
+#endif // __TBB_detail__machine_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_mutex_common.h b/src/tbb/include/oneapi/tbb/detail/_mutex_common.h
new file mode 100644
index 000000000..4650c1926
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_mutex_common.h
@@ -0,0 +1,61 @@
+/*
+    Copyright (c) 2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_detail__mutex_common_H
+#define __TBB_detail__mutex_common_H
+
+#include "_config.h"
+#include "_utils.h"
+
+#if __TBB_CPP20_CONCEPTS_PRESENT
+#include <concepts>
+
+namespace tbb {
+namespace detail {
+inline namespace d0 {
+
+template <typename Lock, typename Mutex>
+concept mutex_scoped_lock = std::default_initializable<Lock> &&
+                            std::constructible_from<Lock, Mutex&> &&
+                            requires( Lock& lock, Mutex& mutex ) {
+                                lock.acquire(mutex);
+                                { lock.try_acquire(mutex) } -> adaptive_same_as<bool>;
+                                lock.release();
+                            };
+
+template <typename Lock, typename Mutex>
+concept rw_mutex_scoped_lock = mutex_scoped_lock<Lock, Mutex> &&
+                               std::constructible_from<Lock, Mutex&, bool> &&
+                               requires( Lock& lock, Mutex& mutex ) {
+                                   lock.acquire(mutex, false);
+                                   { lock.try_acquire(mutex, false) } -> adaptive_same_as<bool>;
+                                   { lock.upgrade_to_writer() } -> adaptive_same_as<bool>;
+                                   { lock.downgrade_to_reader() } -> adaptive_same_as<bool>;
+                               };
+
+template <typename Mutex>
+concept scoped_lockable = mutex_scoped_lock<typename Mutex::scoped_lock, Mutex>;
+
+template <typename Mutex>
+concept rw_scoped_lockable = scoped_lockable<Mutex> &&
+                             rw_mutex_scoped_lock<typename Mutex::scoped_lock, Mutex>;
+
+} // namespace d0
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_CPP20_CONCEPTS_PRESENT
+#endif // __TBB_detail__mutex_common_H
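A type satisfies these concepts by exposing a nested `scoped_lock` with `acquire`/`try_acquire`/`release`. A toy mutex of that shape (illustrative only, not TBB source; restated with `std::same_as` in place of TBB's internal `adaptive_same_as`):

```cpp
#include <concepts>
#include <mutex>

template <typename Lock, typename Mutex>
concept mutex_scoped_lock_like =
    std::default_initializable<Lock> &&
    std::constructible_from<Lock, Mutex&> &&
    requires( Lock& lock, Mutex& mutex ) {
        lock.acquire(mutex);
        { lock.try_acquire(mutex) } -> std::same_as<bool>;
        lock.release();
    };

class toy_mutex {
    std::mutex impl_;
public:
    class scoped_lock {
        toy_mutex* held_ = nullptr;
    public:
        scoped_lock() = default;
        explicit scoped_lock(toy_mutex& m) { acquire(m); }
        ~scoped_lock() { if (held_) release(); }
        void acquire(toy_mutex& m) { m.impl_.lock(); held_ = &m; }
        bool try_acquire(toy_mutex& m) {
            if (!m.impl_.try_lock()) return false;
            held_ = &m; return true;
        }
        void release() { held_->impl_.unlock(); held_ = nullptr; }
    };
};

static_assert(mutex_scoped_lock_like<toy_mutex::scoped_lock, toy_mutex>);

int main() {
    toy_mutex m;
    toy_mutex::scoped_lock lock{m}; // released on scope exit
}
```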
diff --git a/src/tbb/include/oneapi/tbb/detail/_namespace_injection.h b/src/tbb/include/oneapi/tbb/detail/_namespace_injection.h
new file mode 100644
index 000000000..2e1df3093
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_namespace_injection.h
@@ -0,0 +1,24 @@
+/*
+    Copyright (c) 2020-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+// All public entities of the OneAPI Spec are available under oneapi namespace
+
+// Define tbb namespace first as it might not be known yet
+namespace tbb {}
+
+namespace oneapi {
+namespace tbb = ::tbb;
+}
diff --git a/src/tbb/include/oneapi/tbb/detail/_node_handle.h b/src/tbb/include/oneapi/tbb/detail/_node_handle.h
new file mode 100644
index 000000000..265be0755
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_node_handle.h
@@ -0,0 +1,162 @@
+/*
+    Copyright (c) 2019-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_detail__node_handle_H
+#define __TBB_detail__node_handle_H
+
+#include "_allocator_traits.h"
+#include "_assert.h"
+
+namespace tbb {
+namespace detail {
+namespace d1 {
+
+// A structure to access private node handle methods in internal TBB classes
+// Regular friend declaration is not convenient because classes which use node handle
+// can be placed in the different versioning namespaces.
+struct node_handle_accessor {
+    template <typename NodeHandleType>
+    static typename NodeHandleType::node* get_node_ptr( NodeHandleType& nh ) {
+        return nh.get_node_ptr();
+    }
+
+    template <typename NodeHandleType>
+    static NodeHandleType construct( typename NodeHandleType::node* node_ptr ) {
+        return NodeHandleType{node_ptr};
+    }
+
+    template <typename NodeHandleType>
+    static void deactivate( NodeHandleType& nh ) {
+        nh.deactivate();
+    }
+}; // struct node_handle_accessor
+
+template <typename Value, typename Node, typename Allocator>
+class node_handle_base {
+public:
+    using allocator_type = Allocator;
+protected:
+    using node = Node;
+    using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
+public:
+
+    node_handle_base() : my_node(nullptr), my_allocator() {}
+    node_handle_base(node_handle_base&& nh) : my_node(nh.my_node),
+                                              my_allocator(std::move(nh.my_allocator)) {
+        nh.my_node = nullptr;
+    }
+
+    __TBB_nodiscard bool empty() const { return my_node == nullptr; }
+    explicit operator bool() const { return my_node != nullptr; }
+
+    ~node_handle_base() { internal_destroy(); }
+
+    node_handle_base& operator=( node_handle_base&& nh ) {
+        internal_destroy();
+        my_node = nh.my_node;
+        move_assign_allocators(my_allocator, nh.my_allocator);
+        nh.deactivate();
+        return *this;
+    }
+
+    void swap( node_handle_base& nh ) {
+        using std::swap;
+        swap(my_node, nh.my_node);
+        swap_allocators(my_allocator, nh.my_allocator);
+    }
+
+    allocator_type get_allocator() const {
+        return my_allocator;
+    }
+
+protected:
+    node_handle_base( node* n ) : my_node(n) {}
+
+    void internal_destroy() {
+        if(my_node != nullptr) {
+            allocator_traits_type::destroy(my_allocator, my_node->storage());
+            typename allocator_traits_type::template rebind_alloc<node> node_allocator(my_allocator);
+            node_allocator.deallocate(my_node, 1);
+        }
+    }
+
+    node* get_node_ptr() { return my_node; }
+
+    void deactivate() { my_node = nullptr; }
+
+    node* my_node;
+    allocator_type my_allocator;
+};
+
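These classes give TBB's concurrent containers the same node-handle idiom the standard associative containers expose: `extract()` detaches a node without copying, and the handle owns it until it is inserted elsewhere. The idiom itself, shown with `std::map` (this is standard-library usage, not TBB code):

```cpp
#include <cassert>
#include <map>
#include <string>

int main() {
    std::map<int, std::string> src{{1, "one"}, {2, "two"}}, dst;

    auto nh = src.extract(1);   // node handle: owns the element, empty() is false
    assert(!nh.empty());
    nh.key() = 10;              // keys are mutable through the handle, as in key() below
    dst.insert(std::move(nh));  // ownership transferred; the element is never copied

    assert(src.size() == 1 && dst.count(10) == 1);
}
```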
+// node handle for maps
+template <typename Key, typename Value, typename Node, typename Allocator>
+class node_handle : public node_handle_base<Value, Node, Allocator> {
+    using base_type = node_handle_base<Value, Node, Allocator>;
+public:
+    using key_type = Key;
+    using mapped_type = typename Value::second_type;
+    using allocator_type = typename base_type::allocator_type;
+
+    node_handle() = default;
+
+    key_type& key() const {
+        __TBB_ASSERT(!this->empty(), "Cannot get key from the empty node_type object");
+        return *const_cast<key_type*>(&(this->my_node->value().first));
+    }
+
+    mapped_type& mapped() const {
+        __TBB_ASSERT(!this->empty(), "Cannot get mapped value from the empty node_type object");
+        return this->my_node->value().second;
+    }
+
+private:
+    friend struct node_handle_accessor;
+
+    node_handle( typename base_type::node* n ) : base_type(n) {}
+}; // class node_handle
+
+// node handle for sets
+template <typename Key, typename Node, typename Allocator>
+class node_handle<Key, Key, Node, Allocator> : public node_handle_base<Key, Node, Allocator> {
+    using base_type = node_handle_base<Key, Node, Allocator>;
+public:
+    using value_type = Key;
+    using allocator_type = typename base_type::allocator_type;
+
+    node_handle() = default;
+
+    value_type& value() const {
+        __TBB_ASSERT(!this->empty(), "Cannot get value from the empty node_type object");
+        return *const_cast<value_type*>(&(this->my_node->value()));
+    }
+
+private:
+    friend struct node_handle_accessor;
+
+    node_handle( typename base_type::node* n ) : base_type(n) {}
+}; // class node_handle
+
+template <typename Key, typename Value, typename Node, typename Allocator>
+void swap( node_handle<Key, Value, Node, Allocator>& lhs,
+           node_handle<Key, Value, Node, Allocator>& rhs ) {
+    return lhs.swap(rhs);
+}
+
+} // namespace d1
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_detail__node_handle_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_pipeline_filters.h b/src/tbb/include/oneapi/tbb/detail/_pipeline_filters.h
new file mode 100644
index 000000000..812194672
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/detail/_pipeline_filters.h
@@ -0,0 +1,461 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_parallel_filters_H
+#define __TBB_parallel_filters_H
+
+#include "_config.h"
+#include "_task.h"
+#include "_pipeline_filters_deduction.h"
+#include "../tbb_allocator.h"
+
+#include <cstddef>
+#include <cstdint>
+
+namespace tbb {
+namespace detail {
+
+namespace d1 {
+class base_filter;
+}
+
+namespace d2 {
+template <typename Output>
+__TBB_requires(std::copyable<Output>)
+class input_node;
+}
+
+namespace r1 {
+TBB_EXPORT void __TBB_EXPORTED_FUNC set_end_of_input(d1::base_filter&);
+class pipeline;
+class stage_task;
+class input_buffer;
+}
+
+namespace d1 {
+class filter_node;
+
+//! A stage in a pipeline.
+/** @ingroup algorithms */
+class base_filter{
+private:
+    //! Value used to mark "not in pipeline"
+    static base_filter* not_in_pipeline() { return reinterpret_cast<base_filter*>(std::intptr_t(-1)); }
+public:
+    //! The lowest bit 0 is for parallel vs serial
+    static constexpr unsigned int filter_is_serial = 0x1;
+
+    //! 2nd bit distinguishes ordered vs unordered filters.
+    static constexpr unsigned int filter_is_out_of_order = 0x1<<1;
+
+    //! 3rd bit marks input filters emitting small objects
+    static constexpr unsigned int filter_may_emit_null = 0x1<<2;
+
+    base_filter(const base_filter&) = delete;
+    base_filter& operator=(const base_filter&) = delete;
+
+protected:
+    explicit base_filter( unsigned int m ) :
+        next_filter_in_pipeline(not_in_pipeline()),
+        my_input_buffer(nullptr),
+        my_filter_mode(m),
+        my_pipeline(nullptr)
+    {}
+
+    // signal end-of-input for concrete_filters
+    void set_end_of_input() {
+        r1::set_end_of_input(*this);
+    }
+
+public:
+    //! True if filter is serial.
+    bool is_serial() const {
+        return bool( my_filter_mode & filter_is_serial );
+    }
+
+    //! True if filter must receive stream in order.
+    bool is_ordered() const {
+        return (my_filter_mode & filter_is_serial) && !(my_filter_mode & filter_is_out_of_order);
+    }
+
+    //! true if an input filter can emit null
+    bool object_may_be_null() {
+        return ( my_filter_mode & filter_may_emit_null ) == filter_may_emit_null;
+    }
+
+    //! Operate on an item from the input stream, and return item for output stream.
+    /** Returns nullptr if filter is a sink. */
+    virtual void* operator()( void* item ) = 0;
+
+    //! Destroy filter.
+    virtual ~base_filter() {};
+
+    //! Destroys item if pipeline was cancelled.
+    /** Required to prevent memory leaks.
+        Note it can be called concurrently even for serial filters.*/
+    virtual void finalize( void* /*item*/ ) {}
+
+private:
+    //! Pointer to next filter in the pipeline.
+    base_filter* next_filter_in_pipeline;
+
+    //! Buffer for incoming tokens, or nullptr if not required.
+    /** The buffer is required if the filter is serial. */
+    r1::input_buffer* my_input_buffer;
+
+    friend class r1::stage_task;
+    friend class r1::pipeline;
+    friend void r1::set_end_of_input(d1::base_filter&);
+
+    //! Storage for filter mode and dynamically checked implementation version.
+    const unsigned int my_filter_mode;
+
+    //! Pointer to the pipeline.
+    r1::pipeline* my_pipeline;
+};
+
+template<typename InputType, typename OutputType, typename Body>
+class concrete_filter;
+
+//! input_filter control to signal end-of-input for parallel_pipeline
+class flow_control {
+    bool is_pipeline_stopped = false;
+    flow_control() = default;
+    template<typename InputType, typename OutputType, typename Body> friend class concrete_filter;
+    template <typename Output>
+    __TBB_requires(std::copyable<Output>)
+    friend class d2::input_node;
+public:
+    void stop() { is_pipeline_stopped = true; }
+};
+
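flow_control is the one piece of this header users touch directly: the first stage of a `parallel_pipeline` receives it and calls `stop()` to signal end of input. A complete example using the public oneTBB API built on these classes:

```cpp
#include <oneapi/tbb/parallel_pipeline.h>
#include <atomic>
#include <iostream>

int main() {
    std::atomic<long> sum{0};
    int i = 0;
    tbb::parallel_pipeline(
        /*max_number_of_live_tokens=*/8,
        tbb::make_filter<void, int>(tbb::filter_mode::serial_in_order,
            [&](tbb::flow_control& fc) -> int {
                // Input stage: runs serially, so mutating 'i' is safe.
                if (i >= 1000) { fc.stop(); return 0; } // value after stop() is discarded
                return i++;
            })
        &
        tbb::make_filter<int, void>(tbb::filter_mode::parallel,
            [&](int x) { sum += x; })   // sink stage: may run concurrently
    );
    std::cout << sum << '\n';  // 0 + 1 + ... + 999 = 499500
}
```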
+
+// Emulate std::is_trivially_copyable (false positives not allowed, false negatives suboptimal but safe).
+#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT
+template <typename T> using tbb_trivially_copyable = std::is_trivially_copyable<T>;
+#else
+template <typename T> struct tbb_trivially_copyable { enum { value = false }; };
+template <typename T> struct tbb_trivially_copyable < T* > { enum { value = true }; };
+template<> struct tbb_trivially_copyable < bool > { enum { value = true }; };
+template<> struct tbb_trivially_copyable < char > { enum { value = true }; };
+template<> struct tbb_trivially_copyable < signed char > { enum { value = true }; };
+template<> struct tbb_trivially_copyable <unsigned char> { enum { value = true }; };
+template<> struct tbb_trivially_copyable < short > { enum { value = true }; };
+template<> struct tbb_trivially_copyable <unsigned short> { enum { value = true }; };
+template<> struct tbb_trivially_copyable < int > { enum { value = true }; };
+template<> struct tbb_trivially_copyable <unsigned int> { enum { value = true }; };
+template<> struct tbb_trivially_copyable < long > { enum { value = true }; };
+template<> struct tbb_trivially_copyable <unsigned long> { enum { value = true }; };
+template<> struct tbb_trivially_copyable < long long> { enum { value = true }; };
+template<> struct tbb_trivially_copyable <unsigned long long> { enum { value = true }; };
+template<> struct tbb_trivially_copyable < float > { enum { value = true }; };
+template<> struct tbb_trivially_copyable < double > { enum { value = true }; };
+template<> struct tbb_trivially_copyable < long double > { enum { value = true }; };
+#endif // __TBB_CPP11_TYPE_PROPERTIES_PRESENT
+
+template <typename T>
+struct use_allocator {
+    static constexpr bool value = sizeof(T) > sizeof(void *) || !tbb_trivially_copyable<T>::value;
+};
+
+// A helper class to customize how a type is passed between filters.
+// Usage: token_helper<T, use_allocator<T>::value>
+template <typename T, bool Allocate> struct token_helper;
+
+// using tbb_allocator
+template <typename T>
+struct token_helper<T, true> {
+    using pointer = T*;
+    using value_type = T;
+    static pointer create_token(value_type && source) {
+        return new (r1::allocate_memory(sizeof(T))) T(std::move(source));
+    }
+    static value_type & token(pointer & t) { return *t; }
+    static void * cast_to_void_ptr(pointer ref) { return reinterpret_cast<void *>(ref); }
+    static pointer cast_from_void_ptr(void * ref) { return reinterpret_cast<pointer>(ref); }
+    static void destroy_token(pointer token) {
+        token->~value_type();
+        r1::deallocate_memory(token);
+    }
+};
+
+// pointer specialization
+template <typename T>
+struct token_helper<T*, false> {
+    using pointer = T*;
+    using value_type = T*;
+    static pointer create_token(const value_type & source) { return source; }
+    static value_type & token(pointer & t) { return t; }
+    static void * cast_to_void_ptr(pointer ref) { return reinterpret_cast<void *>(ref); }
+    static pointer cast_from_void_ptr(void * ref) { return reinterpret_cast<pointer>(ref); }
+    static void destroy_token( pointer /*token*/) {}
+};
+
+// converting type to and from void*, passing objects directly
+template <typename T>
+struct token_helper<T, false> {
+    typedef union {
+        T actual_value;
+        void * void_overlay;
+    } type_to_void_ptr_map;
+    using pointer = T;  // not really a pointer in this case.
+    using value_type = T;
+    static pointer create_token(const value_type & source) { return source; }
+    static value_type & token(pointer & t) { return t; }
+    static void * cast_to_void_ptr(pointer ref) {
+        type_to_void_ptr_map mymap;
+        mymap.void_overlay = nullptr;
+        mymap.actual_value = ref;
+        return mymap.void_overlay;
+    }
+    static pointer cast_from_void_ptr(void * ref) {
+        type_to_void_ptr_map mymap;
+        mymap.void_overlay = ref;
+        return mymap.actual_value;
+    }
+    static void destroy_token( pointer /*token*/) {}
+};
+
+// intermediate
+template<typename InputType, typename OutputType, typename Body>
+class concrete_filter: public base_filter {
+    const Body& my_body;
+    using input_helper = token_helper<InputType, use_allocator<InputType>::value>;
+    using input_pointer = typename input_helper::pointer;
+    using output_helper = token_helper<OutputType, use_allocator<OutputType>::value>;
+    using output_pointer = typename output_helper::pointer;
+
+    void* operator()(void* input) override {
+        input_pointer temp_input = input_helper::cast_from_void_ptr(input);
+        output_pointer temp_output = output_helper::create_token(tbb::detail::invoke(my_body, std::move(input_helper::token(temp_input))));
+        input_helper::destroy_token(temp_input);
+        return output_helper::cast_to_void_ptr(temp_output);
+    }
+
+    void finalize(void * input) override {
+        input_pointer temp_input = input_helper::cast_from_void_ptr(input);
+        input_helper::destroy_token(temp_input);
+    }
+
+public:
+    concrete_filter(unsigned int m, const Body& body) : base_filter(m), my_body(body) {}
+};
+
+// input
+template<typename OutputType, typename Body>
+class concrete_filter<void, OutputType, Body>: public base_filter {
+    const Body& my_body;
+    using output_helper = token_helper<OutputType, use_allocator<OutputType>::value>;
+    using output_pointer = typename output_helper::pointer;
+
+    void* operator()(void*) override {
+        flow_control control;
+        output_pointer temp_output = output_helper::create_token(my_body(control));
+        if(control.is_pipeline_stopped) {
+            output_helper::destroy_token(temp_output);
+            set_end_of_input();
+            return nullptr;
+        }
+        return output_helper::cast_to_void_ptr(temp_output);
+    }
+
+public:
+    concrete_filter(unsigned int m, const Body& body) :
+        base_filter(m | filter_may_emit_null),
+        my_body(body)
+    {}
+};
+
+// output
+template<typename InputType, typename Body>
+class concrete_filter<InputType, void, Body>: public base_filter {
+    const Body& my_body;
+    using input_helper = token_helper<InputType, use_allocator<InputType>::value>;
+    using input_pointer = typename input_helper::pointer;
+
+    void* operator()(void* input) override {
+        input_pointer temp_input = input_helper::cast_from_void_ptr(input);
+        tbb::detail::invoke(my_body, std::move(input_helper::token(temp_input)));
+        input_helper::destroy_token(temp_input);
+        return nullptr;
+    }
+    void finalize(void* input) override {
+        input_pointer temp_input = input_helper::cast_from_void_ptr(input);
+        input_helper::destroy_token(temp_input);
+    }
+
+public:
+    concrete_filter(unsigned int m, const Body& body) : base_filter(m), my_body(body) {}
+};
+
+template<typename Body>
+class concrete_filter<void, void, Body>: public base_filter {
+    const Body& my_body;
+
+    void* operator()(void*) override {
+        flow_control control;
+        my_body(control);
+        void* output = control.is_pipeline_stopped ?
nullptr : (void*)(std::intptr_t)-1; + return output; + } +public: + concrete_filter(unsigned int m, const Body& body) : base_filter(m), my_body(body) {} +}; + +class filter_node_ptr { + filter_node * my_node; + +public: + filter_node_ptr() : my_node(nullptr) {} + filter_node_ptr(filter_node *); + ~filter_node_ptr(); + filter_node_ptr(const filter_node_ptr &); + filter_node_ptr(filter_node_ptr &&); + void operator=(filter_node *); + void operator=(const filter_node_ptr &); + void operator=(filter_node_ptr &&); + filter_node& operator*() const; + operator bool() const; +}; + +//! Abstract base class that represents a node in a parse tree underlying a filter class. +/** These nodes are always heap-allocated and can be shared by filter objects. */ +class filter_node { + /** Count must be atomic because it is hidden state for user, but might be shared by threads. */ + std::atomic ref_count; +public: + filter_node_ptr left; + filter_node_ptr right; +protected: + filter_node() : ref_count(0), left(nullptr), right(nullptr) { +#ifdef __TBB_TEST_FILTER_NODE_COUNT + ++(__TBB_TEST_FILTER_NODE_COUNT); +#endif + } +public: + filter_node(const filter_node_ptr& x, const filter_node_ptr& y) : filter_node(){ + left = x; + right = y; + } + filter_node(const filter_node&) = delete; + filter_node& operator=(const filter_node&) = delete; + + //! Add concrete_filter to pipeline + virtual base_filter* create_filter() const { + __TBB_ASSERT(false, "method of non-leaf was called"); + return nullptr; + } + + //! Increment reference count + void add_ref() { ref_count.fetch_add(1, std::memory_order_relaxed); } + + //! Decrement reference count and delete if it becomes zero. + void remove_ref() { + __TBB_ASSERT(ref_count>0,"ref_count underflow"); + if( ref_count.fetch_sub(1, std::memory_order_relaxed) == 1 ) { + this->~filter_node(); + r1::deallocate_memory(this); + } + } + + virtual ~filter_node() { +#ifdef __TBB_TEST_FILTER_NODE_COUNT + --(__TBB_TEST_FILTER_NODE_COUNT); +#endif + } +}; + +inline filter_node_ptr::filter_node_ptr(filter_node * nd) : my_node(nd) { + if (my_node) { + my_node->add_ref(); + } +} + +inline filter_node_ptr::~filter_node_ptr() { + if (my_node) { + my_node->remove_ref(); + } +} + +inline filter_node_ptr::filter_node_ptr(const filter_node_ptr & rhs) : my_node(rhs.my_node) { + if (my_node) { + my_node->add_ref(); + } +} + +inline filter_node_ptr::filter_node_ptr(filter_node_ptr && rhs) : my_node(rhs.my_node) { + rhs.my_node = nullptr; +} + +inline void filter_node_ptr::operator=(filter_node * rhs) { + // Order of operations below carefully chosen so that reference counts remain correct + // in unlikely event that remove_ref throws exception. + filter_node* old = my_node; + my_node = rhs; + if (my_node) { + my_node->add_ref(); + } + if (old) { + old->remove_ref(); + } +} + +inline void filter_node_ptr::operator=(const filter_node_ptr & rhs) { + *this = rhs.my_node; +} + +inline void filter_node_ptr::operator=(filter_node_ptr && rhs) { + filter_node* old = my_node; + my_node = rhs.my_node; + rhs.my_node = nullptr; + if (old) { + old->remove_ref(); + } +} + +inline filter_node& filter_node_ptr::operator*() const{ + __TBB_ASSERT(my_node,"nullptr node is used"); + return *my_node; +} + +inline filter_node_ptr::operator bool() const { + return my_node != nullptr; +} + +//! Node in parse tree representing result of make_filter. 
+template +class filter_node_leaf: public filter_node { + const unsigned int my_mode; + const Body my_body; + base_filter* create_filter() const override { + return new(r1::allocate_memory(sizeof(concrete_filter))) concrete_filter(my_mode,my_body); + } +public: + filter_node_leaf( unsigned int m, const Body& b ) : my_mode(m), my_body(b) {} +}; + + +template ::input_type> +using filter_input = typename std::conditional::value, void, Input>::type; + +template +using filter_output = typename filter_body_types::output_type; + +} // namespace d1 +} // namespace detail +} // namespace tbb + + +#endif /* __TBB_parallel_filters_H */ diff --git a/src/tbb/include/oneapi/tbb/detail/_pipeline_filters_deduction.h b/src/tbb/include/oneapi/tbb/detail/_pipeline_filters_deduction.h new file mode 100644 index 000000000..c1a6e8aed --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_pipeline_filters_deduction.h @@ -0,0 +1,46 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__pipeline_filters_deduction_H +#define __TBB__pipeline_filters_deduction_H + +#include "_config.h" +#include +#include + +namespace tbb { +namespace detail { +namespace d1 { + +template +struct declare_filter_types { + using input_type = typename std::remove_const::type>::type; + using output_type = typename std::remove_const::type>::type; +}; + +template struct filter_body_types; + +template +struct filter_body_types : declare_filter_types {}; + +template +struct filter_body_types : declare_filter_types {}; + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif // __TBB__pipeline_filters_deduction_H diff --git a/src/tbb/include/oneapi/tbb/detail/_range_common.h b/src/tbb/include/oneapi/tbb/detail/_range_common.h new file mode 100644 index 000000000..1011f029d --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_range_common.h @@ -0,0 +1,130 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__range_common_H +#define __TBB_detail__range_common_H + +#include "_config.h" +#include "_utils.h" +#if __TBB_CPP20_CONCEPTS_PRESENT +#include +#endif +#include + +namespace tbb { +namespace detail { +inline namespace d0 { + +//! Dummy type that distinguishes splitting constructor from copy constructor. +/** + * See description of parallel_for and parallel_reduce for example usages. + * @ingroup algorithms + */ +class split {}; + +//! 
Type enables transmission of splitting proportion from partitioners to range objects +/** + * In order to make use of such facility Range objects must implement + * splitting constructor with this type passed. + */ +class proportional_split : no_assign { +public: + proportional_split(size_t _left = 1, size_t _right = 1) : my_left(_left), my_right(_right) { } + + size_t left() const { return my_left; } + size_t right() const { return my_right; } + + // used when range does not support proportional split + explicit operator split() const { return split(); } + +private: + size_t my_left, my_right; +}; + +template +struct range_split_object_provider { + template + static split get( PartitionerSplitType& ) { return split(); } +}; + +template +struct range_split_object_provider::value>::type> { + template + static PartitionerSplitType& get( PartitionerSplitType& split_obj ) { return split_obj; } +}; + +template +auto get_range_split_object( PartitionerSplitType& split_obj ) +-> decltype(range_split_object_provider::get(split_obj)) { + return range_split_object_provider::get(split_obj); +} + +template +using range_iterator_type = decltype(std::begin(std::declval())); + +#if __TBB_CPP20_CONCEPTS_PRESENT +template +using iterator_reference_type = typename std::iterator_traits::reference; + +template +using range_reference_type = iterator_reference_type>; + +template +concept blocked_range_value = std::copyable && + requires( const std::remove_reference_t& lhs, const std::remove_reference_t& rhs ) { + { lhs < rhs } -> relaxed_convertible_to; + { lhs - rhs } -> std::convertible_to; + { lhs + (rhs - lhs) } -> std::convertible_to; + }; + +template +concept splittable = std::constructible_from; + +template +concept tbb_range = std::copy_constructible && + splittable && + requires( const std::remove_reference_t& range ) { + { range.empty() } -> relaxed_convertible_to; + { range.is_divisible() } -> relaxed_convertible_to; + }; + +template +constexpr bool iterator_concept_helper( std::input_iterator_tag ) { + return std::input_iterator; +} + +template +constexpr bool iterator_concept_helper( std::random_access_iterator_tag ) { + return std::random_access_iterator; +} + +template +concept iterator_satisfies = requires (IteratorTag tag) { + requires iterator_concept_helper(tag); +}; + +template +concept container_based_sequence = requires( Sequence& seq ) { + { std::begin(seq) } -> iterator_satisfies; + { std::end(seq) } -> iterator_satisfies; +}; +#endif // __TBB_CPP20_CONCEPTS_PRESENT +} // namespace d0 +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail__range_common_H diff --git a/src/tbb/include/oneapi/tbb/detail/_rtm_mutex.h b/src/tbb/include/oneapi/tbb/detail/_rtm_mutex.h new file mode 100644 index 000000000..3901a7403 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_rtm_mutex.h @@ -0,0 +1,162 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
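+*/

/* A sketch of what the splitting machinery in _range_common.h above expects
   from a user-defined Range. tbb::split and tbb::proportional_split are the
   public names; "index_range" is hypothetical. range_split_object_provider
   forwards a proportional_split only when the Range is constructible from it,
   so providing the second constructor is all that is needed:

       class index_range {
           std::size_t first_, last_;
       public:
           index_range(std::size_t f, std::size_t l) : first_(f), last_(l) {}
           bool empty() const { return first_ == last_; }
           bool is_divisible() const { return last_ - first_ > 1; }
           // Required even split: the new range takes the upper half.
           index_range(index_range& r, tbb::split)
               : first_((r.first_ + r.last_) / 2), last_(r.last_) {
               r.last_ = first_;
           }
           // Optional proportional split: the new range takes right/(left+right).
           index_range(index_range& r, tbb::proportional_split p)
               : first_(r.last_ - (r.last_ - r.first_) * p.right() / (p.left() + p.right())),
                 last_(r.last_) {
               r.last_ = first_;
           }
       };

   Ranges without the proportional constructor still work: proportional_split
   converts to a plain split through its explicit conversion operator. */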
+
+#ifndef __TBB__rtm_mutex_impl_H
+#define __TBB__rtm_mutex_impl_H
+
+#include "_assert.h"
+#include "_utils.h"
+#include "../spin_mutex.h"
+
+#include "../profiling.h"
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+struct rtm_mutex_impl;
+}
+namespace d1 {
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress warning: structure was padded due to alignment specifier
+    // #pragma warning (push)
+    // #pragma warning (disable: 4324)
+#endif
+
+/** A rtm_mutex is a speculation-enabled spin mutex.
+    It should be used for locking short critical sections where the lock is
+    contended but the data it protects are not. If zero-initialized, the
+    mutex is considered unheld.
+    @ingroup synchronization */
+class alignas(max_nfs_size) rtm_mutex : private spin_mutex {
+private:
+    enum class rtm_state {
+        rtm_none,
+        rtm_transacting,
+        rtm_real
+    };
+public:
+    //! Constructors
+    rtm_mutex() noexcept {
+        create_itt_sync(this, "tbb::speculative_spin_mutex", "");
+    }
+
+    //! Destructor
+    ~rtm_mutex() = default;
+
+    //! Represents acquisition of a mutex.
+    class scoped_lock {
+    public:
+        friend class rtm_mutex;
+        //! Construct lock that has not acquired a mutex.
+        /** Equivalent to zero-initialization of *this. */
+        constexpr scoped_lock() : m_mutex(nullptr), m_transaction_state(rtm_state::rtm_none) {}
+
+        //! Acquire lock on given mutex.
+        scoped_lock(rtm_mutex& m) : m_mutex(nullptr), m_transaction_state(rtm_state::rtm_none) {
+            acquire(m);
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if(m_transaction_state != rtm_state::rtm_none) {
+                release();
+            }
+        }
+
+        //! No Copy
+        scoped_lock(const scoped_lock&) = delete;
+        scoped_lock& operator=(const scoped_lock&) = delete;
+
+        //! Acquire lock on given mutex.
+        void acquire(rtm_mutex& m);
+
+        //! Try acquire lock on given mutex.
+        bool try_acquire(rtm_mutex& m);
+
+        //! Release lock
+        void release();
+
+    private:
+        rtm_mutex* m_mutex;
+        rtm_state m_transaction_state;
+        friend r1::rtm_mutex_impl;
+    };
+
+    //! Mutex traits
+    static constexpr bool is_rw_mutex = false;
+    static constexpr bool is_recursive_mutex = false;
+    static constexpr bool is_fair_mutex = false;
+private:
+    friend r1::rtm_mutex_impl;
+}; // end of rtm_mutex
+} // namespace d1
+
+namespace r1 {
+    //! Internal acquire lock.
+    // only_speculate == true if we're doing a try_lock, else false.
+    TBB_EXPORT void __TBB_EXPORTED_FUNC acquire(d1::rtm_mutex&, d1::rtm_mutex::scoped_lock&, bool only_speculate = false);
+    //! Internal try_acquire lock.
+    TBB_EXPORT bool __TBB_EXPORTED_FUNC try_acquire(d1::rtm_mutex&, d1::rtm_mutex::scoped_lock&);
+    //! Internal release lock.
+    TBB_EXPORT void __TBB_EXPORTED_FUNC release(d1::rtm_mutex::scoped_lock&);
+} // namespace r1
+
+namespace d1 {
+//! Acquire lock on given mutex.
+inline void rtm_mutex::scoped_lock::acquire(rtm_mutex& m) {
+    __TBB_ASSERT(!m_mutex, "lock is already acquired");
+    r1::acquire(m, *this);
+}
+
+//! Try acquire lock on given mutex.
+inline bool rtm_mutex::scoped_lock::try_acquire(rtm_mutex& m) {
+    __TBB_ASSERT(!m_mutex, "lock is already acquired");
+    return r1::try_acquire(m, *this);
+}
+
+//!
Release lock +inline void rtm_mutex::scoped_lock::release() { + __TBB_ASSERT(m_mutex, "lock is not acquired"); + __TBB_ASSERT(m_transaction_state != rtm_state::rtm_none, "lock is not acquired"); + return r1::release(*this); +} + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // #pragma warning (pop) // 4324 warning +#endif + +#if TBB_USE_PROFILING_TOOLS +inline void set_name(rtm_mutex& obj, const char* name) { + itt_set_sync_name(&obj, name); +} +#if (_WIN32||_WIN64) +inline void set_name(rtm_mutex& obj, const wchar_t* name) { + itt_set_sync_name(&obj, name); +} +#endif // WIN +#else +inline void set_name(rtm_mutex&, const char*) {} +#if (_WIN32||_WIN64) +inline void set_name(rtm_mutex&, const wchar_t*) {} +#endif // WIN +#endif + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif /* __TBB__rtm_mutex_impl_H */ diff --git a/src/tbb/include/oneapi/tbb/detail/_rtm_rw_mutex.h b/src/tbb/include/oneapi/tbb/detail/_rtm_rw_mutex.h new file mode 100644 index 000000000..e7536caab --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_rtm_rw_mutex.h @@ -0,0 +1,215 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__rtm_rw_mutex_H +#define __TBB_detail__rtm_rw_mutex_H + +#include "_assert.h" +#include "_utils.h" +#include "../spin_rw_mutex.h" + +#include + +namespace tbb { +namespace detail { + +namespace r1 { +struct rtm_rw_mutex_impl; +} + +namespace d1 { + +constexpr std::size_t speculation_granularity = 64; +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Suppress warning: structure was padded due to alignment specifier + // #pragma warning (push) + // #pragma warning (disable: 4324) +#endif + +//! Fast, unfair, spinning speculation-enabled reader-writer lock with backoff and writer-preference +/** @ingroup synchronization */ +class alignas(max_nfs_size) rtm_rw_mutex : private spin_rw_mutex { + friend struct r1::rtm_rw_mutex_impl; +private: + enum class rtm_type { + rtm_not_in_mutex, + rtm_transacting_reader, + rtm_transacting_writer, + rtm_real_reader, + rtm_real_writer + }; +public: + //! Constructors + rtm_rw_mutex() noexcept : write_flag(false) { + create_itt_sync(this, "tbb::speculative_spin_rw_mutex", ""); + } + + //! Destructor + ~rtm_rw_mutex() = default; + + //! Represents acquisition of a mutex. + class scoped_lock { + friend struct r1::rtm_rw_mutex_impl; + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + constexpr scoped_lock() : m_mutex(nullptr), m_transaction_state(rtm_type::rtm_not_in_mutex) {} + + //! Acquire lock on given mutex. + scoped_lock(rtm_rw_mutex& m, bool write = true) : m_mutex(nullptr), m_transaction_state(rtm_type::rtm_not_in_mutex) { + acquire(m, write); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if(m_transaction_state != rtm_type::rtm_not_in_mutex) { + release(); + } + } + + //! No Copy + scoped_lock(const scoped_lock&) = delete; + scoped_lock& operator=(const scoped_lock&) = delete; + + //! 
Acquire lock on given mutex. + inline void acquire(rtm_rw_mutex& m, bool write = true); + + //! Try acquire lock on given mutex. + inline bool try_acquire(rtm_rw_mutex& m, bool write = true); + + //! Release lock + inline void release(); + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + inline bool upgrade_to_writer(); + + //! Downgrade writer to become a reader. + inline bool downgrade_to_reader(); + + inline bool is_writer() const; + private: + rtm_rw_mutex* m_mutex; + rtm_type m_transaction_state; + }; + + //! Mutex traits + static constexpr bool is_rw_mutex = true; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = false; + +private: + alignas(speculation_granularity) std::atomic write_flag; +}; + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // #pragma warning (pop) // 4324 warning +#endif + +} // namespace d1 + +namespace r1 { + //! Internal acquire write lock. + // only_speculate == true if we're doing a try_lock, else false. + TBB_EXPORT void __TBB_EXPORTED_FUNC acquire_writer(d1::rtm_rw_mutex&, d1::rtm_rw_mutex::scoped_lock&, bool only_speculate = false); + //! Internal acquire read lock. + // only_speculate == true if we're doing a try_lock, else false. + TBB_EXPORT void __TBB_EXPORTED_FUNC acquire_reader(d1::rtm_rw_mutex&, d1::rtm_rw_mutex::scoped_lock&, bool only_speculate = false); + //! Internal upgrade reader to become a writer. + TBB_EXPORT bool __TBB_EXPORTED_FUNC upgrade(d1::rtm_rw_mutex::scoped_lock&); + //! Internal downgrade writer to become a reader. + TBB_EXPORT bool __TBB_EXPORTED_FUNC downgrade(d1::rtm_rw_mutex::scoped_lock&); + //! Internal try_acquire write lock. + TBB_EXPORT bool __TBB_EXPORTED_FUNC try_acquire_writer(d1::rtm_rw_mutex&, d1::rtm_rw_mutex::scoped_lock&); + //! Internal try_acquire read lock. + TBB_EXPORT bool __TBB_EXPORTED_FUNC try_acquire_reader(d1::rtm_rw_mutex&, d1::rtm_rw_mutex::scoped_lock&); + //! Internal release lock. + TBB_EXPORT void __TBB_EXPORTED_FUNC release(d1::rtm_rw_mutex::scoped_lock&); +} + +namespace d1 { +//! Acquire lock on given mutex. +void rtm_rw_mutex::scoped_lock::acquire(rtm_rw_mutex& m, bool write) { + __TBB_ASSERT(!m_mutex, "lock is already acquired"); + if (write) { + r1::acquire_writer(m, *this); + } else { + r1::acquire_reader(m, *this); + } +} + +//! Try acquire lock on given mutex. +bool rtm_rw_mutex::scoped_lock::try_acquire(rtm_rw_mutex& m, bool write) { + __TBB_ASSERT(!m_mutex, "lock is already acquired"); + if (write) { + return r1::try_acquire_writer(m, *this); + } else { + return r1::try_acquire_reader(m, *this); + } +} + +//! Release lock +void rtm_rw_mutex::scoped_lock::release() { + __TBB_ASSERT(m_mutex, "lock is not acquired"); + __TBB_ASSERT(m_transaction_state != rtm_type::rtm_not_in_mutex, "lock is not acquired"); + return r1::release(*this); +} + +//! Upgrade reader to become a writer. +/** Returns whether the upgrade happened without releasing and re-acquiring the lock */ +bool rtm_rw_mutex::scoped_lock::upgrade_to_writer() { + __TBB_ASSERT(m_mutex, "lock is not acquired"); + if (m_transaction_state == rtm_type::rtm_transacting_writer || m_transaction_state == rtm_type::rtm_real_writer) { + return true; // Already a writer + } + return r1::upgrade(*this); +} + +//! Downgrade writer to become a reader. 
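/* Usage sketch for the scoped_lock API above, assuming a TSX-capable build in
   which this class is exposed under the public name
   tbb::speculative_spin_rw_mutex; the map and function are illustrative:

       #include <oneapi/tbb/spin_rw_mutex.h>
       #include <map>

       std::map<int, int> table;
       tbb::speculative_spin_rw_mutex mtx;

       bool insert_if_absent(int key, int value) {
           tbb::speculative_spin_rw_mutex::scoped_lock lock(mtx, /*write=*/false);
           if (table.count(key) != 0) return false;   // read-only fast path
           lock.upgrade_to_writer();                  // may release and re-acquire
           return table.emplace(key, value).second;   // emplace re-checks the key
       }

   Because upgrade_to_writer() may have to release and re-acquire the lock,
   the code re-validates its precondition (here via emplace) after upgrading. */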
+bool rtm_rw_mutex::scoped_lock::downgrade_to_reader() { + __TBB_ASSERT(m_mutex, "lock is not acquired"); + if (m_transaction_state == rtm_type::rtm_transacting_reader || m_transaction_state == rtm_type::rtm_real_reader) { + return true; // Already a reader + } + return r1::downgrade(*this); +} + +bool rtm_rw_mutex::scoped_lock::is_writer() const { + __TBB_ASSERT(m_mutex, "lock is not acquired"); + return m_transaction_state == rtm_type::rtm_transacting_writer || m_transaction_state == rtm_type::rtm_real_writer; +} + +#if TBB_USE_PROFILING_TOOLS +inline void set_name(rtm_rw_mutex& obj, const char* name) { + itt_set_sync_name(&obj, name); +} +#if (_WIN32||_WIN64) +inline void set_name(rtm_rw_mutex& obj, const wchar_t* name) { + itt_set_sync_name(&obj, name); +} +#endif // WIN +#else +inline void set_name(rtm_rw_mutex&, const char*) {} +#if (_WIN32||_WIN64) +inline void set_name(rtm_rw_mutex&, const wchar_t*) {} +#endif // WIN +#endif + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail__rtm_rw_mutex_H diff --git a/src/tbb/include/oneapi/tbb/detail/_scoped_lock.h b/src/tbb/include/oneapi/tbb/detail/_scoped_lock.h new file mode 100644 index 000000000..a49dcdff5 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_scoped_lock.h @@ -0,0 +1,174 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail_scoped_lock_H +#define __TBB_detail_scoped_lock_H + +namespace tbb { +namespace detail { +namespace d1 { + +// unique_scoped_lock supposes that Mutex operations never throw +template +class unique_scoped_lock { + //! Points to currently held Mutex, or nullptr if no lock is held. + Mutex* m_mutex{}; + +public: + //! Construct without acquiring a Mutex. + constexpr unique_scoped_lock() noexcept : m_mutex(nullptr) {} + + //! Construct and acquire lock on a Mutex. + unique_scoped_lock(Mutex& m) { + acquire(m); + } + + //! No Copy + unique_scoped_lock(const unique_scoped_lock&) = delete; + unique_scoped_lock& operator=(const unique_scoped_lock&) = delete; + + //! Acquire lock. + void acquire(Mutex& m) { + __TBB_ASSERT(m_mutex == nullptr, "The mutex is already acquired"); + m_mutex = &m; + m.lock(); + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_acquire(Mutex& m) { + __TBB_ASSERT(m_mutex == nullptr, "The mutex is already acquired"); + bool succeed = m.try_lock(); + if (succeed) { + m_mutex = &m; + } + return succeed; + } + + //! Release lock + void release() { + __TBB_ASSERT(m_mutex, "release on Mutex::unique_scoped_lock that is not holding a lock"); + m_mutex->unlock(); + m_mutex = nullptr; + } + + //! Destroy lock. If holding a lock, releases the lock first. + ~unique_scoped_lock() { + if (m_mutex) { + release(); + } + } +}; + +// rw_scoped_lock supposes that Mutex operations never throw +template +class rw_scoped_lock { +public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. 
*/ + constexpr rw_scoped_lock() noexcept {} + + //! Acquire lock on given mutex. + rw_scoped_lock(Mutex& m, bool write = true) { + acquire(m, write); + } + + //! Release lock (if lock is held). + ~rw_scoped_lock() { + if (m_mutex) { + release(); + } + } + + //! No Copy + rw_scoped_lock(const rw_scoped_lock&) = delete; + rw_scoped_lock& operator=(const rw_scoped_lock&) = delete; + + //! Acquire lock on given mutex. + void acquire(Mutex& m, bool write = true) { + __TBB_ASSERT(m_mutex == nullptr, "The mutex is already acquired"); + m_is_writer = write; + m_mutex = &m; + if (write) { + m_mutex->lock(); + } else { + m_mutex->lock_shared(); + } + } + + //! Try acquire lock on given mutex. + bool try_acquire(Mutex& m, bool write = true) { + bool succeed = write ? m.try_lock() : m.try_lock_shared(); + if (succeed) { + m_mutex = &m; + m_is_writer = write; + } + return succeed; + } + + //! Release lock. + void release() { + __TBB_ASSERT(m_mutex != nullptr, "The mutex is not acquired"); + Mutex* m = m_mutex; + m_mutex = nullptr; + + if (m_is_writer) { + m->unlock(); + } else { + m->unlock_shared(); + } + } + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade_to_writer() { + __TBB_ASSERT(m_mutex != nullptr, "The mutex is not acquired"); + if (m_is_writer) { + return true; // Already a writer + } + m_is_writer = true; + return m_mutex->upgrade(); + } + + //! Downgrade writer to become a reader. + bool downgrade_to_reader() { + __TBB_ASSERT(m_mutex != nullptr, "The mutex is not acquired"); + if (m_is_writer) { + m_mutex->downgrade(); + m_is_writer = false; + } + return true; + } + + bool is_writer() const { + __TBB_ASSERT(m_mutex != nullptr, "The mutex is not acquired"); + return m_is_writer; + } + +protected: + //! The pointer to the current mutex that is held, or nullptr if no mutex is held. + Mutex* m_mutex {nullptr}; + + //! If mutex != nullptr, then is_writer is true if holding a writer lock, false if holding a reader lock. + /** Not defined if not holding a lock. */ + bool m_is_writer {false}; +}; + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail_scoped_lock_H diff --git a/src/tbb/include/oneapi/tbb/detail/_segment_table.h b/src/tbb/include/oneapi/tbb/detail/_segment_table.h new file mode 100644 index 000000000..cd4d35188 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_segment_table.h @@ -0,0 +1,566 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
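+*/

/* The rw_scoped_lock wrapper in _scoped_lock.h above only requires its Mutex
   parameter to provide lock/unlock, the *_shared and try_* variants, and
   upgrade()/downgrade(). A hypothetical model type built on std::shared_mutex,
   with upgrade emulated by release-and-reacquire (hence it reports false):

       #include <shared_mutex>

       class demo_rw_mutex {
           std::shared_mutex m;
       public:
           void lock()            { m.lock(); }
           void unlock()          { m.unlock(); }
           void lock_shared()     { m.lock_shared(); }
           void unlock_shared()   { m.unlock_shared(); }
           bool try_lock()        { return m.try_lock(); }
           bool try_lock_shared() { return m.try_lock_shared(); }
           bool upgrade()   { m.unlock_shared(); m.lock(); return false; }  // not atomic
           void downgrade() { m.unlock(); m.lock_shared(); }                // not atomic
       };

       // demo_rw_mutex mtx;
       // tbb::detail::d1::rw_scoped_lock<demo_rw_mutex> lock(mtx, /*write=*/false);
*/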
+
+#ifndef __TBB_detail__segment_table_H
+#define __TBB_detail__segment_table_H
+
+#include "_config.h"
+#include "_allocator_traits.h"
+#include "_template_helpers.h"
+#include "_utils.h"
+#include "_assert.h"
+#include "_exception.h"
+#include <atomic>
+#include <type_traits>
+#include <memory>
+#include <cstring>
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// #pragma warning(push)
+// #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
+#endif
+
+namespace tbb {
+namespace detail {
+namespace d1 {
+
+template <typename T, typename Allocator, typename DerivedType, std::size_t PointersPerEmbeddedTable>
+class segment_table {
+public:
+    using value_type = T;
+    using segment_type = T*;
+    using atomic_segment = std::atomic<segment_type>;
+    using segment_table_type = atomic_segment*;
+
+    using size_type = std::size_t;
+    using segment_index_type = std::size_t;
+
+    using allocator_type = Allocator;
+
+    using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
+    using segment_table_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_segment>;
+protected:
+    using segment_table_allocator_traits = tbb::detail::allocator_traits<segment_table_allocator_type>;
+    using derived_type = DerivedType;
+
+    static constexpr size_type pointers_per_embedded_table = PointersPerEmbeddedTable;
+    static constexpr size_type pointers_per_long_table = sizeof(size_type) * 8;
+public:
+    segment_table( const allocator_type& alloc = allocator_type() )
+        : my_segment_table_allocator(alloc), my_segment_table(nullptr)
+        , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
+    {
+        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        zero_table(my_embedded_table, pointers_per_embedded_table);
+    }
+
+    segment_table( const segment_table& other )
+        : my_segment_table_allocator(segment_table_allocator_traits::
+            select_on_container_copy_construction(other.my_segment_table_allocator))
+        , my_segment_table(nullptr), my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
+    {
+        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        zero_table(my_embedded_table, pointers_per_embedded_table);
+        try_call( [&] {
+            internal_transfer(other, copy_segment_body_type{*this});
+        } ).on_exception( [&] {
+            clear();
+        });
+    }
+
+    segment_table( const segment_table& other, const allocator_type& alloc )
+        : my_segment_table_allocator(alloc), my_segment_table(nullptr)
+        , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
+    {
+        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        zero_table(my_embedded_table, pointers_per_embedded_table);
+        try_call( [&] {
+            internal_transfer(other, copy_segment_body_type{*this});
+        } ).on_exception( [&] {
+            clear();
+        });
+    }
+
+    segment_table( segment_table&& other )
+        : my_segment_table_allocator(std::move(other.my_segment_table_allocator)), my_segment_table(nullptr)
+        , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
+    {
+        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        zero_table(my_embedded_table, pointers_per_embedded_table);
+        internal_move(std::move(other));
+    }
+
+    segment_table( segment_table&& other, const allocator_type& alloc )
+        : my_segment_table_allocator(alloc), my_segment_table(nullptr), my_first_block{}
+        , my_size{}, my_segment_table_allocation_failed{}
+    {
+        my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        zero_table(my_embedded_table, pointers_per_embedded_table);
+        using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
+        internal_move_construct_with_allocator(std::move(other), alloc, is_equal_type());
+    }
+
+    ~segment_table() {
clear();
+    }
+
+    segment_table& operator=( const segment_table& other ) {
+        if (this != &other) {
+            copy_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
+            internal_transfer(other, copy_segment_body_type{*this});
+        }
+        return *this;
+    }
+
+    segment_table& operator=( segment_table&& other )
+        noexcept(derived_type::is_noexcept_assignment)
+    {
+        using pocma_type = typename segment_table_allocator_traits::propagate_on_container_move_assignment;
+        using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
+
+        if (this != &other) {
+            move_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
+            internal_move_assign(std::move(other), tbb::detail::disjunction<pocma_type, is_equal_type>());
+        }
+        return *this;
+    }
+
+    void swap( segment_table& other )
+        noexcept(derived_type::is_noexcept_swap)
+    {
+        using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
+        using pocs_type = typename segment_table_allocator_traits::propagate_on_container_swap;
+
+        if (this != &other) {
+            swap_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
+            internal_swap(other, tbb::detail::disjunction<pocs_type, is_equal_type>());
+        }
+    }
+
+    segment_type get_segment( segment_index_type index ) const {
+        return get_table()[index] + segment_base(index);
+    }
+
+    value_type& operator[]( size_type index ) {
+        return internal_subscript(index);
+    }
+
+    const value_type& operator[]( size_type index ) const {
+        return const_cast<segment_table*>(this)->internal_subscript(index);
+    }
+
+    const segment_table_allocator_type& get_allocator() const {
+        return my_segment_table_allocator;
+    }
+
+    segment_table_allocator_type& get_allocator() {
+        return my_segment_table_allocator;
+    }
+
+    void enable_segment( segment_type& segment, segment_table_type table, segment_index_type seg_index, size_type index ) {
+        // Allocate new segment
+        segment_type new_segment = self()->create_segment(table, seg_index, index);
+        if (new_segment != nullptr) {
+            // Store (new_segment - segment_base) into the segment table to allow access to the table by index via
+            // my_segment_table[segment_index_of(index)][index]
+            segment_type disabled_segment = nullptr;
+            if (!table[seg_index].compare_exchange_strong(disabled_segment, new_segment - segment_base(seg_index))) {
+                // compare_exchange failed => some other thread has already enabled this segment
+                // Deallocate the memory
+                self()->deallocate_segment(new_segment, seg_index);
+            }
+        }
+
+        segment = table[seg_index].load(std::memory_order_acquire);
+        __TBB_ASSERT(segment != nullptr, "If create_segment returned nullptr, the element should be stored in the table");
+    }
+
+    void delete_segment( segment_index_type seg_index ) {
+        segment_type segment_to_delete = self()->nullify_segment(get_table(), seg_index);
+        if (segment_to_delete == segment_allocation_failure_tag) {
+            return;
+        }
+
+        segment_to_delete += segment_base(seg_index);
+
+        // Deallocate the segment
+        self()->destroy_segment(segment_to_delete, seg_index);
+    }
+
+    size_type number_of_segments( segment_table_type table ) const {
+        // Check for an active table: if it is the embedded table - return the number of embedded segments
+        // Otherwise - return the maximum number of segments
+        return table == my_embedded_table ?
pointers_per_embedded_table : pointers_per_long_table; + } + + size_type capacity() const noexcept { + segment_table_type table = get_table(); + size_type num_segments = number_of_segments(table); + for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) { + // Check if the pointer is valid (allocated) + if (table[seg_index].load(std::memory_order_relaxed) <= segment_allocation_failure_tag) { + return segment_base(seg_index); + } + } + return segment_base(num_segments); + } + + size_type find_last_allocated_segment( segment_table_type table ) const noexcept { + size_type end = 0; + size_type num_segments = number_of_segments(table); + for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) { + // Check if the pointer is valid (allocated) + if (table[seg_index].load(std::memory_order_relaxed) > segment_allocation_failure_tag) { + end = seg_index + 1; + } + } + return end; + } + + void reserve( size_type n ) { + if (n > allocator_traits_type::max_size(my_segment_table_allocator)) { + throw_exception(exception_id::reservation_length_error); + } + + size_type size = my_size.load(std::memory_order_relaxed); + segment_index_type start_seg_idx = size == 0 ? 0 : segment_index_of(size - 1) + 1; + for (segment_index_type seg_idx = start_seg_idx; segment_base(seg_idx) < n; ++seg_idx) { + size_type first_index = segment_base(seg_idx); + internal_subscript(first_index); + } + } + + void clear() { + clear_segments(); + clear_table(); + my_size.store(0, std::memory_order_relaxed); + my_first_block.store(0, std::memory_order_relaxed); + } + + void clear_segments() { + segment_table_type current_segment_table = get_table(); + for (size_type i = number_of_segments(current_segment_table); i != 0; --i) { + if (current_segment_table[i - 1].load(std::memory_order_relaxed) != nullptr) { + // If the segment was enabled - disable and deallocate it + delete_segment(i - 1); + } + } + } + + void clear_table() { + segment_table_type current_segment_table = get_table(); + if (current_segment_table != my_embedded_table) { + // If the active table is not the embedded one - deallocate the active table + for (size_type i = 0; i != pointers_per_long_table; ++i) { + segment_table_allocator_traits::destroy(my_segment_table_allocator, ¤t_segment_table[i]); + } + + segment_table_allocator_traits::deallocate(my_segment_table_allocator, current_segment_table, pointers_per_long_table); + my_segment_table.store(my_embedded_table, std::memory_order_relaxed); + zero_table(my_embedded_table, pointers_per_embedded_table); + } + } + + void extend_table_if_necessary(segment_table_type& table, size_type start_index, size_type end_index) { + // extend_segment_table if an active table is an embedded table + // and the requested index is not in the embedded table + if (table == my_embedded_table && end_index > embedded_table_size) { + if (start_index <= embedded_table_size) { + try_call([&] { + table = self()->allocate_long_table(my_embedded_table, start_index); + // It is possible that the table was extended by the thread that allocated first_block. + // In this case it is necessary to re-read the current table. 
+
+                if (table) {
+                    my_segment_table.store(table, std::memory_order_release);
+                } else {
+                    table = my_segment_table.load(std::memory_order_acquire);
+                }
+            }).on_exception([&] {
+                my_segment_table_allocation_failed.store(true, std::memory_order_relaxed);
+            });
+        } else {
+            atomic_backoff backoff;
+            do {
+                if (my_segment_table_allocation_failed.load(std::memory_order_relaxed)) {
+                    throw_exception(exception_id::bad_alloc);
+                }
+                backoff.pause();
+                table = my_segment_table.load(std::memory_order_acquire);
+            } while (table == my_embedded_table);
+        }
+    }
+}
+
+    // Return the segment where index is stored
+    static constexpr segment_index_type segment_index_of( size_type index ) {
+        return size_type(tbb::detail::log2(uintptr_t(index|1)));
+    }
+
+    // Needed to calculate the offset in segment
+    static constexpr size_type segment_base( size_type index ) {
+        return size_type(1) << index & ~size_type(1);
+    }
+
+    // Return size of the segment
+    static constexpr size_type segment_size( size_type index ) {
+        return index == 0 ? 2 : size_type(1) << index;
+    }
+
+private:
+
+    derived_type* self() {
+        return static_cast<derived_type*>(this);
+    }
+
+    struct copy_segment_body_type {
+        void operator()( segment_index_type index, segment_type from, segment_type to ) const {
+            my_instance.self()->copy_segment(index, from, to);
+        }
+        segment_table& my_instance;
+    };
+
+    struct move_segment_body_type {
+        void operator()( segment_index_type index, segment_type from, segment_type to ) const {
+            my_instance.self()->move_segment(index, from, to);
+        }
+        segment_table& my_instance;
+    };
+
+    // Transfers all segments from the other table
+    template <typename TransferBody>
+    void internal_transfer( const segment_table& other, TransferBody transfer_segment ) {
+        static_cast<derived_type*>(this)->destroy_elements();
+
+        assign_first_block_if_necessary(other.my_first_block.load(std::memory_order_relaxed));
+        my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
+
+        segment_table_type other_table = other.get_table();
+        size_type end_segment_size = segment_size(other.find_last_allocated_segment(other_table));
+
+        // If an exception occurred in other, then the size may be greater than the size of the end segment.
+        size_type other_size = end_segment_size < other.my_size.load(std::memory_order_relaxed) ?
+            other.my_size.load(std::memory_order_relaxed) : end_segment_size;
+        other_size = my_segment_table_allocation_failed ?
embedded_table_size : other_size;
+
+        for (segment_index_type i = 0; segment_base(i) < other_size; ++i) {
+            // If the segment in the other table is enabled - transfer it
+            if (other_table[i].load(std::memory_order_relaxed) == segment_allocation_failure_tag)
+            {
+                my_size = segment_base(i);
+                break;
+            } else if (other_table[i].load(std::memory_order_relaxed) != nullptr) {
+                internal_subscript(segment_base(i));
+                transfer_segment(i, other.get_table()[i].load(std::memory_order_relaxed) + segment_base(i),
+                                 get_table()[i].load(std::memory_order_relaxed) + segment_base(i));
+            }
+        }
+    }
+
+    // Moves the other segment table
+    // Only equal allocators are allowed
+    void internal_move( segment_table&& other ) {
+        // NOTE: allocators should be equal
+        clear();
+        my_first_block.store(other.my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
+        my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
+        // If an active table in other is embedded - restore all of the embedded segments
+        if (other.get_table() == other.my_embedded_table) {
+            for ( size_type i = 0; i != pointers_per_embedded_table; ++i ) {
+                segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
+                my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
+                other.my_embedded_table[i].store(nullptr, std::memory_order_relaxed);
+            }
+            my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        } else {
+            my_segment_table.store(other.my_segment_table, std::memory_order_relaxed);
+            other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
+            zero_table(other.my_embedded_table, pointers_per_embedded_table);
+        }
+        other.my_size.store(0, std::memory_order_relaxed);
+    }
+
+    // Move construct the segment table with the allocator object
+    // if any instances of allocator_type are always equal
+    void internal_move_construct_with_allocator( segment_table&& other, const allocator_type&,
+                                                 /*is_always_equal = */ std::true_type ) {
+        internal_move(std::move(other));
+    }
+
+    // Move construct the segment table with the allocator object
+    // if any instances of allocator_type are not always equal
+    void internal_move_construct_with_allocator( segment_table&& other, const allocator_type& alloc,
+                                                 /*is_always_equal = */ std::false_type ) {
+        if (other.my_segment_table_allocator == alloc) {
+            // If allocators are equal - restore pointers
+            internal_move(std::move(other));
+        } else {
+            // If allocators are not equal - perform per element move with reallocation
+            try_call( [&] {
+                internal_transfer(other, move_segment_body_type{*this});
+            } ).on_exception( [&] {
+                clear();
+            });
+        }
+    }
+
+    // Move assigns the segment table from other if any instances of allocator_type are always equal
+    // or propagate_on_container_move_assignment is true
+    void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::true_type ) {
+        internal_move(std::move(other));
+    }
+
+    // Move assigns the segment table from other if any instances of allocator_type are not always equal
+    // and propagate_on_container_move_assignment is false
+    void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::false_type ) {
+        if (my_segment_table_allocator == other.my_segment_table_allocator) {
+            // If allocators are equal - restore pointers
+            internal_move(std::move(other));
+        } else {
+            // If allocators are not equal - perform per element move with reallocation
+            internal_transfer(other, move_segment_body_type{*this});
+        }
+    }
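/* The two-level index math used by this table, checked standalone: segment 0
   holds indices [0, 2) and segment k >= 1 holds [2^k, 2^(k+1)), so
   segment_base(k) + segment_size(k) is one past the end of segment k. A
   hypothetical free-standing re-implementation for illustration only:

       #include <cassert>
       #include <cstddef>

       std::size_t seg_index_of(std::size_t i) {            // log2(i | 1)
           std::size_t k = 0;
           for (std::size_t v = i | 1; v >>= 1; ) ++k;
           return k;
       }
       std::size_t seg_base(std::size_t k) { return (std::size_t(1) << k) & ~std::size_t(1); }
       std::size_t seg_size(std::size_t k) { return k == 0 ? 2 : std::size_t(1) << k; }

       int main() {
           for (std::size_t i = 0; i < 4096; ++i) {
               std::size_t k = seg_index_of(i);
               assert(seg_base(k) <= i && i < seg_base(k) + seg_size(k));
           }
       }

   Storing the pointer minus seg_base(k) in the table (see enable_segment
   above) is what lets internal_subscript below index a segment directly with
   the global index. */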
+
+    // Swaps two segment tables if any instances of allocator_type are always equal
+    // or propagate_on_container_swap is true
+    void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::true_type ) {
+        internal_swap_fields(other);
+    }
+
+    // Swaps two segment tables if any instances of allocator_type are not always equal
+    // and propagate_on_container_swap is false
+    // According to the C++ standard, swapping of two containers with unequal allocators
+    // is an undefined behavior scenario
+    void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::false_type ) {
+        __TBB_ASSERT(my_segment_table_allocator == other.my_segment_table_allocator,
+                     "Swapping with unequal allocators is not allowed");
+        internal_swap_fields(other);
+    }
+
+    void internal_swap_fields( segment_table& other ) {
+        // If an active table in either *this segment table or other is an embedded one - swaps the embedded tables
+        if (get_table() == my_embedded_table ||
+            other.get_table() == other.my_embedded_table) {
+
+            for (size_type i = 0; i != pointers_per_embedded_table; ++i) {
+                segment_type current_segment = my_embedded_table[i].load(std::memory_order_relaxed);
+                segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
+
+                my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
+                other.my_embedded_table[i].store(current_segment, std::memory_order_relaxed);
+            }
+        }
+
+        segment_table_type current_segment_table = get_table();
+        segment_table_type other_segment_table = other.get_table();
+
+        // If an active table is an embedded one -
+        // store an active table in other to the embedded one from other
+        if (current_segment_table == my_embedded_table) {
+            other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
+        } else {
+            // Otherwise - store it to the active segment table
+            other.my_segment_table.store(current_segment_table, std::memory_order_relaxed);
+        }
+
+        // If an active table in other segment table is an embedded one -
+        // store an active table in other to the embedded one from *this
+        if (other_segment_table == other.my_embedded_table) {
+            my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
+        } else {
+            // Otherwise - store it to the active segment table in other
+            my_segment_table.store(other_segment_table, std::memory_order_relaxed);
+        }
+        auto first_block = other.my_first_block.load(std::memory_order_relaxed);
+        other.my_first_block.store(my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
+        my_first_block.store(first_block, std::memory_order_relaxed);
+
+        auto size = other.my_size.load(std::memory_order_relaxed);
+        other.my_size.store(my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
+        my_size.store(size, std::memory_order_relaxed);
+    }
+
+protected:
+    // A flag indicating that an exception was thrown during segment allocations
+    const segment_type segment_allocation_failure_tag = reinterpret_cast<segment_type>(1);
+    static constexpr size_type embedded_table_size = segment_size(pointers_per_embedded_table);
+
+    template <bool allow_out_of_range_access = true>
+    value_type& internal_subscript( size_type index ) {
+        segment_index_type seg_index = segment_index_of(index);
+        segment_table_type table = my_segment_table.load(std::memory_order_acquire);
+        segment_type segment = nullptr;
+
+        if (allow_out_of_range_access) {
+            if (derived_type::allow_table_extending) {
+                extend_table_if_necessary(table, index, index + 1);
+            }
+
+            segment = table[seg_index].load(std::memory_order_acquire);
+            // If
the required segment is disabled - enable it
+            if (segment == nullptr) {
+                enable_segment(segment, table, seg_index, index);
+            }
+            // Check if an exception was thrown during segment allocation
+            if (segment == segment_allocation_failure_tag) {
+                throw_exception(exception_id::bad_alloc);
+            }
+        } else {
+            segment = table[seg_index].load(std::memory_order_acquire);
+        }
+        __TBB_ASSERT(segment != nullptr, nullptr);
+
+        return segment[index];
+    }
+
+    void assign_first_block_if_necessary(segment_index_type index) {
+        size_type zero = 0;
+        if (this->my_first_block.load(std::memory_order_relaxed) == zero) {
+            this->my_first_block.compare_exchange_strong(zero, index);
+        }
+    }
+
+    void zero_table( segment_table_type table, size_type count ) {
+        for (size_type i = 0; i != count; ++i) {
+            table[i].store(nullptr, std::memory_order_relaxed);
+        }
+    }
+
+    segment_table_type get_table() const {
+        return my_segment_table.load(std::memory_order_acquire);
+    }
+
+    segment_table_allocator_type my_segment_table_allocator;
+    std::atomic<segment_table_type> my_segment_table;
+    atomic_segment my_embedded_table[pointers_per_embedded_table];
+    // Number of segments in first block
+    std::atomic<size_type> my_first_block;
+    // Number of elements in table
+    std::atomic<size_type> my_size;
+    // Flag to indicate failed extend table
+    std::atomic<bool> my_segment_table_allocation_failed;
+}; // class segment_table
+
+} // namespace d1
+} // namespace detail
+} // namespace tbb
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// #pragma warning(pop) // warning 4127 is back
+#endif
+
+#endif // __TBB_detail__segment_table_H
diff --git a/src/tbb/include/oneapi/tbb/detail/_small_object_pool.h b/src/tbb/include/oneapi/tbb/detail/_small_object_pool.h new file mode 100644 index 000000000..7485b31c7 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_small_object_pool.h @@ -0,0 +1,108 @@
+/*
+    Copyright (c) 2020-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB__small_object_pool_H
+#define __TBB__small_object_pool_H
+
+#include "_config.h"
+#include "_assert.h"
+
+#include "../profiling.h"
+#include <cstddef>
+#include <cstdint>
+#include <atomic>
+
+namespace tbb {
+namespace detail {
+
+namespace d1 {
+class small_object_pool {
+protected:
+    small_object_pool() = default;
+};
+struct execution_data;
+}
+
+namespace r1 {
+TBB_EXPORT void* __TBB_EXPORTED_FUNC allocate(d1::small_object_pool*& pool, std::size_t number_of_bytes,
+                                              const d1::execution_data& ed);
+TBB_EXPORT void* __TBB_EXPORTED_FUNC allocate(d1::small_object_pool*& pool, std::size_t number_of_bytes);
+TBB_EXPORT void __TBB_EXPORTED_FUNC deallocate(d1::small_object_pool& pool, void* ptr, std::size_t number_of_bytes,
+                                               const d1::execution_data& ed);
+TBB_EXPORT void __TBB_EXPORTED_FUNC deallocate(d1::small_object_pool& pool, void* ptr, std::size_t number_of_bytes);
+}
+
+namespace d1 {
+class small_object_allocator {
+public:
+    template <typename Type, typename... Args>
+    Type* new_object(execution_data& ed, Args&&...
args) { + void* allocated_object = r1::allocate(m_pool, sizeof(Type), ed); + + auto constructed_object = new(allocated_object) Type(std::forward(args)...); + return constructed_object; + } + + template + Type* new_object(Args&&... args) { + void* allocated_object = r1::allocate(m_pool, sizeof(Type)); + + auto constructed_object = new(allocated_object) Type(std::forward(args)...); + return constructed_object; + } + + template + void delete_object(Type* object, const execution_data& ed) { + // Copy this since it can be a member of the passed object and + // unintentionally destroyed when Type destructor is called below + small_object_allocator alloc = *this; + object->~Type(); + alloc.deallocate(object, ed); + } + + template + void delete_object(Type* object) { + // Copy this since it can be a member of the passed object and + // unintentionally destroyed when Type destructor is called below + small_object_allocator alloc = *this; + object->~Type(); + alloc.deallocate(object); + } + + template + void deallocate(Type* ptr, const execution_data& ed) { + call_itt_task_notify(destroy, ptr); + + __TBB_ASSERT(m_pool != nullptr, "Pool must be valid for deallocate call"); + r1::deallocate(*m_pool, ptr, sizeof(Type), ed); + } + + template + void deallocate(Type* ptr) { + call_itt_task_notify(destroy, ptr); + + __TBB_ASSERT(m_pool != nullptr, "Pool must be valid for deallocate call"); + r1::deallocate(*m_pool, ptr, sizeof(Type)); + } +private: + small_object_pool* m_pool{}; +}; + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif /* __TBB__small_object_pool_H */ diff --git a/src/tbb/include/tbb/internal/_tbb_strings.h b/src/tbb/include/oneapi/tbb/detail/_string_resource.h similarity index 61% rename from src/tbb/include/tbb/internal/_tbb_strings.h rename to src/tbb/include/oneapi/tbb/detail/_string_resource.h index ccffe1d36..c06d5b5db 100644 --- a/src/tbb/include/tbb/internal/_tbb_strings.h +++ b/src/tbb/include/oneapi/tbb/detail/_string_resource.h @@ -1,23 +1,30 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
+ http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +TBB_STRING_RESOURCE(ALGORITHM, "tbb_algorithm") +TBB_STRING_RESOURCE(PARALLEL_FOR, "tbb_parallel_for") +TBB_STRING_RESOURCE(PARALLEL_FOR_EACH, "tbb_parallel_for_each") +TBB_STRING_RESOURCE(PARALLEL_INVOKE, "tbb_parallel_invoke") +TBB_STRING_RESOURCE(PARALLEL_REDUCE, "tbb_parallel_reduce") +TBB_STRING_RESOURCE(PARALLEL_SCAN, "tbb_parallel_scan") +TBB_STRING_RESOURCE(PARALLEL_SORT, "tbb_parallel_sort") +TBB_STRING_RESOURCE(PARALLEL_PIPELINE, "tbb_parallel_pipeline") +TBB_STRING_RESOURCE(CUSTOM_CTX, "tbb_custom") + +TBB_STRING_RESOURCE(FLOW_NULL, "null") TBB_STRING_RESOURCE(FLOW_BROADCAST_NODE, "broadcast_node") TBB_STRING_RESOURCE(FLOW_BUFFER_NODE, "buffer_node") TBB_STRING_RESOURCE(FLOW_CONTINUE_NODE, "continue_node") @@ -27,17 +34,16 @@ TBB_STRING_RESOURCE(FLOW_JOIN_NODE_RESERVING, "join_node (reserving)") TBB_STRING_RESOURCE(FLOW_JOIN_NODE_TAG_MATCHING, "join_node (tag_matching)") TBB_STRING_RESOURCE(FLOW_LIMITER_NODE, "limiter_node") TBB_STRING_RESOURCE(FLOW_MULTIFUNCTION_NODE, "multifunction_node") -TBB_STRING_RESOURCE(FLOW_OR_NODE, "or_node") //no longer in use, kept for backward compatibilty TBB_STRING_RESOURCE(FLOW_OVERWRITE_NODE, "overwrite_node") TBB_STRING_RESOURCE(FLOW_PRIORITY_QUEUE_NODE, "priority_queue_node") TBB_STRING_RESOURCE(FLOW_QUEUE_NODE, "queue_node") TBB_STRING_RESOURCE(FLOW_SEQUENCER_NODE, "sequencer_node") -TBB_STRING_RESOURCE(FLOW_SOURCE_NODE, "source_node") +TBB_STRING_RESOURCE(FLOW_INPUT_NODE, "input_node") TBB_STRING_RESOURCE(FLOW_SPLIT_NODE, "split_node") TBB_STRING_RESOURCE(FLOW_WRITE_ONCE_NODE, "write_once_node") -TBB_STRING_RESOURCE(FLOW_BODY, "body") -TBB_STRING_RESOURCE(FLOW_GRAPH, "graph") -TBB_STRING_RESOURCE(FLOW_NODE, "node") +TBB_STRING_RESOURCE(FLOW_INDEXER_NODE, "indexer_node") +TBB_STRING_RESOURCE(FLOW_COMPOSITE_NODE, "composite_node") +TBB_STRING_RESOURCE(FLOW_ASYNC_NODE, "async_node") TBB_STRING_RESOURCE(FLOW_INPUT_PORT, "input_port") TBB_STRING_RESOURCE(FLOW_INPUT_PORT_0, "input_port_0") TBB_STRING_RESOURCE(FLOW_INPUT_PORT_1, "input_port_1") @@ -61,5 +67,12 @@ TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_7, "output_port_7") TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_8, "output_port_8") TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_9, "output_port_9") TBB_STRING_RESOURCE(FLOW_OBJECT_NAME, "object_name") -TBB_STRING_RESOURCE(FLOW_NULL, "null") -TBB_STRING_RESOURCE(FLOW_INDEXER_NODE, "indexer_node") +TBB_STRING_RESOURCE(FLOW_BODY, "body") +TBB_STRING_RESOURCE(FLOW_GRAPH, "graph") +TBB_STRING_RESOURCE(FLOW_NODE, "node") +TBB_STRING_RESOURCE(FLOW_TASKS, "tbb_flow_graph") +TBB_STRING_RESOURCE(USER_EVENT, "user_event") + +#if __TBB_FLOW_TRACE_CODEPTR +TBB_STRING_RESOURCE(CODE_ADDRESS, "code_address") +#endif diff --git a/src/tbb/include/oneapi/tbb/detail/_task.h b/src/tbb/include/oneapi/tbb/detail/_task.h new file mode 100644 index 000000000..103a9868e --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_task.h @@ -0,0 +1,303 @@ +/* + Copyright (c) 2020-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
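The renamed _string_resource.h is consumed as an "X-macro" list: consumers define TBB_STRING_RESOURCE before including it, once to build an index enum and once to build the parallel string table. A standalone sketch of that pattern (the macro names below are illustrative, not TBB's actual consumer code):

#include <cstdio>

// Stand-in for the _string_resource.h entries; the real header is expanded
// multiple times with different definitions of TBB_STRING_RESOURCE.
#define STRING_LIST(X) \
    X(ALGORITHM, "tbb_algorithm") \
    X(PARALLEL_FOR, "tbb_parallel_for") \
    X(PARALLEL_REDUCE, "tbb_parallel_reduce")

#define AS_ENUM(name, str) name,
#define AS_STRING(name, str) str,

enum string_index { STRING_LIST(AS_ENUM) NUM_STRINGS };
static const char* const string_table[] = { STRING_LIST(AS_STRING) };

int main() {
    std::printf("%s\n", string_table[PARALLEL_FOR]); // prints "tbb_parallel_for"
}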
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__task_H +#define __TBB__task_H + +#include "_config.h" +#include "_assert.h" +#include "_template_helpers.h" +#include "_small_object_pool.h" + +#include "../profiling.h" + +#include +#include +#include +#include +#include +#include + +namespace tbb { +namespace detail { + +namespace d1 { +using slot_id = unsigned short; +constexpr slot_id no_slot = slot_id(~0); +constexpr slot_id any_slot = slot_id(~1); + +class task; +class wait_context; +class task_group_context; +struct execution_data; +class wait_tree_vertex_interface; +class task_arena_base; +} + +namespace d2 { +class task_group; +class task_group_base; +} + +namespace r1 { +//! Task spawn/wait entry points +TBB_EXPORT void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& ctx); +TBB_EXPORT void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& ctx, d1::slot_id id); +TBB_EXPORT void __TBB_EXPORTED_FUNC execute_and_wait(d1::task& t, d1::task_group_context& t_ctx, d1::wait_context&, d1::task_group_context& w_ctx); +TBB_EXPORT void __TBB_EXPORTED_FUNC wait(d1::wait_context&, d1::task_group_context& ctx); +TBB_EXPORT d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::execution_data*); +TBB_EXPORT d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::task_arena_base&); +TBB_EXPORT d1::task_group_context* __TBB_EXPORTED_FUNC current_context(); +TBB_EXPORT d1::wait_tree_vertex_interface* get_thread_reference_vertex(d1::wait_tree_vertex_interface* wc); + +// Do not place under __TBB_RESUMABLE_TASKS. It is a stub for unsupported platforms. +struct suspend_point_type; +using suspend_callback_type = void(*)(void*, suspend_point_type*); +//! The resumable tasks entry points +TBB_EXPORT void __TBB_EXPORTED_FUNC suspend(suspend_callback_type suspend_callback, void* user_callback); +TBB_EXPORT void __TBB_EXPORTED_FUNC resume(suspend_point_type* tag); +TBB_EXPORT suspend_point_type* __TBB_EXPORTED_FUNC current_suspend_point(); +TBB_EXPORT void __TBB_EXPORTED_FUNC notify_waiters(std::uintptr_t wait_ctx_addr); + +class thread_data; +class task_dispatcher; +class external_waiter; +struct task_accessor; +struct task_arena_impl; +} // namespace r1 + +namespace d1 { + +class task_arena; +using suspend_point = r1::suspend_point_type*; + +#if __TBB_RESUMABLE_TASKS +template +static void suspend_callback(void* user_callback, suspend_point sp) { + // Copy user function to a new stack after the context switch to avoid a race when the previous + // suspend point is resumed while the user_callback is being called. 
+    F user_callback_copy = *static_cast<F*>(user_callback);
+    user_callback_copy(sp);
+}
+
+template <typename F>
+void suspend(F f) {
+    r1::suspend(&suspend_callback<F>, &f);
+}
+
+inline void resume(suspend_point tag) {
+    r1::resume(tag);
+}
+#endif /* __TBB_RESUMABLE_TASKS */
+
+// TODO align wait_context on cache lane
+class wait_context {
+    static constexpr std::uint64_t overflow_mask = ~((1LLU << 32) - 1);
+
+    std::uint64_t m_version_and_traits{1};
+    std::atomic<std::uint64_t> m_ref_count{};
+
+    void add_reference(std::int64_t delta) {
+        call_itt_task_notify(releasing, this);
+        std::uint64_t r = m_ref_count.fetch_add(static_cast<std::uint64_t>(delta)) + static_cast<std::uint64_t>(delta);
+
+        __TBB_ASSERT_EX((r & overflow_mask) == 0, "Overflow is detected");
+
+        if (!r) {
+            // Some external waiters or coroutine waiters sleep in the wait list;
+            // we should notify them that the work is done
+            std::uintptr_t wait_ctx_addr = std::uintptr_t(this);
+            r1::notify_waiters(wait_ctx_addr);
+        }
+    }
+
+public:
+    bool continue_execution() const {
+        std::uint64_t r = m_ref_count.load(std::memory_order_acquire);
+        __TBB_ASSERT_EX((r & overflow_mask) == 0, "Overflow is detected");
+        return r > 0;
+    }
+
+private:
+    friend class r1::thread_data;
+    friend class r1::task_dispatcher;
+    friend class r1::external_waiter;
+    friend class wait_context_vertex;
+    friend struct r1::task_arena_impl;
+    friend struct r1::suspend_point_type;
+public:
+    // Although the internal reference count is uint64_t, the user interface is limited
+    // to uint32_t to preserve a part of the internal reference count for special needs.
+    wait_context(std::uint32_t ref_count) : m_ref_count{ref_count} { suppress_unused_warning(m_version_and_traits); }
+    wait_context(const wait_context&) = delete;
+
+    ~wait_context() {
+        __TBB_ASSERT(!continue_execution(), nullptr);
+    }
+
+    void reserve(std::uint32_t delta = 1) {
+        add_reference(delta);
+    }
+
+    void release(std::uint32_t delta = 1) {
+        add_reference(-std::int64_t(delta));
+    }
+};
+
+class wait_tree_vertex_interface {
+public:
+    virtual void reserve(std::uint32_t delta = 1) = 0;
+    virtual void release(std::uint32_t delta = 1) = 0;
+
+protected:
+    virtual ~wait_tree_vertex_interface() = default;
+};
+
+class wait_context_vertex : public wait_tree_vertex_interface {
+public:
+    wait_context_vertex(std::uint32_t ref = 0) : m_wait(ref) {}
+
+    void reserve(std::uint32_t delta = 1) override {
+        m_wait.reserve(delta);
+    }
+
+    void release(std::uint32_t delta = 1) override {
+        m_wait.release(delta);
+    }
+
+    wait_context& get_context() {
+        return m_wait;
+    }
+private:
+    friend class d2::task_group;
+    friend class d2::task_group_base;
+
+    bool continue_execution() const {
+        return m_wait.continue_execution();
+    }
+
+    wait_context m_wait;
+};
+
+class reference_vertex : public wait_tree_vertex_interface {
+public:
+    reference_vertex(wait_tree_vertex_interface* parent, std::uint32_t ref_count) : my_parent{parent}, m_ref_count{ref_count}
+    {}
+
+    void reserve(std::uint32_t delta = 1) override {
+        if (m_ref_count.fetch_add(static_cast<std::uint64_t>(delta)) == 0) {
+            my_parent->reserve();
+        }
+    }
+
+    void release(std::uint32_t delta = 1) override {
+        std::uint64_t ref = m_ref_count.fetch_sub(static_cast<std::uint64_t>(delta)) - static_cast<std::uint64_t>(delta);
+        if (ref == 0) {
+            my_parent->release();
+        }
+    }
+
+    std::uint32_t get_num_child() {
+        return static_cast<std::uint32_t>(m_ref_count.load(std::memory_order_acquire));
+    }
+private:
+    wait_tree_vertex_interface* my_parent;
+    std::atomic<std::uint64_t> m_ref_count;
+};
+
+struct execution_data {
+    task_group_context* context{};
+    slot_id original_slot{};
+    slot_id affinity_slot{};
+};
+
+inline
task_group_context* context(const execution_data& ed) { + return ed.context; +} + +inline slot_id original_slot(const execution_data& ed) { + return ed.original_slot; +} + +inline slot_id affinity_slot(const execution_data& ed) { + return ed.affinity_slot; +} + +inline slot_id execution_slot(const execution_data& ed) { + return r1::execution_slot(&ed); +} + +inline bool is_same_affinity(const execution_data& ed) { + return affinity_slot(ed) == no_slot || affinity_slot(ed) == execution_slot(ed); +} + +inline bool is_stolen(const execution_data& ed) { + return original_slot(ed) != execution_slot(ed); +} + +inline void spawn(task& t, task_group_context& ctx) { + call_itt_task_notify(releasing, &t); + r1::spawn(t, ctx); +} + +inline void spawn(task& t, task_group_context& ctx, slot_id id) { + call_itt_task_notify(releasing, &t); + r1::spawn(t, ctx, id); +} + +inline void execute_and_wait(task& t, task_group_context& t_ctx, wait_context& wait_ctx, task_group_context& w_ctx) { + r1::execute_and_wait(t, t_ctx, wait_ctx, w_ctx); + call_itt_task_notify(acquired, &wait_ctx); + call_itt_task_notify(destroy, &wait_ctx); +} + +inline void wait(wait_context& wait_ctx, task_group_context& ctx) { + r1::wait(wait_ctx, ctx); + call_itt_task_notify(acquired, &wait_ctx); + call_itt_task_notify(destroy, &wait_ctx); +} + +using r1::current_context; + +class task_traits { + std::uint64_t m_version_and_traits{}; + friend struct r1::task_accessor; +}; + +//! Alignment for a task object +static constexpr std::size_t task_alignment = 64; + +//! Base class for user-defined tasks. +/** @ingroup task_scheduling */ +class alignas(task_alignment) task : public task_traits { +protected: + virtual ~task() = default; + +public: + virtual task* execute(execution_data&) = 0; + virtual task* cancel(execution_data&) = 0; + +private: + std::uint64_t m_reserved[6]{}; + friend struct r1::task_accessor; +}; +static_assert(sizeof(task) == task_alignment, "task size is broken"); + +} // namespace d1 +} // namespace detail +} // namespace tbb + +#endif /* __TBB__task_H */ diff --git a/src/tbb/include/oneapi/tbb/detail/_task_handle.h b/src/tbb/include/oneapi/tbb/detail/_task_handle.h new file mode 100644 index 000000000..26212b462 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_task_handle.h @@ -0,0 +1,123 @@ +/* + Copyright (c) 2020-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
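The wait_context defined above is essentially a counting latch: reserve() before publishing work, release() as each piece finishes, and waiters are woken only when the count reaches zero. A self-contained analogue using standard threads (TBB's version spins and then parks in its own runtime rather than on a condition variable, so this is a sketch of the protocol, not the implementation):

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class counting_wait {
    std::atomic<std::uint64_t> m_ref_count;
    std::mutex m_mutex;
    std::condition_variable m_cv;
public:
    explicit counting_wait(std::uint32_t n) : m_ref_count(n) {}

    void reserve(std::uint32_t delta = 1) { m_ref_count.fetch_add(delta); }

    void release(std::uint32_t delta = 1) {
        if (m_ref_count.fetch_sub(delta) == delta) {   // count just reached zero
            std::lock_guard<std::mutex> lock(m_mutex); // analogous to r1::notify_waiters
            m_cv.notify_all();
        }
    }

    void wait() {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_cv.wait(lock, [this] { return m_ref_count.load() == 0; });
    }
};

int main() {
    counting_wait done(4);                 // like wait_context{4}
    std::vector<std::thread> pool;
    for (int i = 0; i < 4; ++i)
        pool.emplace_back([&] { done.release(); }); // each task releases once
    done.wait();                           // returns only after all four released
    for (auto& t : pool) t.join();
    std::puts("all tasks finished");
}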
+*/ + + +#ifndef __TBB_task_handle_H +#define __TBB_task_handle_H + +#include "_config.h" +#include "_task.h" +#include "_small_object_pool.h" +#include "_utils.h" +#include + +namespace tbb { +namespace detail { + +namespace d1 { class task_group_context; class wait_context; struct execution_data; } +namespace d2 { + +class task_handle; + +class task_handle_task : public d1::task { + std::uint64_t m_version_and_traits{}; + d1::wait_tree_vertex_interface* m_wait_tree_vertex; + d1::task_group_context& m_ctx; + d1::small_object_allocator m_allocator; +public: + void finalize(const d1::execution_data* ed = nullptr) { + if (ed) { + m_allocator.delete_object(this, *ed); + } else { + m_allocator.delete_object(this); + } + } + + task_handle_task(d1::wait_tree_vertex_interface* vertex, d1::task_group_context& ctx, d1::small_object_allocator& alloc) + : m_wait_tree_vertex(vertex) + , m_ctx(ctx) + , m_allocator(alloc) { + suppress_unused_warning(m_version_and_traits); + m_wait_tree_vertex->reserve(); + } + + ~task_handle_task() override { + m_wait_tree_vertex->release(); + } + + d1::task_group_context& ctx() const { return m_ctx; } +}; + + +class task_handle { + struct task_handle_task_finalizer_t{ + void operator()(task_handle_task* p){ p->finalize(); } + }; + using handle_impl_t = std::unique_ptr; + + handle_impl_t m_handle = {nullptr}; +public: + task_handle() = default; + task_handle(task_handle&&) = default; + task_handle& operator=(task_handle&&) = default; + + explicit operator bool() const noexcept { return static_cast(m_handle); } + + friend bool operator==(task_handle const& th, std::nullptr_t) noexcept; + friend bool operator==(std::nullptr_t, task_handle const& th) noexcept; + + friend bool operator!=(task_handle const& th, std::nullptr_t) noexcept; + friend bool operator!=(std::nullptr_t, task_handle const& th) noexcept; + +private: + friend struct task_handle_accessor; + + task_handle(task_handle_task* t) : m_handle {t}{}; + + d1::task* release() { + return m_handle.release(); + } +}; + +struct task_handle_accessor { +static task_handle construct(task_handle_task* t) { return {t}; } +static d1::task* release(task_handle& th) { return th.release(); } +static d1::task_group_context& ctx_of(task_handle& th) { + __TBB_ASSERT(th.m_handle, "ctx_of does not expect empty task_handle."); + return th.m_handle->ctx(); +} +}; + +inline bool operator==(task_handle const& th, std::nullptr_t) noexcept { + return th.m_handle == nullptr; +} +inline bool operator==(std::nullptr_t, task_handle const& th) noexcept { + return th.m_handle == nullptr; +} + +inline bool operator!=(task_handle const& th, std::nullptr_t) noexcept { + return th.m_handle != nullptr; +} + +inline bool operator!=(std::nullptr_t, task_handle const& th) noexcept { + return th.m_handle != nullptr; +} + +} // namespace d2 +} // namespace detail +} // namespace tbb + +#endif /* __TBB_task_handle_H */ diff --git a/src/tbb/include/oneapi/tbb/detail/_template_helpers.h b/src/tbb/include/oneapi/tbb/detail/_template_helpers.h new file mode 100644 index 000000000..50ce3d2d3 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_template_helpers.h @@ -0,0 +1,403 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
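The task_handle defined above is, in essence, a move-only wrapper over std::unique_ptr with a custom deleter that routes destruction back through the task's finalizer. The same shape reduced to standard C++ (finalize() here is a simplified stand-in for task_handle_task::finalize(), which returns the task to its small-object pool):

#include <cstdio>
#include <memory>
#include <utility>

struct pooled_task {
    void finalize() {        // stand-in for task_handle_task::finalize()
        std::puts("finalized");
        delete this;         // the real code hands itself back to the pool
    }
};

struct task_finalizer {
    void operator()(pooled_task* t) const { t->finalize(); }
};

class handle {
    std::unique_ptr<pooled_task, task_finalizer> m_impl;
public:
    handle() = default;
    explicit handle(pooled_task* t) : m_impl(t) {}
    handle(handle&&) = default;            // move-only, like task_handle
    handle& operator=(handle&&) = default;
    explicit operator bool() const noexcept { return static_cast<bool>(m_impl); }
    pooled_task* release() { return m_impl.release(); } // scheduler takes ownership
};

int main() {
    handle h(new pooled_task);
    handle h2 = std::move(h);  // ownership moves; h is now empty
    std::printf("h=%d h2=%d\n", static_cast<bool>(h), static_cast<bool>(h2));
}   // h2 goes out of scope unsubmitted -> the finalizer runs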
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__template_helpers_H +#define __TBB_detail__template_helpers_H + +#include "_utils.h" +#include "_config.h" + +#include +#include +#include +#include +#include +#include + +namespace tbb { +namespace detail { +inline namespace d0 { + +// An internal implementation of void_t, which can be used in SFINAE contexts +template +struct void_impl { + using type = void; +}; // struct void_impl + +template +using void_t = typename void_impl::type; + +// Generic SFINAE helper for expression checks, based on the idea demonstrated in ISO C++ paper n4502 +template class... Checks> +struct supports_impl { + using type = std::false_type; +}; + +template class... Checks> +struct supports_impl...>, Checks...> { + using type = std::true_type; +}; + +template class... Checks> +using supports = typename supports_impl::type; + +//! A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size. +template +struct select_size_t_constant { + // Explicit cast is needed to avoid compiler warnings about possible truncation. + // The value of the right size, which is selected by ?:, is anyway not truncated or promoted. + static const std::size_t value = static_cast((sizeof(std::size_t)==sizeof(u)) ? u : ull); +}; + +// TODO: do we really need it? +//! Cast between unrelated pointer types. +/** This method should be used sparingly as a last resort for dealing with + situations that inherently break strict ISO C++ aliasing rules. */ +// T is a pointer type because it will be explicitly provided by the programmer as a template argument; +// U is a referent type to enable the compiler to check that "ptr" is a pointer, deducing U in the process. +template +inline T punned_cast( U* ptr ) { + std::uintptr_t x = reinterpret_cast(ptr); + return reinterpret_cast(x); +} + +template +struct padded_base : T { + char pad[S - R]; +}; +template struct padded_base : T {}; + +//! Pads type T to fill out to a multiple of cache line size. +template +struct padded : padded_base {}; + +#if __TBB_CPP14_INTEGER_SEQUENCE_PRESENT + +using std::index_sequence; +using std::make_index_sequence; + +#else + +template class index_sequence {}; + +template +struct make_index_sequence_impl : make_index_sequence_impl < N - 1, N - 1, S... 
> {}; + +template +struct make_index_sequence_impl <0, S...> { + using type = index_sequence; +}; + +template +using make_index_sequence = typename make_index_sequence_impl::type; + +#endif /* __TBB_CPP14_INTEGER_SEQUENCE_PRESENT */ + +#if __TBB_CPP17_LOGICAL_OPERATIONS_PRESENT +using std::conjunction; +using std::disjunction; +#else // __TBB_CPP17_LOGICAL_OPERATIONS_PRESENT + +template +struct conjunction : std::true_type {}; + +template +struct conjunction + : std::conditional, First>::type {}; + +template +struct conjunction : T {}; + +template +struct disjunction : std::false_type {}; + +template +struct disjunction + : std::conditional>::type {}; + +template +struct disjunction : T {}; + +#endif // __TBB_CPP17_LOGICAL_OPERATIONS_PRESENT + +template +using iterator_value_t = typename std::iterator_traits::value_type; + +template +using iterator_key_t = typename std::remove_const::first_type>::type; + +template +using iterator_mapped_t = typename iterator_value_t::second_type; + +template +using iterator_alloc_pair_t = std::pair>::type, + iterator_mapped_t>; + +template using alloc_value_type = typename A::value_type; +template using alloc_ptr_t = typename std::allocator_traits::pointer; +template using has_allocate = decltype(std::declval&>() = std::declval().allocate(0)); +template using has_deallocate = decltype(std::declval().deallocate(std::declval>(), 0)); + +// alloc_value_type should be checked first, because it can be used in other checks +template +using is_allocator = supports; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template +inline constexpr bool is_allocator_v = is_allocator::value; +#endif + +// Template class in which the "type" determines the type of the element number N in pack Args +template +struct pack_element { + using type = void; +}; + +template +struct pack_element { + using type = typename pack_element::type; +}; + +template +struct pack_element<0, T, Args...> { + using type = T; +}; + +template +using pack_element_t = typename pack_element::type; + +template +class raii_guard { +public: + static_assert( + std::is_nothrow_copy_constructible::value && + std::is_nothrow_move_constructible::value, + "Throwing an exception during the Func copy or move construction cause an unexpected behavior." 
+ ); + + raii_guard( Func f ) noexcept : my_func(f), is_active(true) {} + + raii_guard( raii_guard&& g ) noexcept : my_func(std::move(g.my_func)), is_active(g.is_active) { + g.is_active = false; + } + + ~raii_guard() { + if (is_active) { + my_func(); + } + } + + void dismiss() { + is_active = false; + } +private: + Func my_func; + bool is_active; +}; // class raii_guard + +template +raii_guard make_raii_guard( Func f ) { + return raii_guard(f); +} + +template +struct try_call_proxy { + try_call_proxy( Body b ) : body(b) {} + + template + void on_exception( OnExceptionBody on_exception_body ) { + auto guard = make_raii_guard(on_exception_body); + body(); + guard.dismiss(); + } + + template + void on_completion(OnCompletionBody on_completion_body) { + auto guard = make_raii_guard(on_completion_body); + body(); + } + + Body body; +}; // struct try_call_proxy + +// Template helper function for API +// try_call(lambda1).on_exception(lambda2) +// Executes lambda1 and if it throws an exception - executes lambda2 +template +try_call_proxy try_call( Body b ) { + return try_call_proxy(b); +} + +#if __TBB_CPP17_IS_SWAPPABLE_PRESENT +using std::is_nothrow_swappable; +using std::is_swappable; +#else // __TBB_CPP17_IS_SWAPPABLE_PRESENT +namespace is_swappable_detail { +using std::swap; + +template +using has_swap = decltype(swap(std::declval(), std::declval())); + +#if _MSC_VER && _MSC_VER <= 1900 && !__INTEL_COMPILER +// Workaround for VS2015: it fails to instantiate noexcept(...) inside std::integral_constant. +template +struct noexcept_wrapper { + static const bool value = noexcept(swap(std::declval(), std::declval())); +}; +template +struct is_nothrow_swappable_impl : std::integral_constant::value> {}; +#else +template +struct is_nothrow_swappable_impl : std::integral_constant(), std::declval()))> {}; +#endif +} + +template +struct is_swappable : supports {}; + +template +struct is_nothrow_swappable + : conjunction, is_swappable_detail::is_nothrow_swappable_impl> {}; +#endif // __TBB_CPP17_IS_SWAPPABLE_PRESENT + +//! Allows to store a function parameter pack as a variable and later pass it to another function +template< typename... Types > +struct stored_pack; + +template<> +struct stored_pack<> +{ + using pack_type = stored_pack<>; + stored_pack() {} + + // Friend front-end functions + template< typename F, typename Pack > friend void call(F&& f, Pack&& p); + template< typename Ret, typename F, typename Pack > friend Ret call_and_return(F&& f, Pack&& p); + +protected: + // Ideally, ref-qualified non-static methods would be used, + // but that would greatly reduce the set of compilers where it works. + template< typename Ret, typename F, typename... Preceding > + static Ret call(F&& f, const pack_type& /*pack*/, Preceding&&... params) { + return std::forward(f)(std::forward(params)...); + } + template< typename Ret, typename F, typename... Preceding > + static Ret call(F&& f, pack_type&& /*pack*/, Preceding&&... params) { + return std::forward(f)(std::forward(params)...); + } +}; + +template< typename T, typename... Types > +struct stored_pack : stored_pack +{ + using pack_type = stored_pack; + using pack_remainder = stored_pack; + + // Since lifetime of original values is out of control, copies should be made. + // Thus references should be stripped away from the deduced type. + typename std::decay::type leftmost_value; + + // Here rvalue references act in the same way as forwarding references, + // as long as class template parameters were deduced via forwarding references. 
+ stored_pack(T&& t, Types&&... types) + : pack_remainder(std::forward(types)...), leftmost_value(std::forward(t)) {} + + // Friend front-end functions + template< typename F, typename Pack > friend void call(F&& f, Pack&& p); + template< typename Ret, typename F, typename Pack > friend Ret call_and_return(F&& f, Pack&& p); + +protected: + template< typename Ret, typename F, typename... Preceding > + static Ret call(F&& f, pack_type& pack, Preceding&&... params) { + return pack_remainder::template call( + std::forward(f), static_cast(pack), + std::forward(params)... , pack.leftmost_value + ); + } + + template< typename Ret, typename F, typename... Preceding > + static Ret call(F&& f, pack_type&& pack, Preceding&&... params) { + return pack_remainder::template call( + std::forward(f), static_cast(pack), + std::forward(params)... , std::move(pack.leftmost_value) + ); + } +}; + +//! Calls the given function with arguments taken from a stored_pack +template< typename F, typename Pack > +void call(F&& f, Pack&& p) { + std::decay::type::template call(std::forward(f), std::forward(p)); +} + +template< typename Ret, typename F, typename Pack > +Ret call_and_return(F&& f, Pack&& p) { + return std::decay::type::template call(std::forward(f), std::forward(p)); +} + +template< typename... Types > +stored_pack save_pack(Types&&... types) { + return stored_pack(std::forward(types)...); +} + +// A structure with the value which is equal to Trait::value +// but can be used in the immediate context due to parameter T +template +struct dependent_bool : std::integral_constant {}; + +template +struct body_arg_detector; + +template +struct body_arg_detector { + using arg_type = Arg; +}; + +template +struct body_arg_detector { + using arg_type = Arg; +}; + +template +struct argument_detector; + +template +struct argument_detector { + using type = typename body_arg_detector::arg_type; +}; + +template +struct argument_detector { + using type = Arg; +}; + +// Detects the argument type of callable, works for callable with one argument. +template +using argument_type_of = typename argument_detector::type>::type; + +template +struct type_identity { + using type = T; +}; + +template +using type_identity_t = typename type_identity::type; + +} // inline namespace d0 +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail__template_helpers_H diff --git a/src/tbb/include/oneapi/tbb/detail/_utils.h b/src/tbb/include/oneapi/tbb/detail/_utils.h new file mode 100644 index 000000000..1f480702f --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_utils.h @@ -0,0 +1,393 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_detail__utils_H +#define __TBB_detail__utils_H + +#include +#include +#include +#include + +#include "_config.h" +#include "_assert.h" +#include "_machine.h" + +namespace tbb { +namespace detail { +inline namespace d0 { + +//! Utility template function to prevent "unused" warnings by various compilers. 
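The raii_guard/try_call helpers above implement the classic scope-guard idiom: run a body, and fire a rollback action only if the body throws. A minimal standalone version of the same idiom, independent of the TBB helpers:

#include <cstdio>
#include <stdexcept>
#include <utility>

template <typename Func>
class scope_guard {
    Func my_func;
    bool is_active = true;
public:
    explicit scope_guard(Func f) noexcept : my_func(std::move(f)) {}
    scope_guard(const scope_guard&) = delete;
    ~scope_guard() { if (is_active) my_func(); } // fires unless dismissed
    void dismiss() { is_active = false; }
};

template <typename Body, typename OnException>
void try_call_on_exception(Body body, OnException rollback) {
    scope_guard<OnException> guard(rollback);
    body();          // if this throws, the guard fires rollback during unwinding
    guard.dismiss(); // success path: rollback is skipped
}

int main() {
    try {
        try_call_on_exception(
            [] { throw std::runtime_error("boom"); },
            [] { std::puts("rolled back"); });   // runs: the body threw
    } catch (const std::exception& e) {
        std::printf("caught: %s\n", e.what());
    }
    try_call_on_exception(
        [] { std::puts("ok"); },
        [] { std::puts("never printed"); });     // dismissed: the body succeeded
}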
+template<typename... T> void suppress_unused_warning(T&&...) {}
+
+//! Compile-time constant that is upper bound on cache line/sector size.
+/** It should be used only in situations where having a compile-time upper
+    bound is more useful than a run-time exact answer.
+    @ingroup memory_allocation */
+constexpr size_t max_nfs_size = 128;
+constexpr std::size_t max_nfs_size_exp = 7;
+static_assert(1 << max_nfs_size_exp == max_nfs_size, "max_nfs_size_exp must be a log2(max_nfs_size)");
+
+//! Class that implements exponential backoff.
+class atomic_backoff {
+    //! Time delay, in units of "pause" instructions.
+    /** Should be equal to approximately the number of "pause" instructions
+        that take the same time as a context switch. Must be a power of two. */
+    static constexpr std::int32_t LOOPS_BEFORE_YIELD = 16;
+    std::int32_t count;
+
+public:
+    // In many cases, an object of this type is initialized eagerly on a hot path,
+    // as in for(atomic_backoff b; ; b.pause()) { /* loop body */ }
+    // For this reason, the construction cost must be very small!
+    atomic_backoff() : count(1) {}
+    // This constructor pauses immediately; do not use on hot paths!
+    atomic_backoff(bool) : count(1) { pause(); }
+
+    //! No Copy
+    atomic_backoff(const atomic_backoff&) = delete;
+    atomic_backoff& operator=(const atomic_backoff&) = delete;
+
+    //! Pause for a while.
+    void pause() {
+        if (count <= LOOPS_BEFORE_YIELD) {
+            machine_pause(count);
+            // Pause twice as long the next time.
+            count *= 2;
+        } else {
+            // Pause is so long that we might as well yield the CPU to the scheduler.
+            yield();
+        }
+    }
+
+    //! Pause for a few times and return false if saturated.
+    bool bounded_pause() {
+        machine_pause(count);
+        if (count < LOOPS_BEFORE_YIELD) {
+            // Pause twice as long the next time.
+            count *= 2;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    void reset() {
+        count = 1;
+    }
+};
+
+//! Spin WHILE the condition is true.
+/** T and C should be comparable types. */
+template <typename T, typename C>
+T spin_wait_while(const std::atomic<T>& location, C comp, std::memory_order order) {
+    atomic_backoff backoff;
+    T snapshot = location.load(order);
+    while (comp(snapshot)) {
+        backoff.pause();
+        snapshot = location.load(order);
+    }
+    return snapshot;
+}
+
+//! Spin WHILE the value of the variable is equal to a given value
+/** T and U should be comparable types. */
+template <typename T, typename U>
+T spin_wait_while_eq(const std::atomic<T>& location, const U value, std::memory_order order = std::memory_order_acquire) {
+    return spin_wait_while(location, [&value](T t) { return t == value; }, order);
+}
+
+//! Spin UNTIL the value of the variable is equal to a given value
+/** T and U should be comparable types. */
+template <typename T, typename U>
+T spin_wait_until_eq(const std::atomic<T>& location, const U value, std::memory_order order = std::memory_order_acquire) {
+    return spin_wait_while(location, [&value](T t) { return t != value; }, order);
+}
+
+//! Spin UNTIL the condition returns true or spinning time is up.
+/** Returns what the passed functor returned last time it was invoked. */
+template <typename Condition>
+bool timed_spin_wait_until(Condition condition) {
+    // 32 pauses + 32 yields are measured as a balanced spin time before sleeping.
+    bool finish = condition();
+    for (int i = 1; !finish && i < 32; finish = condition(), i *= 2) {
+        machine_pause(i);
+    }
+    for (int i = 32; !finish && i < 64; finish = condition(), ++i) {
+        yield();
+    }
+    return finish;
+}
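These spin helpers encode a common contention strategy: pause with exponentially growing delays up to a threshold, then start yielding the thread instead. A self-contained sketch against std::atomic, where std::this_thread calls stand in for machine_pause() and yield():

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// Spin until `ready` becomes true, backing off exponentially, then yielding.
void spin_wait_until_set(const std::atomic<bool>& ready) {
    int count = 1;
    const int loops_before_yield = 16;     // mirrors LOOPS_BEFORE_YIELD above
    while (!ready.load(std::memory_order_acquire)) {
        if (count <= loops_before_yield) {
            for (int i = 0; i < count; ++i)
                std::this_thread::sleep_for(std::chrono::nanoseconds(1)); // ~machine_pause(count)
            count *= 2;                    // pause twice as long next time
        } else {
            std::this_thread::yield();     // too long: give up the time slice
        }
    }
}

int main() {
    std::atomic<bool> ready{false};
    std::thread waiter([&] { spin_wait_until_set(ready); std::puts("woke up"); });
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    ready.store(true, std::memory_order_release);
    waiter.join();
}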
+template <typename T>
+T clamp(T value, T lower_bound, T upper_bound) {
+    __TBB_ASSERT(lower_bound <= upper_bound, "Incorrect bounds");
+    return value > lower_bound ? (value > upper_bound ? upper_bound : value) : lower_bound;
+}
+
+template <typename T>
+std::uintptr_t log2(T in) {
+    __TBB_ASSERT(in > 0, "The logarithm of a non-positive value is undefined.");
+    return machine_log2(in);
+}
+
+template <typename T>
+T reverse_bits(T src) {
+    return machine_reverse_bits(src);
+}
+
+template <typename T>
+T reverse_n_bits(T src, std::size_t n) {
+    __TBB_ASSERT(n != 0, "Reverse for 0 bits is undefined behavior.");
+    return reverse_bits(src) >> (number_of_bits<T>() - n);
+}
+
+// A function to check if passed integer is a power of two
+template <typename IntegerType>
+constexpr bool is_power_of_two( IntegerType arg ) {
+    static_assert(std::is_integral<IntegerType>::value,
+                  "An argument for is_power_of_two should be integral type");
+    return arg && (0 == (arg & (arg - 1)));
+}
+
+// A function to determine if passed integer is a power of two
+// at least as big as another power of two, i.e. for strictly positive i and j,
+// with j being a power of two, determines whether i==j<<k for some nonnegative k
+template <typename ArgIntegerType, typename DivisorIntegerType>
+constexpr bool is_power_of_two_at_least(ArgIntegerType arg, DivisorIntegerType divisor) {
+    // Divisor should be a power of two
+    static_assert(std::is_integral<ArgIntegerType>::value,
+                  "An argument for is_power_of_two_at_least should be integral type");
+    return 0 == (arg & (arg - divisor));
+}
+
+// A function to compute arg modulo divisor where divisor is a power of 2.
+template <typename ArgIntegerType, typename DivisorIntegerType>
+inline ArgIntegerType modulo_power_of_two(ArgIntegerType arg, DivisorIntegerType divisor) {
+    __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" );
+    return arg & (divisor - 1);
+}
+
+//! A function to check if passed in pointer is aligned on a specific border
+template <typename T>
+constexpr bool is_aligned(T* pointer, std::uintptr_t alignment) {
+    return 0 == (reinterpret_cast<std::uintptr_t>(pointer) & (alignment - 1));
+}
+
+#if TBB_USE_ASSERT
+static void* const poisoned_ptr = reinterpret_cast<void*>(-1);
+
+//! Set p to invalid pointer value.
+template <typename T>
+inline void poison_pointer( T* &p ) { p = reinterpret_cast<T*>(poisoned_ptr); }
+
+template <typename T>
+inline void poison_pointer(std::atomic<T*>& p) { p.store(reinterpret_cast<T*>(poisoned_ptr), std::memory_order_relaxed); }
+
+/** Expected to be used in assertions only, thus no empty form is defined. **/
+template <typename T>
+inline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }
+
+template <typename T>
+inline bool is_poisoned(const std::atomic<T*>& p) { return is_poisoned(p.load(std::memory_order_relaxed)); }
+#else
+template <typename T>
+inline void poison_pointer(T&) { /* do nothing */ }
+#endif /* !TBB_USE_ASSERT */
+
+template <std::size_t alignment = 0, typename T>
+bool assert_pointer_valid(T* p, const char* comment = nullptr) {
+    suppress_unused_warning(p, comment);
+    __TBB_ASSERT(p != nullptr, comment);
+    __TBB_ASSERT(!is_poisoned(p), comment);
+#if !(_MSC_VER && _MSC_VER <= 1900 && !__INTEL_COMPILER)
+    __TBB_ASSERT(is_aligned(p, alignment == 0 ? alignof(T) : alignment), comment);
+#endif
+    // Returns something to simplify assert_pointers_valid implementation.
+    return true;
+}
+
+template <typename... Args>
+void assert_pointers_valid(Args*... p) {
+    // suppress_unused_warning is used as an evaluation context for the variadic pack.
+    suppress_unused_warning(assert_pointer_valid(p)...);
+}
+
+//! Base class for types that should not be assigned.
+class no_assign {
+public:
+    void operator=(const no_assign&) = delete;
+    no_assign(const no_assign&) = default;
+    no_assign() = default;
+};
+
+//! Base class for types that should not be copied or assigned.
+class no_copy: no_assign { +public: + no_copy(const no_copy&) = delete; + no_copy() = default; +}; + +template +void swap_atomics_relaxed(std::atomic& lhs, std::atomic& rhs){ + T tmp = lhs.load(std::memory_order_relaxed); + lhs.store(rhs.load(std::memory_order_relaxed), std::memory_order_relaxed); + rhs.store(tmp, std::memory_order_relaxed); +} + +//! One-time initialization states +enum class do_once_state { + uninitialized = 0, ///< No execution attempts have been undertaken yet + pending, ///< A thread is executing associated do-once routine + executed, ///< Do-once routine has been executed + initialized = executed ///< Convenience alias +}; + +//! One-time initialization function +/** /param initializer Pointer to function without arguments + The variant that returns bool is used for cases when initialization can fail + and it is OK to continue execution, but the state should be reset so that + the initialization attempt was repeated the next time. + /param state Shared state associated with initializer that specifies its + initialization state. Must be initially set to #uninitialized value + (e.g. by means of default static zero initialization). **/ +template +void atomic_do_once( const F& initializer, std::atomic& state ) { + // The loop in the implementation is necessary to avoid race when thread T2 + // that arrived in the middle of initialization attempt by another thread T1 + // has just made initialization possible. + // In such a case T2 has to rely on T1 to initialize, but T1 may already be past + // the point where it can recognize the changed conditions. + do_once_state expected_state; + while ( state.load( std::memory_order_acquire ) != do_once_state::executed ) { + if( state.load( std::memory_order_relaxed ) == do_once_state::uninitialized ) { + expected_state = do_once_state::uninitialized; +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1910 + using enum_type = typename std::underlying_type::type; + if( ((std::atomic&)state).compare_exchange_strong( (enum_type&)expected_state, (enum_type)do_once_state::pending ) ) { +#else + if( state.compare_exchange_strong( expected_state, do_once_state::pending ) ) { +#endif + run_initializer( initializer, state ); + break; + } + } + spin_wait_while_eq( state, do_once_state::pending ); + } +} + +// Run the initializer which can not fail +template +void run_initializer(const Functor& f, std::atomic& state ) { + f(); + state.store(do_once_state::executed, std::memory_order_release); +} + +#if __TBB_CPP20_CONCEPTS_PRESENT +template +concept boolean_testable_impl = std::convertible_to; + +template +concept boolean_testable = boolean_testable_impl && requires( T&& t ) { + { !std::forward(t) } -> boolean_testable_impl; + }; + +#if __TBB_CPP20_COMPARISONS_PRESENT +struct synthesized_three_way_comparator { + template + auto operator()( const T1& lhs, const T2& rhs ) const + requires requires { + { lhs < rhs } -> boolean_testable; + { rhs < lhs } -> boolean_testable; + } + { + if constexpr (std::three_way_comparable_with) { + return lhs <=> rhs; + } else { + if (lhs < rhs) { + return std::weak_ordering::less; + } + if (rhs < lhs) { + return std::weak_ordering::greater; + } + return std::weak_ordering::equivalent; + } + } +}; // struct synthesized_three_way_comparator + +template +using synthesized_three_way_result = decltype(synthesized_three_way_comparator{}(std::declval(), + std::declval())); + +#endif // __TBB_CPP20_COMPARISONS_PRESENT + +// Check if the type T is implicitly OR explicitly convertible to U +template +concept 
relaxed_convertible_to = std::constructible_from; + +template +concept adaptive_same_as = +#if __TBB_STRICT_CONSTRAINTS + std::same_as; +#else + std::convertible_to; +#endif +#endif // __TBB_CPP20_CONCEPTS_PRESENT + +template +auto invoke(F&& f, Args&&... args) +#if __TBB_CPP17_INVOKE_PRESENT + noexcept(std::is_nothrow_invocable_v) + -> std::invoke_result_t +{ + return std::invoke(std::forward(f), std::forward(args)...); +} +#else // __TBB_CPP17_INVOKE_PRESENT + noexcept(noexcept(std::forward(f)(std::forward(args)...))) + -> decltype(std::forward(f)(std::forward(args)...)) +{ + return std::forward(f)(std::forward(args)...); +} +#endif // __TBB_CPP17_INVOKE_PRESENT + +} // namespace d0 + +namespace d1 { + +class delegate_base { +public: + virtual bool operator()() const = 0; + virtual ~delegate_base() {} +}; + +template +class delegated_function : public delegate_base { +public: + delegated_function(FuncType& f) : my_func(f) {} + + bool operator()() const override { + return my_func(); + } + +private: + FuncType &my_func; +}; +} // namespace d1 + +} // namespace detail +} // namespace tbb + +#endif // __TBB_detail__utils_H diff --git a/src/tbb/include/oneapi/tbb/detail/_waitable_atomic.h b/src/tbb/include/oneapi/tbb/detail/_waitable_atomic.h new file mode 100644 index 000000000..1b18d11e5 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/detail/_waitable_atomic.h @@ -0,0 +1,90 @@ +/* + Copyright (c) 2021-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
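The atomic_do_once() helper above is a double-checked, CAS-guarded once-init: threads race to move the state from uninitialized to pending, the winner runs the initializer, and losers spin until the state lands in executed. A standalone sketch of the same protocol (simplified: unlike TBB's variant, this initializer is assumed never to fail and reset the state):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum class once_state { uninitialized, pending, executed };

template <typename F>
void do_once(F init, std::atomic<once_state>& state) {
    while (state.load(std::memory_order_acquire) != once_state::executed) {
        once_state expected = once_state::uninitialized;
        // Only one thread wins the CAS and becomes the initializer.
        if (state.compare_exchange_strong(expected, once_state::pending)) {
            init();
            state.store(once_state::executed, std::memory_order_release);
            break;
        }
        // Losers spin until the winner publishes `executed`.
        while (state.load(std::memory_order_acquire) == once_state::pending)
            std::this_thread::yield();
    }
}

int main() {
    std::atomic<once_state> state{once_state::uninitialized};
    std::vector<std::thread> threads;
    for (int i = 0; i < 8; ++i)
        threads.emplace_back([&] {
            do_once([] { std::puts("initialized exactly once"); }, state);
        });
    for (auto& t : threads) t.join();
}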
+*/
+
+#ifndef __TBB_detail__address_waiters_H
+#define __TBB_detail__address_waiters_H
+
+#include "_utils.h"
+
+namespace tbb {
+namespace detail {
+
+namespace r1 {
+TBB_EXPORT void __TBB_EXPORTED_FUNC wait_on_address(void* address, d1::delegate_base& wakeup_condition, std::uintptr_t context);
+TBB_EXPORT void __TBB_EXPORTED_FUNC notify_by_address(void* address, std::uintptr_t context);
+TBB_EXPORT void __TBB_EXPORTED_FUNC notify_by_address_one(void* address);
+TBB_EXPORT void __TBB_EXPORTED_FUNC notify_by_address_all(void* address);
+} // namespace r1
+
+namespace d1 {
+
+template <typename Predicate>
+void adaptive_wait_on_address(void* address, Predicate wakeup_condition, std::uintptr_t context) {
+    if (!timed_spin_wait_until(wakeup_condition)) {
+        d1::delegated_function<Predicate> pred(wakeup_condition);
+        r1::wait_on_address(address, pred, context);
+    }
+}
+
+template <typename T>
+class waitable_atomic {
+public:
+    waitable_atomic() = default;
+
+    explicit waitable_atomic(T value) : my_atomic(value) {}
+
+    waitable_atomic(const waitable_atomic&) = delete;
+    waitable_atomic& operator=(const waitable_atomic&) = delete;
+
+    T load(std::memory_order order) const noexcept {
+        return my_atomic.load(order);
+    }
+
+    T exchange(T desired) noexcept {
+        return my_atomic.exchange(desired);
+    }
+
+    void wait(T old, std::uintptr_t context, std::memory_order order) {
+        auto wakeup_condition = [&] { return my_atomic.load(order) != old; };
+        if (!timed_spin_wait_until(wakeup_condition)) {
+            // A loop is needed here because notify_all() wakes up all threads,
+            // but the predicate for some of them might still be false.
+            d1::delegated_function<decltype(wakeup_condition)> pred(wakeup_condition);
+            do {
+                r1::wait_on_address(this, pred, context);
+            } while (!wakeup_condition());
+        }
+    }
+
+    void notify_one_relaxed() {
+        r1::notify_by_address_one(this);
+    }
+
+    // TODO: consider adding the following interfaces:
+    // store(desired, memory_order)
+    // notify_all_relaxed()
+    // wait_until(T, std::uintptr_t, std::memory_order)
+    // notify_relaxed(std::uintptr_t context)
+
+private:
+    std::atomic<T> my_atomic{};
+};
+
+} // namespace d1
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_detail__address_waiters_H
diff --git a/src/tbb/include/oneapi/tbb/enumerable_thread_specific.h b/src/tbb/include/oneapi/tbb/enumerable_thread_specific.h
new file mode 100644
index 000000000..caa53fa0d
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/enumerable_thread_specific.h
@@ -0,0 +1,1121 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
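adaptive_wait_on_address() above is the two-phase waiting pattern: spin briefly in user space, and only fall back to a real blocking wait (parking the thread in the TBB runtime) if the spin did not pay off. With C++20 atomics the blocking fallback can be expressed directly; a standalone sketch, with std::atomic::wait standing in for r1::wait_on_address (requires C++20):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// Phase 1: bounded spin; phase 2: block via atomic::wait (C++20).
void adaptive_wait(std::atomic<int>& flag, int old) {
    for (int i = 0; i < 64; ++i) {                  // ~timed_spin_wait_until
        if (flag.load(std::memory_order_acquire) != old) return;
        std::this_thread::yield();
    }
    while (flag.load(std::memory_order_acquire) == old)
        flag.wait(old, std::memory_order_acquire);  // ~r1::wait_on_address
}

int main() {
    std::atomic<int> flag{0};
    std::thread waiter([&] { adaptive_wait(flag, 0); std::puts("released"); });
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    flag.store(1, std::memory_order_release);
    flag.notify_one();                              // ~r1::notify_by_address_one
    waiter.join();
}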
+*/ + +#ifndef __TBB_enumerable_thread_specific_H +#define __TBB_enumerable_thread_specific_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_assert.h" +#include "detail/_template_helpers.h" +#include "detail/_aligned_space.h" + +#include "concurrent_vector.h" +#include "tbb_allocator.h" +#include "cache_aligned_allocator.h" +#include "profiling.h" + +#include +#include +#include // memcpy +#include // std::ptrdiff_t + +#include "task.h" // for task::suspend_point + +#if _WIN32 || _WIN64 +#ifndef NOMINMAX +#define NOMINMAX +#define __TBB_DEFINED_NOMINMAX 1 +#endif +#include +#if __TBB_DEFINED_NOMINMAX +#undef NOMINMAX +#undef __TBB_DEFINED_NOMINMAX +#endif +#else +#include +#endif + +namespace tbb { +namespace detail { +namespace d1 { + +//! enum for selecting between single key and key-per-instance versions +enum ets_key_usage_type { + ets_key_per_instance + , ets_no_key +#if __TBB_RESUMABLE_TASKS + , ets_suspend_aware +#endif +}; + +// Forward declaration to use in internal classes +template +class enumerable_thread_specific; + +template +struct internal_ets_key_selector { + using key_type = std::thread::id; + static key_type current_key() { + return std::this_thread::get_id(); + } +}; + +// Intel Compiler on OSX cannot create atomics objects that instantiated from non-fundamental types +#if __INTEL_COMPILER && __APPLE__ +template<> +struct internal_ets_key_selector { + using key_type = std::size_t; + static key_type current_key() { + auto id = std::this_thread::get_id(); + return reinterpret_cast(id); + } +}; +#endif + +template +struct ets_key_selector : internal_ets_key_selector {}; + +#if __TBB_RESUMABLE_TASKS +template <> +struct ets_key_selector { + using key_type = suspend_point; + static key_type current_key() { + return r1::current_suspend_point(); + } +}; +#endif + +template +class ets_base : detail::no_copy { +protected: + using key_type = typename ets_key_selector::key_type; + +public: + struct slot; + struct array { + array* next; + std::size_t lg_size; + slot& at( std::size_t k ) { + return (reinterpret_cast(reinterpret_cast(this+1)))[k]; + } + std::size_t size() const { return std::size_t(1) << lg_size; } + std::size_t mask() const { return size() - 1; } + std::size_t start( std::size_t h ) const { + return h >> (8 * sizeof(std::size_t) - lg_size); + } + }; + struct slot { + std::atomic key; + void* ptr; + bool empty() const { return key.load(std::memory_order_relaxed) == key_type(); } + bool match( key_type k ) const { return key.load(std::memory_order_relaxed) == k; } + bool claim( key_type k ) { + // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size + key_type expected = key_type(); + return key.compare_exchange_strong(expected, k); + } + }; + +protected: + //! Root of linked list of arrays of decreasing size. + /** nullptr if and only if my_count==0. + Each array in the list is half the size of its predecessor. 
*/ + std::atomic my_root; + std::atomic my_count; + + virtual void* create_local() = 0; + virtual void* create_array(std::size_t _size) = 0; // _size in bytes + virtual void free_array(void* ptr, std::size_t _size) = 0; // _size in bytes + + array* allocate( std::size_t lg_size ) { + std::size_t n = std::size_t(1) << lg_size; + array* a = static_cast(create_array(sizeof(array) + n * sizeof(slot))); + a->lg_size = lg_size; + std::memset( a + 1, 0, n * sizeof(slot) ); + return a; + } + void deallocate(array* a) { + std::size_t n = std::size_t(1) << (a->lg_size); + free_array( static_cast(a), std::size_t(sizeof(array) + n * sizeof(slot)) ); + } + + ets_base() : my_root{nullptr}, my_count{0} {} + virtual ~ets_base(); // g++ complains if this is not virtual + + void* table_lookup( bool& exists ); + void table_clear(); + // The following functions are not used in concurrent context, + // so we don't need synchronization and ITT annotations there. + template + void table_elementwise_copy( const ets_base& other, + void*(*add_element)(ets_base&, void*) ) { + __TBB_ASSERT(!my_root.load(std::memory_order_relaxed), nullptr); + __TBB_ASSERT(!my_count.load(std::memory_order_relaxed), nullptr); + if( !other.my_root.load(std::memory_order_relaxed) ) return; + array* root = allocate(other.my_root.load(std::memory_order_relaxed)->lg_size); + my_root.store(root, std::memory_order_relaxed); + root->next = nullptr; + my_count.store(other.my_count.load(std::memory_order_relaxed), std::memory_order_relaxed); + std::size_t mask = root->mask(); + for( array* r = other.my_root.load(std::memory_order_relaxed); r; r = r->next ) { + for( std::size_t i = 0; i < r->size(); ++i ) { + slot& s1 = r->at(i); + if( !s1.empty() ) { + for( std::size_t j = root->start(std::hash{}(s1.key.load(std::memory_order_relaxed))); ; j = (j+1)&mask ) { + slot& s2 = root->at(j); + if( s2.empty() ) { + s2.ptr = add_element(static_cast&>(*this), s1.ptr); + s2.key.store(s1.key.load(std::memory_order_relaxed), std::memory_order_relaxed); + break; + } + else if( s2.match(s1.key.load(std::memory_order_relaxed)) ) + break; + } + } + } + } + } + void table_swap( ets_base& other ) { + __TBB_ASSERT(this!=&other, "Don't swap an instance with itself"); + swap_atomics_relaxed(my_root, other.my_root); + swap_atomics_relaxed(my_count, other.my_count); + } +}; + +template +ets_base::~ets_base() { + __TBB_ASSERT(!my_root.load(std::memory_order_relaxed), nullptr); +} + +template +void ets_base::table_clear() { + while ( array* r = my_root.load(std::memory_order_relaxed) ) { + my_root.store(r->next, std::memory_order_relaxed); + deallocate(r); + } + my_count.store(0, std::memory_order_relaxed); +} + +template +void* ets_base::table_lookup( bool& exists ) { + const key_type k = ets_key_selector::current_key(); + + __TBB_ASSERT(k != key_type(), nullptr); + void* found; + std::size_t h = std::hash{}(k); + for( array* r = my_root.load(std::memory_order_acquire); r; r = r->next ) { + call_itt_notify(acquired,r); + std::size_t mask=r->mask(); + for(std::size_t i = r->start(h); ;i=(i+1)&mask) { + slot& s = r->at(i); + if( s.empty() ) break; + if( s.match(k) ) { + if( r == my_root.load(std::memory_order_acquire) ) { + // Success at top level + exists = true; + return s.ptr; + } else { + // Success at some other level. Need to insert at top level. + exists = true; + found = s.ptr; + goto insert; + } + } + } + } + // Key does not yet exist. 
The density of slots in the table does not exceed 0.5, + // for if this will occur a new table is allocated with double the current table + // size, which is swapped in as the new root table. So an empty slot is guaranteed. + exists = false; + found = create_local(); + { + std::size_t c = ++my_count; + array* r = my_root.load(std::memory_order_acquire); + call_itt_notify(acquired,r); + if( !r || c > r->size()/2 ) { + std::size_t s = r ? r->lg_size : 2; + while( c > std::size_t(1)<<(s-1) ) ++s; + array* a = allocate(s); + for(;;) { + a->next = r; + call_itt_notify(releasing,a); + array* new_r = r; + if( my_root.compare_exchange_strong(new_r, a) ) break; + call_itt_notify(acquired, new_r); + __TBB_ASSERT(new_r != nullptr, nullptr); + if( new_r->lg_size >= s ) { + // Another thread inserted an equal or bigger array, so our array is superfluous. + deallocate(a); + break; + } + r = new_r; + } + } + } + insert: + // Whether a slot has been found in an older table, or if it has been inserted at this level, + // it has already been accounted for in the total. Guaranteed to be room for it, and it is + // not present, so search for empty slot and use it. + array* ir = my_root.load(std::memory_order_acquire); + call_itt_notify(acquired, ir); + std::size_t mask = ir->mask(); + for(std::size_t i = ir->start(h);; i = (i+1)&mask) { + slot& s = ir->at(i); + if( s.empty() ) { + if( s.claim(k) ) { + s.ptr = found; + return found; + } + } + } +} + +//! Specialization that exploits native TLS +template <> +class ets_base: public ets_base { + using super = ets_base; +#if _WIN32||_WIN64 +#if __TBB_WIN8UI_SUPPORT + using tls_key_t = DWORD; + void create_key() { my_key = FlsAlloc(nullptr); } + void destroy_key() { FlsFree(my_key); } + void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); } + void* get_tls() { return (void *)FlsGetValue(my_key); } +#else + using tls_key_t = DWORD; + void create_key() { my_key = TlsAlloc(); } + void destroy_key() { TlsFree(my_key); } + void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); } + void* get_tls() { return (void *)TlsGetValue(my_key); } +#endif +#else + using tls_key_t = pthread_key_t; + void create_key() { pthread_key_create(&my_key, nullptr); } + void destroy_key() { pthread_key_delete(my_key); } + void set_tls( void * value ) const { pthread_setspecific(my_key, value); } + void* get_tls() const { return pthread_getspecific(my_key); } +#endif + tls_key_t my_key; + virtual void* create_local() override = 0; + virtual void* create_array(std::size_t _size) override = 0; // _size in bytes + virtual void free_array(void* ptr, std::size_t _size) override = 0; // size in bytes +protected: + ets_base() {create_key();} + ~ets_base() {destroy_key();} + void* table_lookup( bool& exists ) { + void* found = get_tls(); + if( found ) { + exists=true; + } else { + found = super::table_lookup(exists); + set_tls(found); + } + return found; + } + void table_clear() { + destroy_key(); + create_key(); + super::table_clear(); + } + void table_swap( ets_base& other ) { + using std::swap; + __TBB_ASSERT(this!=&other, "Don't swap an instance with itself"); + swap(my_key, other.my_key); + super::table_swap(other); + } +}; + +//! Random access iterator for traversing the thread local copies. +template< typename Container, typename Value > +class enumerable_thread_specific_iterator +{ + //! 
current position in the concurrent_vector + + Container *my_container; + typename Container::size_type my_index; + mutable Value *my_value; + + template + friend bool operator==( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ); + + template + friend bool operator<( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ); + + template + friend std::ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ); + + template + friend class enumerable_thread_specific_iterator; + +public: + //! STL support + using difference_type = std::ptrdiff_t; + using value_type = Value; + using pointer = Value*; + using reference = Value&; + using iterator_category = std::random_access_iterator_tag; + + enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : + my_container(&const_cast(container)), my_index(index), my_value(nullptr) {} + + //! Default constructor + enumerable_thread_specific_iterator() : my_container(nullptr), my_index(0), my_value(nullptr) {} + + template + enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator& other ) : + my_container( other.my_container ), my_index( other.my_index), my_value( const_cast(other.my_value) ) {} + + enumerable_thread_specific_iterator operator+( std::ptrdiff_t offset ) const { + return enumerable_thread_specific_iterator(*my_container, my_index + offset); + } + + friend enumerable_thread_specific_iterator operator+( std::ptrdiff_t offset, enumerable_thread_specific_iterator v ) { + return enumerable_thread_specific_iterator(*v.my_container, v.my_index + offset); + } + + enumerable_thread_specific_iterator &operator+=( std::ptrdiff_t offset ) { + my_index += offset; + my_value = nullptr; + return *this; + } + + enumerable_thread_specific_iterator operator-( std::ptrdiff_t offset ) const { + return enumerable_thread_specific_iterator( *my_container, my_index-offset ); + } + + enumerable_thread_specific_iterator &operator-=( std::ptrdiff_t offset ) { + my_index -= offset; + my_value = nullptr; + return *this; + } + + Value& operator*() const { + Value* value = my_value; + if( !value ) { + value = my_value = (*my_container)[my_index].value(); + } + __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" ); + return *value; + } + + Value& operator[]( std::ptrdiff_t k ) const { + return *(*my_container)[my_index + k].value(); + } + + Value* operator->() const {return &operator*();} + + enumerable_thread_specific_iterator& operator++() { + ++my_index; + my_value = nullptr; + return *this; + } + + enumerable_thread_specific_iterator& operator--() { + --my_index; + my_value = nullptr; + return *this; + } + + //! Post increment + enumerable_thread_specific_iterator operator++(int) { + enumerable_thread_specific_iterator result = *this; + ++my_index; + my_value = nullptr; + return result; + } + + //! 
Post decrement + enumerable_thread_specific_iterator operator--(int) { + enumerable_thread_specific_iterator result = *this; + --my_index; + my_value = nullptr; + return result; + } +}; + +template +bool operator==( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return i.my_index == j.my_index && i.my_container == j.my_container; +} + +template +bool operator!=( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return !(i==j); +} + +template +bool operator<( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return i.my_index +bool operator>( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return j +bool operator>=( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return !(i +bool operator<=( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return !(j +std::ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return i.my_index-j.my_index; +} + +template +class segmented_iterator +{ + template + friend bool operator==(const segmented_iterator& i, const segmented_iterator& j); + + template + friend bool operator!=(const segmented_iterator& i, const segmented_iterator& j); + + template + friend class segmented_iterator; + +public: + segmented_iterator() {my_segcont = nullptr;} + + segmented_iterator( const SegmentedContainer& _segmented_container ) : + my_segcont(const_cast(&_segmented_container)), + outer_iter(my_segcont->end()) { } + + ~segmented_iterator() {} + + using InnerContainer = typename SegmentedContainer::value_type; + using inner_iterator = typename InnerContainer::iterator; + using outer_iterator = typename SegmentedContainer::iterator; + + // STL support + // TODO: inherit all types from segmented container? + using difference_type = std::ptrdiff_t; + using value_type = Value; + using size_type = typename SegmentedContainer::size_type; + using pointer = Value*; + using reference = Value&; + using iterator_category = std::input_iterator_tag; + + // Copy Constructor + template + segmented_iterator(const segmented_iterator& other) : + my_segcont(other.my_segcont), + outer_iter(other.outer_iter), + // can we assign a default-constructed iterator to inner if we're at the end? + inner_iter(other.inner_iter) + {} + + // assignment + template + segmented_iterator& operator=( const segmented_iterator& other) { + my_segcont = other.my_segcont; + outer_iter = other.outer_iter; + if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter; + return *this; + } + + // allow assignment of outer iterator to segmented iterator. Once it is + // assigned, move forward until a non-empty inner container is found or + // the end of the outer container is reached. 
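All of this machinery backs the public tbb::enumerable_thread_specific container. For orientation, typical user-level code looks as follows; this uses only the public API (and assumes linking against the bundled TBB), with ets_base::table_lookup running under each local() call and the iterators defined above driving combine():

#include <cstdio>
#include <oneapi/tbb/enumerable_thread_specific.h>
#include <oneapi/tbb/parallel_for.h>

int main() {
    // One lazily-created int per thread, initialized from the exemplar 0.
    tbb::enumerable_thread_specific<int> partial_sums(0);

    tbb::parallel_for(0, 10000, [&](int i) {
        partial_sums.local() += i; // no locking: each thread hits its own slot
    });

    // combine() walks every thread's copy and folds them together.
    int total = partial_sums.combine([](int a, int b) { return a + b; });
    std::printf("total = %d\n", total); // 49995000
}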
+ segmented_iterator& operator=(const outer_iterator& new_outer_iter) { + __TBB_ASSERT(my_segcont != nullptr, nullptr); + // check that this iterator points to something inside the segmented container + for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) { + if( !outer_iter->empty() ) { + inner_iter = outer_iter->begin(); + break; + } + } + return *this; + } + + // pre-increment + segmented_iterator& operator++() { + advance_me(); + return *this; + } + + // post-increment + segmented_iterator operator++(int) { + segmented_iterator tmp = *this; + operator++(); + return tmp; + } + + bool operator==(const outer_iterator& other_outer) const { + __TBB_ASSERT(my_segcont != nullptr, nullptr); + return (outer_iter == other_outer && + (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin())); + } + + bool operator!=(const outer_iterator& other_outer) const { + return !operator==(other_outer); + + } + + // (i)* RHS + reference operator*() const { + __TBB_ASSERT(my_segcont != nullptr, nullptr); + __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container"); + __TBB_ASSERT(inner_iter != outer_iter->end(), nullptr); // should never happen + return *inner_iter; + } + + // i-> + pointer operator->() const { return &operator*();} + +private: + SegmentedContainer* my_segcont; + outer_iterator outer_iter; + inner_iterator inner_iter; + + void advance_me() { + __TBB_ASSERT(my_segcont != nullptr, nullptr); + __TBB_ASSERT(outer_iter != my_segcont->end(), nullptr); // not true if there are no inner containers + __TBB_ASSERT(inner_iter != outer_iter->end(), nullptr); // not true if the inner containers are all empty. + ++inner_iter; + while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) { + inner_iter = outer_iter->begin(); + } + } +}; // segmented_iterator + +template +bool operator==( const segmented_iterator& i, + const segmented_iterator& j ) { + if(i.my_segcont != j.my_segcont) return false; + if(i.my_segcont == nullptr) return true; + if(i.outer_iter != j.outer_iter) return false; + if(i.outer_iter == i.my_segcont->end()) return true; + return i.inner_iter == j.inner_iter; +} + +// != +template +bool operator!=( const segmented_iterator& i, + const segmented_iterator& j ) { + return !(i==j); +} + +template +struct construct_by_default: no_assign { + void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization. + construct_by_default( int ) {} +}; + +template +struct construct_by_exemplar: no_assign { + const T exemplar; + void construct(void*where) {new(where) T(exemplar);} + construct_by_exemplar( const T& t ) : exemplar(t) {} + construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {} +}; + +template +struct construct_by_finit: no_assign { + Finit f; + void construct(void* where) {new(where) T(f());} + construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {} +}; + +template +struct construct_by_args: no_assign { + stored_pack pack; + void construct(void* where) { + call( [where](const typename std::decay

<P>::type&... args ){ + new(where) T(args...); + }, pack ); + } + construct_by_args( P&& ... args ) : pack(std::forward<P>
(args)...) {} +}; + +// storage for initialization function pointer +// TODO: consider removing the template parameter T here and in callback_leaf +class callback_base { +public: + // Clone *this + virtual callback_base* clone() const = 0; + // Destruct and free *this + virtual void destroy() = 0; + // Need virtual destructor to satisfy GCC compiler warning + virtual ~callback_base() { } + // Construct T at where + virtual void construct(void* where) = 0; +}; + +template +class callback_leaf: public callback_base, Constructor { + template callback_leaf( P&& ... params ) : Constructor(std::forward

<P>(params)...) {} + // TODO: make the construction/destruction consistent (use allocator.construct/destroy) + using my_allocator_type = typename tbb::tbb_allocator<callback_leaf>; + + callback_base<T>* clone() const override { + return make(*this); + } + + void destroy() override { + my_allocator_type alloc; + tbb::detail::allocator_traits<my_allocator_type>::destroy(alloc, this); + tbb::detail::allocator_traits<my_allocator_type>::deallocate(alloc, this, 1); + } + + void construct(void* where) override { + Constructor::construct(where); + } + +public: + template<typename... P> + static callback_base<T>* make( P&& ... params ) { + void* where = my_allocator_type().allocate(1); + return new(where) callback_leaf( std::forward<P>
(params)... ); + } +}; + +//! Template for recording construction of objects in table +/** All maintenance of the space will be done explicitly on push_back, + and all thread local copies must be destroyed before the concurrent + vector is deleted. + + The flag is_built is initialized to false. When the local is + successfully-constructed, set the flag to true or call value_committed(). + If the constructor throws, the flag will be false. +*/ +template +struct ets_element { + detail::aligned_space my_space; + bool is_built; + ets_element() { is_built = false; } // not currently-built + U* value() { return my_space.begin(); } + U* value_committed() { is_built = true; return my_space.begin(); } + ~ets_element() { + if(is_built) { + my_space.begin()->~U(); + is_built = false; + } + } +}; + +// A predicate that can be used for a compile-time compatibility check of ETS instances +// Ideally, it should have been declared inside the ETS class, but unfortunately +// in that case VS2013 does not enable the variadic constructor. +template struct is_compatible_ets : std::false_type {}; +template +struct is_compatible_ets< T, enumerable_thread_specific > : std::is_same {}; + +// A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression +template using has_empty_braces_operator = decltype(std::declval()()); +template using is_callable_no_args = supports; + +//! The enumerable_thread_specific container +/** enumerable_thread_specific has the following properties: + - thread-local copies are lazily created, with default, exemplar or function initialization. + - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant. + - the contained objects need not have operator=() defined if combine is not used. + - enumerable_thread_specific containers may be copy-constructed or assigned. + - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed. + - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods + +@par Segmented iterator + When the thread-local objects are containers with input_iterators defined, a segmented iterator may + be used to iterate over all the elements of all thread-local copies. + +@par combine and combine_each + - Both methods are defined for enumerable_thread_specific. + - combine() requires the type T have operator=() defined. + - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.) + - Both are evaluated in serial context (the methods are assumed to be non-benign.) + +@ingroup containers */ +template , + ets_key_usage_type ETS_key_type=ets_no_key > +class enumerable_thread_specific: ets_base { + + template friend class enumerable_thread_specific; + + using padded_element = padded>; + + //! 
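+    // Editorial sketch (not upstream TBB code): the canonical use of this class
+    // is one lazily-created accumulator per thread, merged after the parallel
+    // phase. Names such as 'partial_sums', 'data', and 'n' are hypothetical,
+    // and parallel_for.h is assumed to be included as well.
+    //
+    //     tbb::enumerable_thread_specific<int> partial_sums(0);
+    //     tbb::parallel_for(0, n, [&](int i) { partial_sums.local() += data[i]; });
+    //     int total = partial_sums.combine(std::plus<int>());
+
+    //!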
A generic range, used to create range objects from the iterators + template + class generic_range_type: public blocked_range { + public: + using value_type = T; + using reference = T&; + using const_reference = const T&; + using iterator = I; + using difference_type = std::ptrdiff_t; + + generic_range_type( I begin_, I end_, std::size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} + template + generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} + generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} + }; + + using allocator_traits_type = tbb::detail::allocator_traits; + + using padded_allocator_type = typename allocator_traits_type::template rebind_alloc; + using internal_collection_type = tbb::concurrent_vector< padded_element, padded_allocator_type >; + + callback_base *my_construct_callback; + + internal_collection_type my_locals; + + // TODO: consider unifying the callback mechanism for all create_local* methods below + // (likely non-compatible and requires interface version increase) + void* create_local() override { + padded_element& lref = *my_locals.grow_by(1); + my_construct_callback->construct(lref.value()); + return lref.value_committed(); + } + + static void* create_local_by_copy( ets_base& base, void* p ) { + enumerable_thread_specific& ets = static_cast(base); + padded_element& lref = *ets.my_locals.grow_by(1); + new(lref.value()) T(*static_cast(p)); + return lref.value_committed(); + } + + static void* create_local_by_move( ets_base& base, void* p ) { + enumerable_thread_specific& ets = static_cast(base); + padded_element& lref = *ets.my_locals.grow_by(1); + new(lref.value()) T(std::move(*static_cast(p))); + return lref.value_committed(); + } + + using array_allocator_type = typename allocator_traits_type::template rebind_alloc; + + // _size is in bytes + void* create_array(std::size_t _size) override { + std::size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); + return array_allocator_type().allocate(nelements); + } + + void free_array( void* _ptr, std::size_t _size) override { + std::size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); + array_allocator_type().deallocate( reinterpret_cast(_ptr),nelements); + } + +public: + + //! Basic types + using value_type = T; + using allocator_type = Allocator; + using size_type = typename internal_collection_type::size_type; + using difference_type = typename internal_collection_type::difference_type; + using reference = value_type&; + using const_reference = const value_type&; + + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + // Iterator types + using iterator = enumerable_thread_specific_iterator; + using const_iterator = enumerable_thread_specific_iterator; + + // Parallel range types + using range_type = generic_range_type; + using const_range_type = generic_range_type; + + //! Default constructor. Each local instance of T is default constructed. + enumerable_thread_specific() : my_construct_callback( + callback_leaf >::make(/*dummy argument*/0) + ){} + + //! Constructor with initializer functor. Each local instance of T is constructed by T(finit()). + template ::type>::value>::type> + explicit enumerable_thread_specific( Finit finit ) : my_construct_callback( + callback_leaf >::make( std::move(finit) ) + ){} + + //! Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar. 
+    explicit enumerable_thread_specific( const T& exemplar ) : my_construct_callback(
+        callback_leaf<T, construct_by_exemplar<T> >::make( exemplar )
+    ){}
+
+    explicit enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
+        callback_leaf<T, construct_by_exemplar<T> >::make( std::move(exemplar) )
+    ){}
+
+    //! Variadic constructor with initializer arguments. Each local instance of T is constructed by T(args...)
+    template <typename P1, typename... P,
+              typename = typename std::enable_if<!is_callable_no_args<typename std::decay<P1>::type>::value
+                                                 && !is_compatible_ets<T, typename std::decay<P1>::type>::value
+                                                 && !std::is_same<T, typename std::decay<P1>::type>::value
+                                                 >::type>
+    enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
+        callback_leaf<T, construct_by_args<T, P1, P...> >::make( std::forward<P1>(arg1), std::forward<P>
(args)... ) + ){} + + //! Destructor + ~enumerable_thread_specific() { + if(my_construct_callback) my_construct_callback->destroy(); + // Deallocate the hash table before overridden free_array() becomes inaccessible + this->ets_base::table_clear(); + } + + //! returns reference to local, discarding exists + reference local() { + bool exists; + return local(exists); + } + + //! Returns reference to calling thread's local copy, creating one if necessary + reference local(bool& exists) { + void* ptr = this->table_lookup(exists); + return *(T*)ptr; + } + + //! Get the number of local copies + size_type size() const { return my_locals.size(); } + + //! true if there have been no local copies created + bool empty() const { return my_locals.empty(); } + + //! begin iterator + iterator begin() { return iterator( my_locals, 0 ); } + //! end iterator + iterator end() { return iterator(my_locals, my_locals.size() ); } + + //! begin const iterator + const_iterator begin() const { return const_iterator(my_locals, 0); } + + //! end const iterator + const_iterator end() const { return const_iterator(my_locals, my_locals.size()); } + + //! Get range for parallel algorithms + range_type range( std::size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } + + //! Get const range for parallel algorithms + const_range_type range( std::size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); } + + //! Destroys local copies + void clear() { + my_locals.clear(); + this->table_clear(); + // callback is not destroyed + } + +private: + template + void internal_copy(const enumerable_thread_specific& other) { + // this tests is_compatible_ets + static_assert( (is_compatible_ets::type>::value), "is_compatible_ets fails" ); + // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception. + my_construct_callback = other.my_construct_callback->clone(); + __TBB_ASSERT(my_locals.size()==0, nullptr); + my_locals.reserve(other.size()); + this->table_elementwise_copy( other, create_local_by_copy ); + } + + void internal_swap(enumerable_thread_specific& other) { + using std::swap; + __TBB_ASSERT( this!=&other, nullptr); + swap(my_construct_callback, other.my_construct_callback); + // concurrent_vector::swap() preserves storage space, + // so addresses to the vector kept in ETS hash table remain valid. 
+ swap(my_locals, other.my_locals); + this->ets_base::table_swap(other); + } + + template + void internal_move(enumerable_thread_specific&& other) { + static_assert( (is_compatible_ets::type>::value), "is_compatible_ets fails" ); + my_construct_callback = other.my_construct_callback; + other.my_construct_callback = nullptr; + __TBB_ASSERT(my_locals.size()==0, nullptr); + my_locals.reserve(other.size()); + this->table_elementwise_copy( other, create_local_by_move ); + } + +public: + enumerable_thread_specific( const enumerable_thread_specific& other ) + : ets_base() /* prevents GCC warnings with -Wextra */ + { + internal_copy(other); + } + + template + enumerable_thread_specific( const enumerable_thread_specific& other ) + { + internal_copy(other); + } + + enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback() + { + // TODO: use internal_move correctly here + internal_swap(other); + } + + template + enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback() + { + internal_move(std::move(other)); + } + + enumerable_thread_specific& operator=( const enumerable_thread_specific& other ) + { + if( this != &other ) { + this->clear(); + my_construct_callback->destroy(); + internal_copy( other ); + } + return *this; + } + + template + enumerable_thread_specific& operator=( const enumerable_thread_specific& other ) + { + __TBB_ASSERT( static_cast(this)!=static_cast(&other), nullptr); // Objects of different types + this->clear(); + my_construct_callback->destroy(); + internal_copy(other); + return *this; + } + + enumerable_thread_specific& operator=( enumerable_thread_specific&& other ) + { + if( this != &other ) { + // TODO: use internal_move correctly here + internal_swap(other); + } + return *this; + } + + template + enumerable_thread_specific& operator=( enumerable_thread_specific&& other ) + { + __TBB_ASSERT( static_cast(this)!=static_cast(&other), nullptr); // Objects of different types + this->clear(); + my_construct_callback->destroy(); + internal_move(std::move(other)); + return *this; + } + + // CombineFunc has signature T(T,T) or T(const T&, const T&) + template + T combine(CombineFunc f_combine) { + if(begin() == end()) { + ets_element location; + my_construct_callback->construct(location.value()); + return *location.value_committed(); + } + const_iterator ci = begin(); + T my_result = *ci; + while(++ci != end()) + my_result = f_combine( my_result, *ci ); + return my_result; + } + + // combine_func_t takes T by value or by [const] reference, and returns nothing + template + void combine_each(CombineFunc f_combine) { + for(iterator ci = begin(); ci != end(); ++ci) { + f_combine( *ci ); + } + } + +}; // enumerable_thread_specific + +template< typename Container > +class flattened2d { + // This intermediate typedef is to address issues with VC7.1 compilers + using conval_type = typename Container::value_type; + +public: + //! 
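+    // Editorial sketch (not upstream TBB code): flatten2d() presents a container
+    // of containers as a single sequence. Assuming 'ets' is a populated
+    // enumerable_thread_specific< std::vector<int> > and <numeric> is included:
+    //
+    //     auto flat = tbb::flatten2d(ets);
+    //     int total = std::accumulate(flat.begin(), flat.end(), 0);
+
+    //!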
Basic types + using size_type = typename conval_type::size_type; + using difference_type = typename conval_type::difference_type; + using allocator_type = typename conval_type::allocator_type; + using value_type = typename conval_type::value_type; + using reference = typename conval_type::reference; + using const_reference = typename conval_type::const_reference; + using pointer = typename conval_type::pointer; + using const_pointer = typename conval_type::const_pointer; + + using iterator = segmented_iterator; + using const_iterator = segmented_iterator; + + flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : + my_container(const_cast(&c)), my_begin(b), my_end(e) { } + + explicit flattened2d( const Container &c ) : + my_container(const_cast(&c)), my_begin(c.begin()), my_end(c.end()) { } + + iterator begin() { return iterator(*my_container) = my_begin; } + iterator end() { return iterator(*my_container) = my_end; } + const_iterator begin() const { return const_iterator(*my_container) = my_begin; } + const_iterator end() const { return const_iterator(*my_container) = my_end; } + + size_type size() const { + size_type tot_size = 0; + for(typename Container::const_iterator i = my_begin; i != my_end; ++i) { + tot_size += i->size(); + } + return tot_size; + } + +private: + Container *my_container; + typename Container::const_iterator my_begin; + typename Container::const_iterator my_end; +}; + +template +flattened2d flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) { + return flattened2d(c, b, e); +} + +template +flattened2d flatten2d(const Container &c) { + return flattened2d(c); +} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::enumerable_thread_specific; +using detail::d1::flattened2d; +using detail::d1::flatten2d; +// ets enum keys +using detail::d1::ets_key_usage_type; +using detail::d1::ets_key_per_instance; +using detail::d1::ets_no_key; +#if __TBB_RESUMABLE_TASKS +using detail::d1::ets_suspend_aware; +#endif +} // inline namespace v1 + +} // namespace tbb + +#endif // __TBB_enumerable_thread_specific_H + diff --git a/src/tbb/include/oneapi/tbb/flow_graph.h b/src/tbb/include/oneapi/tbb/flow_graph.h new file mode 100644 index 000000000..035fef066 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/flow_graph.h @@ -0,0 +1,3718 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_flow_graph_H +#define __TBB_flow_graph_H + +#include +#include +#include + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "spin_mutex.h" +#include "null_mutex.h" +#include "spin_rw_mutex.h" +#include "null_rw_mutex.h" +#include "detail/_pipeline_filters.h" +#include "detail/_task.h" +#include "detail/_small_object_pool.h" +#include "cache_aligned_allocator.h" +#include "detail/_exception.h" +#include "detail/_template_helpers.h" +#include "detail/_aggregator.h" +#include "detail/_allocator_traits.h" +#include "detail/_utils.h" +#include "profiling.h" +#include "task_arena.h" + +#if TBB_USE_PROFILING_TOOLS && ( __unix__ || __APPLE__ ) + #if __INTEL_COMPILER + // Disabled warning "routine is both inline and noinline" + // #pragma warning (push) + // #pragma warning( disable: 2196 ) + #endif + #define __TBB_NOINLINE_SYM __attribute__((noinline)) +#else + #define __TBB_NOINLINE_SYM +#endif + +#include +#include +#include +#include +#if __TBB_CPP20_CONCEPTS_PRESENT +#include +#endif + +/** @file + \brief The graph related classes and functions + + There are some applications that best express dependencies as messages + passed between nodes in a graph. These messages may contain data or + simply act as signals that a predecessors has completed. The graph + class and its associated node classes can be used to express such + applications. +*/ + +namespace tbb { +namespace detail { + +namespace d2 { + +//! An enumeration the provides the two most common concurrency levels: unlimited and serial +enum concurrency { unlimited = 0, serial = 1 }; + +//! A generic null type +struct null_type {}; + +//! An empty class used for messages that mean "I'm done" +class continue_msg {}; + +} // namespace d2 + +#if __TBB_CPP20_CONCEPTS_PRESENT +namespace d0 { + +template +concept node_body_return_type = std::same_as || + std::convertible_to; + +// TODO: consider using std::invocable here +template +concept continue_node_body = std::copy_constructible && + requires( Body& body, const tbb::detail::d2::continue_msg& v ) { + { body(v) } -> node_body_return_type; + }; + +template +concept function_node_body = std::copy_constructible && + std::invocable && + node_body_return_type, Output>; + +template +concept join_node_function_object = std::copy_constructible && + std::invocable && + std::convertible_to, Key>; + +template +concept input_node_body = std::copy_constructible && + requires( Body& body, tbb::detail::d1::flow_control& fc ) { + { body(fc) } -> adaptive_same_as; + }; + +template +concept multifunction_node_body = std::copy_constructible && + std::invocable; + +template +concept sequencer = std::copy_constructible && + std::invocable && + std::convertible_to, std::size_t>; + +template +concept async_node_body = std::copy_constructible && + std::invocable; + +} // namespace d0 +#endif // __TBB_CPP20_CONCEPTS_PRESENT + +namespace d2 { + +//! Forward declaration section +template< typename T > class sender; +template< typename T > class receiver; +class continue_receiver; + +template< typename T, typename U > class limiter_node; // needed for resetting decrementer + +template class successor_cache; +template class broadcast_cache; +template class round_robin_cache; +template class predecessor_cache; +template class reservable_predecessor_cache; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +namespace order { +struct following; +struct preceding; +} +template struct node_set; +#endif + + +} // namespace d2 +} // namespace detail +} // namespace tbb + +//! 
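+// Editorial sketch (not upstream TBB code): a minimal example of the
+// message-passing style described above, assuming <oneapi/tbb/flow_graph.h>
+// is included and the names below are hypothetical.
+//
+//     tbb::flow::graph g;
+//     tbb::flow::function_node<int, int> doubler(
+//         g, tbb::flow::unlimited, [](int v) { return 2 * v; });
+//     doubler.try_put(21);
+//     g.wait_for_all();
+
+//!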
The graph class +#include "detail/_flow_graph_impl.h" + +namespace tbb { +namespace detail { +namespace d2 { + +static inline std::pair order_tasks(graph_task* first, graph_task* second) { + if (second->priority > first->priority) + return std::make_pair(second, first); + return std::make_pair(first, second); +} + +// submit task if necessary. Returns the non-enqueued task if there is one. +static inline graph_task* combine_tasks(graph& g, graph_task* left, graph_task* right) { + // if no RHS task, don't change left. + if (right == nullptr) return left; + // right != nullptr + if (left == nullptr) return right; + if (left == SUCCESSFULLY_ENQUEUED) return right; + // left contains a task + if (right != SUCCESSFULLY_ENQUEUED) { + // both are valid tasks + auto tasks_pair = order_tasks(left, right); + spawn_in_graph_arena(g, *tasks_pair.first); + return tasks_pair.second; + } + return left; +} + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +class message_metainfo { +public: + using waiters_type = std::forward_list; + + message_metainfo() = default; + + message_metainfo(const waiters_type& waiters) : my_waiters(waiters) {} + message_metainfo(waiters_type&& waiters) : my_waiters(std::move(waiters)) {} + + const waiters_type& waiters() const & { return my_waiters; } + waiters_type&& waiters() && { return std::move(my_waiters); } + + bool empty() const { return my_waiters.empty(); } + + void merge(const message_metainfo& other) { + // TODO: should we avoid duplications on merging + my_waiters.insert_after(my_waiters.before_begin(), + other.waiters().begin(), + other.waiters().end()); + } +private: + waiters_type my_waiters; +}; // class message_metainfo + +#define __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo) , metainfo + +#else +#define __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo) +#endif // __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + +//! Pure virtual template class that defines a sender of messages of type T +template< typename T > +class sender { +public: + virtual ~sender() {} + + //! Request an item from the sender + virtual bool try_get( T & ) { return false; } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + virtual bool try_get( T &, message_metainfo& ) { return false; } +#endif + + //! Reserves an item in the sender + virtual bool try_reserve( T & ) { return false; } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + virtual bool try_reserve( T &, message_metainfo& ) { return false; } +#endif + + //! Releases the reserved item + virtual bool try_release( ) { return false; } + + //! Consumes the reserved item + virtual bool try_consume( ) { return false; } + +protected: + //! The output type of this sender + typedef T output_type; + + //! The successor type for this node + typedef receiver successor_type; + + //! Add a new successor to this node + virtual bool register_successor( successor_type &r ) = 0; + + //! Removes a successor from this node + virtual bool remove_successor( successor_type &r ) = 0; + + template + friend bool register_successor(sender& s, receiver& r); + + template + friend bool remove_successor (sender& s, receiver& r); +}; // class sender + +template +bool register_successor(sender& s, receiver& r) { + return s.register_successor(r); +} + +template +bool remove_successor(sender& s, receiver& r) { + return s.remove_successor(r); +} + +//! Pure virtual template class that defines a receiver of messages of type T +template< typename T > +class receiver { +private: + template + bool internal_try_put(const T& t, TryPutTaskArgs&&... 
args) { + graph_task* res = try_put_task(t, std::forward(args)...); + if (!res) return false; + if (res != SUCCESSFULLY_ENQUEUED) spawn_in_graph_arena(graph_reference(), *res); + return true; + } + +public: + //! Destructor + virtual ~receiver() {} + + //! Put an item to the receiver + bool try_put( const T& t ) { + return internal_try_put(t); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + //! Put an item to the receiver and wait for completion + bool try_put_and_wait( const T& t ) { + // Since try_put_and_wait is a blocking call, it is safe to create wait_context on stack + d1::wait_context_vertex msg_wait_vertex{}; + + bool res = internal_try_put(t, message_metainfo{message_metainfo::waiters_type{&msg_wait_vertex}}); + if (res) { + __TBB_ASSERT(graph_reference().my_context != nullptr, "No wait_context associated with the Flow Graph"); + wait(msg_wait_vertex.get_context(), *graph_reference().my_context); + } + return res; + } +#endif + + //! put item to successor; return task to run the successor if possible. +protected: + //! The input type of this receiver + typedef T input_type; + + //! The predecessor type for this node + typedef sender predecessor_type; + + template< typename R, typename B > friend class run_and_put_task; + template< typename X, typename Y > friend class broadcast_cache; + template< typename X, typename Y > friend class round_robin_cache; + virtual graph_task *try_put_task(const T& t) = 0; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + virtual graph_task *try_put_task(const T& t, const message_metainfo&) = 0; +#endif + virtual graph& graph_reference() const = 0; + + template friend class successor_cache; + virtual bool is_continue_receiver() { return false; } + + // TODO revamp: reconsider the inheritance and move node priority out of receiver + virtual node_priority_t priority() const { return no_priority; } + + //! Add a predecessor to the node + virtual bool register_predecessor( predecessor_type & ) { return false; } + + //! Remove a predecessor from the node + virtual bool remove_predecessor( predecessor_type & ) { return false; } + + template + friend bool register_predecessor(receiver& r, sender& s); + template + friend bool remove_predecessor (receiver& r, sender& s); +}; // class receiver + +template +bool register_predecessor(receiver& r, sender& s) { + return r.register_predecessor(s); +} + +template +bool remove_predecessor(receiver& r, sender& s) { + return r.remove_predecessor(s); +} + +//! Base class for receivers of completion messages +/** These receivers automatically reset, but cannot be explicitly waited on */ +class continue_receiver : public receiver< continue_msg > { +protected: + + //! Constructor + explicit continue_receiver( int number_of_predecessors, node_priority_t a_priority ) { + my_predecessor_count = my_initial_predecessor_count = number_of_predecessors; + my_current_count = 0; + my_priority = a_priority; + } + + //! Copy constructor + continue_receiver( const continue_receiver& src ) : receiver() { + my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count; + my_current_count = 0; + my_priority = src.my_priority; + } + + //! Increments the trigger threshold + bool register_predecessor( predecessor_type & ) override { + spin_mutex::scoped_lock l(my_mutex); + ++my_predecessor_count; + return true; + } + + //! Decrements the trigger threshold + /** Does not check to see if the removal of the predecessor now makes the current count + exceed the new threshold. 
So removing a predecessor while the graph is active can cause + unexpected results. */ + bool remove_predecessor( predecessor_type & ) override { + spin_mutex::scoped_lock l(my_mutex); + --my_predecessor_count; + return true; + } + + //! The input type + typedef continue_msg input_type; + + //! The predecessor type for this node + typedef receiver::predecessor_type predecessor_type; + + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + +private: + // execute body is supposed to be too small to create a task for. + graph_task* try_put_task_impl( const input_type& __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo) ) { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo predecessor_metainfo; +#endif + { + spin_mutex::scoped_lock l(my_mutex); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + // Prolong the wait and store the metainfo until receiving signals from all the predecessors + for (auto waiter : metainfo.waiters()) { + waiter->reserve(1); + } + my_current_metainfo.merge(metainfo); +#endif + if ( ++my_current_count < my_predecessor_count ) + return SUCCESSFULLY_ENQUEUED; + else { + my_current_count = 0; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + predecessor_metainfo = my_current_metainfo; + my_current_metainfo = message_metainfo{}; +#endif + } + } +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* res = execute(predecessor_metainfo); + for (auto waiter : predecessor_metainfo.waiters()) { + waiter->release(1); + } +#else + graph_task* res = execute(); +#endif + return res? res : SUCCESSFULLY_ENQUEUED; + } + +protected: + graph_task* try_put_task( const input_type& input ) override { + return try_put_task_impl(input __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task( const input_type& input, const message_metainfo& metainfo ) override { + return try_put_task_impl(input, metainfo); + } +#endif + + spin_mutex my_mutex; + int my_predecessor_count; + int my_current_count; + int my_initial_predecessor_count; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo my_current_metainfo; +#endif + node_priority_t my_priority; + // the friend declaration in the base class did not eliminate the "protected class" + // error in gcc 4.1.2 + template friend class limiter_node; + + virtual void reset_receiver( reset_flags f ) { + my_current_count = 0; + if (f & rf_clear_edges) { + my_predecessor_count = my_initial_predecessor_count; + } + } + + //! Does whatever should happen when the threshold is reached + /** This should be very fast or else spawn a task. This is + called while the sender is blocked in the try_put(). 
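+        (Editorial note, not upstream text: within this header, continue_node
+        inherits its execute() override from continue_input, defined in
+        detail/_flow_graph_node_impl.h, which runs the user-supplied body and
+        forwards the result to successors.)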
*/ +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + virtual graph_task* execute(const message_metainfo& metainfo) = 0; +#else + virtual graph_task* execute() = 0; +#endif + template friend class successor_cache; + bool is_continue_receiver() override { return true; } + + node_priority_t priority() const override { return my_priority; } +}; // class continue_receiver + +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + template + K key_from_message( const T &t ) { + return t.key(); + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + +} // d1 +} // detail +} // tbb + +#include "detail/_flow_graph_trace_impl.h" +#include "detail/_hash_compare.h" + +namespace tbb { +namespace detail { +namespace d2 { + +#include "detail/_flow_graph_body_impl.h" +#include "detail/_flow_graph_cache_impl.h" +#include "detail/_flow_graph_types_impl.h" + +using namespace graph_policy_namespace; + +template +graph_iterator::graph_iterator(C *g, bool begin) : my_graph(g), current_node(nullptr) +{ + if (begin) current_node = my_graph->my_nodes; + //else it is an end iterator by default +} + +template +typename graph_iterator::reference graph_iterator::operator*() const { + __TBB_ASSERT(current_node, "graph_iterator at end"); + return *operator->(); +} + +template +typename graph_iterator::pointer graph_iterator::operator->() const { + return current_node; +} + +template +void graph_iterator::internal_forward() { + if (current_node) current_node = current_node->next; +} + +//! Constructs a graph with isolated task_group_context +inline graph::graph() : my_wait_context_vertex(0), my_nodes(nullptr), my_nodes_last(nullptr), my_task_arena(nullptr) { + prepare_task_arena(); + own_context = true; + cancelled = false; + caught_exception = false; + my_context = new (r1::cache_aligned_allocate(sizeof(task_group_context))) task_group_context(FLOW_TASKS); + fgt_graph(this); + my_is_active = true; +} + +inline graph::graph(task_group_context& use_this_context) : + my_wait_context_vertex(0), my_context(&use_this_context), my_nodes(nullptr), my_nodes_last(nullptr), my_task_arena(nullptr) { + prepare_task_arena(); + own_context = false; + cancelled = false; + caught_exception = false; + fgt_graph(this); + my_is_active = true; +} + +inline graph::~graph() { + wait_for_all(); + if (own_context) { + my_context->~task_group_context(); + r1::cache_aligned_deallocate(my_context); + } + delete my_task_arena; +} + +inline void graph::reserve_wait() { + my_wait_context_vertex.reserve(); + fgt_reserve_wait(this); +} + +inline void graph::release_wait() { + fgt_release_wait(this); + my_wait_context_vertex.release(); +} + +inline void graph::register_node(graph_node *n) { + n->next = nullptr; + { + spin_mutex::scoped_lock lock(nodelist_mutex); + n->prev = my_nodes_last; + if (my_nodes_last) my_nodes_last->next = n; + my_nodes_last = n; + if (!my_nodes) my_nodes = n; + } +} + +inline void graph::remove_node(graph_node *n) { + { + spin_mutex::scoped_lock lock(nodelist_mutex); + __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes"); + if (n->prev) n->prev->next = n->next; + if (n->next) n->next->prev = n->prev; + if (my_nodes_last == n) my_nodes_last = n->prev; + if (my_nodes == n) my_nodes = n->next; + } + n->prev = n->next = nullptr; +} + +inline void graph::reset( reset_flags f ) { + // reset context + deactivate_graph(*this); + + my_context->reset(); + cancelled = false; + caught_exception = false; + // reset all the nodes comprising the graph + for(iterator ii = begin(); ii != end(); ++ii) { + 
graph_node *my_p = &(*ii); + my_p->reset_node(f); + } + // Reattach the arena. Might be useful to run the graph in a particular task_arena + // while not limiting graph lifetime to a single task_arena::execute() call. + prepare_task_arena( /*reinit=*/true ); + activate_graph(*this); +} + +inline void graph::cancel() { + my_context->cancel_group_execution(); +} + +inline graph::iterator graph::begin() { return iterator(this, true); } + +inline graph::iterator graph::end() { return iterator(this, false); } + +inline graph::const_iterator graph::begin() const { return const_iterator(this, true); } + +inline graph::const_iterator graph::end() const { return const_iterator(this, false); } + +inline graph::const_iterator graph::cbegin() const { return const_iterator(this, true); } + +inline graph::const_iterator graph::cend() const { return const_iterator(this, false); } + +inline graph_node::graph_node(graph& g) : my_graph(g) { + my_graph.register_node(this); +} + +inline graph_node::~graph_node() { + my_graph.remove_node(this); +} + +#include "detail/_flow_graph_node_impl.h" + + +//! An executable node that acts as a source, i.e. it has no predecessors + +template < typename Output > + __TBB_requires(std::copyable) +class input_node : public graph_node, public sender< Output > { +public: + //! The type of the output message, which is complete + typedef Output output_type; + + //! The type of successors of this node + typedef typename sender::successor_type successor_type; + + // Input node has no input type + typedef null_type input_type; + + //! Constructor for a node with a successor + template< typename Body > + __TBB_requires(input_node_body) + __TBB_NOINLINE_SYM input_node( graph &g, Body body ) + : graph_node(g), my_active(false) + , my_body( new input_body_leaf< output_type, Body>(body) ) + , my_init_body( new input_body_leaf< output_type, Body>(body) ) + , my_successors(this), my_reserved(false), my_has_cached_item(false) + { + fgt_node_with_body(CODEPTR(), FLOW_INPUT_NODE, &this->my_graph, + static_cast *>(this), this->my_body); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(input_node_body) + input_node( const node_set& successors, Body body ) + : input_node(successors.graph_reference(), body) + { + make_edges(*this, successors); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM input_node( const input_node& src ) + : graph_node(src.my_graph), sender() + , my_active(false) + , my_body(src.my_init_body->clone()), my_init_body(src.my_init_body->clone()) + , my_successors(this), my_reserved(false), my_has_cached_item(false) + { + fgt_node_with_body(CODEPTR(), FLOW_INPUT_NODE, &this->my_graph, + static_cast *>(this), this->my_body); + } + + //! The destructor + ~input_node() { delete my_body; delete my_init_body; } + + //! Add a new successor to this node + bool register_successor( successor_type &r ) override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.register_successor(r); + if ( my_active ) + spawn_put(); + return true; + } + + //! Removes a successor from this node + bool remove_successor( successor_type &r ) override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.remove_successor(r); + return true; + } + + //! Request an item from the node + bool try_get( output_type &v ) override { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) + return false; + + if ( my_has_cached_item ) { + v = my_cached_item; + my_has_cached_item = false; + return true; + } + // we've been asked to provide an item, but we have none. 
enqueue a task to + // provide one. + if ( my_active ) + spawn_put(); + return false; + } + + //! Reserves an item. + bool try_reserve( output_type &v ) override { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) { + return false; + } + + if ( my_has_cached_item ) { + v = my_cached_item; + my_reserved = true; + return true; + } else { + return false; + } + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +private: + bool try_reserve( output_type& v, message_metainfo& ) override { + return try_reserve(v); + } + + bool try_get( output_type& v, message_metainfo& ) override { + return try_get(v); + } +public: +#endif + + //! Release a reserved item. + /** true = item has been released and so remains in sender, dest must request or reserve future items */ + bool try_release( ) override { + spin_mutex::scoped_lock lock(my_mutex); + __TBB_ASSERT( my_reserved && my_has_cached_item, "releasing non-existent reservation" ); + my_reserved = false; + if(!my_successors.empty()) + spawn_put(); + return true; + } + + //! Consumes a reserved item + bool try_consume( ) override { + spin_mutex::scoped_lock lock(my_mutex); + __TBB_ASSERT( my_reserved && my_has_cached_item, "consuming non-existent reservation" ); + my_reserved = false; + my_has_cached_item = false; + if ( !my_successors.empty() ) { + spawn_put(); + } + return true; + } + + //! Activates a node that was created in the inactive state + void activate() { + spin_mutex::scoped_lock lock(my_mutex); + my_active = true; + if (!my_successors.empty()) + spawn_put(); + } + + template + Body copy_function_object() { + input_body &body_ref = *this->my_body; + return dynamic_cast< input_body_leaf & >(body_ref).get_body(); + } + +protected: + + //! resets the input_node to its initial state + void reset_node( reset_flags f) override { + my_active = false; + my_reserved = false; + my_has_cached_item = false; + + if(f & rf_clear_edges) my_successors.clear(); + if(f & rf_reset_bodies) { + input_body *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + } + +private: + spin_mutex my_mutex; + bool my_active; + input_body *my_body; + input_body *my_init_body; + broadcast_cache< output_type > my_successors; + bool my_reserved; + bool my_has_cached_item; + output_type my_cached_item; + + // used by apply_body_bypass, can invoke body of node. + bool try_reserve_apply_body(output_type &v) { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) { + return false; + } + if ( !my_has_cached_item ) { + d1::flow_control control; + + fgt_begin_body( my_body ); + + my_cached_item = (*my_body)(control); + my_has_cached_item = !control.is_pipeline_stopped; + + fgt_end_body( my_body ); + } + if ( my_has_cached_item ) { + v = my_cached_item; + my_reserved = true; + return true; + } else { + return false; + } + } + + graph_task* create_put_task() { + d1::small_object_allocator allocator{}; + typedef input_node_task_bypass< input_node > task_type; + graph_task* t = allocator.new_object(my_graph, allocator, *this); + return t; + } + + //! Spawns a task that applies the body + void spawn_put( ) { + if(is_graph_active(this->my_graph)) { + spawn_in_graph_arena(this->my_graph, *create_put_task()); + } + } + + friend class input_node_task_bypass< input_node >; + //! Applies the body. Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it. 
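+    // Editorial note (not upstream TBB code): an input_node is constructed in
+    // the inactive state; it emits nothing until activate() is called. A hedged
+    // sketch, with 'g', 'body', and 'downstream' hypothetical:
+    //
+    //     tbb::flow::input_node<int> src(g, body);
+    //     tbb::flow::make_edge(src, downstream);
+    //     src.activate();
+    //     g.wait_for_all();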
+ graph_task* apply_body_bypass( ) { + output_type v; + if ( !try_reserve_apply_body(v) ) + return nullptr; + + graph_task *last_task = my_successors.try_put_task(v); + if ( last_task ) + try_consume(); + else + try_release(); + return last_task; + } +}; // class input_node + +//! Implements a function node that supports Input -> Output +template + __TBB_requires(std::default_initializable && + std::copy_constructible && + std::copy_constructible) +class function_node + : public graph_node + , public function_input< Input, Output, Policy, cache_aligned_allocator > + , public function_output +{ + typedef cache_aligned_allocator internals_allocator; + +public: + typedef Input input_type; + typedef Output output_type; + typedef function_input input_impl_type; + typedef function_input_queue input_queue_type; + typedef function_output fOutput_type; + typedef typename input_impl_type::predecessor_type predecessor_type; + typedef typename fOutput_type::successor_type successor_type; + + using input_impl_type::my_predecessors; + + //! Constructor + // input_queue_type is allocated here, but destroyed in the function_input_base. + // TODO: pass the graph_buffer_policy to the function_input_base so it can all + // be done in one place. This would be an interface-breaking change. + template< typename Body > + __TBB_requires(function_node_body) + __TBB_NOINLINE_SYM function_node( graph &g, size_t concurrency, + Body body, Policy = Policy(), node_priority_t a_priority = no_priority ) + : graph_node(g), input_impl_type(g, concurrency, body, a_priority), + fOutput_type(g) { + fgt_node_with_body( CODEPTR(), FLOW_FUNCTION_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this), this->my_body ); + } + + template + __TBB_requires(function_node_body) + function_node( graph& g, size_t concurrency, Body body, node_priority_t a_priority ) + : function_node(g, concurrency, body, Policy(), a_priority) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(function_node_body) + function_node( const node_set& nodes, size_t concurrency, Body body, + Policy p = Policy(), node_priority_t a_priority = no_priority ) + : function_node(nodes.graph_reference(), concurrency, body, p, a_priority) { + make_edges_in_order(nodes, *this); + } + + template + __TBB_requires(function_node_body) + function_node( const node_set& nodes, size_t concurrency, Body body, node_priority_t a_priority ) + : function_node(nodes, concurrency, body, Policy(), a_priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + //! Copy constructor + __TBB_NOINLINE_SYM function_node( const function_node& src ) : + graph_node(src.my_graph), + input_impl_type(src), + fOutput_type(src.my_graph) { + fgt_node_with_body( CODEPTR(), FLOW_FUNCTION_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this), this->my_body ); + } + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + using input_impl_type::try_put_task; + + broadcast_cache &successors () override { return fOutput_type::my_successors; } + + void reset_node(reset_flags f) override { + input_impl_type::reset_function_input(f); + // TODO: use clear() instead. 
+ if(f & rf_clear_edges) { + successors().clear(); + my_predecessors.clear(); + } + __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "function_node successors not empty"); + __TBB_ASSERT(this->my_predecessors.empty(), "function_node predecessors not empty"); + } + +}; // class function_node + +//! implements a function node that supports Input -> (set of outputs) +// Output is a tuple of output types. +template + __TBB_requires(std::default_initializable && + std::copy_constructible) +class multifunction_node : + public graph_node, + public multifunction_input + < + Input, + typename wrap_tuple_elements< + std::tuple_size::value, // #elements in tuple + multifunction_output, // wrap this around each element + Output // the tuple providing the types + >::type, + Policy, + cache_aligned_allocator + > +{ + typedef cache_aligned_allocator internals_allocator; + +protected: + static const int N = std::tuple_size::value; +public: + typedef Input input_type; + typedef null_type output_type; + typedef typename wrap_tuple_elements::type output_ports_type; + typedef multifunction_input< + input_type, output_ports_type, Policy, internals_allocator> input_impl_type; + typedef function_input_queue input_queue_type; +private: + using input_impl_type::my_predecessors; +public: + template + __TBB_requires(multifunction_node_body) + __TBB_NOINLINE_SYM multifunction_node( + graph &g, size_t concurrency, + Body body, Policy = Policy(), node_priority_t a_priority = no_priority + ) : graph_node(g), input_impl_type(g, concurrency, body, a_priority) { + fgt_multioutput_node_with_body( + CODEPTR(), FLOW_MULTIFUNCTION_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body + ); + } + + template + __TBB_requires(multifunction_node_body) + __TBB_NOINLINE_SYM multifunction_node(graph& g, size_t concurrency, Body body, node_priority_t a_priority) + : multifunction_node(g, concurrency, body, Policy(), a_priority) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(multifunction_node_body) + __TBB_NOINLINE_SYM multifunction_node(const node_set& nodes, size_t concurrency, Body body, + Policy p = Policy(), node_priority_t a_priority = no_priority) + : multifunction_node(nodes.graph_reference(), concurrency, body, p, a_priority) { + make_edges_in_order(nodes, *this); + } + + template + __TBB_requires(multifunction_node_body) + __TBB_NOINLINE_SYM multifunction_node(const node_set& nodes, size_t concurrency, Body body, node_priority_t a_priority) + : multifunction_node(nodes, concurrency, body, Policy(), a_priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM multifunction_node( const multifunction_node &other) : + graph_node(other.my_graph), input_impl_type(other) { + fgt_multioutput_node_with_body( CODEPTR(), FLOW_MULTIFUNCTION_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body ); + } + + // all the guts are in multifunction_input... +protected: + void reset_node(reset_flags f) override { input_impl_type::reset(f); } +}; // multifunction_node + +//! split_node: accepts a tuple as input, forwards each element of the tuple to its +// successors. The node has unlimited concurrency, so it does not reject inputs. 
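+//
+// Editorial sketch (not upstream TBB code), with 'g', 'int_consumer', and
+// 'float_consumer' hypothetical:
+//
+//     tbb::flow::split_node< std::tuple<int, float> > splitter(g);
+//     tbb::flow::make_edge(tbb::flow::output_port<0>(splitter), int_consumer);
+//     tbb::flow::make_edge(tbb::flow::output_port<1>(splitter), float_consumer);
+//     splitter.try_put(std::make_tuple(1, 2.0f));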
+template +class split_node : public graph_node, public receiver { + static const int N = std::tuple_size::value; + typedef receiver base_type; +public: + typedef TupleType input_type; + typedef typename wrap_tuple_elements< + N, // #elements in tuple + multifunction_output, // wrap this around each element + TupleType // the tuple providing the types + >::type output_ports_type; + + __TBB_NOINLINE_SYM explicit split_node(graph &g) + : graph_node(g), + my_output_ports(init_output_ports::call(g, my_output_ports)) + { + fgt_multioutput_node(CODEPTR(), FLOW_SPLIT_NODE, &this->my_graph, + static_cast *>(this), this->output_ports()); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM split_node(const node_set& nodes) : split_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM split_node(const split_node& other) + : graph_node(other.my_graph), base_type(other), + my_output_ports(init_output_ports::call(other.my_graph, my_output_ports)) + { + fgt_multioutput_node(CODEPTR(), FLOW_SPLIT_NODE, &this->my_graph, + static_cast *>(this), this->output_ports()); + } + + output_ports_type &output_ports() { return my_output_ports; } + +protected: + graph_task *try_put_task(const TupleType& t) override { + // Sending split messages in parallel is not justified, as overheads would prevail. + // Also, we do not have successors here. So we just tell the task returned here is successful. + return emit_element::emit_this(this->my_graph, t, output_ports()); + } +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const TupleType& t, const message_metainfo& metainfo) override { + // Sending split messages in parallel is not justified, as overheads would prevail. + // Also, we do not have successors here. So we just tell the task returned here is successful. + return emit_element::emit_this(this->my_graph, t, output_ports(), metainfo); + } +#endif + + void reset_node(reset_flags f) override { + if (f & rf_clear_edges) + clear_element::clear_this(my_output_ports); + + __TBB_ASSERT(!(f & rf_clear_edges) || clear_element::this_empty(my_output_ports), "split_node reset failed"); + } + graph& graph_reference() const override { + return my_graph; + } + +private: + output_ports_type my_output_ports; +}; + +//! Implements an executable node that supports continue_msg -> Output +template > + __TBB_requires(std::copy_constructible) +class continue_node : public graph_node, public continue_input, + public function_output { +public: + typedef continue_msg input_type; + typedef Output output_type; + typedef continue_input input_impl_type; + typedef function_output fOutput_type; + typedef typename input_impl_type::predecessor_type predecessor_type; + typedef typename fOutput_type::successor_type successor_type; + + //! 
Constructor for executable node with continue_msg -> Output + template + __TBB_requires(continue_node_body) + __TBB_NOINLINE_SYM continue_node( + graph &g, + Body body, Policy = Policy(), node_priority_t a_priority = no_priority + ) : graph_node(g), input_impl_type( g, body, a_priority ), + fOutput_type(g) { + fgt_node_with_body( CODEPTR(), FLOW_CONTINUE_NODE, &this->my_graph, + + static_cast *>(this), + static_cast *>(this), this->my_body ); + } + + template + __TBB_requires(continue_node_body) + continue_node( graph& g, Body body, node_priority_t a_priority ) + : continue_node(g, body, Policy(), a_priority) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(continue_node_body) + continue_node( const node_set& nodes, Body body, + Policy p = Policy(), node_priority_t a_priority = no_priority ) + : continue_node(nodes.graph_reference(), body, p, a_priority ) { + make_edges_in_order(nodes, *this); + } + template + __TBB_requires(continue_node_body) + continue_node( const node_set& nodes, Body body, node_priority_t a_priority) + : continue_node(nodes, body, Policy(), a_priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + //! Constructor for executable node with continue_msg -> Output + template + __TBB_requires(continue_node_body) + __TBB_NOINLINE_SYM continue_node( + graph &g, int number_of_predecessors, + Body body, Policy = Policy(), node_priority_t a_priority = no_priority + ) : graph_node(g) + , input_impl_type(g, number_of_predecessors, body, a_priority), + fOutput_type(g) { + fgt_node_with_body( CODEPTR(), FLOW_CONTINUE_NODE, &this->my_graph, + static_cast *>(this), + static_cast *>(this), this->my_body ); + } + + template + __TBB_requires(continue_node_body) + continue_node( graph& g, int number_of_predecessors, Body body, node_priority_t a_priority) + : continue_node(g, number_of_predecessors, body, Policy(), a_priority) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(continue_node_body) + continue_node( const node_set& nodes, int number_of_predecessors, + Body body, Policy p = Policy(), node_priority_t a_priority = no_priority ) + : continue_node(nodes.graph_reference(), number_of_predecessors, body, p, a_priority) { + make_edges_in_order(nodes, *this); + } + + template + __TBB_requires(continue_node_body) + continue_node( const node_set& nodes, int number_of_predecessors, + Body body, node_priority_t a_priority ) + : continue_node(nodes, number_of_predecessors, body, Policy(), a_priority) {} +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM continue_node( const continue_node& src ) : + graph_node(src.my_graph), input_impl_type(src), + function_output(src.my_graph) { + fgt_node_with_body( CODEPTR(), FLOW_CONTINUE_NODE, &this->my_graph, + static_cast *>(this), + static_cast *>(this), this->my_body ); + } + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + using input_impl_type::try_put_task; + broadcast_cache &successors () override { return fOutput_type::my_successors; } + + void reset_node(reset_flags f) override { + input_impl_type::reset_receiver(f); + if(f & rf_clear_edges)successors().clear(); + __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "continue_node not reset"); + } +}; // continue_node + +//! 
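+// Editorial sketch (not upstream TBB code): continue_node fires once it has
+// received a continue_msg from each registered predecessor, making it the
+// building block for dependency graphs. 'g', 'step_a', and 'step_b' are
+// hypothetical:
+//
+//     tbb::flow::continue_node<tbb::flow::continue_msg> join_point(
+//         g, [](const tbb::flow::continue_msg&) { return tbb::flow::continue_msg(); });
+//     tbb::flow::make_edge(step_a, join_point);
+//     tbb::flow::make_edge(step_b, join_point);
+
+//!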
Forwards messages of type T to all successors +template +class broadcast_node : public graph_node, public receiver, public sender { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; +private: + broadcast_cache my_successors; +public: + + __TBB_NOINLINE_SYM explicit broadcast_node(graph& g) : graph_node(g), my_successors(this) { + fgt_node( CODEPTR(), FLOW_BROADCAST_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + broadcast_node(const node_set& nodes) : broadcast_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM broadcast_node( const broadcast_node& src ) : broadcast_node(src.my_graph) {} + + //! Adds a successor + bool register_successor( successor_type &r ) override { + my_successors.register_successor( r ); + return true; + } + + //! Removes s as a successor + bool remove_successor( successor_type &r ) override { + my_successors.remove_successor( r ); + return true; + } + +private: + graph_task* try_put_task_impl(const T& t __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + graph_task* new_task = my_successors.try_put_task(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + if (!new_task) new_task = SUCCESSFULLY_ENQUEUED; + return new_task; + } + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + //! build a task to run the successor if possible. Default is old behavior. + graph_task* try_put_task(const T& t) override { + return try_put_task_impl(t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T& t, const message_metainfo& metainfo) override { + return try_put_task_impl(t, metainfo); + } +#endif + + graph& graph_reference() const override { + return my_graph; + } + + void reset_node(reset_flags f) override { + if (f&rf_clear_edges) { + my_successors.clear(); + } + __TBB_ASSERT(!(f & rf_clear_edges) || my_successors.empty(), "Error resetting broadcast_node"); + } +}; // broadcast_node + +//! 
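+// Editorial sketch (not upstream TBB code): each message put to a
+// broadcast_node is delivered, unbuffered, to every successor. 'g',
+// 'worker_one', and 'worker_two' are hypothetical:
+//
+//     tbb::flow::broadcast_node<int> fanout(g);
+//     tbb::flow::make_edge(fanout, worker_one);
+//     tbb::flow::make_edge(fanout, worker_two);
+//     fanout.try_put(7);
+
+//!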
Forwards messages in arbitrary order +template +class buffer_node + : public graph_node + , public reservable_item_buffer< T, cache_aligned_allocator > + , public receiver, public sender +{ + typedef cache_aligned_allocator internals_allocator; + +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + typedef buffer_node class_type; + +protected: + typedef size_t size_type; + round_robin_cache< T, null_rw_mutex > my_successors; + + friend class forward_task_bypass< class_type >; + + enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task + }; + + // implements the aggregator_operation concept + class buffer_operation : public d1::aggregated_operation< buffer_operation > { + public: + char type; + T* elem; + graph_task* ltask; + successor_type *r; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo* metainfo{ nullptr }; +#endif + + buffer_operation(const T& e, op_type t) : type(char(t)) + , elem(const_cast(&e)) , ltask(nullptr) + , r(nullptr) + {} + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + buffer_operation(const T& e, op_type t, const message_metainfo& info) + : type(char(t)), elem(const_cast(&e)), ltask(nullptr), r(nullptr) + , metainfo(const_cast(&info)) + {} + + buffer_operation(op_type t, message_metainfo& info) + : type(char(t)), elem(nullptr), ltask(nullptr), r(nullptr), metainfo(&info) {} +#endif + buffer_operation(op_type t) : type(char(t)), elem(nullptr), ltask(nullptr), r(nullptr) {} + }; + + bool forwarder_busy; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator< handler_type, buffer_operation> my_aggregator; + + virtual void handle_operations(buffer_operation *op_list) { + handle_operations_impl(op_list, this); + } + + template + void handle_operations_impl(buffer_operation *op_list, derived_type* derived) { + __TBB_ASSERT(static_cast(derived) == this, "'this' is not a base class for derived"); + + buffer_operation *tmp = nullptr; + bool try_forwarding = false; + while (op_list) { + tmp = op_list; + op_list = op_list->next; + switch (tmp->type) { + case reg_succ: internal_reg_succ(tmp); try_forwarding = true; break; + case rem_succ: internal_rem_succ(tmp); break; + case req_item: internal_pop(tmp); break; + case res_item: internal_reserve(tmp); break; + case rel_res: internal_release(tmp); try_forwarding = true; break; + case con_res: internal_consume(tmp); try_forwarding = true; break; + case put_item: try_forwarding = internal_push(tmp); break; + case try_fwd_task: internal_forward_task(tmp); break; + } + } + + derived->order(); + + if (try_forwarding && !forwarder_busy) { + if(is_graph_active(this->my_graph)) { + forwarder_busy = true; + typedef forward_task_bypass task_type; + d1::small_object_allocator allocator{}; + graph_task* new_task = allocator.new_object(graph_reference(), allocator, *this); + // tmp should point to the last item handled by the aggregator. This is the operation + // the handling thread enqueued. So modifying that record will be okay. 
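+                // (Editorial clarification, not upstream text: the new
+                // forwarding task is combined into tmp->ltask, the record of
+                // the last operation in this batch; the thread that submitted
+                // that operation finds the task there once
+                // my_aggregator.execute() returns and spawns it via
+                // enqueue_forwarding_task(), outside the aggregator.)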
+ // TODO revamp: check that the issue is still present + // workaround for icc bug (at least 12.0 and 13.0) + // error: function "tbb::flow::interfaceX::combine_tasks" cannot be called with the given argument list + // argument types are: (graph, graph_task *, graph_task *) + graph_task *z = tmp->ltask; + graph &g = this->my_graph; + tmp->ltask = combine_tasks(g, z, new_task); // in case the op generated a task + } + } + } // handle_operations + + inline graph_task *grab_forwarding_task( buffer_operation &op_data) { + return op_data.ltask; + } + + inline bool enqueue_forwarding_task(buffer_operation &op_data) { + graph_task *ft = grab_forwarding_task(op_data); + if(ft) { + spawn_in_graph_arena(graph_reference(), *ft); + return true; + } + return false; + } + + //! This is executed by an enqueued task, the "forwarder" + virtual graph_task *forward_task() { + buffer_operation op_data(try_fwd_task); + graph_task *last_task = nullptr; + do { + op_data.status = WAIT; + op_data.ltask = nullptr; + my_aggregator.execute(&op_data); + + // workaround for icc bug + graph_task *xtask = op_data.ltask; + graph& g = this->my_graph; + last_task = combine_tasks(g, last_task, xtask); + } while (op_data.status ==SUCCEEDED); + return last_task; + } + + //! Register successor + virtual void internal_reg_succ(buffer_operation *op) { + __TBB_ASSERT(op->r, nullptr); + my_successors.register_successor(*(op->r)); + op->status.store(SUCCEEDED, std::memory_order_release); + } + + //! Remove successor + virtual void internal_rem_succ(buffer_operation *op) { + __TBB_ASSERT(op->r, nullptr); + my_successors.remove_successor(*(op->r)); + op->status.store(SUCCEEDED, std::memory_order_release); + } + +private: + void order() {} + + bool is_item_valid() { + return this->my_item_valid(this->my_tail - 1); + } + + void try_put_and_add_task(graph_task*& last_task) { + graph_task* new_task = my_successors.try_put_task(this->back() + __TBB_FLOW_GRAPH_METAINFO_ARG(this->back_metainfo())); + if (new_task) { + // workaround for icc bug + graph& g = this->my_graph; + last_task = combine_tasks(g, last_task, new_task); + this->destroy_back(); + } + } + +protected: + //! 
Tries to forward valid items to successors + virtual void internal_forward_task(buffer_operation *op) { + internal_forward_task_impl(op, this); + } + + template + void internal_forward_task_impl(buffer_operation *op, derived_type* derived) { + __TBB_ASSERT(static_cast(derived) == this, "'this' is not a base class for derived"); + + if (this->my_reserved || !derived->is_item_valid()) { + op->status.store(FAILED, std::memory_order_release); + this->forwarder_busy = false; + return; + } + // Try forwarding, giving each successor a chance + graph_task* last_task = nullptr; + size_type counter = my_successors.size(); + for (; counter > 0 && derived->is_item_valid(); --counter) + derived->try_put_and_add_task(last_task); + + op->ltask = last_task; // return task + if (last_task && !counter) { + op->status.store(SUCCEEDED, std::memory_order_release); + } + else { + op->status.store(FAILED, std::memory_order_release); + forwarder_busy = false; + } + } + + virtual bool internal_push(buffer_operation *op) { + __TBB_ASSERT(op->elem, nullptr); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(op->metainfo, nullptr); + this->push_back(*(op->elem), (*op->metainfo)); +#else + this->push_back(*(op->elem)); +#endif + op->status.store(SUCCEEDED, std::memory_order_release); + return true; + } + + virtual void internal_pop(buffer_operation *op) { + __TBB_ASSERT(op->elem, nullptr); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool pop_result = op->metainfo ? this->pop_back(*(op->elem), *(op->metainfo)) + : this->pop_back(*(op->elem)); +#else + bool pop_result = this->pop_back(*(op->elem)); +#endif + if (pop_result) { + op->status.store(SUCCEEDED, std::memory_order_release); + } + else { + op->status.store(FAILED, std::memory_order_release); + } + } + + virtual void internal_reserve(buffer_operation *op) { + __TBB_ASSERT(op->elem, nullptr); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool reserve_result = op->metainfo ? this->reserve_front(*(op->elem), *(op->metainfo)) + : this->reserve_front(*(op->elem)); +#else + bool reserve_result = this->reserve_front(*(op->elem)); +#endif + if (reserve_result) { + op->status.store(SUCCEEDED, std::memory_order_release); + } + else { + op->status.store(FAILED, std::memory_order_release); + } + } + + virtual void internal_consume(buffer_operation *op) { + this->consume_front(); + op->status.store(SUCCEEDED, std::memory_order_release); + } + + virtual void internal_release(buffer_operation *op) { + this->release_front(); + op->status.store(SUCCEEDED, std::memory_order_release); + } + +public: + //! Constructor + __TBB_NOINLINE_SYM explicit buffer_node( graph &g ) + : graph_node(g), reservable_item_buffer(), receiver(), + sender(), my_successors(this), forwarder_busy(false) + { + my_aggregator.initialize_handler(handler_type(this)); + fgt_node( CODEPTR(), FLOW_BUFFER_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + buffer_node(const node_set& nodes) : buffer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM buffer_node( const buffer_node& src ) : buffer_node(src.my_graph) {} + + // + // message sender implementation + // + + //! Adds a new successor. + /** Adds successor r to the list of successors; may forward tasks. 
*/
+    bool register_successor( successor_type &r ) override {
+        buffer_operation op_data(reg_succ);
+        op_data.r = &r;
+        my_aggregator.execute(&op_data);
+        (void)enqueue_forwarding_task(op_data);
+        return true;
+    }
+
+    //! Removes a successor.
+    /** Removes successor r from the list of successors.
+        It also calls r.remove_predecessor(*this) to remove this node as a predecessor. */
+    bool remove_successor( successor_type &r ) override {
+        // TODO revamp: investigate why full qualification is necessary here
+        tbb::detail::d2::remove_predecessor(r, *this);
+        buffer_operation op_data(rem_succ);
+        op_data.r = &r;
+        my_aggregator.execute(&op_data);
+        // even though this operation does not cause a forward, if we are the handler, and
+        // a forward is scheduled, we may be the first to reach this point after the aggregator,
+        // and so should check for the task.
+        (void)enqueue_forwarding_task(op_data);
+        return true;
+    }
+
+    //! Request an item from the buffer_node
+    /** true = v contains the returned item;
+        false = no item has been returned */
+    bool try_get( T &v ) override {
+        buffer_operation op_data(req_item);
+        op_data.elem = &v;
+        my_aggregator.execute(&op_data);
+        (void)enqueue_forwarding_task(op_data);
+        return (op_data.status==SUCCEEDED);
+    }
+
+#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT
+    bool try_get( T &v, message_metainfo& metainfo ) override {
+        buffer_operation op_data(req_item, metainfo);
+        op_data.elem = &v;
+        my_aggregator.execute(&op_data);
+        (void)enqueue_forwarding_task(op_data);
+        return (op_data.status==SUCCEEDED);
+    }
+#endif
+
+    //! Reserves an item.
+    /** false = no item can be reserved;
+ true = an item is reserved */ + bool try_reserve( T &v ) override { + buffer_operation op_data(res_item); + op_data.elem = &v; + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return (op_data.status==SUCCEEDED); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_reserve( output_type& v, message_metainfo& metainfo ) override { + buffer_operation op_data(res_item, metainfo); + op_data.elem = &v; + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return op_data.status==SUCCEEDED; + } +#endif + + //! Release a reserved item. + /** true = item has been released and so remains in sender */ + bool try_release() override { + buffer_operation op_data(rel_res); + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return true; + } + + //! Consumes a reserved item. + /** true = item is removed from sender and reservation removed */ + bool try_consume() override { + buffer_operation op_data(con_res); + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return true; + } + +private: + graph_task* try_put_task_impl(const T& t __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + buffer_operation op_data(t, put_item __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + my_aggregator.execute(&op_data); + graph_task *ft = grab_forwarding_task(op_data); + // sequencer_nodes can return failure (if an item has been previously inserted) + // We have to spawn the returned task if our own operation fails. + + if(ft && op_data.status ==FAILED) { + // we haven't succeeded queueing the item, but for some reason the + // call returned a task (if another request resulted in a successful + // forward this could happen.) Queue the task and reset the pointer. + spawn_in_graph_arena(graph_reference(), *ft); ft = nullptr; + } + else if(!ft && op_data.status ==SUCCEEDED) { + ft = SUCCESSFULLY_ENQUEUED; + } + return ft; + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + //! receive an item, return a task *if possible + graph_task *try_put_task(const T &t) override { + return try_put_task_impl(t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T& t, const message_metainfo& metainfo) override { + return try_put_task_impl(t, metainfo); + } +#endif + + graph& graph_reference() const override { + return my_graph; + } + +protected: + void reset_node( reset_flags f) override { + reservable_item_buffer::reset(); + // TODO: just clear structures + if (f&rf_clear_edges) { + my_successors.clear(); + } + forwarder_busy = false; + } +}; // buffer_node + +//! 
Forwards messages in FIFO order +template +class queue_node : public buffer_node { +protected: + typedef buffer_node base_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::buffer_operation queue_operation; + typedef queue_node class_type; + +private: + template friend class buffer_node; + + bool is_item_valid() { + return this->my_item_valid(this->my_head); + } + + void try_put_and_add_task(graph_task*& last_task) { + graph_task* new_task = this->my_successors.try_put_task(this->front() + __TBB_FLOW_GRAPH_METAINFO_ARG(this->front_metainfo())); + + if (new_task) { + // workaround for icc bug + graph& graph_ref = this->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); + this->destroy_front(); + } + } + +protected: + void internal_forward_task(queue_operation *op) override { + this->internal_forward_task_impl(op, this); + } + + void internal_pop(queue_operation *op) override { + if ( this->my_reserved || !this->my_item_valid(this->my_head)){ + op->status.store(FAILED, std::memory_order_release); + } + else { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (op->metainfo) { + this->pop_front(*(op->elem), *(op->metainfo)); + } else +#endif + { + this->pop_front(*(op->elem)); + } + op->status.store(SUCCEEDED, std::memory_order_release); + } + } + void internal_reserve(queue_operation *op) override { + if (this->my_reserved || !this->my_item_valid(this->my_head)) { + op->status.store(FAILED, std::memory_order_release); + } + else { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (op->metainfo) { + this->reserve_front(*(op->elem), *(op->metainfo)); + } + else +#endif + { + this->reserve_front(*(op->elem)); + } + op->status.store(SUCCEEDED, std::memory_order_release); + } + } + void internal_consume(queue_operation *op) override { + this->consume_front(); + op->status.store(SUCCEEDED, std::memory_order_release); + } + +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! Constructor + __TBB_NOINLINE_SYM explicit queue_node( graph &g ) : base_type(g) { + fgt_node( CODEPTR(), FLOW_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + queue_node( const node_set& nodes) : queue_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM queue_node( const queue_node& src) : base_type(src) { + fgt_node( CODEPTR(), FLOW_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + + +protected: + void reset_node( reset_flags f) override { + base_type::reset_node(f); + } +}; // queue_node + +//! Forwards messages in sequence order +template + __TBB_requires(std::copyable) +class sequencer_node : public queue_node { + function_body< T, size_t > *my_sequencer; + // my_sequencer should be a benign function and must be callable + // from a parallel context. Does this mean it needn't be reset? +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! 
Constructor + template< typename Sequencer > + __TBB_requires(sequencer) + __TBB_NOINLINE_SYM sequencer_node( graph &g, const Sequencer& s ) : queue_node(g), + my_sequencer(new function_body_leaf< T, size_t, Sequencer>(s) ) { + fgt_node( CODEPTR(), FLOW_SEQUENCER_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(sequencer) + sequencer_node( const node_set& nodes, const Sequencer& s) + : sequencer_node(nodes.graph_reference(), s) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM sequencer_node( const sequencer_node& src ) : queue_node(src), + my_sequencer( src.my_sequencer->clone() ) { + fgt_node( CODEPTR(), FLOW_SEQUENCER_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + + //! Destructor + ~sequencer_node() { delete my_sequencer; } + +protected: + typedef typename buffer_node::size_type size_type; + typedef typename buffer_node::buffer_operation sequencer_operation; + +private: + bool internal_push(sequencer_operation *op) override { + size_type tag = (*my_sequencer)(*(op->elem)); +#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES + if (tag < this->my_head) { + // have already emitted a message with this tag + op->status.store(FAILED, std::memory_order_release); + return false; + } +#endif + // cannot modify this->my_tail now; the buffer would be inconsistent. + size_t new_tail = (tag+1 > this->my_tail) ? tag+1 : this->my_tail; + + if (this->size(new_tail) > this->capacity()) { + this->grow_my_array(this->size(new_tail)); + } + this->my_tail = new_tail; + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(op->metainfo, nullptr); + bool place_item_result = this->place_item(tag, *(op->elem), *(op->metainfo)); + const op_stat res = place_item_result ? SUCCEEDED : FAILED; +#else + const op_stat res = this->place_item(tag, *(op->elem)) ? SUCCEEDED : FAILED; +#endif + op->status.store(res, std::memory_order_release); + return res ==SUCCEEDED; + } +}; // sequencer_node + +//! Forwards messages in priority order +template> +class priority_queue_node : public buffer_node { +public: + typedef T input_type; + typedef T output_type; + typedef buffer_node base_type; + typedef priority_queue_node class_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! Constructor + __TBB_NOINLINE_SYM explicit priority_queue_node( graph &g, const Compare& comp = Compare() ) + : buffer_node(g), compare(comp), mark(0) { + fgt_node( CODEPTR(), FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + priority_queue_node(const node_set& nodes, const Compare& comp = Compare()) + : priority_queue_node(nodes.graph_reference(), comp) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM priority_queue_node( const priority_queue_node &src ) + : buffer_node(src), mark(0) + { + fgt_node( CODEPTR(), FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +protected: + + void reset_node( reset_flags f) override { + mark = 0; + base_type::reset_node(f); + } + + typedef typename buffer_node::size_type size_type; + typedef typename buffer_node::item_type item_type; + typedef typename buffer_node::buffer_operation prio_operation; + + //! 
Tries to forward valid items to successors + void internal_forward_task(prio_operation *op) override { + this->internal_forward_task_impl(op, this); + } + + void handle_operations(prio_operation *op_list) override { + this->handle_operations_impl(op_list, this); + } + + bool internal_push(prio_operation *op) override { +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + __TBB_ASSERT(op->metainfo, nullptr); + prio_push(*(op->elem), *(op->metainfo)); +#else + prio_push(*(op->elem)); +#endif + op->status.store(SUCCEEDED, std::memory_order_release); + return true; + } + + void internal_pop(prio_operation *op) override { + // if empty or already reserved, don't pop + if ( this->my_reserved == true || this->my_tail == 0 ) { + op->status.store(FAILED, std::memory_order_release); + return; + } + + *(op->elem) = prio(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (op->metainfo) { + *(op->metainfo) = std::move(prio_metainfo()); + } +#endif + op->status.store(SUCCEEDED, std::memory_order_release); + prio_pop(); + + } + + // pops the highest-priority item, saves copy + void internal_reserve(prio_operation *op) override { + if (this->my_reserved == true || this->my_tail == 0) { + op->status.store(FAILED, std::memory_order_release); + return; + } + this->my_reserved = true; + *(op->elem) = prio(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + if (op->metainfo) { + *(op->metainfo) = std::move(prio_metainfo()); + reserved_metainfo = *(op->metainfo); + } +#endif + reserved_item = *(op->elem); + op->status.store(SUCCEEDED, std::memory_order_release); + prio_pop(); + } + + void internal_consume(prio_operation *op) override { + op->status.store(SUCCEEDED, std::memory_order_release); + this->my_reserved = false; + reserved_item = input_type(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (auto waiter : reserved_metainfo.waiters()) { + waiter->release(1); + } + + reserved_metainfo = message_metainfo{}; +#endif + } + + void internal_release(prio_operation *op) override { + op->status.store(SUCCEEDED, std::memory_order_release); + prio_push(reserved_item __TBB_FLOW_GRAPH_METAINFO_ARG(reserved_metainfo)); + this->my_reserved = false; + reserved_item = input_type(); +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (auto waiter : reserved_metainfo.waiters()) { + waiter->release(1); + } + + reserved_metainfo = message_metainfo{}; +#endif + } + +private: + template friend class buffer_node; + + void order() { + if (mark < this->my_tail) heapify(); + __TBB_ASSERT(mark == this->my_tail, "mark unequal after heapify"); + } + + bool is_item_valid() { + return this->my_tail > 0; + } + + void try_put_and_add_task(graph_task*& last_task) { + graph_task* new_task = this->my_successors.try_put_task(this->prio() + __TBB_FLOW_GRAPH_METAINFO_ARG(this->prio_metainfo())); + if (new_task) { + // workaround for icc bug + graph& graph_ref = this->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); + prio_pop(); + } + } + +private: + Compare compare; + size_type mark; + + input_type reserved_item; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo reserved_metainfo; +#endif + + // in case a reheap has not been done after a push, check if the mark item is higher than the 0'th item + bool prio_use_tail() { + __TBB_ASSERT(mark <= this->my_tail, "mark outside bounds before test"); + return mark < this->my_tail && compare(this->get_my_item(0), this->get_my_item(this->my_tail - 1)); + } + + // prio_push: checks that the item will fit, expand array if necessary, put at end + void 
prio_push(const T &src __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + if ( this->my_tail >= this->my_array_size ) + this->grow_my_array( this->my_tail + 1 ); + (void) this->place_item(this->my_tail, src __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + ++(this->my_tail); + __TBB_ASSERT(mark < this->my_tail, "mark outside bounds after push"); + } + + // prio_pop: deletes highest priority item from the array, and if it is item + // 0, move last item to 0 and reheap. If end of array, just destroy and decrement tail + // and mark. Assumes the array has already been tested for emptiness; no failure. + void prio_pop() { + if (prio_use_tail()) { + // there are newly pushed elements; last one higher than top + // copy the data + this->destroy_item(this->my_tail-1); + --(this->my_tail); + __TBB_ASSERT(mark <= this->my_tail, "mark outside bounds after pop"); + return; + } + this->destroy_item(0); + if(this->my_tail > 1) { + // push the last element down heap + __TBB_ASSERT(this->my_item_valid(this->my_tail - 1), nullptr); + this->move_item(0,this->my_tail - 1); + } + --(this->my_tail); + if(mark > this->my_tail) --mark; + if (this->my_tail > 1) // don't reheap for heap of size 1 + reheap(); + __TBB_ASSERT(mark <= this->my_tail, "mark outside bounds after pop"); + } + + const T& prio() { + return this->get_my_item(prio_use_tail() ? this->my_tail-1 : 0); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo& prio_metainfo() { + return this->get_my_metainfo(prio_use_tail() ? this->my_tail-1 : 0); + } +#endif + + // turn array into heap + void heapify() { + if(this->my_tail == 0) { + mark = 0; + return; + } + if (!mark) mark = 1; + for (; markmy_tail; ++mark) { // for each unheaped element + size_type cur_pos = mark; + input_type to_place; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo metainfo; +#endif + this->fetch_item(mark, to_place __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + do { // push to_place up the heap + size_type parent = (cur_pos-1)>>1; + if (!compare(this->get_my_item(parent), to_place)) + break; + this->move_item(cur_pos, parent); + cur_pos = parent; + } while( cur_pos ); + this->place_item(cur_pos, to_place __TBB_FLOW_GRAPH_METAINFO_ARG(std::move(metainfo))); + } + } + + // otherwise heapified array with new root element; rearrange to heap + void reheap() { + size_type cur_pos=0, child=1; + while (child < mark) { + size_type target = child; + if (child+1get_my_item(child), + this->get_my_item(child+1))) + ++target; + // target now has the higher priority child + if (compare(this->get_my_item(target), + this->get_my_item(cur_pos))) + break; + // swap + this->swap_items(cur_pos, target); + cur_pos = target; + child = (cur_pos<<1)+1; + } + } +}; // priority_queue_node + +//! Forwards messages only if the threshold has not been reached +/** This node forwards items until its threshold is reached. + It contains no buffering. If the downstream node rejects, the + message is dropped. */ +template< typename T, typename DecrementType=continue_msg > +class limiter_node : public graph_node, public receiver< T >, public sender< T > { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + //TODO: There is a lack of predefined types for its controlling "decrementer" port. It should be fixed later. 
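+
+    // ------------------------------------------------------------------
+    // Editorial note (not part of the upstream oneTBB sources): a minimal
+    // usage sketch, assuming a graph `g`; the worker function_node is
+    // illustrative only. At most `threshold` messages are in flight at
+    // once; wiring the worker back into decrementer() re-opens the
+    // limiter as each message completes.
+    //
+    //     tbb::flow::graph g;
+    //     tbb::flow::limiter_node<int> limiter(g, /*threshold =*/ 4);
+    //     tbb::flow::function_node<int, tbb::flow::continue_msg> worker(
+    //         g, tbb::flow::unlimited,
+    //         [](int) { return tbb::flow::continue_msg(); });
+    //     tbb::flow::make_edge(limiter, worker);
+    //     tbb::flow::make_edge(worker, limiter.decrementer());
+    // ------------------------------------------------------------------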
+ +private: + size_t my_threshold; + size_t my_count; // number of successful puts + size_t my_tries; // number of active put attempts + size_t my_future_decrement; // number of active decrement + reservable_predecessor_cache< T, spin_mutex > my_predecessors; + spin_mutex my_mutex; + broadcast_cache< T > my_successors; + + //! The internal receiver< DecrementType > that adjusts the count + threshold_regulator< limiter_node, DecrementType > decrement; + + graph_task* decrement_counter( long long delta ) { + if ( delta > 0 && size_t(delta) > my_threshold ) { + delta = my_threshold; + } + + { + spin_mutex::scoped_lock lock(my_mutex); + if ( delta > 0 && size_t(delta) > my_count ) { + if( my_tries > 0 ) { + my_future_decrement += (size_t(delta) - my_count); + } + my_count = 0; + } + else if ( delta < 0 && size_t(-delta) > my_threshold - my_count ) { + my_count = my_threshold; + } + else { + my_count -= size_t(delta); // absolute value of delta is sufficiently small + } + __TBB_ASSERT(my_count <= my_threshold, "counter values are truncated to be inside the [0, threshold] interval"); + } + return forward_task(); + } + + // Let threshold_regulator call decrement_counter() + friend class threshold_regulator< limiter_node, DecrementType >; + + friend class forward_task_bypass< limiter_node >; + + bool check_conditions() { // always called under lock + return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() ); + } + + // only returns a valid task pointer or nullptr, never SUCCESSFULLY_ENQUEUED + graph_task* forward_task() { + input_type v; + graph_task* rval = nullptr; + bool reserved = false; + + { + spin_mutex::scoped_lock lock(my_mutex); + if ( check_conditions() ) + ++my_tries; + else + return nullptr; + } + + //SUCCESS + // if we can reserve and can put, we consume the reservation + // we increment the count and decrement the tries +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo metainfo; +#endif + if ( (my_predecessors.try_reserve(v __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo))) == true ) { + reserved = true; + if ( (rval = my_successors.try_put_task(v __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo))) != nullptr ) { + { + spin_mutex::scoped_lock lock(my_mutex); + ++my_count; + if ( my_future_decrement ) { + if ( my_count > my_future_decrement ) { + my_count -= my_future_decrement; + my_future_decrement = 0; + } + else { + my_future_decrement -= my_count; + my_count = 0; + } + } + --my_tries; + my_predecessors.try_consume(); + if ( check_conditions() ) { + if ( is_graph_active(this->my_graph) ) { + typedef forward_task_bypass> task_type; + d1::small_object_allocator allocator{}; + graph_task* rtask = allocator.new_object( my_graph, allocator, *this ); + spawn_in_graph_arena(graph_reference(), *rtask); + } + } + } + return rval; + } + } + //FAILURE + //if we can't reserve, we decrement the tries + //if we can reserve but can't put, we decrement the tries and release the reservation + { + spin_mutex::scoped_lock lock(my_mutex); + --my_tries; + if (reserved) my_predecessors.try_release(); + if ( check_conditions() ) { + if ( is_graph_active(this->my_graph) ) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass> task_type; + graph_task* t = allocator.new_object(my_graph, allocator, *this); + __TBB_ASSERT(!rval, "Have two tasks to handle"); + return t; + } + } + return rval; + } + } + + void initialize() { + fgt_node( + CODEPTR(), FLOW_LIMITER_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(&decrement), + static_cast 
*>(this) + ); + } + +public: + //! Constructor + limiter_node(graph &g, size_t threshold) + : graph_node(g), my_threshold(threshold), my_count(0), my_tries(0), my_future_decrement(0), + my_predecessors(this), my_successors(this), decrement(this) + { + initialize(); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + limiter_node(const node_set& nodes, size_t threshold) + : limiter_node(nodes.graph_reference(), threshold) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + limiter_node( const limiter_node& src ) : limiter_node(src.my_graph, src.my_threshold) {} + + //! The interface for accessing internal receiver< DecrementType > that adjusts the count + receiver& decrementer() { return decrement; } + + //! Replace the current successor with this new successor + bool register_successor( successor_type &r ) override { + spin_mutex::scoped_lock lock(my_mutex); + bool was_empty = my_successors.empty(); + my_successors.register_successor(r); + //spawn a forward task if this is the only successor + if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) { + if ( is_graph_active(this->my_graph) ) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass> task_type; + graph_task* t = allocator.new_object(my_graph, allocator, *this); + spawn_in_graph_arena(graph_reference(), *t); + } + } + return true; + } + + //! Removes a successor from this node + /** r.remove_predecessor(*this) is also called. */ + bool remove_successor( successor_type &r ) override { + // TODO revamp: investigate why qualification is needed for remove_predecessor() call + tbb::detail::d2::remove_predecessor(r, *this); + my_successors.remove_successor(r); + return true; + } + + //! Adds src to the list of cached predecessors. + bool register_predecessor( predecessor_type &src ) override { + spin_mutex::scoped_lock lock(my_mutex); + my_predecessors.add( src ); + if ( my_count + my_tries < my_threshold && !my_successors.empty() && is_graph_active(this->my_graph) ) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass> task_type; + graph_task* t = allocator.new_object(my_graph, allocator, *this); + spawn_in_graph_arena(graph_reference(), *t); + } + return true; + } + + //! Removes src from the list of cached predecessors. + bool remove_predecessor( predecessor_type &src ) override { + my_predecessors.remove( src ); + return true; + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + +private: + //! Puts an item to this receiver + graph_task* try_put_task_impl( const T &t __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo) ) { + { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_count + my_tries >= my_threshold ) + return nullptr; + else + ++my_tries; + } + + graph_task* rtask = my_successors.try_put_task(t __TBB_FLOW_GRAPH_METAINFO_ARG(metainfo)); + if ( !rtask ) { // try_put_task failed. 
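+            // (Editorial clarification, not upstream text: no successor
+            // accepted the message, so the speculative ++my_tries above is
+            // rolled back; if the node still has predecessors, successors,
+            // and spare capacity, a forward_task_bypass is allocated so the
+            // put can be retried later.)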
+ spin_mutex::scoped_lock lock(my_mutex); + --my_tries; + if (check_conditions() && is_graph_active(this->my_graph)) { + d1::small_object_allocator allocator{}; + typedef forward_task_bypass> task_type; + rtask = allocator.new_object(my_graph, allocator, *this); + } + } + else { + spin_mutex::scoped_lock lock(my_mutex); + ++my_count; + if ( my_future_decrement ) { + if ( my_count > my_future_decrement ) { + my_count -= my_future_decrement; + my_future_decrement = 0; + } + else { + my_future_decrement -= my_count; + my_count = 0; + } + } + --my_tries; + } + return rtask; + } + +protected: + graph_task* try_put_task(const T& t) override { + return try_put_task_impl(t __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T& t, const message_metainfo& metainfo) override { + return try_put_task_impl(t, metainfo); + } +#endif + + graph& graph_reference() const override { return my_graph; } + + void reset_node( reset_flags f ) override { + my_count = 0; + if ( f & rf_clear_edges ) { + my_predecessors.clear(); + my_successors.clear(); + } + else { + my_predecessors.reset(); + } + decrement.reset_receiver(f); + } +}; // limiter_node + +#include "detail/_flow_graph_join_impl.h" + +template class join_node; + +template +class join_node: public unfolded_join_node::value, reserving_port, OutputTuple, reserving> { +private: + static const int N = std::tuple_size::value; + typedef unfolded_join_node unfolded_type; +public: + typedef OutputTuple output_type; + typedef typename unfolded_type::input_ports_type input_ports_type; + __TBB_NOINLINE_SYM explicit join_node(graph &g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_RESERVING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, reserving = reserving()) : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_RESERVING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +template +class join_node: public unfolded_join_node::value, queueing_port, OutputTuple, queueing> { +private: + static const int N = std::tuple_size::value; + typedef unfolded_join_node unfolded_type; +public: + typedef OutputTuple output_type; + typedef typename unfolded_type::input_ports_type input_ports_type; + __TBB_NOINLINE_SYM explicit join_node(graph &g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_QUEUEING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, queueing = queueing()) : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_QUEUEING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +#if __TBB_CPP20_CONCEPTS_PRESENT +// Helper function which is well-formed only if all of the elements in OutputTuple +// satisfies join_node_function_object +template +void join_node_function_objects_helper( std::index_sequence ) + requires (std::tuple_size_v 
== sizeof...(Functions)) && + (... && join_node_function_object, K>); + +template +concept join_node_functions = requires { + join_node_function_objects_helper(std::make_index_sequence{}); +}; + +#endif + +// template for key_matching join_node +// tag_matching join_node is a specialization of key_matching, and is source-compatible. +template +class join_node > : public unfolded_join_node::value, + key_matching_port, OutputTuple, key_matching > { +private: + static const int N = std::tuple_size::value; + typedef unfolded_join_node > unfolded_type; +public: + typedef OutputTuple output_type; + typedef typename unfolded_type::input_ports_type input_ports_type; + +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + join_node(graph &g) : unfolded_type(g) {} +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) : + unfolded_type(g, b0, b1, b2, b3, b4) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#if __TBB_VARIADIC_MAX >= 6 + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) : + unfolded_type(g, b0, b1, b2, b3, b4, b5) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 7 + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) : + unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 8 + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 9 + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 
b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 10 + template + __TBB_requires(join_node_functions) + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_B7 b7, __TBB_B8 b8, __TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template < +#if (__clang_major__ == 3 && __clang_minor__ == 4) + // clang 3.4 misdeduces 'Args...' for 'node_set' while it can cope with template template parameter. + template class node_set, +#endif + typename... Args, typename... Bodies + > + __TBB_requires((sizeof...(Bodies) == 0) || join_node_functions) + __TBB_NOINLINE_SYM join_node(const node_set& nodes, Bodies... bodies) + : join_node(nodes.graph_reference(), bodies...) { + make_edges_in_order(nodes, *this); + } +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +// indexer node +#include "detail/_flow_graph_indexer_impl.h" + +// TODO: Implement interface with variadic template or tuple +template class indexer_node; + +//indexer node specializations +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 1; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +}; + +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 2; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +template +class indexer_node : 
public unfolded_indexer_node > { +private: + static const int N = 3; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 4; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 5; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; + +#if __TBB_VARIADIC_MAX >= 6 +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 6; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; +#endif //variadic max 6 + +#if 
__TBB_VARIADIC_MAX >= 7 +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 7; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; +#endif //variadic max 7 + +#if __TBB_VARIADIC_MAX >= 8 +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 8; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; +#endif //variadic max 8 + +#if __TBB_VARIADIC_MAX >= 9 +template +class indexer_node : public unfolded_indexer_node > { +private: + static const int N = 9; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; +#endif //variadic max 9 + +#if __TBB_VARIADIC_MAX >= 10 +template +class indexer_node/*default*/ : public unfolded_indexer_node > { +private: + static const int N = 10; +public: + typedef std::tuple InputTuple; + typedef tagged_msg output_type; + typedef unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + fgt_multiinput_node( CODEPTR(), FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + fgt_multiinput_node( CODEPTR(), 
FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +}; +#endif //variadic max 10 + +template< typename T > +inline void internal_make_edge( sender &p, receiver &s ) { + register_successor(p, s); + fgt_make_edge( &p, &s ); +} + +//! Makes an edge between a single predecessor and a single successor +template< typename T > +inline void make_edge( sender &p, receiver &s ) { + internal_make_edge( p, s ); +} + +//Makes an edge from port 0 of a multi-output predecessor to port 0 of a multi-input successor. +template< typename T, typename V, + typename = typename T::output_ports_type, typename = typename V::input_ports_type > +inline void make_edge( T& output, V& input) { + make_edge(std::get<0>(output.output_ports()), std::get<0>(input.input_ports())); +} + +//Makes an edge from port 0 of a multi-output predecessor to a receiver. +template< typename T, typename R, + typename = typename T::output_ports_type > +inline void make_edge( T& output, receiver& input) { + make_edge(std::get<0>(output.output_ports()), input); +} + +//Makes an edge from a sender to port 0 of a multi-input successor. +template< typename S, typename V, + typename = typename V::input_ports_type > +inline void make_edge( sender& output, V& input) { + make_edge(output, std::get<0>(input.input_ports())); +} + +template< typename T > +inline void internal_remove_edge( sender &p, receiver &s ) { + remove_successor( p, s ); + fgt_remove_edge( &p, &s ); +} + +//! Removes an edge between a single predecessor and a single successor +template< typename T > +inline void remove_edge( sender &p, receiver &s ) { + internal_remove_edge( p, s ); +} + +//Removes an edge between port 0 of a multi-output predecessor and port 0 of a multi-input successor. +template< typename T, typename V, + typename = typename T::output_ports_type, typename = typename V::input_ports_type > +inline void remove_edge( T& output, V& input) { + remove_edge(std::get<0>(output.output_ports()), std::get<0>(input.input_ports())); +} + +//Removes an edge between port 0 of a multi-output predecessor and a receiver. +template< typename T, typename R, + typename = typename T::output_ports_type > +inline void remove_edge( T& output, receiver& input) { + remove_edge(std::get<0>(output.output_ports()), input); +} +//Removes an edge between a sender and port 0 of a multi-input successor. +template< typename S, typename V, + typename = typename V::input_ports_type > +inline void remove_edge( sender& output, V& input) { + remove_edge(output, std::get<0>(input.input_ports())); +} + +//! Returns a copy of the body from a function or continue node +template< typename Body, typename Node > +Body copy_body( Node &n ) { + return n.template copy_function_object(); +} + +//composite_node +template< typename InputTuple, typename OutputTuple > class composite_node; + +template< typename... InputTypes, typename... OutputTypes> +class composite_node , std::tuple > : public graph_node { + +public: + typedef std::tuple< receiver&... > input_ports_type; + typedef std::tuple< sender&... 
> output_ports_type; + +private: + std::unique_ptr my_input_ports; + std::unique_ptr my_output_ports; + + static const size_t NUM_INPUTS = sizeof...(InputTypes); + static const size_t NUM_OUTPUTS = sizeof...(OutputTypes); + +protected: + void reset_node(reset_flags) override {} + +public: + composite_node( graph &g ) : graph_node(g) { + fgt_multiinput_multioutput_node( CODEPTR(), FLOW_COMPOSITE_NODE, this, &this->my_graph ); + } + + template + void set_external_ports(T1&& input_ports_tuple, T2&& output_ports_tuple) { + static_assert(NUM_INPUTS == std::tuple_size::value, "number of arguments does not match number of input ports"); + static_assert(NUM_OUTPUTS == std::tuple_size::value, "number of arguments does not match number of output ports"); + + fgt_internal_input_alias_helper::alias_port( this, input_ports_tuple); + fgt_internal_output_alias_helper::alias_port( this, output_ports_tuple); + + my_input_ports.reset( new input_ports_type(std::forward(input_ports_tuple)) ); + my_output_ports.reset( new output_ports_type(std::forward(output_ports_tuple)) ); + } + + template< typename... NodeTypes > + void add_visible_nodes(const NodeTypes&... n) { add_nodes_impl(this, true, n...); } + + template< typename... NodeTypes > + void add_nodes(const NodeTypes&... n) { add_nodes_impl(this, false, n...); } + + + input_ports_type& input_ports() { + __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports"); + return *my_input_ports; + } + + output_ports_type& output_ports() { + __TBB_ASSERT(my_output_ports, "output ports not set, call set_external_ports to set output ports"); + return *my_output_ports; + } +}; // class composite_node + +//composite_node with only input ports +template< typename... InputTypes> +class composite_node , std::tuple<> > : public graph_node { +public: + typedef std::tuple< receiver&... > input_ports_type; + +private: + std::unique_ptr my_input_ports; + static const size_t NUM_INPUTS = sizeof...(InputTypes); + +protected: + void reset_node(reset_flags) override {} + +public: + composite_node( graph &g ) : graph_node(g) { + fgt_composite( CODEPTR(), this, &g ); + } + + template + void set_external_ports(T&& input_ports_tuple) { + static_assert(NUM_INPUTS == std::tuple_size::value, "number of arguments does not match number of input ports"); + + fgt_internal_input_alias_helper::alias_port( this, input_ports_tuple); + + my_input_ports.reset( new input_ports_type(std::forward(input_ports_tuple)) ); + } + + template< typename... NodeTypes > + void add_visible_nodes(const NodeTypes&... n) { add_nodes_impl(this, true, n...); } + + template< typename... NodeTypes > + void add_nodes( const NodeTypes&... n) { add_nodes_impl(this, false, n...); } + + + input_ports_type& input_ports() { + __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports"); + return *my_input_ports; + } + +}; // class composite_node + +//composite_nodes with only output_ports +template +class composite_node , std::tuple > : public graph_node { +public: + typedef std::tuple< sender&... 
+
+template<typename Gateway>
+class async_body_base: no_assign {
+public:
+    typedef Gateway gateway_type;
+
+    async_body_base(gateway_type *gateway): my_gateway(gateway) { }
+    void set_gateway(gateway_type *gateway) {
+        my_gateway = gateway;
+    }
+
+protected:
+    gateway_type *my_gateway;
+};
+
+template<typename Input, typename Ports, typename Gateway, typename Body>
+class async_body: public async_body_base<Gateway> {
+private:
+    Body my_body;
+
+public:
+    typedef async_body_base<Gateway> base_type;
+    typedef Gateway gateway_type;
+
+    async_body(const Body &body, gateway_type *gateway)
+        : base_type(gateway), my_body(body) { }
+
+    void operator()( const Input &v, Ports & ) noexcept(noexcept(tbb::detail::invoke(my_body, v, std::declval<gateway_type&>()))) {
+        tbb::detail::invoke(my_body, v, *this->my_gateway);
+    }
+
+    Body get_body() { return my_body; }
+};
+
+//! Implements async node
+template < typename Input, typename Output,
+           typename Policy = queueing_lightweight >
+    __TBB_requires(std::default_initializable<Input> && std::copy_constructible<Input>)
+class async_node
+    : public multifunction_node< Input, std::tuple< Output >, Policy >, public sender< Output >
+{
+    typedef multifunction_node< Input, std::tuple< Output >, Policy > base_type;
+    typedef multifunction_input<
+        Input, typename base_type::output_ports_type, Policy, cache_aligned_allocator<Input>> mfn_input_type;
+
+public:
+    typedef Input input_type;
+    typedef Output output_type;
+    typedef receiver<input_type> receiver_type;
+    typedef receiver<output_type> successor_type;
+    typedef sender<input_type> predecessor_type;
+    typedef receiver_gateway<output_type> gateway_type;
+    typedef async_body_base<gateway_type> async_body_base_type;
+    typedef typename base_type::output_ports_type output_ports_type;
+
+private:
+    class receiver_gateway_impl: public receiver_gateway<Output> {
+    public:
+        receiver_gateway_impl(async_node* node): my_node(node) {}
+        void reserve_wait() override {
+            fgt_async_reserve(static_cast<typename async_node::receiver_type *>(my_node), &my_node->my_graph);
+            my_node->my_graph.reserve_wait();
+        }
+
+        void release_wait() override {
+            async_node* n = my_node;
+            graph* g = &n->my_graph;
+            g->release_wait();
+            fgt_async_commit(static_cast<typename async_node::receiver_type *>(n), g);
+        }
+
+        //! Implements gateway_type::try_put for an external activity to submit a message to FG
+        bool try_put(const Output &i) override {
+            return my_node->try_put_impl(i);
+        }
+
+    private:
+        async_node* my_node;
+    } my_gateway;
+
+    // A substitute for 'this' during member construction, used to avoid compiler warnings
+    async_node* self() { return this; }
+
+    //!
Implements gateway_type::try_put for an external activity to submit a message to FG + bool try_put_impl(const Output &i) { + multifunction_output &port_0 = output_port<0>(*this); + broadcast_cache& port_successors = port_0.successors(); + fgt_async_try_put_begin(this, &port_0); + // TODO revamp: change to std::list + graph_task_list tasks; + bool is_at_least_one_put_successful = port_successors.gather_successful_try_puts(i, tasks); + __TBB_ASSERT( is_at_least_one_put_successful || tasks.empty(), + "Return status is inconsistent with the method operation." ); + + while( !tasks.empty() ) { + enqueue_in_graph_arena(this->my_graph, tasks.pop_front()); + } + fgt_async_try_put_end(this, &port_0); + return is_at_least_one_put_successful; + } + +public: + template + __TBB_requires(async_node_body) + __TBB_NOINLINE_SYM async_node( + graph &g, size_t concurrency, + Body body, Policy = Policy(), node_priority_t a_priority = no_priority + ) : base_type( + g, concurrency, + async_body + (body, &my_gateway), a_priority ), my_gateway(self()) { + fgt_multioutput_node_with_body<1>( + CODEPTR(), FLOW_ASYNC_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body + ); + } + + template + __TBB_requires(async_node_body) + __TBB_NOINLINE_SYM async_node(graph& g, size_t concurrency, Body body, node_priority_t a_priority) + : async_node(g, concurrency, body, Policy(), a_priority) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_requires(async_node_body) + __TBB_NOINLINE_SYM async_node( + const node_set& nodes, size_t concurrency, Body body, + Policy = Policy(), node_priority_t a_priority = no_priority ) + : async_node(nodes.graph_reference(), concurrency, body, a_priority) { + make_edges_in_order(nodes, *this); + } + + template + __TBB_requires(async_node_body) + __TBB_NOINLINE_SYM async_node(const node_set& nodes, size_t concurrency, Body body, node_priority_t a_priority) + : async_node(nodes, concurrency, body, Policy(), a_priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM async_node( const async_node &other ) : base_type(other), sender(), my_gateway(self()) { + static_cast(this->my_body->get_body_ptr())->set_gateway(&my_gateway); + static_cast(this->my_init_body->get_body_ptr())->set_gateway(&my_gateway); + + fgt_multioutput_node_with_body<1>( CODEPTR(), FLOW_ASYNC_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body ); + } + + gateway_type& gateway() { + return my_gateway; + } + + // Define sender< Output > + + //! Add a new successor to this node + bool register_successor(successor_type&) override { + __TBB_ASSERT(false, "Successors must be registered only via ports"); + return false; + } + + //! 
Removes a successor from this node + bool remove_successor(successor_type&) override { + __TBB_ASSERT(false, "Successors must be removed only via ports"); + return false; + } + + template + Body copy_function_object() { + typedef multifunction_body mfn_body_type; + typedef async_body async_body_type; + mfn_body_type &body_ref = *this->my_body; + async_body_type ab = *static_cast(dynamic_cast< multifunction_body_leaf & >(body_ref).get_body_ptr()); + return ab.get_body(); + } + +protected: + + void reset_node( reset_flags f) override { + base_type::reset_node(f); + } +}; + +#include "detail/_flow_graph_node_set_impl.h" + +template< typename T > +class overwrite_node : public graph_node, public receiver, public sender { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + __TBB_NOINLINE_SYM explicit overwrite_node(graph &g) + : graph_node(g), my_successors(this), my_buffer_is_valid(false) + { + fgt_node( CODEPTR(), FLOW_OVERWRITE_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + overwrite_node(const node_set& nodes) : overwrite_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor; doesn't take anything from src; default won't work + __TBB_NOINLINE_SYM overwrite_node( const overwrite_node& src ) : overwrite_node(src.my_graph) {} + + ~overwrite_node() {} + + bool register_successor( successor_type &s ) override { + spin_mutex::scoped_lock l( my_mutex ); + if (my_buffer_is_valid && is_graph_active( my_graph )) { + // We have a valid value that must be forwarded immediately. + bool ret = s.try_put( my_buffer ); + if ( ret ) { + // We add the successor that accepted our put + my_successors.register_successor( s ); + } else { + // In case of reservation a race between the moment of reservation and register_successor can appear, + // because failed reserve does not mean that register_successor is not ready to put a message immediately. + // We have some sort of infinite loop: reserving node tries to set pull state for the edge, + // but overwrite_node tries to return push state back. That is why we have to break this loop with task creation. 
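+                // (register_predecessor_task, defined further below in this class,
+                // performs that retry outside the lock held here.)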
+ d1::small_object_allocator allocator{}; + typedef register_predecessor_task task_type; + graph_task* t = allocator.new_object(graph_reference(), allocator, *this, s); + spawn_in_graph_arena( my_graph, *t ); + } + } else { + // No valid value yet, just add as successor + my_successors.register_successor( s ); + } + return true; + } + + bool remove_successor( successor_type &s ) override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.remove_successor(s); + return true; + } + + bool try_get( input_type &v ) override { + spin_mutex::scoped_lock l( my_mutex ); + if ( my_buffer_is_valid ) { + v = my_buffer; + return true; + } + return false; + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + bool try_get( input_type &v, message_metainfo& metainfo ) override { + spin_mutex::scoped_lock l( my_mutex ); + if (my_buffer_is_valid) { + v = my_buffer; + metainfo = my_buffered_metainfo; + + // Since the successor of the node will use move semantics while wrapping the metainfo + // that is designed to transfer the ownership of the value from single-push buffer to the task + // It is required to reserve one more reference here because the value keeps in the buffer + // and the ownership is not transferred + for (auto msg_waiter : metainfo.waiters()) { + msg_waiter->reserve(1); + } + return true; + } + return false; + } +#endif + + //! Reserves an item + bool try_reserve( T &v ) override { + return try_get(v); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT +private: + bool try_reserve(T& v, message_metainfo& metainfo) override { + spin_mutex::scoped_lock l( my_mutex ); + if (my_buffer_is_valid) { + v = my_buffer; + metainfo = my_buffered_metainfo; + return true; + } + return false; + } +public: +#endif + + //! Releases the reserved item + bool try_release() override { return true; } + + //! 
Consumes the reserved item + bool try_consume() override { return true; } + + bool is_valid() { + spin_mutex::scoped_lock l( my_mutex ); + return my_buffer_is_valid; + } + + void clear() { + spin_mutex::scoped_lock l( my_mutex ); + my_buffer_is_valid = false; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + for (auto msg_waiter : my_buffered_metainfo.waiters()) { + msg_waiter->release(1); + } + my_buffered_metainfo = message_metainfo{}; +#endif + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + graph_task* try_put_task( const input_type &v ) override { + spin_mutex::scoped_lock l( my_mutex ); + return try_put_task_impl(v __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const input_type& v, const message_metainfo& metainfo) override { + spin_mutex::scoped_lock l( my_mutex ); + return try_put_task_impl(v, metainfo); + } +#endif + + graph_task * try_put_task_impl(const input_type &v __TBB_FLOW_GRAPH_METAINFO_ARG(const message_metainfo& metainfo)) { + my_buffer = v; + my_buffer_is_valid = true; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + // Since the new item is pushed to the buffer - reserving the waiters + for (auto msg_waiter : metainfo.waiters()) { + msg_waiter->reserve(1); + } + + // Since the item is taken out from the buffer - releasing the stored waiters + for (auto msg_waiter : my_buffered_metainfo.waiters()) { + msg_waiter->release(1); + } + + my_buffered_metainfo = metainfo; +#endif + graph_task* rtask = my_successors.try_put_task(v __TBB_FLOW_GRAPH_METAINFO_ARG(my_buffered_metainfo) ); + if (!rtask) rtask = SUCCESSFULLY_ENQUEUED; + return rtask; + } + + graph& graph_reference() const override { + return my_graph; + } + + //! Breaks an infinite loop between the node reservation and register_successor call + struct register_predecessor_task : public graph_task { + register_predecessor_task( + graph& g, d1::small_object_allocator& allocator, predecessor_type& owner, successor_type& succ) + : graph_task(g, allocator), o(owner), s(succ) {}; + + d1::task* execute(d1::execution_data& ed) override { + // TODO revamp: investigate why qualification is needed for register_successor() call + using tbb::detail::d2::register_predecessor; + using tbb::detail::d2::register_successor; + if ( !register_predecessor(s, o) ) { + register_successor(o, s); + } + finalize(ed); + return nullptr; + } + + d1::task* cancel(d1::execution_data& ed) override { + finalize(ed); + return nullptr; + } + + predecessor_type& o; + successor_type& s; + }; + + spin_mutex my_mutex; + broadcast_cache< input_type, null_rw_mutex > my_successors; + input_type my_buffer; +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + message_metainfo my_buffered_metainfo; +#endif + bool my_buffer_is_valid; + + void reset_node( reset_flags f) override { + my_buffer_is_valid = false; + if (f&rf_clear_edges) { + my_successors.clear(); + } + } +}; // overwrite_node + +template< typename T > +class write_once_node : public overwrite_node { +public: + typedef T input_type; + typedef T output_type; + typedef overwrite_node base_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! 
Constructor + __TBB_NOINLINE_SYM explicit write_once_node(graph& g) : base_type(g) { + fgt_node( CODEPTR(), FLOW_WRITE_ONCE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + write_once_node(const node_set& nodes) : write_once_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor: call base class copy constructor + __TBB_NOINLINE_SYM write_once_node( const write_once_node& src ) : base_type(src) { + fgt_node( CODEPTR(), FLOW_WRITE_ONCE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class broadcast_cache; + template friend class round_robin_cache; + graph_task *try_put_task( const T &v ) override { + spin_mutex::scoped_lock l( this->my_mutex ); + return this->my_buffer_is_valid ? nullptr : this->try_put_task_impl(v __TBB_FLOW_GRAPH_METAINFO_ARG(message_metainfo{})); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_TRY_PUT_AND_WAIT + graph_task* try_put_task(const T& v, const message_metainfo& metainfo) override { + spin_mutex::scoped_lock l( this->my_mutex ); + return this->my_buffer_is_valid ? nullptr : this->try_put_task_impl(v, metainfo); + } +#endif +}; // write_once_node + +inline void set_name(const graph& g, const char *name) { + fgt_graph_desc(&g, name); +} + +template +inline void set_name(const input_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const function_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const continue_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const broadcast_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const buffer_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const queue_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const sequencer_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const priority_queue_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const limiter_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const join_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const indexer_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const overwrite_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const write_once_node& node, const char *name) { + fgt_node_desc(&node, name); +} + +template +inline void set_name(const multifunction_node& node, const char *name) { + fgt_multioutput_node_desc(&node, name); +} + +template +inline void set_name(const split_node& node, const char *name) { + fgt_multioutput_node_desc(&node, name); +} + +template< typename InputTuple, typename OutputTuple > +inline void set_name(const composite_node& node, const char *name) { + fgt_multiinput_multioutput_node_desc(&node, name); +} + +template +inline void set_name(const async_node& node, const char *name) +{ + fgt_multioutput_node_desc(&node, name); +} +} // d2 +} // detail +} // tbb + + +// Include deduction guides for node 
classes +#include "detail/_flow_graph_nodes_deduction.h" + +namespace tbb { +namespace flow { +inline namespace v1 { + using detail::d2::receiver; + using detail::d2::sender; + + using detail::d2::serial; + using detail::d2::unlimited; + + using detail::d2::reset_flags; + using detail::d2::rf_reset_protocol; + using detail::d2::rf_reset_bodies; + using detail::d2::rf_clear_edges; + + using detail::d2::graph; + using detail::d2::graph_node; + using detail::d2::continue_msg; + + using detail::d2::input_node; + using detail::d2::function_node; + using detail::d2::multifunction_node; + using detail::d2::split_node; + using detail::d2::output_port; + using detail::d2::indexer_node; + using detail::d2::tagged_msg; + using detail::d2::cast_to; + using detail::d2::is_a; + using detail::d2::continue_node; + using detail::d2::overwrite_node; + using detail::d2::write_once_node; + using detail::d2::broadcast_node; + using detail::d2::buffer_node; + using detail::d2::queue_node; + using detail::d2::sequencer_node; + using detail::d2::priority_queue_node; + using detail::d2::limiter_node; + using namespace detail::d2::graph_policy_namespace; + using detail::d2::join_node; + using detail::d2::input_port; + using detail::d2::copy_body; + using detail::d2::make_edge; + using detail::d2::remove_edge; + using detail::d2::tag_value; + using detail::d2::composite_node; + using detail::d2::async_node; + using detail::d2::node_priority_t; + using detail::d2::no_priority; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + using detail::d2::follows; + using detail::d2::precedes; + using detail::d2::make_node_set; + using detail::d2::make_edges; +#endif + +} // v1 +} // flow + + using detail::d1::flow_control; + +namespace profiling { + using detail::d2::set_name; +} // profiling + +} // tbb + + +#if TBB_USE_PROFILING_TOOLS && ( __unix__ || __APPLE__ ) + // We don't do pragma pop here, since it still gives warning on the USER side + #undef __TBB_NOINLINE_SYM +#endif + +#endif // __TBB_flow_graph_H diff --git a/src/tbb/include/oneapi/tbb/flow_graph_abstractions.h b/src/tbb/include/oneapi/tbb/flow_graph_abstractions.h new file mode 100644 index 000000000..329e75c43 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/flow_graph_abstractions.h @@ -0,0 +1,51 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_flow_graph_abstractions_H +#define __TBB_flow_graph_abstractions_H + +namespace tbb { +namespace detail { +namespace d2 { + +//! Pure virtual template classes that define interfaces for async communication +class graph_proxy { +public: + //! Inform a graph that messages may come from outside, to prevent premature graph completion + virtual void reserve_wait() = 0; + + //! Inform a graph that a previous call to reserve_wait is no longer in effect + virtual void release_wait() = 0; + + virtual ~graph_proxy() {} +}; + +template +class receiver_gateway : public graph_proxy { +public: + //! Type of inputing data into FG. + typedef Input input_type; + + //! 
Submit signal from an asynchronous activity to FG. + virtual bool try_put(const input_type&) = 0; +}; + +} // d2 + + +} // detail +} // tbb +#endif diff --git a/src/tbb/include/oneapi/tbb/global_control.h b/src/tbb/include/oneapi/tbb/global_control.h new file mode 100644 index 000000000..57f3b9dbc --- /dev/null +++ b/src/tbb/include/oneapi/tbb/global_control.h @@ -0,0 +1,200 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_global_control_H +#define __TBB_global_control_H + +#include "detail/_config.h" + +#include "detail/_assert.h" +#include "detail/_attach.h" +#include "detail/_exception.h" +#include "detail/_namespace_injection.h" +#include "detail/_template_helpers.h" + +#include +#include // std::nothrow_t + +namespace tbb { +namespace detail { + +namespace d1 { +class global_control; +class task_scheduler_handle; +} + +namespace r1 { +TBB_EXPORT void __TBB_EXPORTED_FUNC create(d1::global_control&); +TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::global_control&); +TBB_EXPORT std::size_t __TBB_EXPORTED_FUNC global_control_active_value(int); +struct global_control_impl; +struct control_storage_comparator; +void release_impl(d1::task_scheduler_handle& handle); +bool finalize_impl(d1::task_scheduler_handle& handle); +TBB_EXPORT void __TBB_EXPORTED_FUNC get(d1::task_scheduler_handle&); +TBB_EXPORT bool __TBB_EXPORTED_FUNC finalize(d1::task_scheduler_handle&, std::intptr_t mode); +} + +namespace d1 { + +class global_control { +public: + enum parameter { + max_allowed_parallelism, + thread_stack_size, + terminate_on_exception, + scheduler_handle, // not a public parameter + parameter_max // insert new parameters above this point + }; + + global_control(parameter p, std::size_t value) : + my_value(value), my_reserved(), my_param(p) { + suppress_unused_warning(my_reserved); + __TBB_ASSERT(my_param < parameter_max, "Invalid parameter"); +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + // For Windows 8 Store* apps it's impossible to set stack size + if (p==thread_stack_size) + return; +#elif __TBB_x86_64 && (_WIN32 || _WIN64) + if (p==thread_stack_size) + __TBB_ASSERT_RELEASE((unsigned)value == value, "Stack size is limited to unsigned int range"); +#endif + if (my_param==max_allowed_parallelism) + __TBB_ASSERT_RELEASE(my_value>0, "max_allowed_parallelism cannot be 0."); + r1::create(*this); + } + + ~global_control() { + __TBB_ASSERT(my_param < parameter_max, "Invalid parameter"); +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + // For Windows 8 Store* apps it's impossible to set stack size + if (my_param==thread_stack_size) + return; +#endif + r1::destroy(*this); + } + + static std::size_t active_value(parameter p) { + __TBB_ASSERT(p < parameter_max, "Invalid parameter"); + return r1::global_control_active_value((int)p); + } + +private: + std::size_t my_value; + std::intptr_t my_reserved; // TODO: substitution of global_control* not to break backward compatibility + parameter my_param; + + friend struct r1::global_control_impl; 
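+    // A usage sketch (illustrative): a global_control instance caps TBB at four
+    // worker threads for its lifetime, and active_value() reads the setting back:
+    //
+    //     tbb::global_control gc(tbb::global_control::max_allowed_parallelism, 4);
+    //     std::size_t n = tbb::global_control::active_value(
+    //         tbb::global_control::max_allowed_parallelism);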
+ friend struct r1::control_storage_comparator; +}; + +//! Finalization options. +//! Outside of the class to avoid extensive friendship. +static constexpr std::intptr_t release_nothrowing = 0; +static constexpr std::intptr_t finalize_nothrowing = 1; +static constexpr std::intptr_t finalize_throwing = 2; + +//! User side wrapper for a task scheduler lifetime control object +class task_scheduler_handle { +public: + //! Creates an empty task_scheduler_handle + task_scheduler_handle() = default; + + //! Creates an attached instance of task_scheduler_handle + task_scheduler_handle(attach) { + r1::get(*this); + } + + //! Release a reference if any + ~task_scheduler_handle() { + release(); + } + + //! No copy + task_scheduler_handle(const task_scheduler_handle& other) = delete; + task_scheduler_handle& operator=(const task_scheduler_handle& other) = delete; + + //! Move only + task_scheduler_handle(task_scheduler_handle&& other) noexcept { + std::swap(m_ctl, other.m_ctl); + } + task_scheduler_handle& operator=(task_scheduler_handle&& other) noexcept { + std::swap(m_ctl, other.m_ctl); + return *this; + }; + + //! Checks if the task_scheduler_handle is empty + explicit operator bool() const noexcept { + return m_ctl != nullptr; + } + + //! Release the reference and deactivate handle + void release() { + if (m_ctl != nullptr) { + r1::finalize(*this, release_nothrowing); + m_ctl = nullptr; + } + } + +private: + friend void r1::release_impl(task_scheduler_handle& handle); + friend bool r1::finalize_impl(task_scheduler_handle& handle); + friend void __TBB_EXPORTED_FUNC r1::get(task_scheduler_handle&); + + friend void finalize(task_scheduler_handle&); + friend bool finalize(task_scheduler_handle&, const std::nothrow_t&) noexcept; + + global_control* m_ctl{nullptr}; +}; + +#if TBB_USE_EXCEPTIONS +//! Waits for worker threads termination. Throws exception on error. +inline void finalize(task_scheduler_handle& handle) { + try_call([&] { + if (handle.m_ctl != nullptr) { + bool finalized = r1::finalize(handle, finalize_throwing); + __TBB_ASSERT_EX(finalized, "r1::finalize did not respect finalize_throwing ?"); + + } + }).on_completion([&] { + __TBB_ASSERT(!handle, "The handle should be empty after finalize"); + }); +} +#endif +//! Waits for worker threads termination. Returns false on error. +inline bool finalize(task_scheduler_handle& handle, const std::nothrow_t&) noexcept { + bool finalized = true; + if (handle.m_ctl != nullptr) { + finalized = r1::finalize(handle, finalize_nothrowing); + } + __TBB_ASSERT(!handle, "The handle should be empty after finalize"); + return finalized; +} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::global_control; +using detail::d1::attach; +using detail::d1::finalize; +using detail::d1::task_scheduler_handle; +using detail::r1::unsafe_wait; +} // namespace v1 + +} // namespace tbb + +#endif // __TBB_global_control_H diff --git a/src/tbb/include/oneapi/tbb/info.h b/src/tbb/include/oneapi/tbb/info.h new file mode 100644 index 000000000..dfcfcc031 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/info.h @@ -0,0 +1,125 @@ +/* + Copyright (c) 2019-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_info_H +#define __TBB_info_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" + +#if __TBB_ARENA_BINDING +#include +#include + +namespace tbb { +namespace detail { + +namespace d1{ + +using numa_node_id = int; +using core_type_id = int; + +// TODO: consider version approach to resolve backward compatibility potential issues. +struct constraints { +#if !__TBB_CPP20_PRESENT + constraints(numa_node_id id = -1, int maximal_concurrency = -1) + : numa_id(id) + , max_concurrency(maximal_concurrency) + {} +#endif /*!__TBB_CPP20_PRESENT*/ + + constraints& set_numa_id(numa_node_id id) { + numa_id = id; + return *this; + } + constraints& set_max_concurrency(int maximal_concurrency) { + max_concurrency = maximal_concurrency; + return *this; + } + constraints& set_core_type(core_type_id id) { + core_type = id; + return *this; + } + constraints& set_max_threads_per_core(int threads_number) { + max_threads_per_core = threads_number; + return *this; + } + + numa_node_id numa_id = -1; + int max_concurrency = -1; + core_type_id core_type = -1; + int max_threads_per_core = -1; +}; + +} // namespace d1 + +namespace r1 { +TBB_EXPORT unsigned __TBB_EXPORTED_FUNC numa_node_count(); +TBB_EXPORT void __TBB_EXPORTED_FUNC fill_numa_indices(int* index_array); +TBB_EXPORT int __TBB_EXPORTED_FUNC numa_default_concurrency(int numa_id); + +// Reserved fields are required to save binary backward compatibility in case of future changes. +// They must be defined to 0 at this moment. 
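+
+// A usage sketch (illustrative) of the d1 wrappers defined below: enumerate the
+// NUMA nodes visible to TBB and query each node's default concurrency:
+//
+//     for (tbb::numa_node_id id : tbb::info::numa_nodes()) {
+//         int threads = tbb::info::default_concurrency(id);
+//         // e.g. size one arena per NUMA node with 'threads' slots
+//     }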
+TBB_EXPORT unsigned __TBB_EXPORTED_FUNC core_type_count(intptr_t reserved = 0); +TBB_EXPORT void __TBB_EXPORTED_FUNC fill_core_type_indices(int* index_array, intptr_t reserved = 0); + +TBB_EXPORT int __TBB_EXPORTED_FUNC constraints_default_concurrency(const d1::constraints& c, intptr_t reserved = 0); +TBB_EXPORT int __TBB_EXPORTED_FUNC constraints_threads_per_core(const d1::constraints& c, intptr_t reserved = 0); +} // namespace r1 + +namespace d1 { + +inline std::vector numa_nodes() { + std::vector node_indices(r1::numa_node_count()); + r1::fill_numa_indices(node_indices.data()); + return node_indices; +} + +inline int default_concurrency(numa_node_id id = -1) { + return r1::numa_default_concurrency(id); +} + +inline std::vector core_types() { + std::vector core_type_indexes(r1::core_type_count()); + r1::fill_core_type_indices(core_type_indexes.data()); + return core_type_indexes; +} + +inline int default_concurrency(constraints c) { + if (c.max_concurrency > 0) { return c.max_concurrency; } + return r1::constraints_default_concurrency(c); +} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::numa_node_id; +using detail::d1::core_type_id; + +namespace info { +using detail::d1::numa_nodes; +using detail::d1::core_types; + +using detail::d1::default_concurrency; +} // namespace info +} // namespace v1 + +} // namespace tbb + +#endif /*__TBB_ARENA_BINDING*/ + +#endif /*__TBB_info_H*/ diff --git a/inst/include/tbb/memory_pool.h b/src/tbb/include/oneapi/tbb/memory_pool.h similarity index 66% rename from inst/include/tbb/memory_pool.h rename to src/tbb/include/oneapi/tbb/memory_pool.h index 47b8e1b2c..de01466e9 100644 --- a/inst/include/tbb/memory_pool.h +++ b/src/tbb/include/oneapi/tbb/memory_pool.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_memory_pool_H @@ -27,10 +23,11 @@ /** @file */ #include "scalable_allocator.h" + #include // std::bad_alloc -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#include // std::runtime_error, std::invalid_argument #include // std::forward -#endif + #if __TBB_EXTRA_DEBUG #define __TBBMALLOC_ASSERT ASSERT @@ -39,12 +36,11 @@ #endif namespace tbb { -namespace interface6 { -//! @cond INTERNAL -namespace internal { +namespace detail { +namespace d1 { //! Base of thread-safe pool allocator for variable-size requests -class pool_base : tbb::internal::no_copy { +class pool_base : no_copy { // Pool interface is separate from standard allocator classes because it has // to maintain internal state, no copy or assignment. Move and swap are possible. public: @@ -70,18 +66,15 @@ class pool_base : tbb::internal::no_copy { rml::MemoryPool *my_pool; }; -} // namespace internal -//! @endcond - #if _MSC_VER && !defined(__INTEL_COMPILER) // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) + // #pragma warning (push) + // #pragma warning (disable: 4100) #endif //! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 /** @ingroup memory_allocation */ -template +template class memory_pool_allocator { protected: typedef P pool_type; @@ -93,7 +86,7 @@ class memory_pool_allocator { template friend bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b); public: - typedef typename tbb::internal::allocator_type::value_type value_type; + typedef T value_type; typedef value_type* pointer; typedef const value_type* const_pointer; typedef value_type& reference; @@ -104,38 +97,35 @@ class memory_pool_allocator { typedef memory_pool_allocator other; }; - memory_pool_allocator(pool_type &pool) throw() : my_pool(&pool) {} - memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + explicit memory_pool_allocator(pool_type &pool) noexcept : my_pool(&pool) {} + memory_pool_allocator(const memory_pool_allocator& src) noexcept : my_pool(src.my_pool) {} template - memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + memory_pool_allocator(const memory_pool_allocator& src) noexcept : my_pool(src.my_pool) {} pointer address(reference x) const { return &x; } const_pointer address(const_reference x) const { return &x; } - + //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ = 0) { - return static_cast( my_pool->malloc( n*sizeof(value_type) ) ); + pointer allocate( size_type n, const void* /*hint*/ = nullptr) { + pointer p = static_cast( my_pool->malloc( n*sizeof(value_type) ) ); + if (!p) + throw_exception(std::bad_alloc()); + return p; } //! Free previously allocated block of memory. void deallocate( pointer p, size_type ) { my_pool->free(p); } //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { + size_type max_size() const noexcept { size_type max = static_cast(-1) / sizeof (value_type); return (max > 0 ? max : 1); } //! 
Copy-construct value at location pointed to by p. -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + template void construct(U *p, Args&&... args) { ::new((void *)p) U(std::forward(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} -#endif - void construct( pointer p, const value_type& value ) { ::new((void*)(p)) value_type(value); } -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC //! Destroy value at location pointed to by p. void destroy( pointer p ) { p->~value_type(); } @@ -143,12 +133,12 @@ class memory_pool_allocator { }; #if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) + // #pragma warning (pop) #endif // warning 4100 is back //! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 /** @ingroup memory_allocation */ -template +template class memory_pool_allocator { public: typedef P pool_type; @@ -159,10 +149,10 @@ class memory_pool_allocator { typedef memory_pool_allocator other; }; - memory_pool_allocator( pool_type &pool) throw() : my_pool(&pool) {} - memory_pool_allocator( const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + explicit memory_pool_allocator( pool_type &pool) noexcept : my_pool(&pool) {} + memory_pool_allocator( const memory_pool_allocator& src) noexcept : my_pool(src.my_pool) {} template - memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + memory_pool_allocator(const memory_pool_allocator& src) noexcept : my_pool(src.my_pool) {} protected: pool_type *my_pool; @@ -180,24 +170,22 @@ inline bool operator==( const memory_pool_allocator& a, const memory_pool_a template inline bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b) {return a.my_pool!=b.my_pool;} - //! Thread-safe growable pool allocator for variable-size requests template -class memory_pool : public internal::pool_base { +class memory_pool : public pool_base { Alloc my_alloc; // TODO: base-class optimization static void *allocate_request(intptr_t pool_id, size_t & bytes); static int deallocate_request(intptr_t pool_id, void*, size_t raw_bytes); public: //! construct pool with underlying allocator - memory_pool(const Alloc &src = Alloc()); + explicit memory_pool(const Alloc &src = Alloc()); //! destroy pool ~memory_pool() { destroy(); } // call the callbacks first and destroy my_alloc latter - }; -class fixed_pool : public internal::pool_base { +class fixed_pool : public pool_base { void *my_buffer; size_t my_size; inline static void *allocate_request(intptr_t pool_id, size_t & bytes); @@ -216,40 +204,51 @@ memory_pool::memory_pool(const Alloc &src) : my_alloc(src) { rml::MemPoolPolicy args(allocate_request, deallocate_request, sizeof(typename Alloc::value_type)); rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool); - if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); + if (res!=rml::POOL_OK) + throw_exception(std::runtime_error("Can't create pool")); } template void *memory_pool::allocate_request(intptr_t pool_id, size_t & bytes) { memory_pool &self = *reinterpret_cast*>(pool_id); const size_t unit_size = sizeof(typename Alloc::value_type); - __TBBMALLOC_ASSERT( 0 == bytes%unit_size, NULL); + __TBBMALLOC_ASSERT( 0 == bytes%unit_size, nullptr); void *ptr; - __TBB_TRY { ptr = self.my_alloc.allocate( bytes/unit_size ); } - __TBB_CATCH(...) 
{ return 0; } +#if TBB_USE_EXCEPTIONS + try { +#endif + ptr = self.my_alloc.allocate( bytes/unit_size ); +#if TBB_USE_EXCEPTIONS + } catch(...) { + return nullptr; + } +#endif return ptr; } #if __TBB_MSVC_UNREACHABLE_CODE_IGNORED // Workaround for erroneous "unreachable code" warning in the template below. // Specific for VC++ 17-18 compiler - #pragma warning (push) - #pragma warning (disable: 4702) + // #pragma warning (push) + // #pragma warning (disable: 4702) #endif template int memory_pool::deallocate_request(intptr_t pool_id, void* raw_ptr, size_t raw_bytes) { memory_pool &self = *reinterpret_cast*>(pool_id); const size_t unit_size = sizeof(typename Alloc::value_type); - __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, NULL); + __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, nullptr); self.my_alloc.deallocate( static_cast(raw_ptr), raw_bytes/unit_size ); return 0; } #if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - #pragma warning (pop) + // #pragma warning (pop) #endif inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) { - if( !buf || !size ) __TBB_THROW(std::bad_alloc()); - rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true); + if (!buf || !size) + // TODO: improve support for mode with exceptions disabled + throw_exception(std::invalid_argument("Zero in parameter is invalid")); + rml::MemPoolPolicy args(allocate_request, nullptr, size, /*fixedPool=*/true); rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool); - if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); + if (res!=rml::POOL_OK) + throw_exception(std::runtime_error("Can't create pool")); } inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) { fixed_pool &self = *reinterpret_cast(pool_id); @@ -259,11 +258,15 @@ inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) { return self.my_buffer; } -} //namespace interface6 -using interface6::memory_pool_allocator; -using interface6::memory_pool; -using interface6::fixed_pool; -} //namespace tbb +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::memory_pool_allocator; +using detail::d1::memory_pool; +using detail::d1::fixed_pool; +} // inline namepspace v1 +} // namespace tbb #undef __TBBMALLOC_ASSERT #endif// __TBB_memory_pool_H diff --git a/src/tbb/include/oneapi/tbb/mutex.h b/src/tbb/include/oneapi/tbb/mutex.h new file mode 100644 index 000000000..169b7a3ca --- /dev/null +++ b/src/tbb/include/oneapi/tbb/mutex.h @@ -0,0 +1,93 @@ +/* + Copyright (c) 2021-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_mutex_H +#define __TBB_mutex_H + +#include "detail/_namespace_injection.h" +#include "detail/_utils.h" +#include "detail/_scoped_lock.h" +#include "detail/_waitable_atomic.h" +#include "detail/_mutex_common.h" +#include "profiling.h" + +namespace tbb { +namespace detail { +namespace d1 { + +class mutex { +public: + //! Constructors + mutex() { + create_itt_sync(this, "tbb::mutex", ""); + }; + + //! 
Destructor + ~mutex() = default; + + //! No Copy + mutex(const mutex&) = delete; + mutex& operator=(const mutex&) = delete; + + using scoped_lock = unique_scoped_lock; + + //! Mutex traits + static constexpr bool is_rw_mutex = false; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = false; + + //! Acquire lock + /** Spin if the lock is taken */ + void lock() { + call_itt_notify(prepare, this); + while (!try_lock()) { + my_flag.wait(true, /* context = */ 0, std::memory_order_relaxed); + } + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { + bool result = !my_flag.load(std::memory_order_relaxed) && !my_flag.exchange(true); + if (result) { + call_itt_notify(acquired, this); + } + return result; + } + + //! Release lock + void unlock() { + call_itt_notify(releasing, this); + // We need Write Read memory barrier before notify that reads the waiter list. + // In C++ only full fence covers this type of barrier. + my_flag.exchange(false); + my_flag.notify_one_relaxed(); + } + +private: + waitable_atomic my_flag{0}; +}; // class mutex + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::mutex; +} // namespace v1 + +} // namespace tbb + +#endif // __TBB_mutex_H diff --git a/src/tbb/include/oneapi/tbb/null_mutex.h b/src/tbb/include/oneapi/tbb/null_mutex.h new file mode 100644 index 000000000..1797e35bb --- /dev/null +++ b/src/tbb/include/oneapi/tbb/null_mutex.h @@ -0,0 +1,80 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_null_mutex_H +#define __TBB_null_mutex_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_mutex_common.h" + +namespace tbb { +namespace detail { +namespace d1 { + +//! A mutex which does nothing +/** A null_mutex does no operation and simulates success. + @ingroup synchronization */ +class null_mutex { +public: + //! Constructors + constexpr null_mutex() noexcept = default; + + //! Destructor + ~null_mutex() = default; + + //! No Copy + null_mutex(const null_mutex&) = delete; + null_mutex& operator=(const null_mutex&) = delete; + + //! Represents acquisition of a mutex. + class scoped_lock { + public: + //! Constructors + constexpr scoped_lock() noexcept = default; + scoped_lock(null_mutex&) {} + + //! Destructor + ~scoped_lock() = default; + + //! No Copy + scoped_lock(const scoped_lock&) = delete; + scoped_lock& operator=(const scoped_lock&) = delete; + + void acquire(null_mutex&) {} + bool try_acquire(null_mutex&) { return true; } + void release() {} + }; + + //! 
Mutex traits + static constexpr bool is_rw_mutex = false; + static constexpr bool is_recursive_mutex = true; + static constexpr bool is_fair_mutex = true; + + void lock() {} + bool try_lock() { return true; } + void unlock() {} +}; // class null_mutex + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::null_mutex; +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_null_mutex_H */ diff --git a/src/tbb/include/oneapi/tbb/null_rw_mutex.h b/src/tbb/include/oneapi/tbb/null_rw_mutex.h new file mode 100644 index 000000000..43983a005 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/null_rw_mutex.h @@ -0,0 +1,87 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_null_rw_mutex_H +#define __TBB_null_rw_mutex_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_mutex_common.h" + +namespace tbb { +namespace detail { +namespace d1 { + +//! A rw mutex which does nothing +/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation. + @ingroup synchronization */ +class null_rw_mutex { +public: + //! Constructors + constexpr null_rw_mutex() noexcept = default; + + //! Destructor + ~null_rw_mutex() = default; + + //! No Copy + null_rw_mutex(const null_rw_mutex&) = delete; + null_rw_mutex& operator=(const null_rw_mutex&) = delete; + + //! Represents acquisition of a mutex. + class scoped_lock { + public: + //! Constructors + constexpr scoped_lock() noexcept = default; + scoped_lock(null_rw_mutex&, bool = true) {} + + //! Destructor + ~scoped_lock() = default; + + //! No Copy + scoped_lock(const scoped_lock&) = delete; + scoped_lock& operator=(const scoped_lock&) = delete; + + void acquire(null_rw_mutex&, bool = true) {} + bool try_acquire(null_rw_mutex&, bool = true) { return true; } + void release() {} + bool upgrade_to_writer() { return true; } + bool downgrade_to_reader() { return true; } + + bool is_writer() const { return true; } + }; + + //! Mutex traits + static constexpr bool is_rw_mutex = true; + static constexpr bool is_recursive_mutex = true; + static constexpr bool is_fair_mutex = true; + + void lock() {} + bool try_lock() { return true; } + void unlock() {} + void lock_shared() {} + bool try_lock_shared() { return true; } + void unlock_shared() {} +}; // class null_rw_mutex + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::null_rw_mutex; +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_null_rw_mutex_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_for.h b/src/tbb/include/oneapi/tbb/parallel_for.h new file mode 100644 index 000000000..37a261350 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_for.h @@ -0,0 +1,469 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_for_H +#define __TBB_parallel_for_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_exception.h" +#include "detail/_task.h" +#include "detail/_small_object_pool.h" +#include "profiling.h" + +#include "partitioner.h" +#include "blocked_range.h" +#include "task_group.h" + +#include +#include + +namespace tbb { +namespace detail { +#if __TBB_CPP20_CONCEPTS_PRESENT +inline namespace d0 { + +template +concept parallel_for_body = std::copy_constructible && std::invocable&, Range&>; + +template +concept parallel_for_index = std::constructible_from && + std::copyable && + requires( const std::remove_reference_t& lhs, const std::remove_reference_t& rhs ) { + { lhs < rhs } -> adaptive_same_as; + { lhs - rhs } -> std::convertible_to; + { lhs + (rhs - lhs) } -> std::convertible_to; + }; + +template +concept parallel_for_function = std::invocable&, Index>; + +} // namespace d0 +#endif // __TBB_CPP20_CONCEPTS_PRESENT +namespace d1 { + +//! Task type used in parallel_for +/** @ingroup algorithms */ +template +struct start_for : public task { + Range my_range; + const Body my_body; + node* my_parent; + + typename Partitioner::task_partition_type my_partition; + small_object_allocator my_allocator; + + task* execute(execution_data&) override; + task* cancel(execution_data&) override; + void finalize(const execution_data&); + + //! Constructor for root task. + start_for( const Range& range, const Body& body, Partitioner& partitioner, small_object_allocator& alloc ) : + my_range(range), + my_body(body), + my_parent(nullptr), + my_partition(partitioner), + my_allocator(alloc) {} + //! Splitting constructor used to generate children. + /** parent_ becomes left child. Newly constructed object is right child. */ + start_for( start_for& parent_, typename Partitioner::split_type& split_obj, small_object_allocator& alloc ) : + my_range(parent_.my_range, get_range_split_object(split_obj)), + my_body(parent_.my_body), + my_parent(nullptr), + my_partition(parent_.my_partition, split_obj), + my_allocator(alloc) {} + //! Construct right child from the given range as response to the demand. + /** parent_ remains left child. Newly constructed object is right child. */ + start_for( start_for& parent_, const Range& r, depth_t d, small_object_allocator& alloc ) : + my_range(r), + my_body(parent_.my_body), + my_parent(nullptr), + my_partition(parent_.my_partition, split()), + my_allocator(alloc) + { + my_partition.align_depth( d ); + } + static void run(const Range& range, const Body& body, Partitioner& partitioner) { + task_group_context context(PARALLEL_FOR); + run(range, body, partitioner, context); + } + + static void run(const Range& range, const Body& body, Partitioner& partitioner, task_group_context& context) { + if ( !range.empty() ) { + small_object_allocator alloc{}; + start_for& for_task = *alloc.new_object(range, body, partitioner, alloc); + + // defer creation of the wait node until task allocation succeeds + wait_node wn; + for_task.my_parent = &wn; + execute_and_wait(for_task, context, wn.m_wait, context); + } + } + //! 
Run body for range, serves as callback for partitioner + void run_body( Range &r ) { + tbb::detail::invoke(my_body, r); + } + + //! spawn right task, serves as callback for partitioner + void offer_work(typename Partitioner::split_type& split_obj, execution_data& ed) { + offer_work_impl(ed, *this, split_obj); + } + + //! spawn right task, serves as callback for partitioner + void offer_work(const Range& r, depth_t d, execution_data& ed) { + offer_work_impl(ed, *this, r, d); + } + +private: + template + void offer_work_impl(execution_data& ed, Args&&... constructor_args) { + // New right child + small_object_allocator alloc{}; + start_for& right_child = *alloc.new_object(ed, std::forward(constructor_args)..., alloc); + + // New root node as a continuation and ref count. Left and right child attach to the new parent. + right_child.my_parent = my_parent = alloc.new_object(ed, my_parent, 2, alloc); + // Spawn the right sibling + right_child.spawn_self(ed); + } + + void spawn_self(execution_data& ed) { + my_partition.spawn_task(*this, *context(ed)); + } +}; + +//! fold the tree and deallocate the task +template +void start_for::finalize(const execution_data& ed) { + // Get the current parent and allocator an object destruction + node* parent = my_parent; + auto allocator = my_allocator; + // Task execution finished - destroy it + this->~start_for(); + // Unwind the tree decrementing the parent`s reference count + + fold_tree(parent, ed); + allocator.deallocate(this, ed); + +} + +//! execute task for parallel_for +template +task* start_for::execute(execution_data& ed) { + if (!is_same_affinity(ed)) { + my_partition.note_affinity(execution_slot(ed)); + } + my_partition.check_being_stolen(*this, ed); + my_partition.execute(*this, my_range, ed); + finalize(ed); + return nullptr; +} + +//! cancel task for parallel_for +template +task* start_for::cancel(execution_data& ed) { + finalize(ed); + return nullptr; +} + +//! Calls the function with values from range [begin, end) with a step provided +template +class parallel_for_body_wrapper : detail::no_assign { + const Function &my_func; + const Index my_begin; + const Index my_step; +public: + parallel_for_body_wrapper( const Function& _func, Index& _begin, Index& _step ) + : my_func(_func), my_begin(_begin), my_step(_step) {} + + void operator()( const blocked_range& r ) const { + // A set of local variables to help the compiler with vectorization of the following loop. + Index b = r.begin(); + Index e = r.end(); + Index ms = my_step; + Index k = my_begin + b*ms; + +#if __INTEL_COMPILER +#pragma ivdep +#if __TBB_ASSERT_ON_VECTORIZATION_FAILURE +#pragma vector always assert +#endif +#endif + for ( Index i = b; i < e; ++i, k += ms ) { + tbb::detail::invoke(my_func, k); + } + } +}; + +// Requirements on Range concept are documented in blocked_range.h + +/** \page parallel_for_body_req Requirements on parallel_for body + Class \c Body implementing the concept of parallel_for body must define: + - \code Body::Body( const Body& ); \endcode Copy constructor + - \code Body::~Body(); \endcode Destructor + - \code void Body::operator()( Range& r ) const; \endcode Function call operator applying the body to range \c r. +**/ + +/** \name parallel_for + See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/ +//@{ + +//! Parallel iteration over range with default partitioner. 
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body ) {
+    start_for<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
+}
+
+//! Parallel iteration over range with simple partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {
+    start_for<Range,Body,const simple_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel iteration over range with auto_partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {
+    start_for<Range,Body,const auto_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel iteration over range with static_partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner ) {
+    start_for<Range,Body,const static_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel iteration over range with affinity_partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {
+    start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel iteration over range with default partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, task_group_context& context ) {
+    start_for<Range, Body, const __TBB_DEFAULT_PARTITIONER>::run(range, body, __TBB_DEFAULT_PARTITIONER(), context);
+}
+
+//! Parallel iteration over range with simple partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
+    start_for<Range, Body, const simple_partitioner>::run(range, body, partitioner, context);
+}
+
+//! Parallel iteration over range with auto_partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
+    start_for<Range, Body, const auto_partitioner>::run(range, body, partitioner, context);
+}
+
+//! Parallel iteration over range with static_partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner, task_group_context& context ) {
+    start_for<Range, Body, const static_partitioner>::run(range, body, partitioner, context);
+}
+
+//! Parallel iteration over range with affinity_partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_for_body<Body, Range>)
+void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
+    start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner, context);
+}
+
+//!
Implementation of parallel iteration over stepped range of integers with explicit step and partitioner +template +void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner) { + if (step <= 0 ) + throw_exception(exception_id::nonpositive_step); // throws std::invalid_argument + else if (first < last) { + // Above "else" avoids "potential divide by zero" warning on some platforms + Index end = Index(last - first - 1ul) / step + Index(1); + blocked_range range(static_cast(0), end); + parallel_for_body_wrapper body(f, first, step); + parallel_for(range, body, partitioner); + } +} + +//! Parallel iteration over a range of integers with a step provided and default partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, Index step, const Function& f) { + parallel_for_impl(first, last, step, f, __TBB_DEFAULT_PARTITIONER()); +} +//! Parallel iteration over a range of integers with a step provided and simple partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} +//! Parallel iteration over a range of integers with a step provided and auto partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} +//! Parallel iteration over a range of integers with a step provided and static partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} +//! Parallel iteration over a range of integers with a step provided and affinity partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} + +//! Parallel iteration over a range of integers with a default step value and default partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f) { + parallel_for_impl(first, last, static_cast(1), f, __TBB_DEFAULT_PARTITIONER()); +} +//! Parallel iteration over a range of integers with a default step value and simple partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner) { + parallel_for_impl(first, last, static_cast(1), f, partitioner); +} +//! Parallel iteration over a range of integers with a default step value and auto partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner) { + parallel_for_impl(first, last, static_cast(1), f, partitioner); +} +//! 
Parallel iteration over a range of integers with a default step value and static partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, const Function& f, const static_partitioner& partitioner) {
+    parallel_for_impl<Index,Function,const static_partitioner>(first, last, static_cast<Index>(1), f, partitioner);
+}
+//! Parallel iteration over a range of integers with a default step value and affinity partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner) {
+    parallel_for_impl<Index,Function,affinity_partitioner>(first, last, static_cast<Index>(1), f, partitioner);
+}
+
+//! Implementation of parallel iteration over stepped range of integers with explicit step, task group context, and partitioner
+template <typename Index, typename Function, typename Partitioner>
+void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner, task_group_context &context) {
+    if (step <= 0 )
+        throw_exception(exception_id::nonpositive_step); // throws std::invalid_argument
+    else if (first < last) {
+        // Above "else" avoids "potential divide by zero" warning on some platforms
+        Index end = (last - first - Index(1)) / step + Index(1);
+        blocked_range<Index> range(static_cast<Index>(0), end);
+        parallel_for_body_wrapper<Function, Index> body(f, first, step);
+        parallel_for(range, body, partitioner, context);
+    }
+}
+
+//! Parallel iteration over a range of integers with explicit step, task group context, and default partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, Index step, const Function& f, task_group_context &context) {
+    parallel_for_impl<Index,Function,const __TBB_DEFAULT_PARTITIONER>(first, last, step, f, __TBB_DEFAULT_PARTITIONER(), context);
+}
+//! Parallel iteration over a range of integers with explicit step, task group context, and simple partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner, task_group_context &context) {
+    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, step, f, partitioner, context);
+}
+//! Parallel iteration over a range of integers with explicit step, task group context, and auto partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner, task_group_context &context) {
+    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, partitioner, context);
+}
+//! Parallel iteration over a range of integers with explicit step, task group context, and static partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& partitioner, task_group_context &context) {
+    parallel_for_impl<Index,Function,const static_partitioner>(first, last, step, f, partitioner, context);
+}
+//! Parallel iteration over a range of integers with explicit step, task group context, and affinity partitioner
+template <typename Index, typename Function>
+    __TBB_requires(parallel_for_index<Index> && parallel_for_function<Function, Index>)
+void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner, task_group_context &context) {
+    parallel_for_impl<Index,Function,affinity_partitioner>(first, last, step, f, partitioner, context);
+}
+
+//!
Parallel iteration over a range of integers with a default step value, explicit task group context, and default partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, __TBB_DEFAULT_PARTITIONER(), context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and simple partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner, task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and auto partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner, task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and static partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, const static_partitioner& partitioner, task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and affinity_partitioner +template + __TBB_requires(parallel_for_index && parallel_for_function) +void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner, task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +// @} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::parallel_for; +// Split types +using detail::split; +using detail::proportional_split; +} // namespace v1 + +} // namespace tbb + +#endif /* __TBB_parallel_for_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_for_each.h b/src/tbb/include/oneapi/tbb/parallel_for_each.h new file mode 100644 index 000000000..ce68a7ff9 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_for_each.h @@ -0,0 +1,712 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_parallel_for_each_H +#define __TBB_parallel_for_each_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_exception.h" +#include "detail/_task.h" +#include "detail/_aligned_space.h" +#include "detail/_small_object_pool.h" +#include "detail/_utils.h" + +#include "parallel_for.h" +#include "task_group.h" // task_group_context + +#include +#include + +namespace tbb { +namespace detail { +#if __TBB_CPP20_CONCEPTS_PRESENT +namespace d1 { +template +class feeder; + +} // namespace d1 +inline namespace d0 { + +template +concept parallel_for_each_body = std::invocable&, ItemType&&> || + std::invocable&, ItemType&&, tbb::detail::d1::feeder&>; + +} // namespace d0 +#endif // __TBB_CPP20_CONCEPTS_PRESENT +namespace d2 { +template class feeder_impl; +} // namespace d2 + +namespace d1 { +//! Class the user supplied algorithm body uses to add new tasks +template +class feeder { + feeder() {} + feeder(const feeder&) = delete; + void operator=( const feeder&) = delete; + + virtual ~feeder () {} + virtual void internal_add_copy(const Item& item) = 0; + virtual void internal_add_move(Item&& item) = 0; + + template friend class d2::feeder_impl; +public: + //! Add a work item to a running parallel_for_each. + void add(const Item& item) {internal_add_copy(item);} + void add(Item&& item) {internal_add_move(std::move(item));} +}; + +} // namespace d1 + +namespace d2 { +using namespace tbb::detail::d1; +/** Selects one of the two possible forms of function call member operator. + @ingroup algorithms **/ +template +struct parallel_for_each_operator_selector { +public: + template + static auto call(const Body& body, ItemArg&& item, FeederArg*) + -> decltype(tbb::detail::invoke(body, std::forward(item)), void()) { + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Suppression of Microsoft non-standard extension warnings + // #pragma warning (push) + // #pragma warning (disable: 4239) + #endif + + tbb::detail::invoke(body, std::forward(item)); + + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // #pragma warning (pop) + #endif + } + + template + static auto call(const Body& body, ItemArg&& item, FeederArg* feeder) + -> decltype(tbb::detail::invoke(body, std::forward(item), *feeder), void()) { + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Suppression of Microsoft non-standard extension warnings + // #pragma warning (push) + // #pragma warning (disable: 4239) + #endif + __TBB_ASSERT(feeder, "Feeder was not created but should be"); + + tbb::detail::invoke(body, std::forward(item), *feeder); + + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // #pragma warning (pop) + #endif + } +}; + +template +struct feeder_item_task: public task { + using feeder_type = feeder_impl; + + template + feeder_item_task(ItemType&& input_item, feeder_type& feeder, small_object_allocator& alloc, wait_tree_vertex_interface& wait_vertex) : + item(std::forward(input_item)), + my_feeder(feeder), + my_allocator(alloc), + m_wait_tree_vertex(r1::get_thread_reference_vertex(&wait_vertex)) + { + m_wait_tree_vertex->reserve(); + } + + void finalize(const execution_data& ed) { + m_wait_tree_vertex->release(); + my_allocator.delete_object(this, ed); + } + + //! Hack for resolve ambiguity between calls to the body with and without moving the stored copy + //! 
Executing body with moving the copy should have higher priority + using first_priority = int; + using second_priority = double; + + template + static auto call(const BodyType& call_body, ItemType& call_item, FeederType& call_feeder, first_priority) + -> decltype(parallel_for_each_operator_selector::call(call_body, std::move(call_item), &call_feeder), void()) + { + parallel_for_each_operator_selector::call(call_body, std::move(call_item), &call_feeder); + } + + template + static void call(const BodyType& call_body, ItemType& call_item, FeederType& call_feeder, second_priority) { + parallel_for_each_operator_selector::call(call_body, call_item, &call_feeder); + } + + task* execute(execution_data& ed) override { + call(my_feeder.my_body, item, my_feeder, first_priority{}); + finalize(ed); + return nullptr; + } + + task* cancel(execution_data& ed) override { + finalize(ed); + return nullptr; + } + + Item item; + feeder_type& my_feeder; + small_object_allocator my_allocator; + wait_tree_vertex_interface* m_wait_tree_vertex; +}; // class feeder_item_task + +/** Implements new task adding procedure. + @ingroup algorithms **/ +template +class feeder_impl : public feeder { + // Avoiding use of copy constructor in a virtual method if the type does not support it + void internal_add_copy_impl(std::true_type, const Item& item) { + using feeder_task = feeder_item_task; + small_object_allocator alloc; + auto task = alloc.new_object(item, *this, alloc, my_wait_context); + + spawn(*task, my_execution_context); + } + + void internal_add_copy_impl(std::false_type, const Item&) { + __TBB_ASSERT(false, "Overloading for r-value reference doesn't work or it's not movable and not copyable object"); + } + + void internal_add_copy(const Item& item) override { + internal_add_copy_impl(typename std::is_copy_constructible::type(), item); + } + + void internal_add_move(Item&& item) override { + using feeder_task = feeder_item_task; + small_object_allocator alloc{}; + auto task = alloc.new_object(std::move(item), *this, alloc, my_wait_context); + + spawn(*task, my_execution_context); + } +public: + feeder_impl(const Body& body, wait_context_vertex& w_context, task_group_context &context) + : my_body(body), + my_wait_context(w_context) + , my_execution_context(context) + {} + + const Body& my_body; + wait_context_vertex& my_wait_context; + task_group_context& my_execution_context; +}; // class feeder_impl + +/** Execute computation under one element of the range + @ingroup algorithms **/ +template +struct for_each_iteration_task: public task { + using feeder_type = feeder_impl; + + for_each_iteration_task(Iterator input_item_ptr, const Body& body, feeder_impl* feeder_ptr, wait_context& wait_context) : + item_ptr(input_item_ptr), my_body(body), my_feeder_ptr(feeder_ptr), parent_wait_context(wait_context) + {} + + void finalize() { + parent_wait_context.release(); + } + + task* execute(execution_data&) override { + parallel_for_each_operator_selector::call(my_body, *item_ptr, my_feeder_ptr); + finalize(); + return nullptr; + } + + task* cancel(execution_data&) override { + finalize(); + return nullptr; + } + + Iterator item_ptr; + const Body& my_body; + feeder_impl* my_feeder_ptr; + wait_context& parent_wait_context; +}; // class for_each_iteration_task + +// Helper to get the type of the iterator to the internal sequence of copies +// If the element can be passed to the body as an rvalue - this iterator should be move_iterator +template +struct input_iteration_task_iterator_helper { + // For input iterators we pass const 
lvalue reference to the body + // It is prohibited to take non-constant lvalue references for input iterators + using type = const Item*; +}; + +template +struct input_iteration_task_iterator_helper::call(std::declval(), + std::declval(), + std::declval*>()))>> +{ + using type = std::move_iterator; +}; + +/** Split one block task to several(max_block_size) iteration tasks for input iterators + @ingroup algorithms **/ +template +struct input_block_handling_task : public task { + static constexpr size_t max_block_size = 4; + + using feeder_type = feeder_impl; + using iteration_task_iterator_type = typename input_iteration_task_iterator_helper::type; + using iteration_task = for_each_iteration_task; + + input_block_handling_task(wait_context_vertex& root_wait_context, task_group_context& e_context, + const Body& body, feeder_impl* feeder_ptr, small_object_allocator& alloc) + :my_size(0), my_wait_context(0), my_root_wait_context(root_wait_context), + my_execution_context(e_context), my_allocator(alloc) + { + auto item_it = block_iteration_space.begin(); + for (auto* it = task_pool.begin(); it != task_pool.end(); ++it) { + new (it) iteration_task(iteration_task_iterator_type(item_it++), body, feeder_ptr, my_wait_context); + } + } + + void finalize(const execution_data& ed) { + my_root_wait_context.release(); + my_allocator.delete_object(this, ed); + } + + task* execute(execution_data& ed) override { + __TBB_ASSERT( my_size > 0, "Negative size was passed to task"); + for (std::size_t counter = 1; counter < my_size; ++counter) { + my_wait_context.reserve(); + spawn(*(task_pool.begin() + counter), my_execution_context); + } + my_wait_context.reserve(); + execute_and_wait(*task_pool.begin(), my_execution_context, + my_wait_context, my_execution_context); + + // deallocate current task after children execution + finalize(ed); + return nullptr; + } + + task* cancel(execution_data& ed) override { + finalize(ed); + return nullptr; + } + + ~input_block_handling_task() { + for(std::size_t counter = 0; counter < max_block_size; ++counter) { + (task_pool.begin() + counter)->~iteration_task(); + if (counter < my_size) { + (block_iteration_space.begin() + counter)->~Item(); + } + } + } + + aligned_space block_iteration_space; + aligned_space task_pool; + std::size_t my_size; + wait_context my_wait_context; + wait_context_vertex& my_root_wait_context; + task_group_context& my_execution_context; + small_object_allocator my_allocator; +}; // class input_block_handling_task + +/** Split one block task to several(max_block_size) iteration tasks for forward iterators + @ingroup algorithms **/ +template +struct forward_block_handling_task : public task { + static constexpr size_t max_block_size = 4; + + using iteration_task = for_each_iteration_task; + + forward_block_handling_task(Iterator first, std::size_t size, + wait_context_vertex& w_context, task_group_context& e_context, + const Body& body, feeder_impl* feeder_ptr, + small_object_allocator& alloc) + : my_size(size), my_wait_context(0), my_root_wait_context(w_context), + my_execution_context(e_context), my_allocator(alloc) + { + auto* task_it = task_pool.begin(); + for (std::size_t i = 0; i < size; i++) { + new (task_it++) iteration_task(first, body, feeder_ptr, my_wait_context); + ++first; + } + } + + void finalize(const execution_data& ed) { + my_root_wait_context.release(); + my_allocator.delete_object(this, ed); + } + + task* execute(execution_data& ed) override { + __TBB_ASSERT( my_size > 0, "Negative size was passed to task"); + for(std::size_t counter 
= 1; counter < my_size; ++counter) { + my_wait_context.reserve(); + spawn(*(task_pool.begin() + counter), my_execution_context); + } + my_wait_context.reserve(); + execute_and_wait(*task_pool.begin(), my_execution_context, + my_wait_context, my_execution_context); + + // deallocate current task after children execution + finalize(ed); + return nullptr; + } + + task* cancel(execution_data& ed) override { + finalize(ed); + return nullptr; + } + + ~forward_block_handling_task() { + for(std::size_t counter = 0; counter < my_size; ++counter) { + (task_pool.begin() + counter)->~iteration_task(); + } + } + + aligned_space task_pool; + std::size_t my_size; + wait_context my_wait_context; + wait_context_vertex& my_root_wait_context; + task_group_context& my_execution_context; + small_object_allocator my_allocator; +}; // class forward_block_handling_task + +/** Body for parallel_for algorithm. + * Allows to redirect operations under random access iterators range to the parallel_for algorithm. + @ingroup algorithms **/ +template +class parallel_for_body_wrapper { + Iterator my_first; + const Body& my_body; + feeder_impl* my_feeder_ptr; +public: + parallel_for_body_wrapper(Iterator first, const Body& body, feeder_impl* feeder_ptr) + : my_first(first), my_body(body), my_feeder_ptr(feeder_ptr) {} + + void operator()(tbb::blocked_range range) const { +#if __INTEL_COMPILER +#pragma ivdep +#endif + for (std::size_t count = range.begin(); count != range.end(); count++) { + parallel_for_each_operator_selector::call(my_body, *(my_first + count), + my_feeder_ptr); + } + } +}; // class parallel_for_body_wrapper + + +/** Helper for getting iterators tag including inherited custom tags + @ingroup algorithms */ +template +using tag = typename std::iterator_traits::iterator_category; + +#if __TBB_CPP20_PRESENT +template +struct move_iterator_dispatch_helper { + using type = It; +}; + +// Until C++23, std::move_iterator::iterator_concept always defines +// to std::input_iterator_tag and hence std::forward_iterator concept +// always evaluates to false, so std::move_iterator dispatch should be +// made according to the base iterator type. 
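For reference, a minimal sketch of the feeder-based form implemented in this file: a body invocable as (Item, feeder<Item>&) may call feeder::add() to enqueue work items discovered during processing, and the running parallel_for_each drains them before returning. The function name drain_work and the halving rule are illustrative, not part of the patch:

#include <oneapi/tbb/parallel_for_each.h>
#include <vector>

void drain_work(std::vector<int>& roots) {
    tbb::parallel_for_each(roots.begin(), roots.end(),
        [](int item, tbb::feeder<int>& feeder) {
            // ... process 'item' here ...
            if (item > 1)
                feeder.add(item / 2);  // feed follow-up work; halving guarantees termination
        });
}

Each add() reserves a reference on the algorithm's wait context (see feeder_item_task above), so parallel_for_each does not return until all fed items have been processed as well.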
+template +struct move_iterator_dispatch_helper> { + using type = It; +}; + +template +using iterator_tag_dispatch_impl = + std::conditional_t, + std::random_access_iterator_tag, + std::conditional_t, + std::forward_iterator_tag, + std::input_iterator_tag>>; + +template +using iterator_tag_dispatch = + iterator_tag_dispatch_impl::type>; + +#else +template +using iterator_tag_dispatch = typename + std::conditional< + std::is_base_of>::value, + std::random_access_iterator_tag, + typename std::conditional< + std::is_base_of>::value, + std::forward_iterator_tag, + std::input_iterator_tag + >::type + >::type; +#endif // __TBB_CPP20_PRESENT + +template +using feeder_is_required = tbb::detail::void_t(), + std::declval::reference>(), + std::declval&>()))>; + +// Creates feeder object only if the body can accept it +template +struct feeder_holder { + feeder_holder( wait_context_vertex&, task_group_context&, const Body& ) {} + + feeder_impl* feeder_ptr() { return nullptr; } +}; // class feeder_holder + +template +class feeder_holder> { +public: + feeder_holder( wait_context_vertex& w_context, task_group_context& context, const Body& body ) + : my_feeder(body, w_context, context) {} + + feeder_impl* feeder_ptr() { return &my_feeder; } +private: + feeder_impl my_feeder; +}; // class feeder_holder + +template +class for_each_root_task_base : public task { +public: + for_each_root_task_base(Iterator first, Iterator last, const Body& body, wait_context_vertex& w_context, task_group_context& e_context) + : my_first(first), my_last(last), my_wait_context(w_context), my_execution_context(e_context), + my_body(body), my_feeder_holder(my_wait_context, my_execution_context, my_body) + { + my_wait_context.reserve(); + } +private: + task* cancel(execution_data&) override { + this->my_wait_context.release(); + return nullptr; + } +protected: + Iterator my_first; + Iterator my_last; + wait_context_vertex& my_wait_context; + task_group_context& my_execution_context; + const Body& my_body; + feeder_holder my_feeder_holder; +}; // class for_each_root_task_base + +/** parallel_for_each algorithm root task - most generic version + * Splits input range to blocks + @ingroup algorithms **/ +template > +class for_each_root_task : public for_each_root_task_base +{ + using base_type = for_each_root_task_base; +public: + using base_type::base_type; +private: + task* execute(execution_data& ed) override { + using block_handling_type = input_block_handling_task; + + if (this->my_first == this->my_last) { + this->my_wait_context.release(); + return nullptr; + } + + this->my_wait_context.reserve(); + small_object_allocator alloc{}; + auto block_handling_task = alloc.new_object(ed, this->my_wait_context, this->my_execution_context, + this->my_body, this->my_feeder_holder.feeder_ptr(), + alloc); + + auto* block_iterator = block_handling_task->block_iteration_space.begin(); + for (; !(this->my_first == this->my_last) && block_handling_task->my_size < block_handling_type::max_block_size; ++this->my_first) { + // Move semantics are automatically used when supported by the iterator + new (block_iterator++) Item(*this->my_first); + ++block_handling_task->my_size; + } + + // Do not access this after spawn to avoid races + spawn(*this, this->my_execution_context); + return block_handling_task; + } +}; // class for_each_root_task - most generic implementation + +/** parallel_for_each algorithm root task - forward iterator based specialization + * Splits input range to blocks + @ingroup algorithms **/ +template +class for_each_root_task + : 
public for_each_root_task_base +{ + using base_type = for_each_root_task_base; +public: + using base_type::base_type; +private: + task* execute(execution_data& ed) override { + using block_handling_type = forward_block_handling_task; + if (this->my_first == this->my_last) { + this->my_wait_context.release(); + return nullptr; + } + + std::size_t block_size{0}; + Iterator first_block_element = this->my_first; + for (; !(this->my_first == this->my_last) && block_size < block_handling_type::max_block_size; ++this->my_first) { + ++block_size; + } + + this->my_wait_context.reserve(); + small_object_allocator alloc{}; + auto block_handling_task = alloc.new_object(ed, first_block_element, block_size, + this->my_wait_context, this->my_execution_context, + this->my_body, this->my_feeder_holder.feeder_ptr(), alloc); + + // Do not access this after spawn to avoid races + spawn(*this, this->my_execution_context); + return block_handling_task; + } +}; // class for_each_root_task - forward iterator based specialization + +/** parallel_for_each algorithm root task - random access iterator based specialization + * Splits input range to blocks + @ingroup algorithms **/ +template +class for_each_root_task + : public for_each_root_task_base +{ + using base_type = for_each_root_task_base; +public: + using base_type::base_type; +private: + task* execute(execution_data&) override { + tbb::parallel_for( + tbb::blocked_range(0, std::distance(this->my_first, this->my_last)), + parallel_for_body_wrapper(this->my_first, this->my_body, this->my_feeder_holder.feeder_ptr()) + , this->my_execution_context + ); + + this->my_wait_context.release(); + return nullptr; + } +}; // class for_each_root_task - random access iterator based specialization + +/** Helper for getting item type. If item type can be deduced from feeder - got it from feeder, + if feeder is generic - got item type from range. + @ingroup algorithms */ +template +auto feeder_argument_parser(void (Body::*)(Item, feeder&) const) -> FeederArg; + +template +decltype(feeder_argument_parser(&Body::operator())) get_item_type_impl(int); // for (T, feeder) +template Item get_item_type_impl(...); // stub + +template +using get_item_type = decltype(get_item_type_impl(0)); + +#if __TBB_CPP20_CONCEPTS_PRESENT +template +using feeder_item_type = std::remove_cvref_t>; + +template +concept parallel_for_each_iterator_body = + parallel_for_each_body, feeder_item_type>>; + +template +concept parallel_for_each_range_body = + parallel_for_each_body, feeder_item_type>>; +#endif + +/** Implements parallel iteration over a range. + @ingroup algorithms */ +template +void run_parallel_for_each( Iterator first, Iterator last, const Body& body, task_group_context& context) +{ + if (!(first == last)) { + using ItemType = get_item_type::value_type>; + wait_context_vertex w_context(0); + + for_each_root_task root_task(first, last, body, w_context, context); + + execute_and_wait(root_task, context, w_context.get_context(), context); + } +} + +/** \page parallel_for_each_body_req Requirements on parallel_for_each body + Class \c Body implementing the concept of parallel_for_each body must define: + - \code + B::operator()( + cv_item_type item, + feeder& feeder + ) const + + OR + + B::operator()( cv_item_type& item ) const + \endcode Process item. + May be invoked concurrently for the same \c this but different \c item. + + - \code item_type( const item_type& ) \endcode + Copy a work item. 
+ - \code ~item_type() \endcode Destroy a work item +**/ + +/** \name parallel_for_each + See also requirements on \ref parallel_for_each_body_req "parallel_for_each Body". **/ +//@{ +//! Parallel iteration over a range, with optional addition of more work. +/** @ingroup algorithms */ +template + __TBB_requires(std::input_iterator && parallel_for_each_iterator_body) +void parallel_for_each(Iterator first, Iterator last, const Body& body) { + task_group_context context(PARALLEL_FOR_EACH); + run_parallel_for_each(first, last, body, context); +} + +template + __TBB_requires(container_based_sequence && parallel_for_each_range_body) +void parallel_for_each(Range& rng, const Body& body) { + parallel_for_each(std::begin(rng), std::end(rng), body); +} + +template + __TBB_requires(container_based_sequence && parallel_for_each_range_body) +void parallel_for_each(const Range& rng, const Body& body) { + parallel_for_each(std::begin(rng), std::end(rng), body); +} + +//! Parallel iteration over a range, with optional addition of more work and user-supplied context +/** @ingroup algorithms */ +template + __TBB_requires(std::input_iterator && parallel_for_each_iterator_body) +void parallel_for_each(Iterator first, Iterator last, const Body& body, task_group_context& context) { + run_parallel_for_each(first, last, body, context); +} + +template + __TBB_requires(container_based_sequence && parallel_for_each_range_body) +void parallel_for_each(Range& rng, const Body& body, task_group_context& context) { + parallel_for_each(std::begin(rng), std::end(rng), body, context); +} + +template + __TBB_requires(container_based_sequence && parallel_for_each_range_body) +void parallel_for_each(const Range& rng, const Body& body, task_group_context& context) { + parallel_for_each(std::begin(rng), std::end(rng), body, context); +} + +} // namespace d2 +} // namespace detail +//! @endcond +//@} + +inline namespace v1 { +using detail::d2::parallel_for_each; +using detail::d1::feeder; +} // namespace v1 + +} // namespace tbb + +#endif /* __TBB_parallel_for_each_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_invoke.h b/src/tbb/include/oneapi/tbb/parallel_invoke.h new file mode 100644 index 000000000..4bc5d8533 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_invoke.h @@ -0,0 +1,227 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_invoke_H +#define __TBB_parallel_invoke_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_exception.h" +#include "detail/_task.h" +#include "detail/_template_helpers.h" +#include "detail/_small_object_pool.h" + +#include "task_group.h" + +#include +#include +#include + +namespace tbb { +namespace detail { +namespace d1 { + +//! 
Simple task object, executing user method +template +struct function_invoker : public task { + function_invoker(const Function& function, WaitObject& wait_ctx) : + my_function(function), + parent_wait_ctx(wait_ctx) + {} + + task* execute(execution_data& ed) override { + my_function(); + parent_wait_ctx.release(ed); + call_itt_task_notify(destroy, this); + return nullptr; + } + + task* cancel(execution_data& ed) override { + parent_wait_ctx.release(ed); + return nullptr; + } + + const Function& my_function; + WaitObject& parent_wait_ctx; +}; // struct function_invoker + +//! Task object for managing subroots in trinary task trees. +// Endowed with additional synchronization logic (compatible with wait object interfaces) to support +// continuation passing execution. This task spawns 2 function_invoker tasks with first and second functors +// and then executes first functor by itself. But only the last executed functor must destruct and deallocate +// the subroot task. +template +struct invoke_subroot_task : public task { + wait_context& root_wait_ctx; + std::atomic ref_count{0}; + bool child_spawned = false; + + const F1& self_invoked_functor; + function_invoker> f2_invoker; + function_invoker> f3_invoker; + + task_group_context& my_execution_context; + small_object_allocator my_allocator; + + invoke_subroot_task(const F1& f1, const F2& f2, const F3& f3, wait_context& wait_ctx, task_group_context& context, + small_object_allocator& alloc) : + root_wait_ctx(wait_ctx), + self_invoked_functor(f1), + f2_invoker(f2, *this), + f3_invoker(f3, *this), + my_execution_context(context), + my_allocator(alloc) + { + root_wait_ctx.reserve(); + } + + void finalize(const execution_data& ed) { + root_wait_ctx.release(); + + my_allocator.delete_object(this, ed); + } + + void release(const execution_data& ed) { + __TBB_ASSERT(ref_count > 0, nullptr); + call_itt_task_notify(releasing, this); + if( --ref_count == 0 ) { + call_itt_task_notify(acquired, this); + finalize(ed); + } + } + + task* execute(execution_data& ed) override { + ref_count.fetch_add(3, std::memory_order_relaxed); + spawn(f3_invoker, my_execution_context); + spawn(f2_invoker, my_execution_context); + self_invoked_functor(); + + release(ed); + return nullptr; + } + + task* cancel(execution_data& ed) override { + if( ref_count > 0 ) { // detect children spawn + release(ed); + } else { + finalize(ed); + } + return nullptr; + } +}; // struct subroot_task + +class invoke_root_task { +public: + invoke_root_task(wait_context& wc) : my_wait_context(wc) {} + void release(const execution_data&) { + my_wait_context.release(); + } +private: + wait_context& my_wait_context; +}; + +template +void invoke_recursive_separation(wait_context& root_wait_ctx, task_group_context& context, const F1& f1) { + root_wait_ctx.reserve(1); + invoke_root_task root(root_wait_ctx); + function_invoker invoker1(f1, root); + + execute_and_wait(invoker1, context, root_wait_ctx, context); +} + +template +void invoke_recursive_separation(wait_context& root_wait_ctx, task_group_context& context, const F1& f1, const F2& f2) { + root_wait_ctx.reserve(2); + invoke_root_task root(root_wait_ctx); + function_invoker invoker1(f1, root); + function_invoker invoker2(f2, root); + + spawn(invoker1, context); + execute_and_wait(invoker2, context, root_wait_ctx, context); +} + +template +void invoke_recursive_separation(wait_context& root_wait_ctx, task_group_context& context, const F1& f1, const F2& f2, const F3& f3) { + root_wait_ctx.reserve(3); + invoke_root_task root(root_wait_ctx); + 
function_invoker invoker1(f1, root); + function_invoker invoker2(f2, root); + function_invoker invoker3(f3, root); + + //TODO: implement sub root for two tasks (measure performance) + spawn(invoker1, context); + spawn(invoker2, context); + execute_and_wait(invoker3, context, root_wait_ctx, context); +} + +template +void invoke_recursive_separation(wait_context& root_wait_ctx, task_group_context& context, + const F1& f1, const F2& f2, const F3& f3, const Fs&... fs) { + small_object_allocator alloc{}; + auto sub_root = alloc.new_object>(f1, f2, f3, root_wait_ctx, context, alloc); + spawn(*sub_root, context); + + invoke_recursive_separation(root_wait_ctx, context, fs...); +} + +template +void parallel_invoke_impl(task_group_context& context, const Fs&... fs) { + static_assert(sizeof...(Fs) >= 2, "Parallel invoke may be called with at least two callable"); + wait_context root_wait_ctx{0}; + + invoke_recursive_separation(root_wait_ctx, context, fs...); +} + +template +void parallel_invoke_impl(const F1& f1, const Fs&... fs) { + static_assert(sizeof...(Fs) >= 1, "Parallel invoke may be called with at least two callable"); + task_group_context context(PARALLEL_INVOKE); + wait_context root_wait_ctx{0}; + + invoke_recursive_separation(root_wait_ctx, context, fs..., f1); +} + +//! Passes last argument of variadic pack as first for handling user provided task_group_context +template +struct invoke_helper; + +template +struct invoke_helper, T, Fs...> : invoke_helper, Fs...> {}; + +template +struct invoke_helper, T> { + void operator()(Fs&&... args, T&& t) { + parallel_invoke_impl(std::forward(t), std::forward(args)...); + } +}; + +//! Parallel execution of several function objects +// We need to pass parameter pack through forwarding reference, +// since this pack may contain task_group_context that must be passed via lvalue non-const reference +template +void parallel_invoke(Fs&&... fs) { + invoke_helper, Fs...>()(std::forward(fs)...); +} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::parallel_invoke; +} // namespace v1 + +} // namespace tbb +#endif /* __TBB_parallel_invoke_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_pipeline.h b/src/tbb/include/oneapi/tbb/parallel_pipeline.h new file mode 100644 index 000000000..a204b9c4c --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_pipeline.h @@ -0,0 +1,153 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_pipeline_H +#define __TBB_parallel_pipeline_H + +#include "detail/_pipeline_filters.h" +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "task_group.h" + +#include +#include +#include + +namespace tbb { +namespace detail { + +namespace r1 { +TBB_EXPORT void __TBB_EXPORTED_FUNC parallel_pipeline(task_group_context&, std::size_t, const d1::filter_node&); +} + +namespace d1 { + +enum class filter_mode : unsigned int +{ + //! 
processes multiple items in parallel and in no particular order + parallel = base_filter::filter_is_out_of_order, + //! processes items one at a time; all such filters process items in the same order + serial_in_order = base_filter::filter_is_serial, + //! processes items one at a time and in no particular order + serial_out_of_order = base_filter::filter_is_serial | base_filter::filter_is_out_of_order +}; +//! Class representing a chain of type-safe pipeline filters +/** @ingroup algorithms */ +template +class filter { + filter_node_ptr my_root; + filter( filter_node_ptr root ) : my_root(root) {} + friend void parallel_pipeline( size_t, const filter&, task_group_context& ); + template + friend filter make_filter( filter_mode, const Body& ); + template + friend filter operator&( const filter&, const filter& ); +public: + filter() = default; + filter( const filter& rhs ) : my_root(rhs.my_root) {} + filter( filter&& rhs ) : my_root(std::move(rhs.my_root)) {} + + void operator=(const filter& rhs) { + my_root = rhs.my_root; + } + void operator=( filter&& rhs ) { + my_root = std::move(rhs.my_root); + } + + template + filter( filter_mode mode, const Body& body ) : + my_root( new(r1::allocate_memory(sizeof(filter_node_leaf))) + filter_node_leaf(static_cast(mode), body) ) { + } + + filter& operator&=( const filter& right ) { + *this = *this & right; + return *this; + } + + void clear() { + // Like operator= with filter() on right side. + my_root = nullptr; + } +}; + +//! Create a filter to participate in parallel_pipeline +/** @ingroup algorithms */ +template +filter make_filter( filter_mode mode, const Body& body ) { + return filter_node_ptr( new(r1::allocate_memory(sizeof(filter_node_leaf))) + filter_node_leaf(static_cast(mode), body) ); +} + +//! Create a filter to participate in parallel_pipeline +/** @ingroup algorithms */ +template +filter, filter_output> make_filter( filter_mode mode, const Body& body ) { + return make_filter, filter_output>(mode, body); +} + +//! Composition of filters left and right. +/** @ingroup algorithms */ +template +filter operator&( const filter& left, const filter& right ) { + __TBB_ASSERT(left.my_root,"cannot use default-constructed filter as left argument of '&'"); + __TBB_ASSERT(right.my_root,"cannot use default-constructed filter as right argument of '&'"); + return filter_node_ptr( new (r1::allocate_memory(sizeof(filter_node))) filter_node(left.my_root,right.my_root) ); +} + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +template +filter(filter_mode, Body) +->filter, filter_output>; +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +//! Parallel pipeline over chain of filters with user-supplied context. +/** @ingroup algorithms **/ +inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter& filter_chain, task_group_context& context) { + r1::parallel_pipeline(context, max_number_of_live_tokens, *filter_chain.my_root); +} + +//! Parallel pipeline over chain of filters. +/** @ingroup algorithms **/ +inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter& filter_chain) { + task_group_context context; + parallel_pipeline(max_number_of_live_tokens, filter_chain, context); +} + +//! Parallel pipeline over sequence of filters. +/** @ingroup algorithms **/ +template +void parallel_pipeline(size_t max_number_of_live_tokens, + const F1& filter1, + const F2& filter2, + FiltersContext&&... 
filters) { + parallel_pipeline(max_number_of_live_tokens, filter1 & filter2, std::forward(filters)...); +} + +} // namespace d1 +} // namespace detail + +inline namespace v1 +{ +using detail::d1::parallel_pipeline; +using detail::d1::filter; +using detail::d1::make_filter; +using detail::d1::filter_mode; +using detail::d1::flow_control; +} +} // tbb + +#endif /* __TBB_parallel_pipeline_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_reduce.h b/src/tbb/include/oneapi/tbb/parallel_reduce.h new file mode 100644 index 000000000..205c97ef9 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_reduce.h @@ -0,0 +1,772 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_reduce_H +#define __TBB_parallel_reduce_H + +#include +#include "detail/_namespace_injection.h" +#include "detail/_task.h" +#include "detail/_aligned_space.h" +#include "detail/_small_object_pool.h" +#include "detail/_range_common.h" + +#include "task_group.h" // task_group_context +#include "partitioner.h" +#include "profiling.h" + +namespace tbb { +namespace detail { +#if __TBB_CPP20_CONCEPTS_PRESENT +inline namespace d0 { + +template +concept parallel_reduce_body = splittable && + requires( Body& body, const Range& range, Body& rhs ) { + body(range); + body.join(rhs); + }; + +template +concept parallel_reduce_function = std::invocable&, + const Range&, Value&&> && + std::convertible_to&, + const Range&, Value&&>, + Value>; + +template +concept parallel_reduce_combine = std::invocable&, + Value&&, Value&&> && + std::convertible_to&, + Value&&, Value&&>, + Value>; + +} // namespace d0 +#endif // __TBB_CPP20_CONCEPTS_PRESENT +namespace d1 { + +//! Tree node type for parallel_reduce. +/** @ingroup algorithms */ +//TODO: consider folding tree via bypass execution(instead of manual folding) +// for better cancellation and critical tasks handling (performance measurements required). +template +struct reduction_tree_node : public tree_node { + tbb::detail::aligned_space zombie_space; + Body& left_body; + bool has_right_zombie{false}; + + reduction_tree_node(node* parent, int ref_count, Body& input_left_body, small_object_allocator& alloc) : + tree_node{parent, ref_count, alloc}, + left_body(input_left_body) /* gcc4.8 bug - braced-initialization doesn't work for class members of reference type */ + {} + + void join(task_group_context* context) { + if (has_right_zombie && !context->is_group_execution_cancelled()) + left_body.join(*zombie_space.begin()); + } + + ~reduction_tree_node() { + if( has_right_zombie ) zombie_space.begin()->~Body(); + } +}; + +//! Task type used to split the work of parallel_reduce. 
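For reference, a minimal Body satisfying the parallel_reduce_body concept above (a splitting constructor, operator() over the range, and join()). SumBody, values, and n are illustrative, not part of the patch:

#include <oneapi/tbb/parallel_reduce.h>
#include <oneapi/tbb/blocked_range.h>
#include <cstddef>

struct SumBody {
    const double* data;
    double sum;
    SumBody( const double* d ) : data(d), sum(0.0) {}
    // Splitting constructor: the right child starts from an empty accumulator.
    SumBody( SumBody& other, tbb::split ) : data(other.data), sum(0.0) {}
    void operator()( const tbb::blocked_range<std::size_t>& r ) {
        for (std::size_t i = r.begin(); i != r.end(); ++i)
            sum += data[i];  // accumulate this subrange into the local result
    }
    // Fold the right child's partial result back into this (left) body.
    void join( SumBody& rhs ) { sum += rhs.sum; }
};

// usage sketch:
//   SumBody body(values);  // 'values' points at n doubles owned by the caller
//   tbb::parallel_reduce(tbb::blocked_range<std::size_t>(0, n), body);
//   double total = body.sum;

The split constructor and join() mirror reduction_tree_node above: the right child's Body is split-constructed into the parent node's zombie_space only when work is actually stolen, and join() merges its partial result back into the left body.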
+/** @ingroup algorithms */ +template +struct start_reduce : public task { + Range my_range; + Body* my_body; + node* my_parent; + + typename Partitioner::task_partition_type my_partition; + small_object_allocator my_allocator; + bool is_right_child; + + task* execute(execution_data&) override; + task* cancel(execution_data&) override; + void finalize(const execution_data&); + + using tree_node_type = reduction_tree_node; + + //! Constructor reduce root task. + start_reduce( const Range& range, Body& body, Partitioner& partitioner, small_object_allocator& alloc ) : + my_range(range), + my_body(&body), + my_parent(nullptr), + my_partition(partitioner), + my_allocator(alloc), + is_right_child(false) {} + //! Splitting constructor used to generate children. + /** parent_ becomes left child. Newly constructed object is right child. */ + start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj, small_object_allocator& alloc ) : + my_range(parent_.my_range, get_range_split_object(split_obj)), + my_body(parent_.my_body), + my_parent(nullptr), + my_partition(parent_.my_partition, split_obj), + my_allocator(alloc), + is_right_child(true) + { + parent_.is_right_child = false; + } + //! Construct right child from the given range as response to the demand. + /** parent_ remains left child. Newly constructed object is right child. */ + start_reduce( start_reduce& parent_, const Range& r, depth_t d, small_object_allocator& alloc ) : + my_range(r), + my_body(parent_.my_body), + my_parent(nullptr), + my_partition(parent_.my_partition, split()), + my_allocator(alloc), + is_right_child(true) + { + my_partition.align_depth( d ); + parent_.is_right_child = false; + } + static void run(const Range& range, Body& body, Partitioner& partitioner, task_group_context& context) { + if ( !range.empty() ) { + wait_node wn; + small_object_allocator alloc{}; + auto reduce_task = alloc.new_object(range, body, partitioner, alloc); + reduce_task->my_parent = &wn; + execute_and_wait(*reduce_task, context, wn.m_wait, context); + } + } + static void run(const Range& range, Body& body, Partitioner& partitioner) { + // Bound context prevents exceptions from body to affect nesting or sibling algorithms, + // and allows users to handle exceptions safely by wrapping parallel_reduce in the try-block. + task_group_context context(PARALLEL_REDUCE); + run(range, body, partitioner, context); + } + //! Run body for range, serves as callback for partitioner + void run_body( Range &r ) { + tbb::detail::invoke(*my_body, r); + } + + //! spawn right task, serves as callback for partitioner + void offer_work(typename Partitioner::split_type& split_obj, execution_data& ed) { + offer_work_impl(ed, *this, split_obj); + } + //! spawn right task, serves as callback for partitioner + void offer_work(const Range& r, depth_t d, execution_data& ed) { + offer_work_impl(ed, *this, r, d); + } + +private: + template + void offer_work_impl(execution_data& ed, Args&&... args) { + small_object_allocator alloc{}; + // New right child + auto right_child = alloc.new_object(ed, std::forward(args)..., alloc); + + // New root node as a continuation and ref count. Left and right child attach to the new parent. + right_child->my_parent = my_parent = alloc.new_object(ed, my_parent, 2, *my_body, alloc); + + // Spawn the right sibling + right_child->spawn_self(ed); + } + + void spawn_self(execution_data& ed) { + my_partition.spawn_task(*this, *context(ed)); + } +}; + +//! 
fold the tree and deallocate the task +template +void start_reduce::finalize(const execution_data& ed) { + // Get the current parent and wait object before an object destruction + node* parent = my_parent; + auto allocator = my_allocator; + // Task execution finished - destroy it + this->~start_reduce(); + // Unwind the tree decrementing the parent`s reference count + fold_tree(parent, ed); + allocator.deallocate(this, ed); +} + +//! Execute parallel_reduce task +template +task* start_reduce::execute(execution_data& ed) { + if (!is_same_affinity(ed)) { + my_partition.note_affinity(execution_slot(ed)); + } + my_partition.check_being_stolen(*this, ed); + + // The acquire barrier synchronizes the data pointed with my_body if the left + // task has already finished. + __TBB_ASSERT(my_parent, nullptr); + if( is_right_child && my_parent->m_ref_count.load(std::memory_order_acquire) == 2 ) { + tree_node_type* parent_ptr = static_cast(my_parent); + my_body = static_cast(new( parent_ptr->zombie_space.begin() ) Body(*my_body, split())); + parent_ptr->has_right_zombie = true; + } + __TBB_ASSERT(my_body != nullptr, "Incorrect body value"); + + my_partition.execute(*this, my_range, ed); + + finalize(ed); + return nullptr; +} + +//! Cancel parallel_reduce task +template +task* start_reduce::cancel(execution_data& ed) { + finalize(ed); + return nullptr; +} + +//! Tree node type for parallel_deterministic_reduce. +/** @ingroup algorithms */ +template +struct deterministic_reduction_tree_node : public tree_node { + Body right_body; + Body& left_body; + + deterministic_reduction_tree_node(node* parent, int ref_count, Body& input_left_body, small_object_allocator& alloc) : + tree_node{parent, ref_count, alloc}, + right_body{input_left_body, detail::split()}, + left_body(input_left_body) + {} + + void join(task_group_context* context) { + if (!context->is_group_execution_cancelled()) + left_body.join(right_body); + } +}; + +//! Task type used to split the work of parallel_deterministic_reduce. +/** @ingroup algorithms */ +template +struct start_deterministic_reduce : public task { + Range my_range; + Body& my_body; + node* my_parent; + + typename Partitioner::task_partition_type my_partition; + small_object_allocator my_allocator; + + task* execute(execution_data&) override; + task* cancel(execution_data&) override; + void finalize(const execution_data&); + + using tree_node_type = deterministic_reduction_tree_node; + + //! Constructor deterministic_reduce root task. + start_deterministic_reduce( const Range& range, Partitioner& partitioner, Body& body, small_object_allocator& alloc ) : + my_range(range), + my_body(body), + my_parent(nullptr), + my_partition(partitioner), + my_allocator(alloc) {} + //! Splitting constructor used to generate children. + /** parent_ becomes left child. Newly constructed object is right child. 
*/ + start_deterministic_reduce( start_deterministic_reduce& parent_, typename Partitioner::split_type& split_obj, Body& body, + small_object_allocator& alloc ) : + my_range(parent_.my_range, get_range_split_object(split_obj)), + my_body(body), + my_parent(nullptr), + my_partition(parent_.my_partition, split_obj), + my_allocator(alloc) {} + static void run(const Range& range, Body& body, Partitioner& partitioner, task_group_context& context) { + if ( !range.empty() ) { + wait_node wn; + small_object_allocator alloc{}; + auto deterministic_reduce_task = + alloc.new_object(range, partitioner, body, alloc); + deterministic_reduce_task->my_parent = &wn; + execute_and_wait(*deterministic_reduce_task, context, wn.m_wait, context); + } + } + static void run(const Range& range, Body& body, Partitioner& partitioner) { + // Bound context prevents exceptions from body to affect nesting or sibling algorithms, + // and allows users to handle exceptions safely by wrapping parallel_deterministic_reduce + // in the try-block. + task_group_context context(PARALLEL_REDUCE); + run(range, body, partitioner, context); + } + //! Run body for range, serves as callback for partitioner + void run_body( Range &r ) { + tbb::detail::invoke(my_body, r); + } + //! Spawn right task, serves as callback for partitioner + void offer_work(typename Partitioner::split_type& split_obj, execution_data& ed) { + offer_work_impl(ed, *this, split_obj); + } +private: + template + void offer_work_impl(execution_data& ed, Args&&... args) { + small_object_allocator alloc{}; + // New root node as a continuation and ref count. Left and right child attach to the new parent. Split the body. + auto new_tree_node = alloc.new_object(ed, my_parent, 2, my_body, alloc); + + // New right child + auto right_child = alloc.new_object(ed, std::forward(args)..., new_tree_node->right_body, alloc); + + right_child->my_parent = my_parent = new_tree_node; + + // Spawn the right sibling + right_child->spawn_self(ed); + } + + void spawn_self(execution_data& ed) { + my_partition.spawn_task(*this, *context(ed)); + } +}; + +//! Fold the tree and deallocate the task +template +void start_deterministic_reduce::finalize(const execution_data& ed) { + // Get the current parent and wait object before an object destruction + node* parent = my_parent; + + auto allocator = my_allocator; + // Task execution finished - destroy it + this->~start_deterministic_reduce(); + // Unwind the tree decrementing the parent`s reference count + fold_tree(parent, ed); + allocator.deallocate(this, ed); +} + +//! Execute parallel_deterministic_reduce task +template +task* start_deterministic_reduce::execute(execution_data& ed) { + if (!is_same_affinity(ed)) { + my_partition.note_affinity(execution_slot(ed)); + } + my_partition.check_being_stolen(*this, ed); + + my_partition.execute(*this, my_range, ed); + + finalize(ed); + return nullptr; +} + +//! Cancel parallel_deterministic_reduce task +template +task* start_deterministic_reduce::cancel(execution_data& ed) { + finalize(ed); + return nullptr; +} + + +//! Auxiliary class for parallel_reduce; for internal use only. +/** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body" + using given \ref parallel_reduce_lambda_req "anonymous function objects". 
+ **/
+/** @ingroup algorithms */
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+class lambda_reduce_body {
+//TODO: decide if my_real_body, my_reduction, and my_identity_element should be copied or referenced
+//      (might require some performance measurements)
+
+    const Value&     my_identity_element;
+    const RealBody&  my_real_body;
+    const Reduction& my_reduction;
+    Value            my_value;
+    lambda_reduce_body& operator= ( const lambda_reduce_body& other );
+public:
+    lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction )
+        : my_identity_element(identity)
+        , my_real_body(body)
+        , my_reduction(reduction)
+        , my_value(identity)
+    { }
+    lambda_reduce_body( const lambda_reduce_body& other ) = default;
+    lambda_reduce_body( lambda_reduce_body& other, tbb::split )
+        : my_identity_element(other.my_identity_element)
+        , my_real_body(other.my_real_body)
+        , my_reduction(other.my_reduction)
+        , my_value(other.my_identity_element)
+    { }
+    void operator()(Range& range) {
+        my_value = tbb::detail::invoke(my_real_body, range, std::move(my_value));
+    }
+
+    void join( lambda_reduce_body& rhs ) {
+        my_value = tbb::detail::invoke(my_reduction, std::move(my_value), std::move(rhs.my_value));
+    }
+
+    __TBB_nodiscard Value&& result() && noexcept {
+        return std::move(my_value);
+    }
+};
+
+
+// Requirements on Range concept are documented in blocked_range.h
+
+/** \page parallel_reduce_body_req Requirements on parallel_reduce body
+    Class \c Body implementing the concept of parallel_reduce body must define:
+    - \code Body::Body( Body&, split ); \endcode        Splitting constructor.
+                                        Must be able to run concurrently with operator() and method \c join
+    - \code Body::~Body(); \endcode                     Destructor
+    - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r
+                                                        and accumulating the result
+    - \code void Body::join( Body& b ); \endcode        Join results.
+                                                        The result in \c b should be merged into the result of \c this
+**/
+
+/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions)
+    TO BE DOCUMENTED
+**/
+
+/** \name parallel_reduce
+    See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/
+//@{
+
+//! Parallel iteration with reduction and default partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_reduce_body<Body, Range>)
+void parallel_reduce( const Range& range, Body& body ) {
+    start_reduce<Range, Body, const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER() );
+}
+
+//! Parallel iteration with reduction and simple_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_reduce_body<Body, Range>)
+void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
+    start_reduce<Range, Body, const simple_partitioner>::run( range, body, partitioner );
+}
+
+//! Parallel iteration with reduction and auto_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_reduce_body<Body, Range>)
+void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) {
+    start_reduce<Range, Body, const auto_partitioner>::run( range, body, partitioner );
+}
+
+//! Parallel iteration with reduction and static_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+    __TBB_requires(tbb_range<Range> && parallel_reduce_body<Body, Range>)
+void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) {
+    start_reduce<Range, Body, const static_partitioner>::run( range, body, partitioner );
+}
+
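+/** Editorial sketch, not part of this header: minimal use of the Body-based
+    overloads above and the functional overloads further below (the latter are
+    routed through lambda_reduce_body). The array \c a and length \c n are
+    caller-provided assumptions.
+    \code
+    #include <oneapi/tbb/parallel_reduce.h>
+    #include <oneapi/tbb/blocked_range.h>
+    #include <functional>   // std::plus
+    #include <cstddef>
+
+    // Class-based Body: splitting constructor plus join(), per the
+    // \ref parallel_reduce_body_req requirements.
+    struct SumBody {
+        const float* a;
+        float sum = 0;
+        explicit SumBody(const float* a_) : a(a_) {}
+        SumBody(SumBody& other, tbb::split) : a(other.a) {}  // may run concurrently with operator()
+        void operator()(const tbb::blocked_range<std::size_t>& r) {
+            for (std::size_t i = r.begin(); i != r.end(); ++i) sum += a[i];
+        }
+        void join(SumBody& rhs) { sum += rhs.sum; }          // fold right child into left
+    };
+
+    float sum_body_form(const float* a, std::size_t n) {
+        SumBody body(a);
+        tbb::parallel_reduce(tbb::blocked_range<std::size_t>(0, n), body);
+        return body.sum;
+    }
+
+    // Functional form: identity value, range reducer, and combiner.
+    float sum_lambda_form(const float* a, std::size_t n) {
+        return tbb::parallel_reduce(
+            tbb::blocked_range<std::size_t>(0, n), 0.0f,
+            [=](const tbb::blocked_range<std::size_t>& r, float acc) {
+                for (std::size_t i = r.begin(); i != r.end(); ++i) acc += a[i];
+                return acc;
+            },
+            std::plus<float>());
+    }
+    \endcode
+**/
+
+//!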
Parallel iteration with reduction and affinity_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) { + start_reduce::run( range, body, partitioner ); +} + +//! Parallel iteration with reduction, default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_reduce( const Range& range, Body& body, task_group_context& context ) { + start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER(), context ); +} + +//! Parallel iteration with reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { + start_reduce::run( range, body, partitioner, context ); +} + +//! Parallel iteration with reduction, auto_partitioner and user-supplied context +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) { + start_reduce::run( range, body, partitioner, context ); +} + +//! Parallel iteration with reduction, static_partitioner and user-supplied context +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) { + start_reduce::run( range, body, partitioner, context ); +} + +//! Parallel iteration with reduction, affinity_partitioner and user-supplied context +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) { + start_reduce::run( range, body, partitioner, context ); +} +/** parallel_reduce overloads that work with anonymous function objects + (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ + +//! Parallel iteration with reduction and default partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const __TBB_DEFAULT_PARTITIONER> + ::run(range, body, __TBB_DEFAULT_PARTITIONER() ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction and simple_partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const simple_partitioner& partitioner ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const simple_partitioner> + ::run(range, body, partitioner ); + return std::move(body).result(); +} + +//! 
Parallel iteration with reduction and auto_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const auto_partitioner& partitioner ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const auto_partitioner> + ::run( range, body, partitioner ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction and static_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const static_partitioner> + ::run( range, body, partitioner ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction and affinity_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + affinity_partitioner& partitioner ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,affinity_partitioner> + ::run( range, body, partitioner ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction, default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const __TBB_DEFAULT_PARTITIONER> + ::run( range, body, __TBB_DEFAULT_PARTITIONER(), context ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const simple_partitioner& partitioner, task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const simple_partitioner> + ::run( range, body, partitioner, context ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction, auto_partitioner and user-supplied context +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const auto_partitioner& partitioner, task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const auto_partitioner> + ::run( range, body, partitioner, context ); + return std::move(body).result(); +} + +//! 
Parallel iteration with reduction, static_partitioner and user-supplied context +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner, task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,const static_partitioner> + ::run( range, body, partitioner, context ); + return std::move(body).result(); +} + +//! Parallel iteration with reduction, affinity_partitioner and user-supplied context +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + affinity_partitioner& partitioner, task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_reduce,affinity_partitioner> + ::run( range, body, partitioner, context ); + return std::move(body).result(); +} + +//! Parallel iteration with deterministic reduction and default simple partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_deterministic_reduce( const Range& range, Body& body ) { + start_deterministic_reduce::run(range, body, simple_partitioner()); +} + +//! Parallel iteration with deterministic reduction and simple partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { + start_deterministic_reduce::run(range, body, partitioner); +} + +//! Parallel iteration with deterministic reduction and static partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) { + start_deterministic_reduce::run(range, body, partitioner); +} + +//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) { + start_deterministic_reduce::run( range, body, simple_partitioner(), context ); +} + +//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { + start_deterministic_reduce::run(range, body, partitioner, context); +} + +//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context. 
+/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_body) +void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) { + start_deterministic_reduce::run(range, body, partitioner, context); +} + +/** parallel_reduce overloads that work with anonymous function objects + (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ + +//! Parallel iteration with deterministic reduction and default simple partitioner. +// TODO: consider making static_partitioner the default +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { + return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner()); +} + +//! Parallel iteration with deterministic reduction and simple partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const simple_partitioner& partitioner ) { + lambda_reduce_body body(identity, real_body, reduction); + start_deterministic_reduce, const simple_partitioner> + ::run(range, body, partitioner); + return std::move(body).result(); +} + +//! Parallel iteration with deterministic reduction and static partitioner. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const static_partitioner& partitioner ) { + lambda_reduce_body body(identity, real_body, reduction); + start_deterministic_reduce, const static_partitioner> + ::run(range, body, partitioner); + return std::move(body).result(); +} + +//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + task_group_context& context ) { + return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner(), context); +} + +//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const simple_partitioner& partitioner, task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_deterministic_reduce, const simple_partitioner> + ::run(range, body, partitioner, context); + return std::move(body).result(); +} + +//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context. 
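+/** Editorial sketch, not part of this header: because splitting in the
+    deterministic overloads above is fully determined by the range and the
+    (simple or static) partitioner rather than by work stealing, and joins
+    happen in a fixed tree order, a floating-point reduction rounds the same
+    way, and hence returns bitwise the same result, on every run for a given
+    grainsize. \c a and \c n are caller-provided assumptions.
+    \code
+    #include <oneapi/tbb/parallel_reduce.h>
+    #include <oneapi/tbb/blocked_range.h>
+    #include <functional>
+    #include <cstddef>
+
+    float deterministic_sum(const float* a, std::size_t n) {
+        return tbb::parallel_deterministic_reduce(
+            tbb::blocked_range<std::size_t>(0, n, /*grainsize=*/1024), 0.0f,
+            [=](const tbb::blocked_range<std::size_t>& r, float acc) {
+                for (std::size_t i = r.begin(); i != r.end(); ++i) acc += a[i];
+                return acc;
+            },
+            std::plus<float>());
+    }
+    \endcode
+**/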
+/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_reduce_function && + parallel_reduce_combine) +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner, task_group_context& context ) { + lambda_reduce_body body(identity, real_body, reduction); + start_deterministic_reduce, const static_partitioner> + ::run(range, body, partitioner, context); + return std::move(body).result(); +} +//@} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::parallel_reduce; +using detail::d1::parallel_deterministic_reduce; +// Split types +using detail::split; +using detail::proportional_split; +} // namespace v1 + +} // namespace tbb +#endif /* __TBB_parallel_reduce_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_scan.h b/src/tbb/include/oneapi/tbb/parallel_scan.h new file mode 100644 index 000000000..d624f7ebd --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_scan.h @@ -0,0 +1,630 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_scan_H +#define __TBB_parallel_scan_H + +#include + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_exception.h" +#include "detail/_task.h" + +#include "profiling.h" +#include "partitioner.h" +#include "blocked_range.h" +#include "task_group.h" + +namespace tbb { +namespace detail { +namespace d1 { + +//! Used to indicate that the initial scan is being performed. +/** @ingroup algorithms */ +struct pre_scan_tag { + static bool is_final_scan() {return false;} + operator bool() {return is_final_scan();} +}; + +//! Used to indicate that the final scan is being performed. +/** @ingroup algorithms */ +struct final_scan_tag { + static bool is_final_scan() {return true;} + operator bool() {return is_final_scan();} +}; + +template +struct sum_node; + +#if __TBB_CPP20_CONCEPTS_PRESENT +} // namespace d1 +namespace d0 { + +template +concept parallel_scan_body = splittable && + requires( Body& body, const Range& range, Body& other ) { + body(range, tbb::detail::d1::pre_scan_tag{}); + body(range, tbb::detail::d1::final_scan_tag{}); + body.reverse_join(other); + body.assign(other); + }; + +template +concept parallel_scan_function = std::invocable&, + const Range&, const Value&, bool> && + std::convertible_to&, + const Range&, const Value&, bool>, + Value>; + +template +concept parallel_scan_combine = std::invocable&, + const Value&, const Value&> && + std::convertible_to&, + const Value&, const Value&>, + Value>; + +} // namespace d0 +namespace d1 { +#endif // __TBB_CPP20_CONCEPTS_PRESENT + +//! Performs final scan for a leaf +/** @ingroup algorithms */ +template +struct final_sum : public task { +private: + using sum_node_type = sum_node; + Body m_body; + aligned_space m_range; + //! Where to put result of last subrange, or nullptr if not last subrange. 
+ Body* m_stuff_last; + + wait_context& m_wait_context; + sum_node_type* m_parent = nullptr; +public: + small_object_allocator m_allocator; + final_sum( Body& body, wait_context& w_o, small_object_allocator& alloc ) : + m_body(body, split()), m_wait_context(w_o), m_allocator(alloc) { + poison_pointer(m_stuff_last); + } + + final_sum( final_sum& sum, small_object_allocator& alloc ) : + m_body(sum.m_body, split()), m_wait_context(sum.m_wait_context), m_allocator(alloc) { + poison_pointer(m_stuff_last); + } + + ~final_sum() { + m_range.begin()->~Range(); + } + void finish_construction( sum_node_type* parent, const Range& range, Body* stuff_last ) { + __TBB_ASSERT( m_parent == nullptr, nullptr ); + m_parent = parent; + new( m_range.begin() ) Range(range); + m_stuff_last = stuff_last; + } +private: + sum_node_type* release_parent() { + call_itt_task_notify(releasing, m_parent); + if (m_parent) { + auto parent = m_parent; + m_parent = nullptr; + if (parent->ref_count.fetch_sub(1) == 1) { + return parent; + } + } + else + m_wait_context.release(); + return nullptr; + } + sum_node_type* finalize(const execution_data& ed){ + sum_node_type* next_task = release_parent(); + m_allocator.delete_object(this, ed); + return next_task; + } + +public: + task* execute(execution_data& ed) override { + m_body( *m_range.begin(), final_scan_tag() ); + if( m_stuff_last ) + m_stuff_last->assign(m_body); + + return finalize(ed); + } + task* cancel(execution_data& ed) override { + return finalize(ed); + } + template + void operator()( const Range& r, Tag tag ) { + m_body( r, tag ); + } + void reverse_join( final_sum& a ) { + m_body.reverse_join(a.m_body); + } + void reverse_join( Body& body ) { + m_body.reverse_join(body); + } + void assign_to( Body& body ) { + body.assign(m_body); + } + void self_destroy(const execution_data& ed) { + m_allocator.delete_object(this, ed); + } +}; + +//! Split work to be done in the scan. +/** @ingroup algorithms */ +template +struct sum_node : public task { +private: + using final_sum_type = final_sum; +public: + final_sum_type *m_incoming; + final_sum_type *m_body; + Body *m_stuff_last; +private: + final_sum_type *m_left_sum; + sum_node *m_left; + sum_node *m_right; + bool m_left_is_final; + Range m_range; + wait_context& m_wait_context; + sum_node* m_parent; + small_object_allocator m_allocator; +public: + std::atomic ref_count{0}; + sum_node( const Range range, bool left_is_final_, sum_node* parent, wait_context& w_o, small_object_allocator& alloc ) : + m_stuff_last(nullptr), + m_left_sum(nullptr), + m_left(nullptr), + m_right(nullptr), + m_left_is_final(left_is_final_), + m_range(range), + m_wait_context(w_o), + m_parent(parent), + m_allocator(alloc) + { + if( m_parent ) + m_parent->ref_count.fetch_add(1); + // Poison fields that will be set by second pass. 
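+        // (They stay poisoned until prepare_for_execution() fills them in
+        // when the second, final-scan pass is set up.)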
+ poison_pointer(m_body); + poison_pointer(m_incoming); + } + + ~sum_node() { + if (m_parent) + m_parent->ref_count.fetch_sub(1); + } +private: + sum_node* release_parent() { + call_itt_task_notify(releasing, m_parent); + if (m_parent) { + auto parent = m_parent; + m_parent = nullptr; + if (parent->ref_count.fetch_sub(1) == 1) { + return parent; + } + } + else + m_wait_context.release(); + return nullptr; + } + task* create_child( const Range& range, final_sum_type& body, sum_node* child, final_sum_type* incoming, Body* stuff_last ) { + if( child ) { + __TBB_ASSERT( is_poisoned(child->m_body) && is_poisoned(child->m_incoming), nullptr ); + child->prepare_for_execution(body, incoming, stuff_last); + return child; + } else { + body.finish_construction(this, range, stuff_last); + return &body; + } + } + + sum_node* finalize(const execution_data& ed) { + sum_node* next_task = release_parent(); + m_allocator.delete_object(this, ed); + return next_task; + } + +public: + void prepare_for_execution(final_sum_type& body, final_sum_type* incoming, Body *stuff_last) { + this->m_body = &body; + this->m_incoming = incoming; + this->m_stuff_last = stuff_last; + } + task* execute(execution_data& ed) override { + if( m_body ) { + if( m_incoming ) + m_left_sum->reverse_join( *m_incoming ); + task* right_child = this->create_child(Range(m_range,split()), *m_left_sum, m_right, m_left_sum, m_stuff_last); + task* left_child = m_left_is_final ? nullptr : this->create_child(m_range, *m_body, m_left, m_incoming, nullptr); + ref_count = (left_child != nullptr) + (right_child != nullptr); + m_body = nullptr; + if( left_child ) { + spawn(*right_child, *ed.context); + return left_child; + } else { + return right_child; + } + } else { + return finalize(ed); + } + } + task* cancel(execution_data& ed) override { + return finalize(ed); + } + void self_destroy(const execution_data& ed) { + m_allocator.delete_object(this, ed); + } + template + friend struct start_scan; + + template + friend struct finish_scan; +}; + +//! 
Combine partial results +/** @ingroup algorithms */ +template +struct finish_scan : public task { +private: + using sum_node_type = sum_node; + using final_sum_type = final_sum; + final_sum_type** const m_sum_slot; + sum_node_type*& m_return_slot; + small_object_allocator m_allocator; +public: + std::atomic m_right_zombie; + sum_node_type& m_result; + std::atomic ref_count{2}; + finish_scan* m_parent; + wait_context& m_wait_context; + task* execute(execution_data& ed) override { + __TBB_ASSERT( m_result.ref_count.load() == static_cast((m_result.m_left!=nullptr)+(m_result.m_right!=nullptr)), nullptr ); + if( m_result.m_left ) + m_result.m_left_is_final = false; + final_sum_type* right_zombie = m_right_zombie.load(std::memory_order_acquire); + if( right_zombie && m_sum_slot ) + (*m_sum_slot)->reverse_join(*m_result.m_left_sum); + __TBB_ASSERT( !m_return_slot, nullptr ); + if( right_zombie || m_result.m_right ) { + m_return_slot = &m_result; + } else { + m_result.self_destroy(ed); + } + if( right_zombie && !m_sum_slot && !m_result.m_right ) { + right_zombie->self_destroy(ed); + m_right_zombie.store(nullptr, std::memory_order_relaxed); + } + return finalize(ed); + } + task* cancel(execution_data& ed) override { + return finalize(ed); + } + finish_scan(sum_node_type*& return_slot, final_sum_type** sum, sum_node_type& result_, finish_scan* parent, wait_context& w_o, small_object_allocator& alloc) : + m_sum_slot(sum), + m_return_slot(return_slot), + m_allocator(alloc), + m_right_zombie(nullptr), + m_result(result_), + m_parent(parent), + m_wait_context(w_o) + { + __TBB_ASSERT( !m_return_slot, nullptr ); + } +private: + finish_scan* release_parent() { + call_itt_task_notify(releasing, m_parent); + if (m_parent) { + auto parent = m_parent; + m_parent = nullptr; + if (parent->ref_count.fetch_sub(1) == 1) { + return parent; + } + } + else + m_wait_context.release(); + return nullptr; + } + finish_scan* finalize(const execution_data& ed) { + finish_scan* next_task = release_parent(); + m_allocator.delete_object(this, ed); + return next_task; + } +}; + +//! Initial task to split the work +/** @ingroup algorithms */ +template +struct start_scan : public task { +private: + using sum_node_type = sum_node; + using final_sum_type = final_sum; + using finish_pass1_type = finish_scan; + std::reference_wrapper m_return_slot; + Range m_range; + std::reference_wrapper m_body; + typename Partitioner::partition_type m_partition; + /** Non-null if caller is requesting total. 
*/ + final_sum_type** m_sum_slot; + bool m_is_final; + bool m_is_right_child; + + finish_pass1_type* m_parent; + small_object_allocator m_allocator; + wait_context& m_wait_context; + + finish_pass1_type* release_parent() { + call_itt_task_notify(releasing, m_parent); + if (m_parent) { + auto parent = m_parent; + m_parent = nullptr; + if (parent->ref_count.fetch_sub(1) == 1) { + return parent; + } + } + else + m_wait_context.release(); + return nullptr; + } + + finish_pass1_type* finalize( const execution_data& ed ) { + finish_pass1_type* next_task = release_parent(); + m_allocator.delete_object(this, ed); + return next_task; + } + +public: + task* execute( execution_data& ) override; + task* cancel( execution_data& ed ) override { + return finalize(ed); + } + start_scan( sum_node_type*& return_slot, start_scan& parent, small_object_allocator& alloc ) : + m_return_slot(return_slot), + m_range(parent.m_range,split()), + m_body(parent.m_body), + m_partition(parent.m_partition,split()), + m_sum_slot(parent.m_sum_slot), + m_is_final(parent.m_is_final), + m_is_right_child(true), + m_parent(parent.m_parent), + m_allocator(alloc), + m_wait_context(parent.m_wait_context) + { + __TBB_ASSERT( !m_return_slot, nullptr ); + parent.m_is_right_child = false; + } + + start_scan( sum_node_type*& return_slot, const Range& range, final_sum_type& body, const Partitioner& partitioner, wait_context& w_o, small_object_allocator& alloc ) : + m_return_slot(return_slot), + m_range(range), + m_body(body), + m_partition(partitioner), + m_sum_slot(nullptr), + m_is_final(true), + m_is_right_child(false), + m_parent(nullptr), + m_allocator(alloc), + m_wait_context(w_o) + { + __TBB_ASSERT( !m_return_slot, nullptr ); + } + + static void run( const Range& range, Body& body, const Partitioner& partitioner ) { + if( !range.empty() ) { + task_group_context context(PARALLEL_SCAN); + + using start_pass1_type = start_scan; + sum_node_type* root = nullptr; + wait_context w_ctx{1}; + small_object_allocator alloc{}; + + auto& temp_body = *alloc.new_object(body, w_ctx, alloc); + temp_body.reverse_join(body); + + auto& pass1 = *alloc.new_object(/*m_return_slot=*/root, range, temp_body, partitioner, w_ctx, alloc); + + execute_and_wait(pass1, context, w_ctx, context); + if( root ) { + root->prepare_for_execution(temp_body, nullptr, &body); + w_ctx.reserve(); + execute_and_wait(*root, context, w_ctx, context); + } else { + temp_body.assign_to(body); + temp_body.finish_construction(nullptr, range, nullptr); + alloc.delete_object(&temp_body); + } + } + } +}; + +template +task* start_scan::execute( execution_data& ed ) { + // Inspecting m_parent->result.left_sum would ordinarily be a race condition. + // But we inspect it only if we are not a stolen task, in which case we + // know that task assigning to m_parent->result.left_sum has completed. 
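+    // A right child that was actually stolen, or whose body no longer matches
+    // the left sibling's published sum, is treated as "virtually stolen": it
+    // gets a fresh zombie body below and is demoted from final scan to
+    // pre-scan, so finish_scan can fold its partial sum in later.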
+ __TBB_ASSERT(!m_is_right_child || m_parent, "right child is never an orphan"); + bool treat_as_stolen = m_is_right_child && (is_stolen(ed) || &m_body.get()!=m_parent->m_result.m_left_sum); + if( treat_as_stolen ) { + // Invocation is for right child that has been really stolen or needs to be virtually stolen + small_object_allocator alloc{}; + final_sum_type* right_zombie = alloc.new_object(m_body, alloc); + m_parent->m_right_zombie.store(right_zombie, std::memory_order_release); + m_body = *right_zombie; + m_is_final = false; + } + task* next_task = nullptr; + if( (m_is_right_child && !treat_as_stolen) || !m_range.is_divisible() || m_partition.should_execute_range(ed) ) { + if( m_is_final ) + m_body(m_range, final_scan_tag()); + else if( m_sum_slot ) + m_body(m_range, pre_scan_tag()); + if( m_sum_slot ) + *m_sum_slot = &m_body.get(); + __TBB_ASSERT( !m_return_slot, nullptr ); + + next_task = finalize(ed); + } else { + small_object_allocator alloc{}; + auto result = alloc.new_object(m_range,/*m_left_is_final=*/m_is_final, m_parent? &m_parent->m_result: nullptr, m_wait_context, alloc); + + auto new_parent = alloc.new_object(m_return_slot, m_sum_slot, *result, m_parent, m_wait_context, alloc); + m_parent = new_parent; + + // Split off right child + auto& right_child = *alloc.new_object(/*m_return_slot=*/result->m_right, *this, alloc); + + spawn(right_child, *ed.context); + + m_sum_slot = &result->m_left_sum; + m_return_slot = result->m_left; + + __TBB_ASSERT( !m_return_slot, nullptr ); + next_task = this; + } + return next_task; +} + +template +class lambda_scan_body { + Value m_sum_slot; + const Value& identity_element; + const Scan& m_scan; + const ReverseJoin& m_reverse_join; +public: + void operator=(const lambda_scan_body&) = delete; + lambda_scan_body(const lambda_scan_body&) = default; + + lambda_scan_body( const Value& identity, const Scan& scan, const ReverseJoin& rev_join ) + : m_sum_slot(identity) + , identity_element(identity) + , m_scan(scan) + , m_reverse_join(rev_join) {} + + lambda_scan_body( lambda_scan_body& b, split ) + : m_sum_slot(b.identity_element) + , identity_element(b.identity_element) + , m_scan(b.m_scan) + , m_reverse_join(b.m_reverse_join) {} + + template + void operator()( const Range& r, Tag tag ) { + m_sum_slot = tbb::detail::invoke(m_scan, r, m_sum_slot, tag); + } + + void reverse_join( lambda_scan_body& a ) { + m_sum_slot = tbb::detail::invoke(m_reverse_join, a.m_sum_slot, m_sum_slot); + } + + void assign( lambda_scan_body& b ) { + m_sum_slot = b.m_sum_slot; + } + + Value result() const { + return m_sum_slot; + } +}; + +// Requirements on Range concept are documented in blocked_range.h + +/** \page parallel_scan_body_req Requirements on parallel_scan body + Class \c Body implementing the concept of parallel_scan body must define: + - \code Body::Body( Body&, split ); \endcode Splitting constructor. + Split \c b so that \c this and \c b can accumulate separately + - \code Body::~Body(); \endcode Destructor + - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode + Preprocess iterations for range \c r + - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode + Do final processing for iterations of range \c r + - \code void Body::reverse_join( Body& a ); \endcode + Merge preprocessing state of \c a into \c this, where \c a was + created earlier from \c b by b's splitting constructor +**/ + +/** \name parallel_scan + See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". 
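+
+    Editorial sketch, not part of this header: a functional-form running sum
+    over a caller-provided \c std::vector<int>, writing prefix totals into
+    \c y. The scan functor receives \c false during pre-scan passes and
+    \c true during the final pass, which is the only pass that may produce
+    output.
+    \code
+    #include <oneapi/tbb/parallel_scan.h>
+    #include <oneapi/tbb/blocked_range.h>
+    #include <vector>
+    #include <cstddef>
+
+    std::vector<int> prefix_sum(const std::vector<int>& x) {
+        std::vector<int> y(x.size());
+        tbb::parallel_scan(
+            tbb::blocked_range<std::size_t>(0, x.size()), 0,
+            [&](const tbb::blocked_range<std::size_t>& r, int sum, bool is_final) {
+                for (std::size_t i = r.begin(); i != r.end(); ++i) {
+                    sum += x[i];
+                    if (is_final) y[i] = sum;  // only the final pass writes output
+                }
+                return sum;                    // partial sum handed rightward
+            },
+            [](int left, int right) { return left + right; });  // reverse_join
+        return y;
+    }
+    \endcode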
**/ +//@{ + +//! Parallel prefix with default partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_scan_body) +void parallel_scan( const Range& range, Body& body ) { + start_scan::run(range,body,__TBB_DEFAULT_PARTITIONER()); +} + +//! Parallel prefix with simple_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_scan_body) +void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) { + start_scan::run(range, body, partitioner); +} + +//! Parallel prefix with auto_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_scan_body) +void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { + start_scan::run(range, body, partitioner); +} + +//! Parallel prefix with default partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_scan_function && + parallel_scan_combine) +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join ) { + lambda_scan_body body(identity, scan, reverse_join); + parallel_scan(range, body, __TBB_DEFAULT_PARTITIONER()); + return body.result(); +} + +//! Parallel prefix with simple_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_scan_function && + parallel_scan_combine) +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join, + const simple_partitioner& partitioner ) { + lambda_scan_body body(identity, scan, reverse_join); + parallel_scan(range, body, partitioner); + return body.result(); +} + +//! Parallel prefix with auto_partitioner +/** @ingroup algorithms **/ +template + __TBB_requires(tbb_range && parallel_scan_function && + parallel_scan_combine) +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join, + const auto_partitioner& partitioner ) { + lambda_scan_body body(identity, scan, reverse_join); + parallel_scan(range, body, partitioner); + return body.result(); +} + +} // namespace d1 +} // namespace detail + +inline namespace v1 { + using detail::d1::parallel_scan; + using detail::d1::pre_scan_tag; + using detail::d1::final_scan_tag; +} // namespace v1 + +} // namespace tbb + +#endif /* __TBB_parallel_scan_H */ diff --git a/src/tbb/include/oneapi/tbb/parallel_sort.h b/src/tbb/include/oneapi/tbb/parallel_sort.h new file mode 100644 index 000000000..4c0578eb0 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/parallel_sort.h @@ -0,0 +1,288 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_parallel_sort_H +#define __TBB_parallel_sort_H + +#include "detail/_namespace_injection.h" +#include "parallel_for.h" +#include "blocked_range.h" +#include "profiling.h" + +#include +#include +#include +#include + +namespace tbb { +namespace detail { +#if __TBB_CPP20_CONCEPTS_PRESENT +inline namespace d0 { + +// TODO: consider using std::strict_weak_order concept +template +concept compare = requires( const std::remove_reference_t& comp, typename std::iterator_traits::reference value ) { + // Forward via iterator_traits::reference + { comp(typename std::iterator_traits::reference(value), + typename std::iterator_traits::reference(value)) } -> std::convertible_to; +}; + +// Inspired by std::__PartiallyOrderedWith exposition only concept +template +concept less_than_comparable = requires( const std::remove_reference_t& lhs, + const std::remove_reference_t& rhs ) { + { lhs < rhs } -> boolean_testable; +}; + +} // namespace d0 +#endif // __TBB_CPP20_CONCEPTS_PRESENT +namespace d1 { + +//! Range used in quicksort to split elements into subranges based on a value. +/** The split operation selects a splitter and places all elements less than or equal + to the value in the first range and the remaining elements in the second range. + @ingroup algorithms */ +template +class quick_sort_range { + std::size_t median_of_three( const RandomAccessIterator& array, std::size_t l, std::size_t m, std::size_t r ) const { + return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp(array[l], array[r]) ? r : l ) ) + : ( comp(array[r], array[m]) ? m : ( comp(array[r], array[l]) ? r : l ) ); + } + + std::size_t pseudo_median_of_nine( const RandomAccessIterator& array, const quick_sort_range& range ) const { + std::size_t offset = range.size / 8u; + return median_of_three(array, + median_of_three(array, 0 , offset, offset * 2), + median_of_three(array, offset * 3, offset * 4, offset * 5), + median_of_three(array, offset * 6, offset * 7, range.size - 1)); + + } + + std::size_t split_range( quick_sort_range& range ) { + RandomAccessIterator array = range.begin; + RandomAccessIterator first_element = range.begin; + std::size_t m = pseudo_median_of_nine(array, range); + if( m != 0 ) std::iter_swap(array, array + m); + + std::size_t i = 0; + std::size_t j = range.size; + // Partition interval [i + 1,j - 1] with key *first_element. + for(;;) { + __TBB_ASSERT( i < j, nullptr ); + // Loop must terminate since array[l] == *first_element. + do { + --j; + __TBB_ASSERT( i <= j, "bad ordering relation?" ); + } while( comp(*first_element, array[j]) ); + do { + __TBB_ASSERT( i <= j, nullptr ); + if( i == j ) goto partition; + ++i; + } while( comp(array[i], *first_element) ); + if( i == j ) goto partition; + std::iter_swap(array + i, array + j); + } +partition: + // Put the partition key were it belongs + std::iter_swap(array + j, first_element); + // array[l..j) is less or equal to key. + // array(j..r) is greater or equal to key. 
+ // array[j] is equal to key + i = j + 1; + std::size_t new_range_size = range.size - i; + range.size = j; + return new_range_size; + } + +public: + quick_sort_range() = default; + quick_sort_range( const quick_sort_range& ) = default; + void operator=( const quick_sort_range& ) = delete; + + static constexpr std::size_t grainsize = 500; + const Compare& comp; + std::size_t size; + RandomAccessIterator begin; + + quick_sort_range( RandomAccessIterator begin_, std::size_t size_, const Compare& comp_ ) : + comp(comp_), size(size_), begin(begin_) {} + + bool empty() const { return size == 0; } + bool is_divisible() const { return size >= grainsize; } + + quick_sort_range( quick_sort_range& range, split ) + : comp(range.comp) + , size(split_range(range)) + // +1 accounts for the pivot element, which is at its correct place + // already and, therefore, is not included into subranges. + , begin(range.begin + range.size + 1) {} +}; + +//! Body class used to test if elements in a range are presorted +/** @ingroup algorithms */ +template +class quick_sort_pretest_body { + const Compare& comp; + task_group_context& context; + +public: + quick_sort_pretest_body() = default; + quick_sort_pretest_body( const quick_sort_pretest_body& ) = default; + void operator=( const quick_sort_pretest_body& ) = delete; + + quick_sort_pretest_body( const Compare& _comp, task_group_context& _context ) : comp(_comp), context(_context) {} + + void operator()( const blocked_range& range ) const { + RandomAccessIterator my_end = range.end(); + + int i = 0; + //TODO: consider using std::is_sorted() for each 64 iterations (requires performance measurements) + for( RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i ) { + if( i % 64 == 0 && context.is_group_execution_cancelled() ) break; + + // The k - 1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1 + if( comp(*(k), *(k - 1)) ) { + context.cancel_group_execution(); + break; + } + } + } +}; + +//! Body class used to sort elements in a range that is smaller than the grainsize. +/** @ingroup algorithms */ +template +struct quick_sort_body { + void operator()( const quick_sort_range& range ) const { + std::sort(range.begin, range.begin + range.size, range.comp); + } +}; + +//! Method to perform parallel_for based quick sort. +/** @ingroup algorithms */ +template +void do_parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { + parallel_for(quick_sort_range(begin, end - begin, comp), + quick_sort_body(), + auto_partitioner()); +} + +//! Wrapper method to initiate the sort by calling parallel_for. +/** @ingroup algorithms */ +template +void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { + task_group_context my_context(PARALLEL_SORT); + constexpr int serial_cutoff = 9; + + __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" 
);
+    RandomAccessIterator k = begin;
+    for ( ; k != begin + serial_cutoff; ++k ) {
+        if ( comp(*(k + 1), *k) ) {
+            do_parallel_quick_sort(begin, end, comp);
+            return;
+        }
+    }
+
+    // Check whether the input range is already sorted
+    parallel_for(blocked_range<RandomAccessIterator>(k + 1, end),
+                 quick_sort_pretest_body<RandomAccessIterator, Compare>(comp, my_context),
+                 auto_partitioner(),
+                 my_context);
+
+    if ( my_context.is_group_execution_cancelled() )
+        do_parallel_quick_sort(begin, end, comp);
+}
+
+/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort
+    Requirements on the iterator type \c It and its value type \c T for \c parallel_sort:
+
+    - \code void iter_swap( It a, It b ) \endcode Swaps the values of the elements the given
+    iterators \c a and \c b are pointing to. \c It should be a random access iterator.
+
+    - \code bool Compare::operator()( const T& x, const T& y ) \endcode True if x comes before y;
+**/
+
+/** \name parallel_sort
+    See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/
+//@{
+
+#if __TBB_CPP20_CONCEPTS_PRESENT
+template <typename RandomAccessIterator>
+using iter_value_type = typename std::iterator_traits<RandomAccessIterator>::value_type;
+
+template <typename Range>
+using range_value_type = typename std::iterator_traits<container_based_sequence_iterator<Range>>::value_type;
+#endif
+
+//! Sorts the data in [begin,end) using the given comparator
+/** The compare function object is used for all comparisons between elements during sorting.
+    The compare object must define a bool operator() function.
+    @ingroup algorithms **/
+template<typename RandomAccessIterator, typename Compare>
+    __TBB_requires(std::random_access_iterator<RandomAccessIterator> &&
+                   compare<Compare, RandomAccessIterator> &&
+                   std::movable<iter_value_type<RandomAccessIterator>>)
+void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) {
+    constexpr int min_parallel_size = 500;
+    if( end > begin ) {
+        if( end - begin < min_parallel_size ) {
+            std::sort(begin, end, comp);
+        } else {
+            parallel_quick_sort(begin, end, comp);
+        }
+    }
+}
+
+//! Sorts the data in [begin,end) with a default comparator \c std::less
+/** @ingroup algorithms **/
+template<typename RandomAccessIterator>
+    __TBB_requires(std::random_access_iterator<RandomAccessIterator> &&
+                   less_than_comparable<iter_value_type<RandomAccessIterator>> &&
+                   std::movable<iter_value_type<RandomAccessIterator>>)
+void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) {
+    parallel_sort(begin, end, std::less<typename std::iterator_traits<RandomAccessIterator>::value_type>());
+}
+
+//! Sorts the data in rng using the given comparator
+/** @ingroup algorithms **/
+template<typename Range, typename Compare>
+    __TBB_requires(container_based_sequence<Range> &&
+                   compare<Compare, container_based_sequence_iterator<Range>> &&
+                   std::movable<range_value_type<Range>>)
+void parallel_sort( Range&& rng, const Compare& comp ) {
+    parallel_sort(std::begin(rng), std::end(rng), comp);
+}
+
+//! Sorts the data in rng with a default comparator \c std::less
+/** @ingroup algorithms **/
+template<typename Range>
+    __TBB_requires(container_based_sequence<Range> &&
+                   less_than_comparable<range_value_type<Range>> &&
+                   std::movable<range_value_type<Range>>)
+void parallel_sort( Range&& rng ) {
+    parallel_sort(std::begin(rng), std::end(rng));
+}
+//@}
+
+} // namespace d1
+} // namespace detail
+
+inline namespace v1 {
+    using detail::d1::parallel_sort;
+} // namespace v1
+} // namespace tbb
+
+#endif /*__TBB_parallel_sort_H*/
diff --git a/src/tbb/include/oneapi/tbb/partitioner.h b/src/tbb/include/oneapi/tbb/partitioner.h
new file mode 100644
index 000000000..58ff726fa
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/partitioner.h
@@ -0,0 +1,681 @@
+/*
+    Copyright (c) 2005-2023 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_partitioner_H +#define __TBB_partitioner_H + +#ifndef __TBB_INITIAL_CHUNKS +// initial task divisions per thread +#define __TBB_INITIAL_CHUNKS 2 +#endif +#ifndef __TBB_RANGE_POOL_CAPACITY +// maximum number of elements in range pool +#define __TBB_RANGE_POOL_CAPACITY 8 +#endif +#ifndef __TBB_INIT_DEPTH +// initial value for depth of range pool +#define __TBB_INIT_DEPTH 5 +#endif +#ifndef __TBB_DEMAND_DEPTH_ADD +// when imbalance is found range splits this value times more +#define __TBB_DEMAND_DEPTH_ADD 1 +#endif + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_aligned_space.h" +#include "detail/_utils.h" +#include "detail/_template_helpers.h" +#include "detail/_range_common.h" +#include "detail/_task.h" +#include "detail/_small_object_pool.h" + +#include "cache_aligned_allocator.h" +#include "task_group.h" // task_group_context +#include "task_arena.h" + +#include +#include +#include + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Workaround for overzealous compiler warnings + // #pragma warning (push) + // #pragma warning (disable: 4244) +#endif + +namespace tbb { +namespace detail { + +namespace d1 { +class auto_partitioner; +class simple_partitioner; +class static_partitioner; +class affinity_partitioner; +class affinity_partition_type; +class affinity_partitioner_base; + +inline std::size_t get_initial_auto_partitioner_divisor() { + const std::size_t factor = 4; + return factor * static_cast(max_concurrency()); +} + +//! Defines entry point for affinity partitioner into oneTBB run-time library. +class affinity_partitioner_base: no_copy { + friend class affinity_partitioner; + friend class affinity_partition_type; + //! Array that remembers affinities of tree positions to affinity_id. + /** nullptr if my_size==0. */ + slot_id* my_array; + //! Number of elements in my_array. + std::size_t my_size; + //! Zeros the fields. + affinity_partitioner_base() : my_array(nullptr), my_size(0) {} + //! Deallocates my_array. + ~affinity_partitioner_base() { resize(0); } + //! Resize my_array. + /** Retains values if resulting size is the same. */ + void resize(unsigned factor) { + // Check factor to avoid asking for number of workers while there might be no arena. + unsigned max_threads_in_arena = static_cast(max_concurrency()); + std::size_t new_size = factor ? factor * max_threads_in_arena : 0; + if (new_size != my_size) { + if (my_array) { + r1::cache_aligned_deallocate(my_array); + // Following two assignments must be done here for sake of exception safety. 
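+            // (If cache_aligned_allocate() below throws, my_array and my_size
+            // must already describe an empty array instead of dangling.)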
+ my_array = nullptr; + my_size = 0; + } + if (new_size) { + my_array = static_cast(r1::cache_aligned_allocate(new_size * sizeof(slot_id))); + std::fill_n(my_array, new_size, no_slot); + my_size = new_size; + } + } + } +}; + +template struct start_for; +template struct start_scan; +template struct start_reduce; +template struct start_deterministic_reduce; + +struct node { + node* my_parent{}; + std::atomic m_ref_count{}; + + node() = default; + node(node* parent, int ref_count) : + my_parent{parent}, m_ref_count{ref_count} { + __TBB_ASSERT(ref_count > 0, "The ref count must be positive"); + } +}; + +struct wait_node : node { + wait_node() : node{ nullptr, 1 } {} + wait_context m_wait{1}; +}; + +//! Join task node that contains shared flag for stealing feedback +struct tree_node : public node { + small_object_allocator m_allocator; + std::atomic m_child_stolen{false}; + + tree_node(node* parent, int ref_count, small_object_allocator& alloc) + : node{parent, ref_count} + , m_allocator{alloc} {} + + void join(task_group_context*) {/*dummy, required only for reduction algorithms*/}; + + template + static void mark_task_stolen(Task &t) { + std::atomic &flag = static_cast(t.my_parent)->m_child_stolen; +#if TBB_USE_PROFILING_TOOLS + // Threading tools respect lock prefix but report false-positive data-race via plain store + flag.exchange(true); +#else + flag.store(true, std::memory_order_relaxed); +#endif // TBB_USE_PROFILING_TOOLS + } + template + static bool is_peer_stolen(Task &t) { + return static_cast(t.my_parent)->m_child_stolen.load(std::memory_order_relaxed); + } +}; + +// Context used to check cancellation state during reduction join process +template +void fold_tree(node* n, const execution_data& ed) { + for (;;) { + __TBB_ASSERT(n, nullptr); + __TBB_ASSERT(n->m_ref_count.load(std::memory_order_relaxed) > 0, "The refcount must be positive."); + call_itt_task_notify(releasing, n); + if (--n->m_ref_count > 0) { + return; + } + node* parent = n->my_parent; + if (!parent) { + break; + }; + + call_itt_task_notify(acquired, n); + TreeNodeType* self = static_cast(n); + self->join(ed.context); + self->m_allocator.delete_object(self, ed); + n = parent; + } + // Finish parallel for execution when the root (last node) is reached + static_cast(n)->m_wait.release(); +} + +//! Depth is a relative depth of recursive division inside a range pool. Relative depth allows +//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented +//! by a number that cannot fit into machine word. +typedef unsigned char depth_t; + +//! Range pool stores ranges of type T in a circular buffer with MaxCapacity +template +class range_vector { + depth_t my_head; + depth_t my_tail; + depth_t my_size; + depth_t my_depth[MaxCapacity]; // relative depths of stored ranges + tbb::detail::aligned_space my_pool; + +public: + //! initialize via first range in pool + range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) { + my_depth[0] = 0; + new( static_cast(my_pool.begin()) ) T(elem);//TODO: std::move? + } + ~range_vector() { + while( !empty() ) pop_back(); + } + bool empty() const { return my_size == 0; } + depth_t size() const { return my_size; } + //! Populates range pool via ranges up to max depth or while divisible + //! max_depth starts from 0, e.g. 
value 2 makes 3 ranges in the pool up to two 1/4 pieces + void split_to_fill(depth_t max_depth) { + while( my_size < MaxCapacity && is_divisible(max_depth) ) { + depth_t prev = my_head; + my_head = (my_head + 1) % MaxCapacity; + new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move? + my_pool.begin()[prev].~T(); // instead of assignment + new(my_pool.begin()+prev) T(my_pool.begin()[my_head], detail::split()); // do 'inverse' split + my_depth[my_head] = ++my_depth[prev]; + my_size++; + } + } + void pop_back() { + __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size"); + my_pool.begin()[my_head].~T(); + my_size--; + my_head = (my_head + MaxCapacity - 1) % MaxCapacity; + } + void pop_front() { + __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size"); + my_pool.begin()[my_tail].~T(); + my_size--; + my_tail = (my_tail + 1) % MaxCapacity; + } + T& back() { + __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size"); + return my_pool.begin()[my_head]; + } + T& front() { + __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size"); + return my_pool.begin()[my_tail]; + } + //! similarly to front(), returns depth of the first range in the pool + depth_t front_depth() { + __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size"); + return my_depth[my_tail]; + } + depth_t back_depth() { + __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size"); + return my_depth[my_head]; + } + bool is_divisible(depth_t max_depth) { + return back_depth() < max_depth && back().is_divisible(); + } +}; + +//! Provides default methods for partition objects and common algorithm blocks. +template +struct partition_type_base { + typedef detail::split split_type; + // decision makers + void note_affinity( slot_id ) {} + template + bool check_being_stolen(Task&, const execution_data&) { return false; } // part of old should_execute_range() + template split_type get_split() { return split(); } + Partition& self() { return *static_cast(this); } // CRTP helper + + template + void work_balance(StartType &start, Range &range, const execution_data&) { + start.run_body( range ); // static partitioner goes here + } + + template + void execute(StartType &start, Range &range, execution_data& ed) { + // The algorithm in a few words ([]-denotes calls to decision methods of partitioner): + // [If this task is stolen, adjust depth and divisions if necessary, set flag]. + // If range is divisible { + // Spread the work while [initial divisions left]; + // Create trap task [if necessary]; + // } + // If not divisible or [max depth is reached], execute, else do the range pool part + if ( range.is_divisible() ) { + if ( self().is_divisible() ) { + do { // split until is divisible + typename Partition::split_type split_obj = self().template get_split(); + start.offer_work( split_obj, ed ); + } while ( range.is_divisible() && self().is_divisible() ); + } + } + self().work_balance(start, range, ed); + } +}; + +//! Provides default splitting strategy for partition objects. +template +struct adaptive_mode : partition_type_base { + typedef Partition my_partition; + std::size_t my_divisor; + // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves. + // A task which has only one index must produce the right split without reserved index in order to avoid + // it to be overwritten in note_affinity() of the created (right) task. + // I.e. 
a task created deeper than the affinity array can remember must not save its affinity (LIFO order) + static const unsigned factor = 1; + adaptive_mode() : my_divisor(get_initial_auto_partitioner_divisor() / 4 * my_partition::factor) {} + adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {} + adaptive_mode(adaptive_mode&, const proportional_split&) : my_divisor(0) + { + // left blank as my_divisor gets overridden in the successors' constructors + } + /*! Override do_split methods in order to specify splitting strategy */ + std::size_t do_split(adaptive_mode &src, split) { + return src.my_divisor /= 2u; + } +}; + + +//! Provides proportional splitting strategy for partition objects +template +struct proportional_mode : adaptive_mode { + typedef Partition my_partition; + using partition_type_base::self; // CRTP helper to get access to derived classes + + proportional_mode() : adaptive_mode() {} + proportional_mode(proportional_mode &src, split) : adaptive_mode(src, split()) {} + proportional_mode(proportional_mode &src, const proportional_split& split_obj) + : adaptive_mode(src, split_obj) + { + self().my_divisor = do_split(src, split_obj); + } + std::size_t do_split(proportional_mode &src, const proportional_split& split_obj) { + std::size_t portion = split_obj.right() * my_partition::factor; + portion = (portion + my_partition::factor/2) & (0ul - my_partition::factor); + src.my_divisor -= portion; + return portion; + } + bool is_divisible() { // part of old should_execute_range() + return self().my_divisor > my_partition::factor; + } + template + proportional_split get_split() { + // Create the proportion from partitioner internal resources (threads) that would be used: + // - into proportional_mode constructor to split the partitioner + // - if Range supports the proportional_split constructor it would use proposed proportion, + // otherwise, the tbb::proportional_split object will be implicitly (for Range implementer) + // casted to tbb::split + + std::size_t n = self().my_divisor / my_partition::factor; + std::size_t right = n / 2; + std::size_t left = n - right; + return proportional_split(left, right); + } +}; + +static std::size_t get_initial_partition_head() { + int current_index = tbb::this_task_arena::current_thread_index(); + if (current_index == tbb::task_arena::not_initialized) + current_index = 0; + return size_t(current_index); +} + +//! Provides default linear indexing of partitioner's sequence +template +struct linear_affinity_mode : proportional_mode { + std::size_t my_head; + std::size_t my_max_affinity; + using proportional_mode::self; + linear_affinity_mode() : proportional_mode(), my_head(get_initial_partition_head()), + my_max_affinity(self().my_divisor) {} + linear_affinity_mode(linear_affinity_mode &src, split) : proportional_mode(src, split()) + , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {} + linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : proportional_mode(src, split_obj) + , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {} + void spawn_task(task& t, task_group_context& ctx) { + if (self().my_divisor) { + spawn(t, ctx, slot_id(my_head)); + } else { + spawn(t, ctx); + } + } +}; + +static bool is_stolen_task(const execution_data& ed) { + return execution_slot(ed) != original_slot(ed); +} + +/*! 
Determine work-balance phase implementing splitting & stealing actions */ +template +struct dynamic_grainsize_mode : Mode { + using Mode::self; + enum { + begin = 0, + run, + pass + } my_delay; + depth_t my_max_depth; + static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; + dynamic_grainsize_mode(): Mode() + , my_delay(begin) + , my_max_depth(__TBB_INIT_DEPTH) {} + dynamic_grainsize_mode(dynamic_grainsize_mode& p, split) + : Mode(p, split()) + , my_delay(pass) + , my_max_depth(p.my_max_depth) {} + dynamic_grainsize_mode(dynamic_grainsize_mode& p, const proportional_split& split_obj) + : Mode(p, split_obj) + , my_delay(begin) + , my_max_depth(p.my_max_depth) {} + template + bool check_being_stolen(Task &t, const execution_data& ed) { // part of old should_execute_range() + if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of binary tree + self().my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)? + if( is_stolen_task(ed) && t.my_parent->m_ref_count >= 2 ) { // runs concurrently with the left task +#if __TBB_USE_OPTIONAL_RTTI + // RTTI is available, check whether the cast is valid + // TODO: TBB_REVAMP_TODO __TBB_ASSERT(dynamic_cast(t.m_parent), 0); + // correctness of the cast relies on avoiding the root task for which: + // - initial value of my_divisor != 0 (protected by separate assertion) + // - is_stolen_task() always returns false for the root task. +#endif + tree_node::mark_task_stolen(t); + if( !my_max_depth ) my_max_depth++; + my_max_depth += __TBB_DEMAND_DEPTH_ADD; + return true; + } + } + return false; + } + depth_t max_depth() { return my_max_depth; } + void align_depth(depth_t base) { + __TBB_ASSERT(base <= my_max_depth, nullptr); + my_max_depth -= base; + } + template + void work_balance(StartType &start, Range &range, execution_data& ed) { + if( !range.is_divisible() || !self().max_depth() ) { + start.run_body( range ); + } + else { // do range pool + range_vector range_pool(range); + do { + range_pool.split_to_fill(self().max_depth()); // fill range pool + if( self().check_for_demand( start ) ) { + if( range_pool.size() > 1 ) { + start.offer_work( range_pool.front(), range_pool.front_depth(), ed ); + range_pool.pop_front(); + continue; + } + if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task + continue; // note: next split_to_fill() should split range at least once + } + start.run_body( range_pool.back() ); + range_pool.pop_back(); + } while( !range_pool.empty() && !ed.context->is_group_execution_cancelled() ); + } + } + template + bool check_for_demand(Task& t) { + if ( pass == my_delay ) { + if ( self().my_divisor > 1 ) // produce affinitized tasks while they have slot in array + return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more + else if ( self().my_divisor && my_max_depth ) { // make balancing task + self().my_divisor = 0; // once for each task; depth will be decreased in align_depth() + return true; + } + else if ( tree_node::is_peer_stolen(t) ) { + my_max_depth += __TBB_DEMAND_DEPTH_ADD; + return true; + } + } else if( begin == my_delay ) { + my_delay = pass; + } + return false; + } +}; + +class auto_partition_type: public dynamic_grainsize_mode > { +public: + auto_partition_type( const auto_partitioner& ) { + my_divisor *= __TBB_INITIAL_CHUNKS; + } + auto_partition_type( auto_partition_type& src, split) + : dynamic_grainsize_mode >(src, split()) {} + bool is_divisible() { // part of old should_execute_range() 
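+        // my_divisor counts the chunks this task may still hand out to other
+        // threads; once it drops to one, at most one more depth-limited split
+        // is permitted (below) before all remaining work stays in the local
+        // range pool.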
+ if( my_divisor > 1 ) return true; + if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead + // keep same fragmentation while splitting for the local task pool + my_max_depth--; + my_divisor = 0; // decrease max_depth once per task + return true; + } else return false; + } + template + bool check_for_demand(Task& t) { + if (tree_node::is_peer_stolen(t)) { + my_max_depth += __TBB_DEMAND_DEPTH_ADD; + return true; + } else return false; + } + void spawn_task(task& t, task_group_context& ctx) { + spawn(t, ctx); + } +}; + +class simple_partition_type: public partition_type_base { +public: + simple_partition_type( const simple_partitioner& ) {} + simple_partition_type( const simple_partition_type&, split ) {} + //! simplified algorithm + template + void execute(StartType &start, Range &range, execution_data& ed) { + split_type split_obj = split(); // start.offer_work accepts split_type as reference + while( range.is_divisible() ) + start.offer_work( split_obj, ed ); + start.run_body( range ); + } + void spawn_task(task& t, task_group_context& ctx) { + spawn(t, ctx); + } +}; + +class static_partition_type : public linear_affinity_mode { +public: + typedef detail::proportional_split split_type; + static_partition_type( const static_partitioner& ) {} + static_partition_type( static_partition_type& p, const proportional_split& split_obj ) + : linear_affinity_mode(p, split_obj) {} +}; + +class affinity_partition_type : public dynamic_grainsize_mode > { + static const unsigned factor_power = 4; // TODO: get a unified formula based on number of computing units + slot_id* my_array; +public: + static const unsigned factor = 1 << factor_power; // number of slots in affinity array per task + typedef detail::proportional_split split_type; + affinity_partition_type( affinity_partitioner_base& ap ) { + __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); + ap.resize(factor); + my_array = ap.my_array; + my_max_depth = factor_power + 1; + __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, nullptr ); + } + affinity_partition_type(affinity_partition_type& p, split) + : dynamic_grainsize_mode >(p, split()) + , my_array(p.my_array) {} + affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj) + : dynamic_grainsize_mode >(p, split_obj) + , my_array(p.my_array) {} + void note_affinity(slot_id id) { + if( my_divisor ) + my_array[my_head] = id; + } + void spawn_task(task& t, task_group_context& ctx) { + if (my_divisor) { + if (!my_array[my_head]) { + // TODO: consider new ideas with my_array for both affinity and static partitioner's, then code reuse + spawn(t, ctx, slot_id(my_head / factor)); + } else { + spawn(t, ctx, my_array[my_head]); + } + } else { + spawn(t, ctx); + } + } +}; + +//! A simple partitioner +/** Divides the range until the range is not divisible. 
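+    Useful when chunk size is tuned by hand via the range's grainsize.
+    A minimal usage sketch (illustrative only; 'n' and 'body' stand in for
+    user-provided values):
+    \code
+    tbb::parallel_for(tbb::blocked_range<int>(0, n, 16), body, tbb::simple_partitioner());
+    \endcode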
+    @ingroup algorithms */
+class simple_partitioner {
+public:
+    simple_partitioner() {}
+private:
+    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
+    // new implementation just extends existing interface
+    typedef simple_partition_type task_partition_type;
+    // TODO: consider to make split_type public
+    typedef simple_partition_type::split_type split_type;
+
+    // for parallel_scan only
+    class partition_type {
+    public:
+        bool should_execute_range(const execution_data& ) {return false;}
+        partition_type( const simple_partitioner& ) {}
+        partition_type( const partition_type&, split ) {}
+    };
+};
+
+//! An auto partitioner
+/** The range is initially divided into several large chunks.
+    Chunks are further subdivided into smaller pieces if demand is detected and they are divisible.
+    @ingroup algorithms */
+class auto_partitioner {
+public:
+    auto_partitioner() {}
+
+private:
+    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
+    // new implementation just extends existing interface
+    typedef auto_partition_type task_partition_type;
+    // TODO: consider to make split_type public
+    typedef auto_partition_type::split_type split_type;
+
+    //! Backward-compatible partition for auto and affinity partition objects.
+    class partition_type {
+        size_t num_chunks;
+        static const size_t VICTIM_CHUNKS = 4;
+    public:
+        bool should_execute_range(const execution_data& ed) {
+            if( num_chunks<VICTIM_CHUNKS && is_stolen_task(ed) )
+                num_chunks = VICTIM_CHUNKS;
+            return num_chunks==1;
+        }
+        partition_type( const auto_partitioner& )
+          : num_chunks(get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}
+        partition_type( partition_type& pt, split )
+          : num_chunks(pt.num_chunks = (pt.num_chunks+1u) / 2u) {}
+    };
+};
+
+//! A static partitioner
+class static_partitioner {
+public:
+    static_partitioner() {}
+
+private:
+    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
+    // new implementation just extends existing interface
+    typedef static_partition_type task_partition_type;
+    // TODO: consider to make split_type public
+    typedef static_partition_type::split_type split_type;
+};
+
+//! An affinity partitioner
+class affinity_partitioner : affinity_partitioner_base {
+public:
+    affinity_partitioner() {}
+
+private:
+    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
+    // new implementation just extends existing interface
+    typedef affinity_partition_type task_partition_type;
+    // TODO: consider to make split_type public
+    typedef affinity_partition_type::split_type split_type;
+};
+
+} // namespace d1
+} // namespace detail
+
+inline namespace v1 {
+// Partitioners
+using detail::d1::auto_partitioner;
+using detail::d1::simple_partitioner;
+using detail::d1::static_partitioner;
+using detail::d1::affinity_partitioner;
+// Split types
+using detail::split;
+using detail::proportional_split;
+} // namespace v1
+
+} // namespace tbb
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    // #pragma warning (pop)
+#endif // warning 4244 is back
+
+#undef __TBB_INITIAL_CHUNKS
+#undef __TBB_RANGE_POOL_CAPACITY
+#undef __TBB_INIT_DEPTH
+
+#endif /* __TBB_partitioner_H */
diff --git a/src/tbb/include/oneapi/tbb/profiling.h b/src/tbb/include/oneapi/tbb/profiling.h
new file mode 100644
index 000000000..412b5a35e
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/profiling.h
@@ -0,0 +1,243 @@
+/*
+    Copyright (c) 2005-2023 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_profiling_H +#define __TBB_profiling_H + +#include "detail/_config.h" +#include + +#include + +namespace tbb { +namespace detail { +inline namespace d0 { + // include list of index names + #define TBB_STRING_RESOURCE(index_name,str) index_name, + enum string_resource_index : std::uintptr_t { + #include "detail/_string_resource.h" + NUM_STRINGS + }; + #undef TBB_STRING_RESOURCE + + enum itt_relation + { + __itt_relation_is_unknown = 0, + __itt_relation_is_dependent_on, /**< "A is dependent on B" means that A cannot start until B completes */ + __itt_relation_is_sibling_of, /**< "A is sibling of B" means that A and B were created as a group */ + __itt_relation_is_parent_of, /**< "A is parent of B" means that A created B */ + __itt_relation_is_continuation_of, /**< "A is continuation of B" means that A assumes the dependencies of B */ + __itt_relation_is_child_of, /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */ + __itt_relation_is_continued_by, /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */ + __itt_relation_is_predecessor_to /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */ + }; + +//! Unicode support +#if (_WIN32||_WIN64) + //! Unicode character type. Always wchar_t on Windows. + using tchar = wchar_t; +#else /* !WIN */ + using tchar = char; +#endif /* !WIN */ + +} // namespace d0 +} // namespace detail +} // namespace tbb + +#include +#if _WIN32||_WIN64 +#include /* mbstowcs_s */ +#endif +// Need these to work regardless of tools support +namespace tbb { +namespace detail { +namespace d1 { + enum notify_type {prepare=0, cancel, acquired, releasing, destroy}; + enum itt_domain_enum { ITT_DOMAIN_FLOW=0, ITT_DOMAIN_MAIN=1, ITT_DOMAIN_ALGO=2, ITT_NUM_DOMAINS }; +} // namespace d1 + +namespace r1 { + TBB_EXPORT void __TBB_EXPORTED_FUNC call_itt_notify(int t, void* ptr); + TBB_EXPORT void __TBB_EXPORTED_FUNC create_itt_sync(void* ptr, const tchar* objtype, const tchar* objname); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_make_task_group(d1::itt_domain_enum domain, void* group, unsigned long long group_extra, + void* parent, unsigned long long parent_extra, string_resource_index name_index); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_task_begin(d1::itt_domain_enum domain, void* task, unsigned long long task_extra, + void* parent, unsigned long long parent_extra, string_resource_index name_index); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_task_end(d1::itt_domain_enum domain); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_set_sync_name(void* obj, const tchar* name); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_metadata_str_add(d1::itt_domain_enum domain, void* addr, unsigned long long addr_extra, + string_resource_index key, const char* value); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_metadata_ptr_add(d1::itt_domain_enum domain, void* addr, unsigned long long addr_extra, + string_resource_index key, void* value); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_relation_add(d1::itt_domain_enum domain, void* addr0, unsigned long long addr0_extra, + 
itt_relation relation, void* addr1, unsigned long long addr1_extra); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_region_begin(d1::itt_domain_enum domain, void* region, unsigned long long region_extra, + void* parent, unsigned long long parent_extra, string_resource_index /* name_index */); + TBB_EXPORT void __TBB_EXPORTED_FUNC itt_region_end(d1::itt_domain_enum domain, void* region, unsigned long long region_extra); +} // namespace r1 + +namespace d1 { +#if TBB_USE_PROFILING_TOOLS && (_WIN32||_WIN64) + inline std::size_t multibyte_to_widechar(wchar_t* wcs, const char* mbs, std::size_t bufsize) { + std::size_t len; + mbstowcs_s(&len, wcs, bufsize, mbs, _TRUNCATE); + return len; // mbstowcs_s counts null terminator + } +#endif + +#if TBB_USE_PROFILING_TOOLS + inline void create_itt_sync(void *ptr, const char *objtype, const char *objname) { +#if (_WIN32||_WIN64) + std::size_t len_type = multibyte_to_widechar(nullptr, objtype, 0); + wchar_t *type = new wchar_t[len_type]; + multibyte_to_widechar(type, objtype, len_type); + std::size_t len_name = multibyte_to_widechar(nullptr, objname, 0); + wchar_t *name = new wchar_t[len_name]; + multibyte_to_widechar(name, objname, len_name); +#else // WIN + const char *type = objtype; + const char *name = objname; +#endif + r1::create_itt_sync(ptr, type, name); + +#if (_WIN32||_WIN64) + delete[] type; + delete[] name; +#endif // WIN + } + +// Distinguish notifications on task for reducing overheads +#if TBB_USE_PROFILING_TOOLS == 2 + inline void call_itt_task_notify(d1::notify_type t, void *ptr) { + r1::call_itt_notify(static_cast(t), ptr); + } +#else + inline void call_itt_task_notify(d1::notify_type, void *) {} +#endif // TBB_USE_PROFILING_TOOLS + + inline void call_itt_notify(d1::notify_type t, void *ptr) { + r1::call_itt_notify(static_cast(t), ptr); + } + +#if (_WIN32||_WIN64) && !__MINGW32__ + inline void itt_set_sync_name(void* obj, const wchar_t* name) { + r1::itt_set_sync_name(obj, name); + } + inline void itt_set_sync_name(void* obj, const char* name) { + std::size_t len_name = multibyte_to_widechar(nullptr, name, 0); + wchar_t *obj_name = new wchar_t[len_name]; + multibyte_to_widechar(obj_name, name, len_name); + r1::itt_set_sync_name(obj, obj_name); + delete[] obj_name; + } +#else + inline void itt_set_sync_name( void* obj, const char* name) { + r1::itt_set_sync_name(obj, name); + } +#endif //WIN + + inline void itt_make_task_group(itt_domain_enum domain, void* group, unsigned long long group_extra, + void* parent, unsigned long long parent_extra, string_resource_index name_index) { + r1::itt_make_task_group(domain, group, group_extra, parent, parent_extra, name_index); + } + + inline void itt_metadata_str_add( itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_resource_index key, const char *value ) { + r1::itt_metadata_str_add( domain, addr, addr_extra, key, value ); + } + + inline void register_node_addr(itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_resource_index key, void *value) { + r1::itt_metadata_ptr_add(domain, addr, addr_extra, key, value); + } + + inline void itt_relation_add( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, + itt_relation relation, void *addr1, unsigned long long addr1_extra ) { + r1::itt_relation_add( domain, addr0, addr0_extra, relation, addr1, addr1_extra ); + } + + inline void itt_task_begin( itt_domain_enum domain, void *task, unsigned long long task_extra, + void *parent, unsigned long long parent_extra, string_resource_index name_index ) 
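+    // (thin inline wrapper; forwards to the r1 runtime entry point declared above)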
{ + r1::itt_task_begin( domain, task, task_extra, parent, parent_extra, name_index ); + } + + inline void itt_task_end( itt_domain_enum domain ) { + r1::itt_task_end( domain ); + } + + inline void itt_region_begin( itt_domain_enum domain, void *region, unsigned long long region_extra, + void *parent, unsigned long long parent_extra, string_resource_index name_index ) { + r1::itt_region_begin( domain, region, region_extra, parent, parent_extra, name_index ); + } + + inline void itt_region_end( itt_domain_enum domain, void *region, unsigned long long region_extra ) { + r1::itt_region_end( domain, region, region_extra ); + } +#else + inline void create_itt_sync(void* /*ptr*/, const char* /*objtype*/, const char* /*objname*/) {} + + inline void call_itt_notify(notify_type /*t*/, void* /*ptr*/) {} + + inline void call_itt_task_notify(notify_type /*t*/, void* /*ptr*/) {} +#endif // TBB_USE_PROFILING_TOOLS + +#if TBB_USE_PROFILING_TOOLS && !(TBB_USE_PROFILING_TOOLS == 2) +class event { +/** This class supports user event traces through itt. + Common use-case is tagging data flow graph tasks (data-id) + and visualization by Intel Advisor Flow Graph Analyzer (FGA) **/ +// TODO: Replace implementation by itt user event api. + + const std::string my_name; + + static void emit_trace(const std::string &input) { + itt_metadata_str_add( ITT_DOMAIN_FLOW, nullptr, FLOW_NULL, USER_EVENT, ( "FGA::DATAID::" + input ).c_str() ); + } + +public: + event(const std::string &input) + : my_name( input ) + { } + + void emit() { + emit_trace(my_name); + } + + static void emit(const std::string &description) { + emit_trace(description); + } + +}; +#else // TBB_USE_PROFILING_TOOLS && !(TBB_USE_PROFILING_TOOLS == 2) +// Using empty struct if user event tracing is disabled: +struct event { + event(const std::string &) { } + + void emit() { } + + static void emit(const std::string &) { } +}; +#endif // TBB_USE_PROFILING_TOOLS && !(TBB_USE_PROFILING_TOOLS == 2) +} // namespace d1 +} // namespace detail + +namespace profiling { + using detail::d1::event; +} +} // namespace tbb + + +#endif /* __TBB_profiling_H */ diff --git a/src/tbb/include/oneapi/tbb/queuing_mutex.h b/src/tbb/include/oneapi/tbb/queuing_mutex.h new file mode 100644 index 000000000..f7ee5a94d --- /dev/null +++ b/src/tbb/include/oneapi/tbb/queuing_mutex.h @@ -0,0 +1,192 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_queuing_mutex_H +#define __TBB_queuing_mutex_H + +#include "detail/_namespace_injection.h" +#include "detail/_assert.h" +#include "detail/_utils.h" +#include "detail/_mutex_common.h" + +#include "profiling.h" + +#include + +namespace tbb { +namespace detail { +namespace d1 { + +//! Queuing mutex with local-only spinning. +/** @ingroup synchronization */ +class queuing_mutex { +public: + //! Construct unacquired mutex. 
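+    //! (Construction also registers the mutex with profiling tools via
+    //! create_itt_sync(), so it can be named and observed in traces.)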
+ queuing_mutex() noexcept { + create_itt_sync(this, "tbb::queuing_mutex", ""); + }; + + queuing_mutex(const queuing_mutex&) = delete; + queuing_mutex& operator=(const queuing_mutex&) = delete; + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + class scoped_lock { + //! Reset fields to mean "no lock held". + void reset() { + m_mutex = nullptr; + } + + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + scoped_lock() = default; + + //! Acquire lock on given mutex. + scoped_lock(queuing_mutex& m) { + acquire(m); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if (m_mutex) release(); + } + + //! No Copy + scoped_lock( const scoped_lock& ) = delete; + scoped_lock& operator=( const scoped_lock& ) = delete; + + //! Acquire lock on given mutex. + void acquire( queuing_mutex& m ) { + __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex"); + + // Must set all fields before the exchange, because once the + // exchange executes, *this becomes accessible to other threads. + m_mutex = &m; + m_next.store(nullptr, std::memory_order_relaxed); + m_going.store(0U, std::memory_order_relaxed); + + // x86 compare exchange operation always has a strong fence + // "sending" the fields initialized above to other processors. + scoped_lock* pred = m.q_tail.exchange(this); + if (pred) { + call_itt_notify(prepare, &m); + __TBB_ASSERT(pred->m_next.load(std::memory_order_relaxed) == nullptr, "the predecessor has another successor!"); + + pred->m_next.store(this, std::memory_order_release); + spin_wait_while_eq(m_going, 0U); + } + call_itt_notify(acquired, &m); + + } + + //! Acquire lock on given mutex if free (i.e. non-blocking) + bool try_acquire( queuing_mutex& m ) { + __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex"); + + // Must set all fields before the compare_exchange_strong, because once the + // compare_exchange_strong executes, *this becomes accessible to other threads. + m_next.store(nullptr, std::memory_order_relaxed); + m_going.store(0U, std::memory_order_relaxed); + + scoped_lock* expected = nullptr; + // The compare_exchange_strong must have release semantics, because we are + // "sending" the fields initialized above to other processors. + // x86 compare exchange operation always has a strong fence + if (!m.q_tail.compare_exchange_strong(expected, this, std::memory_order_acq_rel)) + return false; + + m_mutex = &m; + + call_itt_notify(acquired, &m); + return true; + } + + //! Release lock. + void release() + { + __TBB_ASSERT(this->m_mutex, "no lock acquired"); + + call_itt_notify(releasing, this->m_mutex); + + if (m_next.load(std::memory_order_relaxed) == nullptr) { + scoped_lock* expected = this; + if (m_mutex->q_tail.compare_exchange_strong(expected, nullptr)) { + // this was the only item in the queue, and the queue is now empty. + reset(); + return; + } + // Someone in the queue + spin_wait_while_eq(m_next, nullptr); + } + m_next.load(std::memory_order_acquire)->m_going.store(1U, std::memory_order_release); + + reset(); + } + + private: + //! The pointer to the mutex owned, or nullptr if not holding a mutex. + queuing_mutex* m_mutex{nullptr}; + + //! The pointer to the next competitor for a mutex + std::atomic m_next{nullptr}; + + //! The local spin-wait variable + /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of + zero-initialization. 
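+            The flag is written only by the predecessor in release() and is spun
+            on by the acquiring thread itself, which keeps the spinning local to
+            each waiter.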
Defining it as an entire word instead of + a byte seems to help performance slightly. */ + std::atomic m_going{0U}; + }; + + // Mutex traits + static constexpr bool is_rw_mutex = false; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = true; + +private: + //! The last competitor requesting the lock + std::atomic q_tail{nullptr}; + +}; + +#if TBB_USE_PROFILING_TOOLS +inline void set_name(queuing_mutex& obj, const char* name) { + itt_set_sync_name(&obj, name); +} +#if (_WIN32||_WIN64) +inline void set_name(queuing_mutex& obj, const wchar_t* name) { + itt_set_sync_name(&obj, name); +} +#endif //WIN +#else +inline void set_name(queuing_mutex&, const char*) {} +#if (_WIN32||_WIN64) +inline void set_name(queuing_mutex&, const wchar_t*) {} +#endif //WIN +#endif +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::queuing_mutex; +} // namespace v1 +namespace profiling { + using detail::d1::set_name; +} +} // namespace tbb + +#endif /* __TBB_queuing_mutex_H */ diff --git a/src/tbb/include/oneapi/tbb/queuing_rw_mutex.h b/src/tbb/include/oneapi/tbb/queuing_rw_mutex.h new file mode 100644 index 000000000..f8325dfd9 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/queuing_rw_mutex.h @@ -0,0 +1,207 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_queuing_rw_mutex_H +#define __TBB_queuing_rw_mutex_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_assert.h" +#include "detail/_mutex_common.h" + +#include "profiling.h" + +#include +#include + +namespace tbb { +namespace detail { +namespace r1 { +struct queuing_rw_mutex_impl; +} +namespace d1 { + +//! Queuing reader-writer mutex with local-only spinning. +/** Adapted from Krieger, Stumm, et al. pseudocode at + https://www.researchgate.net/publication/221083709_A_Fair_Fast_Scalable_Reader-Writer_Lock + @ingroup synchronization */ +class queuing_rw_mutex { + friend r1::queuing_rw_mutex_impl; +public: + //! Construct unacquired mutex. + queuing_rw_mutex() noexcept { + create_itt_sync(this, "tbb::queuing_rw_mutex", ""); + } + + //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-null + ~queuing_rw_mutex() { + __TBB_ASSERT(q_tail.load(std::memory_order_relaxed) == nullptr, "destruction of an acquired mutex"); + } + + //! No Copy + queuing_rw_mutex(const queuing_rw_mutex&) = delete; + queuing_rw_mutex& operator=(const queuing_rw_mutex&) = delete; + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + class scoped_lock { + friend r1::queuing_rw_mutex_impl; + //! Initialize fields to mean "no lock held". 
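+        //! (Under TBB_USE_ASSERT the state and link fields are additionally
+        //! poisoned with invalid values so that use of a stale node is caught.)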
+ void initialize() { + my_mutex = nullptr; + my_internal_lock.store(0, std::memory_order_relaxed); + my_going.store(0, std::memory_order_relaxed); +#if TBB_USE_ASSERT + my_state = 0xFF; // Set to invalid state + my_next.store(reinterpret_cast(reinterpret_cast(-1)), std::memory_order_relaxed); + my_prev.store(reinterpret_cast(reinterpret_cast(-1)), std::memory_order_relaxed); +#endif /* TBB_USE_ASSERT */ + } + + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + scoped_lock() {initialize();} + + //! Acquire lock on given mutex. + scoped_lock( queuing_rw_mutex& m, bool write=true ) { + initialize(); + acquire(m,write); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if( my_mutex ) release(); + } + + //! No Copy + scoped_lock(const scoped_lock&) = delete; + scoped_lock& operator=(const scoped_lock&) = delete; + + //! Acquire lock on given mutex. + void acquire( queuing_rw_mutex& m, bool write=true ); + + //! Acquire lock on given mutex if free (i.e. non-blocking) + bool try_acquire( queuing_rw_mutex& m, bool write=true ); + + //! Release lock. + void release(); + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade_to_writer(); + + //! Downgrade writer to become a reader. + bool downgrade_to_reader(); + + bool is_writer() const; + + private: + //! The pointer to the mutex owned, or nullptr if not holding a mutex. + queuing_rw_mutex* my_mutex; + + //! The 'pointer' to the previous and next competitors for a mutex + std::atomic my_prev; + std::atomic my_next; + + using state_t = unsigned char ; + + //! State of the request: reader, writer, active reader, other service states + std::atomic my_state; + + //! The local spin-wait variable + /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */ + std::atomic my_going; + + //! A tiny internal lock + std::atomic my_internal_lock; + }; + + // Mutex traits + static constexpr bool is_rw_mutex = true; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = true; + +private: + //! 
The last competitor requesting the lock + std::atomic q_tail{nullptr}; +}; +#if TBB_USE_PROFILING_TOOLS +inline void set_name(queuing_rw_mutex& obj, const char* name) { + itt_set_sync_name(&obj, name); +} +#if (_WIN32||_WIN64) +inline void set_name(queuing_rw_mutex& obj, const wchar_t* name) { + itt_set_sync_name(&obj, name); +} +#endif //WIN +#else +inline void set_name(queuing_rw_mutex&, const char*) {} +#if (_WIN32||_WIN64) +inline void set_name(queuing_rw_mutex&, const wchar_t*) {} +#endif //WIN +#endif +} // namespace d1 + +namespace r1 { +TBB_EXPORT void acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool); +TBB_EXPORT bool try_acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool); +TBB_EXPORT void release(d1::queuing_rw_mutex::scoped_lock&); +TBB_EXPORT bool upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock&); +TBB_EXPORT bool downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock&); +TBB_EXPORT bool is_writer(const d1::queuing_rw_mutex::scoped_lock&); +} // namespace r1 + +namespace d1 { + + +inline void queuing_rw_mutex::scoped_lock::acquire(queuing_rw_mutex& m,bool write) { + r1::acquire(m, *this, write); +} + +inline bool queuing_rw_mutex::scoped_lock::try_acquire(queuing_rw_mutex& m, bool write) { + return r1::try_acquire(m, *this, write); +} + +inline void queuing_rw_mutex::scoped_lock::release() { + r1::release(*this); +} + +inline bool queuing_rw_mutex::scoped_lock::upgrade_to_writer() { + return r1::upgrade_to_writer(*this); +} + +inline bool queuing_rw_mutex::scoped_lock::downgrade_to_reader() { + return r1::downgrade_to_reader(*this); +} + +inline bool queuing_rw_mutex::scoped_lock::is_writer() const { + return r1::is_writer(*this); +} +} // namespace d1 + +} // namespace detail + +inline namespace v1 { +using detail::d1::queuing_rw_mutex; +} // namespace v1 +namespace profiling { + using detail::d1::set_name; +} +} // namespace tbb + +#endif /* __TBB_queuing_rw_mutex_H */ diff --git a/src/tbb/include/oneapi/tbb/rw_mutex.h b/src/tbb/include/oneapi/tbb/rw_mutex.h new file mode 100644 index 000000000..c3fbaf657 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/rw_mutex.h @@ -0,0 +1,216 @@ +/* + Copyright (c) 2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_rw_mutex_H +#define __TBB_rw_mutex_H + +#include "detail/_namespace_injection.h" +#include "detail/_utils.h" +#include "detail/_waitable_atomic.h" +#include "detail/_scoped_lock.h" +#include "detail/_mutex_common.h" +#include "profiling.h" + +namespace tbb { +namespace detail { +namespace d1 { + +class rw_mutex { +public: + //! Constructors + rw_mutex() noexcept : m_state(0) { + create_itt_sync(this, "tbb::rw_mutex", ""); + } + + //! Destructor + ~rw_mutex() { + __TBB_ASSERT(!m_state.load(std::memory_order_relaxed), "destruction of an acquired mutex"); + } + + //! No Copy + rw_mutex(const rw_mutex&) = delete; + rw_mutex& operator=(const rw_mutex&) = delete; + + using scoped_lock = rw_scoped_lock; + + //! 
Mutex traits + static constexpr bool is_rw_mutex = true; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = false; + + //! Acquire lock + void lock() { + call_itt_notify(prepare, this); + while (!try_lock()) { + if (!(m_state.load(std::memory_order_relaxed) & WRITER_PENDING)) { // no pending writers + m_state |= WRITER_PENDING; + } + + auto wakeup_condition = [&] { return !(m_state.load(std::memory_order_relaxed) & BUSY); }; + adaptive_wait_on_address(this, wakeup_condition, WRITER_CONTEXT); + } + + call_itt_notify(acquired, this); + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { + // for a writer: only possible to acquire if no active readers or writers + // Use relaxed memory fence is OK here because + // Acquire memory fence guaranteed by compare_exchange_strong() + state_type s = m_state.load(std::memory_order_relaxed); + if (!(s & BUSY)) { // no readers, no writers; mask is 1..1101 + if (m_state.compare_exchange_strong(s, WRITER)) { + call_itt_notify(acquired, this); + return true; // successfully stored writer flag + } + } + return false; + } + + //! Release lock + void unlock() { + call_itt_notify(releasing, this); + state_type curr_state = (m_state &= READERS | WRITER_PENDING); // Returns current state + + if (curr_state & WRITER_PENDING) { + r1::notify_by_address(this, WRITER_CONTEXT); + } else { + // It's possible that WRITER sleeps without WRITER_PENDING, + // because other thread might clear this bit at upgrade() + r1::notify_by_address_all(this); + } + } + + //! Lock shared ownership mutex + void lock_shared() { + call_itt_notify(prepare, this); + while (!try_lock_shared()) { + state_type has_writer = WRITER | WRITER_PENDING; + auto wakeup_condition = [&] { return !(m_state.load(std::memory_order_relaxed) & has_writer); }; + adaptive_wait_on_address(this, wakeup_condition, READER_CONTEXT); + } + __TBB_ASSERT(m_state.load(std::memory_order_relaxed) & READERS, "invalid state of a read lock: no readers"); + } + + //! Try lock shared ownership mutex + bool try_lock_shared() { + // for a reader: acquire if no active or waiting writers + // Use relaxed memory fence is OK here because + // Acquire memory fence guaranteed by fetch_add() + state_type has_writer = WRITER | WRITER_PENDING; + if (!(m_state.load(std::memory_order_relaxed) & has_writer)) { + if (m_state.fetch_add(ONE_READER) & has_writer) { + m_state -= ONE_READER; + r1::notify_by_address(this, WRITER_CONTEXT); + } else { + call_itt_notify(acquired, this); + return true; // successfully stored increased number of readers + } + } + return false; + } + + //! Unlock shared ownership mutex + void unlock_shared() { + __TBB_ASSERT(m_state.load(std::memory_order_relaxed) & READERS, "invalid state of a read lock: no readers"); + call_itt_notify(releasing, this); + + state_type curr_state = (m_state -= ONE_READER); // Returns current state + + if (curr_state & (WRITER_PENDING)) { + r1::notify_by_address(this, WRITER_CONTEXT); + } else { + // It's possible that WRITER sleeps without WRITER_PENDING, + // because other thread might clear this bit at upgrade() + r1::notify_by_address_all(this); + } + } + +private: + /** Internal non ISO C++ standard API **/ + //! This API is used through the scoped_lock class + + //! Upgrade reader to become a writer. 
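+    //! (Succeeds in place only when no other writer is pending or this thread is
+    //! the sole reader; otherwise it falls back to unlock_shared() plus lock().)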
+ /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade() { + state_type s = m_state.load(std::memory_order_relaxed); + __TBB_ASSERT(s & READERS, "invalid state before upgrade: no readers "); + // Check and set writer-pending flag. + // Required conditions: either no pending writers, or we are the only reader + // (with multiple readers and pending writer, another upgrade could have been requested) + while ((s & READERS) == ONE_READER || !(s & WRITER_PENDING)) { + if (m_state.compare_exchange_strong(s, s | WRITER | WRITER_PENDING)) { + auto wakeup_condition = [&] { return (m_state.load(std::memory_order_relaxed) & READERS) == ONE_READER; }; + while ((m_state.load(std::memory_order_relaxed) & READERS) != ONE_READER) { + adaptive_wait_on_address(this, wakeup_condition, WRITER_CONTEXT); + } + + __TBB_ASSERT((m_state.load(std::memory_order_relaxed) & (WRITER_PENDING|WRITER)) == (WRITER_PENDING | WRITER), + "invalid state when upgrading to writer"); + // Both new readers and writers are blocked at this time + m_state -= (ONE_READER + WRITER_PENDING); + return true; // successfully upgraded + } + } + // Slow reacquire + unlock_shared(); + lock(); + return false; + } + + //! Downgrade writer to a reader + void downgrade() { + __TBB_ASSERT(m_state.load(std::memory_order_relaxed) & WRITER, nullptr), + call_itt_notify(releasing, this); + m_state += (ONE_READER - WRITER); + + if (!(m_state & WRITER_PENDING)) { + r1::notify_by_address(this, READER_CONTEXT); + } + + __TBB_ASSERT(m_state.load(std::memory_order_relaxed) & READERS, "invalid state after downgrade: no readers"); + } + + using state_type = std::intptr_t; + static constexpr state_type WRITER = 1; + static constexpr state_type WRITER_PENDING = 2; + static constexpr state_type READERS = ~(WRITER | WRITER_PENDING); + static constexpr state_type ONE_READER = 4; + static constexpr state_type BUSY = WRITER | READERS; + + using context_type = std::uintptr_t; + static constexpr context_type WRITER_CONTEXT = 0; + static constexpr context_type READER_CONTEXT = 1; + friend scoped_lock; + //! State of lock + /** Bit 0 = writer is holding lock + Bit 1 = request by a writer to acquire lock (hint to readers to wait) + Bit 2..N = number of readers holding lock */ + std::atomic m_state; +}; // class rw_mutex + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::rw_mutex; +} // namespace v1 + +} // namespace tbb + +#endif // __TBB_rw_mutex_H diff --git a/src/tbb/include/oneapi/tbb/scalable_allocator.h b/src/tbb/include/oneapi/tbb/scalable_allocator.h new file mode 100644 index 000000000..31650a0a7 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/scalable_allocator.h @@ -0,0 +1,335 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_scalable_allocator_H +#define __TBB_scalable_allocator_H + +#ifdef __cplusplus +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/detail/_namespace_injection.h" +#include +#include +#include /* std::bad_alloc() */ +#else +#include "oneapi/tbb/detail/_export.h" +#include /* Need ptrdiff_t and size_t from here. */ +#if !defined(_MSC_VER) || defined(__clang__) +#include /* Need intptr_t from here. */ +#endif +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if _MSC_VER + #define __TBB_EXPORTED_FUNC __cdecl +#else + #define __TBB_EXPORTED_FUNC +#endif + +/** The "malloc" analogue to allocate block of memory of size bytes. + * @ingroup memory_allocation */ +TBBMALLOC_EXPORT void* __TBB_EXPORTED_FUNC scalable_malloc(size_t size); + +/** The "free" analogue to discard a previously allocated piece of memory. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT void __TBB_EXPORTED_FUNC scalable_free(void* ptr); + +/** The "realloc" analogue complementing scalable_malloc. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT void* __TBB_EXPORTED_FUNC scalable_realloc(void* ptr, size_t size); + +/** The "calloc" analogue complementing scalable_malloc. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT void* __TBB_EXPORTED_FUNC scalable_calloc(size_t nobj, size_t size); + +/** The "posix_memalign" analogue. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT int __TBB_EXPORTED_FUNC scalable_posix_memalign(void** memptr, size_t alignment, size_t size); + +/** The "_aligned_malloc" analogue. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT void* __TBB_EXPORTED_FUNC scalable_aligned_malloc(size_t size, size_t alignment); + +/** The "_aligned_realloc" analogue. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT void* __TBB_EXPORTED_FUNC scalable_aligned_realloc(void* ptr, size_t size, size_t alignment); + +/** The "_aligned_free" analogue. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT void __TBB_EXPORTED_FUNC scalable_aligned_free(void* ptr); + +/** The analogue of _msize/malloc_size/malloc_usable_size. + Returns the usable size of a memory block previously allocated by scalable_*, + or 0 (zero) if ptr does not point to such a block. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT size_t __TBB_EXPORTED_FUNC scalable_msize(void* ptr); + +/* Results for scalable_allocation_* functions */ +typedef enum { + TBBMALLOC_OK, + TBBMALLOC_INVALID_PARAM, + TBBMALLOC_UNSUPPORTED, + TBBMALLOC_NO_MEMORY, + TBBMALLOC_NO_EFFECT +} ScalableAllocationResult; + +/* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages. + scalable_allocation_mode call has priority over environment variable. */ +typedef enum { + TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */ + /* deprecated, kept for backward compatibility only */ + USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES, + /* try to limit memory consumption value (Bytes), clean internal buffers + if limit is exceeded, but not prevents from requesting memory from OS */ + TBBMALLOC_SET_SOFT_HEAP_LIMIT, + /* Lower bound for the size (Bytes), that is interpreted as huge + * and not released during regular cleanup operations. */ + TBBMALLOC_SET_HUGE_SIZE_THRESHOLD +} AllocationModeParam; + +/** Set TBB allocator-specific allocation modes. 
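+    For example (illustrative), scalable_allocation_mode(TBBMALLOC_USE_HUGE_PAGES, 1)
+    asks the allocator to back allocations with huge pages where supported.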
+ @ingroup memory_allocation */ +TBBMALLOC_EXPORT int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value); + +typedef enum { + /* Clean internal allocator buffers for all threads. + Returns TBBMALLOC_NO_EFFECT if no buffers cleaned, + TBBMALLOC_OK if some memory released from buffers. */ + TBBMALLOC_CLEAN_ALL_BUFFERS, + /* Clean internal allocator buffer for current thread only. + Return values same as for TBBMALLOC_CLEAN_ALL_BUFFERS. */ + TBBMALLOC_CLEAN_THREAD_BUFFERS +} ScalableAllocationCmd; + +/** Call TBB allocator-specific commands. + @ingroup memory_allocation */ +TBBMALLOC_EXPORT int __TBB_EXPORTED_FUNC scalable_allocation_command(int cmd, void *param); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#ifdef __cplusplus + +//! The namespace rml contains components of low-level memory pool interface. +namespace rml { +class MemoryPool; + +typedef void *(*rawAllocType)(std::intptr_t pool_id, std::size_t &bytes); +// returns non-zero in case of error +typedef int (*rawFreeType)(std::intptr_t pool_id, void* raw_ptr, std::size_t raw_bytes); + +struct MemPoolPolicy { + enum { + TBBMALLOC_POOL_VERSION = 1 + }; + + rawAllocType pAlloc; + rawFreeType pFree; + // granularity of pAlloc allocations. 0 means default used. + std::size_t granularity; + int version; + // all memory consumed at 1st pAlloc call and never returned, + // no more pAlloc calls after 1st + unsigned fixedPool : 1, + // memory consumed but returned only at pool termination + keepAllMemory : 1, + reserved : 30; + + MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_, + std::size_t granularity_ = 0, bool fixedPool_ = false, + bool keepAllMemory_ = false) : + pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION), + fixedPool(fixedPool_), keepAllMemory(keepAllMemory_), + reserved(0) {} +}; + +// enums have same values as appropriate enums from ScalableAllocationResult +// TODO: use ScalableAllocationResult in pool_create directly +enum MemPoolError { + // pool created successfully + POOL_OK = TBBMALLOC_OK, + // invalid policy parameters found + INVALID_POLICY = TBBMALLOC_INVALID_PARAM, + // requested pool policy is not supported by allocator library + UNSUPPORTED_POLICY = TBBMALLOC_UNSUPPORTED, + // lack of memory during pool creation + NO_MEMORY = TBBMALLOC_NO_MEMORY, + // action takes no effect + NO_EFFECT = TBBMALLOC_NO_EFFECT +}; + +TBBMALLOC_EXPORT MemPoolError pool_create_v1(std::intptr_t pool_id, const MemPoolPolicy *policy, + rml::MemoryPool **pool); + +TBBMALLOC_EXPORT bool pool_destroy(MemoryPool* memPool); +TBBMALLOC_EXPORT void *pool_malloc(MemoryPool* memPool, std::size_t size); +TBBMALLOC_EXPORT void *pool_realloc(MemoryPool* memPool, void *object, std::size_t size); +TBBMALLOC_EXPORT void *pool_aligned_malloc(MemoryPool* mPool, std::size_t size, std::size_t alignment); +TBBMALLOC_EXPORT void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, std::size_t size, std::size_t alignment); +TBBMALLOC_EXPORT bool pool_reset(MemoryPool* memPool); +TBBMALLOC_EXPORT bool pool_free(MemoryPool *memPool, void *object); +TBBMALLOC_EXPORT MemoryPool *pool_identify(void *object); +TBBMALLOC_EXPORT std::size_t pool_msize(MemoryPool *memPool, void *object); + +} // namespace rml + +namespace tbb { +namespace detail { +namespace d1 { + +// keep throw in a separate function to prevent code bloat +template +void throw_exception(const E &e) { +#if TBB_USE_EXCEPTIONS + throw e; +#else + suppress_unused_warning(e); +#endif +} + +template +class 
scalable_allocator { +public: + using value_type = T; + using propagate_on_container_move_assignment = std::true_type; + + //! Always defined for TBB containers + using is_always_equal = std::true_type; + + scalable_allocator() = default; + template scalable_allocator(const scalable_allocator&) noexcept {} + + //! Allocate space for n objects. + __TBB_nodiscard T* allocate(std::size_t n) { + T* p = static_cast(scalable_malloc(n * sizeof(value_type))); + if (!p) { + throw_exception(std::bad_alloc()); + } + return p; + } + + //! Free previously allocated block of memory + void deallocate(T* p, std::size_t) { + scalable_free(p); + } + +#if TBB_ALLOCATOR_TRAITS_BROKEN + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using difference_type = std::ptrdiff_t; + using size_type = std::size_t; + template struct rebind { + using other = scalable_allocator; + }; + //! Largest value for which method allocate might succeed. + size_type max_size() const noexcept { + size_type absolutemax = static_cast(-1) / sizeof (value_type); + return (absolutemax > 0 ? absolutemax : 1); + } + template + void construct(U *p, Args&&... args) + { ::new((void *)p) U(std::forward(args)...); } + void destroy(pointer p) { p->~value_type(); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +#endif // TBB_ALLOCATOR_TRAITS_BROKEN + +}; + +#if TBB_ALLOCATOR_TRAITS_BROKEN + template<> + class scalable_allocator { + public: + using pointer = void*; + using const_pointer = const void*; + using value_type = void; + template struct rebind { + using other = scalable_allocator; + }; + }; +#endif + +template +inline bool operator==(const scalable_allocator&, const scalable_allocator&) noexcept { return true; } + +#if !__TBB_CPP20_COMPARISONS_PRESENT +template +inline bool operator!=(const scalable_allocator&, const scalable_allocator&) noexcept { return false; } +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +//! C++17 memory resource implementation for scalable allocator +//! ISO C++ Section 23.12.2 +class scalable_resource_impl : public std::pmr::memory_resource { +private: + void* do_allocate(std::size_t bytes, std::size_t alignment) override { + void* p = scalable_aligned_malloc(bytes, alignment); + if (!p) { + throw_exception(std::bad_alloc()); + } + return p; + } + + void do_deallocate(void* ptr, std::size_t /*bytes*/, std::size_t /*alignment*/) override { + scalable_free(ptr); + } + + //! Memory allocated by one instance of scalable_resource_impl could be deallocated by any + //! other instance of this class + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + return this == &other || +#if __TBB_USE_OPTIONAL_RTTI + dynamic_cast(&other) != nullptr; +#else + false; +#endif + } +}; + +//! 
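+// A usage sketch (illustrative only, assuming C++17 <memory_resource> support
+// is available, as guarded by __TBB_CPP17_MEMORY_RESOURCE_PRESENT above):
+//
+//     std::pmr::vector<int> v{tbb::scalable_memory_resource()};
+//     v.resize(1024);   // element storage obtained via scalable_aligned_malloc
+//!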
Global scalable allocator memory resource provider +inline std::pmr::memory_resource* scalable_memory_resource() noexcept { + static tbb::detail::d1::scalable_resource_impl scalable_res; + return &scalable_res; +} + +#endif // __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::scalable_allocator; +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +using detail::d1::scalable_memory_resource; +#endif +} // namespace v1 + +} // namespace tbb + +#endif /* __cplusplus */ + +#endif /* __TBB_scalable_allocator_H */ diff --git a/src/tbb/include/oneapi/tbb/spin_mutex.h b/src/tbb/include/oneapi/tbb/spin_mutex.h new file mode 100644 index 000000000..e38c47c9d --- /dev/null +++ b/src/tbb/include/oneapi/tbb/spin_mutex.h @@ -0,0 +1,134 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_spin_mutex_H +#define __TBB_spin_mutex_H + +#include "detail/_namespace_injection.h" +#include "detail/_mutex_common.h" + +#include "profiling.h" + +#include "detail/_assert.h" +#include "detail/_utils.h" +#include "detail/_scoped_lock.h" + +#include + +namespace tbb { +namespace detail { +namespace d1 { + +#if __TBB_TSX_INTRINSICS_PRESENT +class rtm_mutex; +#endif + +/** A spin_mutex is a low-level synchronization primitive. + While locked, it causes the waiting threads to spin in a loop until the lock is released. + It should be used only for locking short critical sections + (typically less than 20 instructions) when fairness is not an issue. + If zero-initialized, the mutex is considered unheld. + @ingroup synchronization */ +class spin_mutex { +public: + //! Constructors + spin_mutex() noexcept : m_flag(false) { + create_itt_sync(this, "tbb::spin_mutex", ""); + }; + + //! Destructor + ~spin_mutex() = default; + + //! No Copy + spin_mutex(const spin_mutex&) = delete; + spin_mutex& operator=(const spin_mutex&) = delete; + + using scoped_lock = unique_scoped_lock; + + //! Mutex traits + static constexpr bool is_rw_mutex = false; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = false; + + //! Acquire lock + /** Spin if the lock is taken */ + void lock() { + atomic_backoff backoff; + call_itt_notify(prepare, this); + while (m_flag.exchange(true)) backoff.pause(); + call_itt_notify(acquired, this); + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { + bool result = !m_flag.exchange(true); + if (result) { + call_itt_notify(acquired, this); + } + return result; + } + + //! 
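+    // (The store in unlock() below uses release ordering, so writes made inside
+    // the critical section are visible to the thread whose exchange() next
+    // acquires m_flag.)
+    //!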
Release lock + void unlock() { + call_itt_notify(releasing, this); + m_flag.store(false, std::memory_order_release); + } + +protected: + std::atomic m_flag; +}; // class spin_mutex + +#if TBB_USE_PROFILING_TOOLS +inline void set_name(spin_mutex& obj, const char* name) { + itt_set_sync_name(&obj, name); +} +#if (_WIN32||_WIN64) +inline void set_name(spin_mutex& obj, const wchar_t* name) { + itt_set_sync_name(&obj, name); +} +#endif //WIN +#else +inline void set_name(spin_mutex&, const char*) {} +#if (_WIN32||_WIN64) +inline void set_name(spin_mutex&, const wchar_t*) {} +#endif // WIN +#endif +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::spin_mutex; +} // namespace v1 +namespace profiling { + using detail::d1::set_name; +} +} // namespace tbb + +#include "detail/_rtm_mutex.h" + +namespace tbb { +inline namespace v1 { +#if __TBB_TSX_INTRINSICS_PRESENT + using speculative_spin_mutex = detail::d1::rtm_mutex; +#else + using speculative_spin_mutex = detail::d1::spin_mutex; +#endif +} +} + +#endif /* __TBB_spin_mutex_H */ + diff --git a/src/tbb/include/oneapi/tbb/spin_rw_mutex.h b/src/tbb/include/oneapi/tbb/spin_rw_mutex.h new file mode 100644 index 000000000..3fdae3500 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/spin_rw_mutex.h @@ -0,0 +1,229 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_spin_rw_mutex_H +#define __TBB_spin_rw_mutex_H + +#include "detail/_namespace_injection.h" +#include "detail/_mutex_common.h" + +#include "profiling.h" + +#include "detail/_assert.h" +#include "detail/_utils.h" +#include "detail/_scoped_lock.h" + +#include + +namespace tbb { +namespace detail { +namespace d1 { + +#if __TBB_TSX_INTRINSICS_PRESENT +class rtm_rw_mutex; +#endif + +//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference +/** @ingroup synchronization */ +class spin_rw_mutex { +public: + //! Constructors + spin_rw_mutex() noexcept : m_state(0) { + create_itt_sync(this, "tbb::spin_rw_mutex", ""); + } + + //! Destructor + ~spin_rw_mutex() { + __TBB_ASSERT(!m_state, "destruction of an acquired mutex"); + } + + //! No Copy + spin_rw_mutex(const spin_rw_mutex&) = delete; + spin_rw_mutex& operator=(const spin_rw_mutex&) = delete; + + using scoped_lock = rw_scoped_lock; + + //! Mutex traits + static constexpr bool is_rw_mutex = true; + static constexpr bool is_recursive_mutex = false; + static constexpr bool is_fair_mutex = false; + + //! Acquire lock + void lock() { + call_itt_notify(prepare, this); + for (atomic_backoff backoff; ; backoff.pause()) { + state_type s = m_state.load(std::memory_order_relaxed); + if (!(s & BUSY)) { // no readers, no writers + if (m_state.compare_exchange_strong(s, WRITER)) + break; // successfully stored writer flag + backoff.reset(); // we could be very close to complete op. + } else if (!(s & WRITER_PENDING)) { // no pending writers + m_state |= WRITER_PENDING; + } + } + call_itt_notify(acquired, this); + } + + //! 
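+    // Usage sketch (illustrative only): the scoped_lock interface is the usual
+    // RAII form, e.g.
+    //     tbb::spin_rw_mutex m;
+    //     { tbb::spin_rw_mutex::scoped_lock lk(m, /*write=*/ false); /* read */ }
+    //!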
Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { + // for a writer: only possible to acquire if no active readers or writers + state_type s = m_state.load(std::memory_order_relaxed); + if (!(s & BUSY)) { // no readers, no writers; mask is 1..1101 + if (m_state.compare_exchange_strong(s, WRITER)) { + call_itt_notify(acquired, this); + return true; // successfully stored writer flag + } + } + return false; + } + + //! Release lock + void unlock() { + call_itt_notify(releasing, this); + m_state &= READERS; + } + + //! Lock shared ownership mutex + void lock_shared() { + call_itt_notify(prepare, this); + for (atomic_backoff b; ; b.pause()) { + state_type s = m_state.load(std::memory_order_relaxed); + if (!(s & (WRITER | WRITER_PENDING))) { // no writer or write requests + state_type prev_state = m_state.fetch_add(ONE_READER); + if (!(prev_state & WRITER)) { + break; // successfully stored increased number of readers + } + // writer got there first, undo the increment + m_state -= ONE_READER; + } + } + call_itt_notify(acquired, this); + __TBB_ASSERT(m_state & READERS, "invalid state of a read lock: no readers"); + } + + //! Try lock shared ownership mutex + bool try_lock_shared() { + // for a reader: acquire if no active or waiting writers + state_type s = m_state.load(std::memory_order_relaxed); + if (!(s & (WRITER | WRITER_PENDING))) { // no writers + state_type prev_state = m_state.fetch_add(ONE_READER); + if (!(prev_state & WRITER)) { // got the lock + call_itt_notify(acquired, this); + return true; // successfully stored increased number of readers + } + // writer got there first, undo the increment + m_state -= ONE_READER; + } + return false; + } + + //! Unlock shared ownership mutex + void unlock_shared() { + __TBB_ASSERT(m_state & READERS, "invalid state of a read lock: no readers"); + call_itt_notify(releasing, this); + m_state -= ONE_READER; + } + +protected: + /** Internal non ISO C++ standard API **/ + //! This API is used through the scoped_lock class + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade() { + state_type s = m_state.load(std::memory_order_relaxed); + __TBB_ASSERT(s & READERS, "invalid state before upgrade: no readers "); + // Check and set writer-pending flag. + // Required conditions: either no pending writers, or we are the only reader + // (with multiple readers and pending writer, another upgrade could have been requested) + while ((s & READERS) == ONE_READER || !(s & WRITER_PENDING)) { + if (m_state.compare_exchange_strong(s, s | WRITER | WRITER_PENDING)) { + atomic_backoff backoff; + while ((m_state.load(std::memory_order_relaxed) & READERS) != ONE_READER) backoff.pause(); + __TBB_ASSERT((m_state & (WRITER_PENDING|WRITER)) == (WRITER_PENDING | WRITER), "invalid state when upgrading to writer"); + // Both new readers and writers are blocked at this time + m_state -= (ONE_READER + WRITER_PENDING); + return true; // successfully upgraded + } + } + // Slow reacquire + unlock_shared(); + lock(); + return false; + } + + //! 
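+    // (Atomically trades the writer bit for one reader slot, so the caller keeps
+    // shared ownership without a release/re-acquire window.)
+    //!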
Downgrade writer to a reader + void downgrade() { + call_itt_notify(releasing, this); + m_state += (ONE_READER - WRITER); + __TBB_ASSERT(m_state & READERS, "invalid state after downgrade: no readers"); + } + + using state_type = std::intptr_t; + static constexpr state_type WRITER = 1; + static constexpr state_type WRITER_PENDING = 2; + static constexpr state_type READERS = ~(WRITER | WRITER_PENDING); + static constexpr state_type ONE_READER = 4; + static constexpr state_type BUSY = WRITER | READERS; + friend scoped_lock; + //! State of lock + /** Bit 0 = writer is holding lock + Bit 1 = request by a writer to acquire lock (hint to readers to wait) + Bit 2..N = number of readers holding lock */ + std::atomic m_state; +}; // class spin_rw_mutex + +#if TBB_USE_PROFILING_TOOLS +inline void set_name(spin_rw_mutex& obj, const char* name) { + itt_set_sync_name(&obj, name); +} +#if (_WIN32||_WIN64) +inline void set_name(spin_rw_mutex& obj, const wchar_t* name) { + itt_set_sync_name(&obj, name); +} +#endif // WIN +#else +inline void set_name(spin_rw_mutex&, const char*) {} +#if (_WIN32||_WIN64) +inline void set_name(spin_rw_mutex&, const wchar_t*) {} +#endif // WIN +#endif +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::spin_rw_mutex; +} // namespace v1 +namespace profiling { + using detail::d1::set_name; +} +} // namespace tbb + +#include "detail/_rtm_rw_mutex.h" + +namespace tbb { +inline namespace v1 { +#if __TBB_TSX_INTRINSICS_PRESENT + using speculative_spin_rw_mutex = detail::d1::rtm_rw_mutex; +#else + using speculative_spin_rw_mutex = detail::d1::spin_rw_mutex; +#endif +} +} + +#endif /* __TBB_spin_rw_mutex_H */ + diff --git a/src/tbb/include/oneapi/tbb/task.h b/src/tbb/include/oneapi/tbb/task.h new file mode 100644 index 000000000..82ce1df6c --- /dev/null +++ b/src/tbb/include/oneapi/tbb/task.h @@ -0,0 +1,37 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_H +#define __TBB_task_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_task.h" + +namespace tbb { +inline namespace v1 { +namespace task { +#if __TBB_RESUMABLE_TASKS + using detail::d1::suspend_point; + using detail::d1::resume; + using detail::d1::suspend; +#endif /* __TBB_RESUMABLE_TASKS */ + using detail::d1::current_context; +} // namespace task +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_task_H */ diff --git a/src/tbb/include/oneapi/tbb/task_arena.h b/src/tbb/include/oneapi/tbb/task_arena.h new file mode 100644 index 000000000..5ce41d99c --- /dev/null +++ b/src/tbb/include/oneapi/tbb/task_arena.h @@ -0,0 +1,499 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_arena_H +#define __TBB_task_arena_H + +#include "detail/_config.h" + +#include "detail/_aligned_space.h" +#include "detail/_attach.h" +#include "detail/_exception.h" +#include "detail/_namespace_injection.h" +#include "detail/_small_object_pool.h" +#include "detail/_task.h" + +#include "detail/_task_handle.h" + +#if __TBB_ARENA_BINDING +#include "info.h" +#endif /*__TBB_ARENA_BINDING*/ + +namespace tbb { +namespace detail { + +namespace d1 { + +template +class task_arena_function : public delegate_base { + F &my_func; + aligned_space my_return_storage; + bool my_constructed{false}; + // The function should be called only once. + bool operator()() const override { + new (my_return_storage.begin()) R(my_func()); + return true; + } +public: + task_arena_function(F& f) : my_func(f) {} + // The function can be called only after operator() and only once. + R consume_result() { + my_constructed = true; + return std::move(*(my_return_storage.begin())); + } + ~task_arena_function() override { + if (my_constructed) { + my_return_storage.begin()->~R(); + } + } +}; + +template +class task_arena_function : public delegate_base { + F &my_func; + bool operator()() const override { + my_func(); + return true; + } +public: + task_arena_function(F& f) : my_func(f) {} + void consume_result() const {} + + friend class task_arena_base; +}; + +class task_arena_base; +class task_scheduler_observer; +} // namespace d1 + +namespace r1 { +class arena; +struct task_arena_impl; + +TBB_EXPORT void __TBB_EXPORTED_FUNC observe(d1::task_scheduler_observer&, bool); +TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_arena_base&); +TBB_EXPORT void __TBB_EXPORTED_FUNC terminate(d1::task_arena_base&); +TBB_EXPORT bool __TBB_EXPORTED_FUNC attach(d1::task_arena_base&); +TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&); +TBB_EXPORT void __TBB_EXPORTED_FUNC wait(d1::task_arena_base&); +TBB_EXPORT int __TBB_EXPORTED_FUNC max_concurrency(const d1::task_arena_base*); +TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base& d, std::intptr_t); + +TBB_EXPORT void __TBB_EXPORTED_FUNC enqueue(d1::task&, d1::task_arena_base*); +TBB_EXPORT void __TBB_EXPORTED_FUNC enqueue(d1::task&, d1::task_group_context&, d1::task_arena_base*); +TBB_EXPORT void __TBB_EXPORTED_FUNC submit(d1::task&, d1::task_group_context&, arena*, std::uintptr_t); +} // namespace r1 + +namespace d2 { +inline void enqueue_impl(task_handle&& th, d1::task_arena_base* ta) { + __TBB_ASSERT(th != nullptr, "Attempt to schedule empty task_handle"); + + auto& ctx = task_handle_accessor::ctx_of(th); + + // Do not access th after release + r1::enqueue(*task_handle_accessor::release(th), ctx, ta); +} +} //namespace d2 + +namespace d1 { + +static constexpr unsigned num_priority_levels = 3; +static constexpr int priority_stride = INT_MAX / (num_priority_levels + 1); + +class task_arena_base { + friend struct r1::task_arena_impl; + friend void r1::observe(d1::task_scheduler_observer&, bool); +public: + enum class priority : int { + low = 1 * priority_stride, + normal = 2 * priority_stride, + high = 
3 * priority_stride + }; +#if __TBB_ARENA_BINDING + using constraints = tbb::detail::d1::constraints; +#endif /*__TBB_ARENA_BINDING*/ +protected: + //! Special settings + intptr_t my_version_and_traits; + + std::atomic my_initialization_state; + + //! nullptr if not currently initialized. + std::atomic my_arena; + static_assert(sizeof(std::atomic) == sizeof(r1::arena*), + "To preserve backward compatibility we need the equal size of an atomic pointer and a pointer"); + + //! Concurrency level for deferred initialization + int my_max_concurrency; + + //! Reserved slots for external threads + unsigned my_num_reserved_slots; + + //! Arena priority + priority my_priority; + + //! The NUMA node index to which the arena will be attached + numa_node_id my_numa_id; + + //! The core type index to which arena will be attached + core_type_id my_core_type; + + //! Number of threads per core + int my_max_threads_per_core; + + // Backward compatibility checks. + core_type_id core_type() const { + return (my_version_and_traits & core_type_support_flag) == core_type_support_flag ? my_core_type : automatic; + } + int max_threads_per_core() const { + return (my_version_and_traits & core_type_support_flag) == core_type_support_flag ? my_max_threads_per_core : automatic; + } + + enum { + default_flags = 0 + , core_type_support_flag = 1 + }; + + task_arena_base(int max_concurrency, unsigned reserved_for_masters, priority a_priority) + : my_version_and_traits(default_flags | core_type_support_flag) + , my_initialization_state(do_once_state::uninitialized) + , my_arena(nullptr) + , my_max_concurrency(max_concurrency) + , my_num_reserved_slots(reserved_for_masters) + , my_priority(a_priority) + , my_numa_id(automatic) + , my_core_type(automatic) + , my_max_threads_per_core(automatic) + {} + +#if __TBB_ARENA_BINDING + task_arena_base(const constraints& constraints_, unsigned reserved_for_masters, priority a_priority) + : my_version_and_traits(default_flags | core_type_support_flag) + , my_initialization_state(do_once_state::uninitialized) + , my_arena(nullptr) + , my_max_concurrency(constraints_.max_concurrency) + , my_num_reserved_slots(reserved_for_masters) + , my_priority(a_priority) + , my_numa_id(constraints_.numa_id) + , my_core_type(constraints_.core_type) + , my_max_threads_per_core(constraints_.max_threads_per_core) + {} +#endif /*__TBB_ARENA_BINDING*/ +public: + //! Typedef for number of threads that is automatic. 
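+    // Editor's note: an illustrative sketch, not part of the upstream header.
+    // Passing 'automatic' (the default) lets the library pick the concurrency
+    // level, e.g.:
+    //
+    //     tbb::task_arena arena(tbb::task_arena::automatic, /*reserved_for_masters=*/1,
+    //                           tbb::task_arena::priority::high);
+    //     arena.execute([]{ /* work limited to this arena's concurrency */ });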
+    static const int automatic = -1;
+    static const int not_initialized = -2;
+};
+
+template<typename R, typename F>
+R isolate_impl(F& f) {
+    task_arena_function<F, R> func(f);
+    r1::isolate_within_arena(func, /*isolation*/ 0);
+    return func.consume_result();
+}
+
+template<typename F>
+class enqueue_task : public task {
+    small_object_allocator m_allocator;
+    const F m_func;
+
+    void finalize(const execution_data& ed) {
+        m_allocator.delete_object(this, ed);
+    }
+    task* execute(execution_data& ed) override {
+        m_func();
+        finalize(ed);
+        return nullptr;
+    }
+    task* cancel(execution_data&) override {
+        __TBB_ASSERT_RELEASE(false, "Unhandled exception from enqueue task is caught");
+        return nullptr;
+    }
+public:
+    enqueue_task(const F& f, small_object_allocator& alloc) : m_allocator(alloc), m_func(f) {}
+    enqueue_task(F&& f, small_object_allocator& alloc) : m_allocator(alloc), m_func(std::move(f)) {}
+};
+
+template<typename F>
+void enqueue_impl(F&& f, task_arena_base* ta) {
+    small_object_allocator alloc{};
+    r1::enqueue(*alloc.new_object<enqueue_task<typename std::decay<F>::type>>(std::forward<F>(f), alloc), ta);
+}
+/** 1-to-1 proxy representation class of scheduler's arena
+ * Constructors set up settings only, real construction is deferred till the first method invocation
+ * Destructor only removes one of the references to the inner arena representation.
+ * Final destruction happens when all the references (and the work) are gone.
+ */
+class task_arena : public task_arena_base {
+
+    void mark_initialized() {
+        __TBB_ASSERT( my_arena.load(std::memory_order_relaxed), "task_arena initialization is incomplete" );
+        my_initialization_state.store(do_once_state::initialized, std::memory_order_release);
+    }
+
+    template<typename R, typename F>
+    R execute_impl(F& f) {
+        initialize();
+        task_arena_function<F, R> func(f);
+        r1::execute(*this, func);
+        return func.consume_result();
+    }
+public:
+    //! Creates task_arena with certain concurrency limits
+    /** Sets up settings only, real construction is deferred till the first method invocation
+     * @arg max_concurrency specifies total number of slots in arena where threads work
+     * @arg reserved_for_masters specifies number of slots to be used by external threads only.
+     * Value of 1 is default and reflects behavior of implicit arenas.
+     **/
+    task_arena(int max_concurrency_ = automatic, unsigned reserved_for_masters = 1,
+               priority a_priority = priority::normal)
+        : task_arena_base(max_concurrency_, reserved_for_masters, a_priority)
+    {}
+
+#if __TBB_ARENA_BINDING
+    //! Creates task arena pinned to certain NUMA node
+    task_arena(const constraints& constraints_, unsigned reserved_for_masters = 1,
+               priority a_priority = priority::normal)
+        : task_arena_base(constraints_, reserved_for_masters, a_priority)
+    {}
+
+    //! Copies settings from another task_arena
+    task_arena(const task_arena &s) // copy settings but not the reference or instance
+        : task_arena_base(
+            constraints{}
+                .set_numa_id(s.my_numa_id)
+                .set_max_concurrency(s.my_max_concurrency)
+                .set_core_type(s.my_core_type)
+                .set_max_threads_per_core(s.my_max_threads_per_core)
+            , s.my_num_reserved_slots, s.my_priority)
+    {}
+#else
+    //! Copies settings from another task_arena
+    task_arena(const task_arena& a) // copy settings but not the reference or instance
+        : task_arena_base(a.my_max_concurrency, a.my_num_reserved_slots, a.my_priority)
+    {}
+#endif /*__TBB_ARENA_BINDING*/
+
+    //! Tag class used to indicate the "attaching" constructor
+    struct attach {};
+
+    //! 
Creates an instance of task_arena attached to the current arena of the thread + explicit task_arena( attach ) + : task_arena_base(automatic, 1, priority::normal) // use default settings if attach fails + { + if (r1::attach(*this)) { + mark_initialized(); + } + } + + //! Creates an instance of task_arena attached to the current arena of the thread + explicit task_arena(d1::attach) + : task_arena(attach{}) + {} + + //! Forces allocation of the resources for the task_arena as specified in constructor arguments + void initialize() { + atomic_do_once([this]{ r1::initialize(*this); }, my_initialization_state); + } + + //! Overrides concurrency level and forces initialization of internal representation + void initialize(int max_concurrency_, unsigned reserved_for_masters = 1, + priority a_priority = priority::normal) + { + __TBB_ASSERT(!my_arena.load(std::memory_order_relaxed), "Impossible to modify settings of an already initialized task_arena"); + if( !is_active() ) { + my_max_concurrency = max_concurrency_; + my_num_reserved_slots = reserved_for_masters; + my_priority = a_priority; + r1::initialize(*this); + mark_initialized(); + } + } + +#if __TBB_ARENA_BINDING + void initialize(constraints constraints_, unsigned reserved_for_masters = 1, + priority a_priority = priority::normal) + { + __TBB_ASSERT(!my_arena.load(std::memory_order_relaxed), "Impossible to modify settings of an already initialized task_arena"); + if( !is_active() ) { + my_numa_id = constraints_.numa_id; + my_max_concurrency = constraints_.max_concurrency; + my_core_type = constraints_.core_type; + my_max_threads_per_core = constraints_.max_threads_per_core; + my_num_reserved_slots = reserved_for_masters; + my_priority = a_priority; + r1::initialize(*this); + mark_initialized(); + } + } +#endif /*__TBB_ARENA_BINDING*/ + + //! Attaches this instance to the current arena of the thread + void initialize(attach) { + // TODO: decide if this call must be thread-safe + __TBB_ASSERT(!my_arena.load(std::memory_order_relaxed), "Impossible to modify settings of an already initialized task_arena"); + if( !is_active() ) { + if ( !r1::attach(*this) ) { + r1::initialize(*this); + } + mark_initialized(); + } + } + + //! Attaches this instance to the current arena of the thread + void initialize(d1::attach) { + initialize(attach{}); + } + + //! Removes the reference to the internal arena representation. + //! Not thread safe wrt concurrent invocations of other methods. + void terminate() { + if( is_active() ) { + r1::terminate(*this); + my_initialization_state.store(do_once_state::uninitialized, std::memory_order_relaxed); + } + } + + //! Removes the reference to the internal arena representation, and destroys the external object. + //! Not thread safe wrt concurrent invocations of other methods. + ~task_arena() { + terminate(); + } + + //! Returns true if the arena is active (initialized); false otherwise. + //! The name was chosen to match a task_scheduler_init method with the same semantics. + bool is_active() const { + return my_initialization_state.load(std::memory_order_acquire) == do_once_state::initialized; + } + + //! Enqueues a task into the arena to process a functor, and immediately returns. + //! Does not require the calling thread to join the arena + + template + void enqueue(F&& f) { + initialize(); + enqueue_impl(std::forward(f), this); + } + + //! Enqueues a task into the arena to process a functor wrapped in task_handle, and immediately returns. + //! 
Does not require the calling thread to join the arena + void enqueue(d2::task_handle&& th) { + initialize(); + d2::enqueue_impl(std::move(th), this); + } + + //! Joins the arena and executes a mutable functor, then returns + //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion + //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). + template + auto execute(F&& f) -> decltype(f()) { + return execute_impl(f); + } + +#if __TBB_EXTRA_DEBUG + //! Returns my_num_reserved_slots + int debug_reserved_slots() const { + // Handle special cases inside the library + return my_num_reserved_slots; + } + + //! Returns my_max_concurrency + int debug_max_concurrency() const { + // Handle special cases inside the library + return my_max_concurrency; + } + + //! Wait for all work in the arena to be completed + //! Even submitted by other application threads + //! Joins arena if/when possible (in the same way as execute()) + void debug_wait_until_empty() { + initialize(); + r1::wait(*this); + } +#endif //__TBB_EXTRA_DEBUG + + //! Returns the maximal number of threads that can work inside the arena + int max_concurrency() const { + // Handle special cases inside the library + return (my_max_concurrency > 1) ? my_max_concurrency : r1::max_concurrency(this); + } + + friend void submit(task& t, task_arena& ta, task_group_context& ctx, bool as_critical) { + __TBB_ASSERT(ta.is_active(), nullptr); + call_itt_task_notify(releasing, &t); + r1::submit(t, ctx, ta.my_arena.load(std::memory_order_relaxed), as_critical ? 1 : 0); + } +}; + +//! Executes a mutable functor in isolation within the current task arena. +//! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). +template +inline auto isolate(F&& f) -> decltype(f()) { + return isolate_impl(f); +} + +//! Returns the index, aka slot number, of the calling thread in its current arena +inline int current_thread_index() { + slot_id idx = r1::execution_slot(nullptr); + return idx == slot_id(-1) ? task_arena_base::not_initialized : int(idx); +} + +#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS +inline bool is_inside_task() { + return nullptr != current_context(); +} +#endif //__TBB_PREVIEW_TASK_GROUP_EXTENSIONS + +//! 
Returns the maximal number of threads that can work inside the arena +inline int max_concurrency() { + return r1::max_concurrency(nullptr); +} + +inline void enqueue(d2::task_handle&& th) { + d2::enqueue_impl(std::move(th), nullptr); +} + +template +inline void enqueue(F&& f) { + enqueue_impl(std::forward(f), nullptr); +} + +using r1::submit; + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::task_arena; +using detail::d1::attach; + +#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS +using detail::d1::is_inside_task; +#endif + +namespace this_task_arena { +using detail::d1::current_thread_index; +using detail::d1::max_concurrency; +using detail::d1::isolate; + +using detail::d1::enqueue; +} // namespace this_task_arena + +} // inline namespace v1 + +} // namespace tbb +#endif /* __TBB_task_arena_H */ diff --git a/src/tbb/include/oneapi/tbb/task_group.h b/src/tbb/include/oneapi/tbb/task_group.h new file mode 100644 index 000000000..09c0adaa1 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/task_group.h @@ -0,0 +1,712 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_group_H +#define __TBB_task_group_H + +#include "detail/_config.h" +#include "detail/_namespace_injection.h" +#include "detail/_assert.h" +#include "detail/_utils.h" +#include "detail/_template_helpers.h" +#include "detail/_exception.h" +#include "detail/_task.h" +#include "detail/_small_object_pool.h" +#include "detail/_intrusive_list_node.h" +#include "detail/_task_handle.h" + +#include "profiling.h" + +#include + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Suppress warning: structure was padded due to alignment specifier + // #pragma warning(push) + // #pragma warning(disable:4324) +#endif + +namespace tbb { +namespace detail { + +namespace d1 { +class delegate_base; +class task_arena_base; +class task_group_context; +} + +namespace r1 { +// Forward declarations +class tbb_exception_ptr; +class cancellation_disseminator; +class thread_data; +class task_dispatcher; +template +class context_guard_helper; +struct task_arena_impl; +class context_list; + +TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&); +TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t); + +TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&); +TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&); +TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&); +TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&); +TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&); +TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&); + +struct task_group_context_impl; +} + +namespace d2 { + +namespace { +template +d1::task* task_ptr_or_nullptr(F&& f); +} + +template +class function_task : public task_handle_task { + //TODO: apply empty base optimization here + const F m_func; 
+ +private: + d1::task* execute(d1::execution_data& ed) override { + __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks"); + task* res = task_ptr_or_nullptr(m_func); + finalize(&ed); + return res; + } + d1::task* cancel(d1::execution_data& ed) override { + finalize(&ed); + return nullptr; + } +public: + template + function_task(FF&& f, d1::wait_tree_vertex_interface* vertex, d1::task_group_context& ctx, d1::small_object_allocator& alloc) + : task_handle_task{vertex, ctx, alloc}, + m_func(std::forward(f)) {} +}; + +#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS +namespace { + template + d1::task* task_ptr_or_nullptr_impl(std::false_type, F&& f){ + task_handle th = std::forward(f)(); + return task_handle_accessor::release(th); + } + + template + d1::task* task_ptr_or_nullptr_impl(std::true_type, F&& f){ + std::forward(f)(); + return nullptr; + } + + template + d1::task* task_ptr_or_nullptr(F&& f){ + using is_void_t = std::is_void< + decltype(std::forward(f)()) + >; + + return task_ptr_or_nullptr_impl(is_void_t{}, std::forward(f)); + } +} +#else +namespace { + template + d1::task* task_ptr_or_nullptr(F&& f){ + std::forward(f)(); + return nullptr; + } +} // namespace +#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS +} // namespace d2 + +namespace d1 { + +// This structure is left here for backward compatibility check +struct context_list_node { + std::atomic prev{}; + std::atomic next{}; +}; + +//! Used to form groups of tasks +/** @ingroup task_scheduling + The context services explicit cancellation requests from user code, and unhandled + exceptions intercepted during tasks execution. Intercepting an exception results + in generating internal cancellation requests (which is processed in exactly the + same way as external ones). + + The context is associated with one or more root tasks and defines the cancellation + group that includes all the descendants of the corresponding root task(s). Association + is established when a context object is passed as an argument to the task::allocate_root() + method. See task_group_context::task_group_context for more details. + + The context can be bound to another one, and other contexts can be bound to it, + forming a tree-like structure: parent -> this -> children. Arrows here designate + cancellation propagation direction. If a task in a cancellation group is cancelled + all the other tasks in this group and groups bound to it (as children) get cancelled too. +**/ +class task_group_context : no_copy { +public: + enum traits_type { + fp_settings = 1 << 1, + concurrent_wait = 1 << 2, + default_traits = 0 + }; + enum kind_type { + isolated, + bound + }; +private: + //! Space for platform-specific FPU settings. + /** Must only be accessed inside TBB binaries, and never directly in user + code or inline methods. */ + std::uint64_t my_cpu_ctl_env; + + //! Specifies whether cancellation was requested for this task group. + std::atomic my_cancellation_requested; + + //! Versioning for run-time checks and behavioral traits of the context. + enum class task_group_context_version : std::uint8_t { + unused = 1 // ensure that new versions, if any, will not clash with previously used ones + }; + task_group_context_version my_version; + + //! The context traits. 
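+    // Editor's note: an illustrative sketch, not part of the upstream header.
+    // The user-visible traits below are supplied at construction time, e.g.:
+    //
+    //     tbb::task_group_context ctx(tbb::task_group_context::isolated,
+    //                                 tbb::task_group_context::default_traits
+    //                                     | tbb::task_group_context::concurrent_wait);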
+ struct context_traits { + bool fp_settings : 1; + bool concurrent_wait : 1; + bool bound : 1; + bool reserved1 : 1; + bool reserved2 : 1; + bool reserved3 : 1; + bool reserved4 : 1; + bool reserved5 : 1; + } my_traits; + + static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte."); + + static constexpr std::uint8_t may_have_children = 1; + //! The context internal state (currently only may_have_children). + std::atomic my_may_have_children; + + enum class state : std::uint8_t { + created, + locked, + isolated, + bound, + dead, + proxy = std::uint8_t(-1) //the context is not the real one, but proxy to other one + }; + + //! The synchronization machine state to manage lifetime. + std::atomic my_state; + + union { + //! Pointer to the context of the parent cancellation group. nullptr for isolated contexts. + task_group_context* my_parent; + + //! Pointer to the actual context 'this' context represents a proxy of. + task_group_context* my_actual_context; + }; + + //! Thread data instance that registered this context in its list. + r1::context_list* my_context_list; + static_assert(sizeof(std::atomic) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size"); + + //! Used to form the thread specific list of contexts without additional memory allocation. + /** A context is included into the list of the current thread when its binding to + its parent happens. Any context can be present in the list of one thread only. **/ + intrusive_list_node my_node; + static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size"); + + //! Pointer to the container storing exception being propagated across this task group. + std::atomic my_exception; + static_assert(sizeof(std::atomic) == sizeof(r1::tbb_exception_ptr*), + "backward compatibility check"); + + //! Used to set and maintain stack stitching point for Intel Performance Tools. + void* my_itt_caller; + + //! Description of algorithm for scheduler based instrumentation. + string_resource_index my_name; + + char padding[max_nfs_size + - sizeof(std::uint64_t) // my_cpu_ctl_env + - sizeof(std::atomic) // my_cancellation_requested + - sizeof(std::uint8_t) // my_version + - sizeof(context_traits) // my_traits + - sizeof(std::atomic) // my_state + - sizeof(std::atomic) // my_state + - sizeof(task_group_context*) // my_parent + - sizeof(r1::context_list*) // my_context_list + - sizeof(intrusive_list_node) // my_node + - sizeof(std::atomic) // my_exception + - sizeof(void*) // my_itt_caller + - sizeof(string_resource_index) // my_name + ]; + + task_group_context(context_traits t, string_resource_index name) + : my_version{task_group_context_version::unused}, my_name{name} + { + my_traits = t; // GCC4.8 issues warning list initialization for bitset (missing-field-initializers) + r1::initialize(*this); + } + + task_group_context(task_group_context* actual_context) + : my_version{task_group_context_version::unused} + , my_state{state::proxy} + , my_actual_context{actual_context} + { + __TBB_ASSERT(my_actual_context, "Passed pointer value points to nothing."); + my_name = actual_context->my_name; + + // no need to initialize 'this' context as it acts as a proxy for my_actual_context, which + // initialization is a user-side responsibility. 
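+
+        // Editor's note (descriptive comment, not in upstream): my_name is copied
+        // above so that scheduler instrumentation still reports a meaningful
+        // algorithm name for the proxy; all other state is reached through
+        // my_actual_context via actual_context().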
+    }
+
+    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
+        context_traits ct;
+        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
+        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
+        ct.bound = relation_with_parent == bound;
+        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = ct.reserved5 = false;
+        return ct;
+    }
+
+    bool is_proxy() const {
+        return my_state.load(std::memory_order_relaxed) == state::proxy;
+    }
+
+    task_group_context& actual_context() noexcept {
+        if (is_proxy()) {
+            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
+            return *my_actual_context;
+        }
+        return *this;
+    }
+
+    const task_group_context& actual_context() const noexcept {
+        if (is_proxy()) {
+            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
+            return *my_actual_context;
+        }
+        return *this;
+    }
+
+public:
+    //! Default & binding constructor.
+    /** By default a bound context is created. That is, this context will be bound
+        (as child) to the context of the currently executing task. Cancellation
+        requests passed to the parent context are propagated to all the contexts
+        bound to it. Similarly, a priority change is propagated from the parent context
+        to its children.
+
+        If task_group_context::isolated is used as the argument, then the tasks associated
+        with this context will never be affected by events in any other context.
+
+        Creating an isolated context involves much less overhead, but such contexts have
+        limited utility. Normally, when an exception occurs in an algorithm that has nested
+        ones running, it is desirable to have all the nested algorithms cancelled
+        as well. Such behavior requires nested algorithms to use bound contexts.
+
+        There is one case where using isolated contexts is beneficial: an external
+        thread. If a particular algorithm is invoked directly from an external thread
+        (not from a TBB task), supplying it with an explicitly created isolated context
+        will result in a faster algorithm startup.
+
+        VERSIONING NOTE:
+        Implementation(s) of task_group_context constructor(s) cannot be made
+        entirely out-of-line because the run-time version must be set by the user
+        code. This will become critically important for binary compatibility, if
+        we ever have to change the size of the context object. **/
+
+    task_group_context(kind_type relation_with_parent = bound,
+                       std::uintptr_t t = default_traits)
+        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}
+
+    // Custom constructor for instrumentation of oneTBB algorithm
+    task_group_context(string_resource_index name)
+        : task_group_context(make_traits(bound, default_traits), name) {}
+
+    // Do not introduce any logic on user side since it might break state propagation assumptions
+    ~task_group_context() {
+        // When 'this' serves as a proxy, the initialization does not happen - nor should the
+        // destruction.
+        if (!is_proxy())
+        {
+            r1::destroy(*this);
+        }
+    }
+
+    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
+    /** Because the method assumes that all the tasks that used to be associated with
+        this context have already finished, calling it while the context is still
+        in use somewhere in the task hierarchy leads to undefined behavior.
+
+        IMPORTANT: This method is not thread safe!
+
+        The method does not change the context's parent if it is set. **/
+    void reset() {
+        r1::reset(actual_context());
+    }
+
+    //! 
Initiates cancellation of all tasks in this cancellation group and its subordinate groups. + /** \return false if cancellation has already been requested, true otherwise. + + Note that canceling never fails. When false is returned, it just means that + another thread (or this one) has already sent cancellation request to this + context or to one of its ancestors (if this context is bound). It is guaranteed + that when this method is concurrently called on the same not yet cancelled + context, true will be returned by one and only one invocation. **/ + bool cancel_group_execution() { + return r1::cancel_group_execution(actual_context()); + } + + //! Returns true if the context received cancellation request. + bool is_group_execution_cancelled() { + return r1::is_group_execution_cancelled(actual_context()); + } + +#if __TBB_FP_CONTEXT + //! Captures the current FPU control settings to the context. + /** Because the method assumes that all the tasks that used to be associated with + this context have already finished, calling it while the context is still + in use somewhere in the task hierarchy leads to undefined behavior. + + IMPORTANT: This method is not thread safe! + + The method does not change the FPU control settings of the context's parent. **/ + void capture_fp_settings() { + r1::capture_fp_settings(actual_context()); + } +#endif + + //! Returns the user visible context trait + std::uintptr_t traits() const { + std::uintptr_t t{}; + const task_group_context& ctx = actual_context(); + t |= ctx.my_traits.fp_settings ? fp_settings : 0; + t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0; + return t; + } +private: + //// TODO: cleanup friends + friend class r1::cancellation_disseminator; + friend class r1::thread_data; + friend class r1::task_dispatcher; + template + friend class r1::context_guard_helper; + friend struct r1::task_arena_impl; + friend struct r1::task_group_context_impl; + friend class d2::task_group_base; +}; // class task_group_context + +static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context"); + +inline bool is_current_task_group_canceling() { + task_group_context* ctx = current_context(); + return ctx ? ctx->is_group_execution_cancelled() : false; +} + +} // namespace d1 + +namespace d2 { + +enum task_group_status { + not_complete, + complete, + canceled +}; + +class task_group; +class structured_task_group; +#if TBB_PREVIEW_ISOLATED_TASK_GROUP +class isolated_task_group; +#endif + +template +class function_stack_task : public d1::task { + const F& m_func; + d1::wait_tree_vertex_interface* m_wait_tree_vertex; + + void finalize() { + m_wait_tree_vertex->release(); + } + task* execute(d1::execution_data&) override { + task* res = d2::task_ptr_or_nullptr(m_func); + finalize(); + return res; + } + task* cancel(d1::execution_data&) override { + finalize(); + return nullptr; + } +public: + function_stack_task(const F& f, d1::wait_tree_vertex_interface* vertex) : m_func(f), m_wait_tree_vertex(vertex) { + m_wait_tree_vertex->reserve(); + } +}; + +class task_group_base : no_copy { +protected: + d1::wait_context_vertex m_wait_vertex; + d1::task_group_context m_context; + + template + task_group_status internal_run_and_wait(const F& f) { + function_stack_task t{ f, r1::get_thread_reference_vertex(&m_wait_vertex) }; + + bool cancellation_status = false; + try_call([&] { + execute_and_wait(t, context(), m_wait_vertex.get_context(), context()); + }).on_completion([&] { + // TODO: the reset method is not thread-safe. Ensure the correct behavior. 
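+            // Editor's note (descriptive comment, not in upstream): the flag is
+            // sampled before reset() so that the returned status still reflects
+            // whether this particular run was cancelled.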
+ cancellation_status = context().is_group_execution_cancelled(); + context().reset(); + }); + return cancellation_status ? canceled : complete; + } + + task_group_status internal_run_and_wait(d2::task_handle&& h) { + __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle"); + + using acs = d2::task_handle_accessor; + __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group"); + + bool cancellation_status = false; + try_call([&] { + execute_and_wait(*acs::release(h), context(), m_wait_vertex.get_context(), context()); + }).on_completion([&] { + // TODO: the reset method is not thread-safe. Ensure the correct behavior. + cancellation_status = context().is_group_execution_cancelled(); + context().reset(); + }); + return cancellation_status ? canceled : complete; + } + + template + d1::task* prepare_task(F&& f) { + d1::small_object_allocator alloc{}; + return alloc.new_object::type>>(std::forward(f), + r1::get_thread_reference_vertex(&m_wait_vertex), context(), alloc); + } + + d1::task_group_context& context() noexcept { + return m_context.actual_context(); + } + + template + d2::task_handle prepare_task_handle(F&& f) { + d1::small_object_allocator alloc{}; + using function_task_t = d2::function_task::type>; + d2::task_handle_task* function_task_p = alloc.new_object(std::forward(f), + r1::get_thread_reference_vertex(&m_wait_vertex), context(), alloc); + + return d2::task_handle_accessor::construct(function_task_p); + } + +public: + task_group_base(uintptr_t traits = 0) + : m_wait_vertex(0) + , m_context(d1::task_group_context::bound, d1::task_group_context::default_traits | traits) + {} + + task_group_base(d1::task_group_context& ctx) + : m_wait_vertex(0) + , m_context(&ctx) + {} + + ~task_group_base() noexcept(false) { + if (m_wait_vertex.continue_execution()) { +#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT + bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0; +#else + bool stack_unwinding_in_progress = std::uncaught_exception(); +#endif + // Always attempt to do proper cleanup to avoid inevitable memory corruption + // in case of missing wait (for the sake of better testability & debuggability) + if (!context().is_group_execution_cancelled()) + cancel(); + d1::wait(m_wait_vertex.get_context(), context()); + if (!stack_unwinding_in_progress) + throw_exception(exception_id::missing_wait); + } + } + + task_group_status wait() { + bool cancellation_status = false; + try_call([&] { + d1::wait(m_wait_vertex.get_context(), context()); + }).on_completion([&] { + // TODO: the reset method is not thread-safe. Ensure the correct behavior. + cancellation_status = m_context.is_group_execution_cancelled(); + context().reset(); + }); + return cancellation_status ? 
canceled : complete; + } + + void cancel() { + context().cancel_group_execution(); + } +}; // class task_group_base + +class task_group : public task_group_base { +public: + task_group() : task_group_base(d1::task_group_context::concurrent_wait) {} + task_group(d1::task_group_context& ctx) : task_group_base(ctx) {} + + template + void run(F&& f) { + d1::spawn(*prepare_task(std::forward(f)), context()); + } + + void run(d2::task_handle&& h) { + __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle"); + + using acs = d2::task_handle_accessor; + __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group"); + + d1::spawn(*acs::release(h), context()); + } + + template + d2::task_handle defer(F&& f) { + return prepare_task_handle(std::forward(f)); + + } + + template + task_group_status run_and_wait(const F& f) { + return internal_run_and_wait(f); + } + + task_group_status run_and_wait(d2::task_handle&& h) { + return internal_run_and_wait(std::move(h)); + } +}; // class task_group + +#if TBB_PREVIEW_ISOLATED_TASK_GROUP +class spawn_delegate : public d1::delegate_base { + d1::task* task_to_spawn; + d1::task_group_context& context; + bool operator()() const override { + spawn(*task_to_spawn, context); + return true; + } +public: + spawn_delegate(d1::task* a_task, d1::task_group_context& ctx) + : task_to_spawn(a_task), context(ctx) + {} +}; + +class wait_delegate : public d1::delegate_base { + bool operator()() const override { + status = tg.wait(); + return true; + } +protected: + task_group& tg; + task_group_status& status; +public: + wait_delegate(task_group& a_group, task_group_status& tgs) + : tg(a_group), status(tgs) {} +}; + +template +class run_wait_delegate : public wait_delegate { + F& func; + bool operator()() const override { + status = tg.run_and_wait(func); + return true; + } +public: + run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs) + : wait_delegate(a_group, tgs), func(a_func) {} +}; + +class isolated_task_group : public task_group { + intptr_t this_isolation() { + return reinterpret_cast(this); + } +public: + isolated_task_group() : task_group() {} + + isolated_task_group(d1::task_group_context& ctx) : task_group(ctx) {} + + template + void run(F&& f) { + spawn_delegate sd(prepare_task(std::forward(f)), context()); + r1::isolate_within_arena(sd, this_isolation()); + } + + void run(d2::task_handle&& h) { + __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle"); + + using acs = d2::task_handle_accessor; + __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group"); + + spawn_delegate sd(acs::release(h), context()); + r1::isolate_within_arena(sd, this_isolation()); + } + + template + task_group_status run_and_wait( const F& f ) { + task_group_status result = not_complete; + run_wait_delegate rwd(*this, f, result); + r1::isolate_within_arena(rwd, this_isolation()); + __TBB_ASSERT(result != not_complete, "premature exit from wait?"); + return result; + } + + task_group_status wait() { + task_group_status result = not_complete; + wait_delegate wd(*this, result); + r1::isolate_within_arena(wd, this_isolation()); + __TBB_ASSERT(result != not_complete, "premature exit from wait?"); + return result; + } +}; // class isolated_task_group +#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP +} // namespace d2 +} // namespace detail + +inline namespace v1 { +using detail::d1::task_group_context; +using detail::d2::task_group; +#if TBB_PREVIEW_ISOLATED_TASK_GROUP +using 
detail::d2::isolated_task_group; +#endif + +using detail::d2::task_group_status; +using detail::d2::not_complete; +using detail::d2::complete; +using detail::d2::canceled; + +using detail::d1::is_current_task_group_canceling; +using detail::r1::missing_wait; + +using detail::d2::task_handle; +} + +} // namespace tbb + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // #pragma warning(pop) // 4324 warning +#endif + +#endif // __TBB_task_group_H diff --git a/src/tbb/include/oneapi/tbb/task_scheduler_observer.h b/src/tbb/include/oneapi/tbb/task_scheduler_observer.h new file mode 100644 index 000000000..f4f91e96d --- /dev/null +++ b/src/tbb/include/oneapi/tbb/task_scheduler_observer.h @@ -0,0 +1,236 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_scheduler_observer_H +#define __TBB_task_scheduler_observer_H + +#include + +#include "detail/_namespace_injection.h" + +#include "task_arena.h" + + +namespace tbb { +namespace detail { + +namespace d1 { +class task_scheduler_observer; +} + +namespace r1 { +class observer_proxy; +class observer_list; + +//! Enable or disable observation +/** For local observers the method can be used only when the current thread +has the task scheduler initialized or is attached to an arena. +Repeated calls with the same state are no-ops. **/ +TBB_EXPORT void __TBB_EXPORTED_FUNC observe(d1::task_scheduler_observer&, bool state = true); +} + +namespace d1 { +class task_scheduler_observer { + friend class r1::observer_proxy; + friend class r1::observer_list; + friend void r1::observe(d1::task_scheduler_observer&, bool); + + //! Pointer to the proxy holding this observer. + /** Observers are proxied by the scheduler to maintain persistent lists of them. **/ + std::atomic my_proxy{ nullptr }; + + //! Counter preventing the observer from being destroyed while in use by the scheduler. + /** Valid only when observation is on. **/ + std::atomic my_busy_count{ 0 }; + + //! Contains task_arena pointer + task_arena* my_task_arena{ nullptr }; +public: + //! Returns true if observation is enabled, false otherwise. + bool is_observing() const { return my_proxy.load(std::memory_order_relaxed) != nullptr; } + + //! Entry notification + /** Invoked from inside observe(true) call and whenever a worker enters the arena + this observer is associated with. If a thread is already in the arena when + the observer is activated, the entry notification is called before it + executes the first stolen task. **/ + virtual void on_scheduler_entry( bool /*is_worker*/ ) {} + + //! Exit notification + /** Invoked from inside observe(false) call and whenever a worker leaves the + arena this observer is associated with. **/ + virtual void on_scheduler_exit( bool /*is_worker*/ ) {} + + //! Construct local or global observer in inactive state (observation disabled). + /** For a local observer entry/exit notifications are invoked whenever a worker + thread joins/leaves the arena of the observer's owner thread. 
If a thread is + already in the arena when the observer is activated, the entry notification is + called before it executes the first stolen task. **/ + explicit task_scheduler_observer() = default; + + //! Construct local observer for a given arena in inactive state (observation disabled). + /** entry/exit notifications are invoked whenever a thread joins/leaves arena. + If a thread is already in the arena when the observer is activated, the entry notification + is called before it executes the first stolen task. **/ + explicit task_scheduler_observer(task_arena& a) : my_task_arena(&a) {} + + /** Destructor protects instance of the observer from concurrent notification. + It is recommended to disable observation before destructor of a derived class starts, + otherwise it can lead to concurrent notification callback on partly destroyed object **/ + virtual ~task_scheduler_observer() { + if (my_proxy.load(std::memory_order_acquire)) { + observe(false); + } + } + + //! Enable or disable observation + /** Warning: concurrent invocations of this method are not safe. + Repeated calls with the same state are no-ops. **/ + void observe(bool state = true) { + if( state && !my_proxy.load(std::memory_order_relaxed) ) { + __TBB_ASSERT( my_busy_count.load(std::memory_order_relaxed) == 0, "Inconsistent state of task_scheduler_observer instance"); + } + r1::observe(*this, state); + } +}; + +} // namespace d1 +} // namespace detail + +inline namespace v1 { + using detail::d1::task_scheduler_observer; +} +} // namespace tbb + + +// Provided for backwards compatibility. +namespace tbb { +namespace interface6 { +class task_scheduler_observer; +} +namespace internal { + +class task_scheduler_observer_v3 { + friend class tbb::detail::r1::observer_proxy; + friend class tbb::detail::r1::observer_list; + friend class interface6::task_scheduler_observer; + + //! Pointer to the proxy holding this observer. + /** Observers are proxied by the scheduler to maintain persistent lists of them. **/ + tbb::detail::r1::observer_proxy* my_proxy; + + //! Counter preventing the observer from being destroyed while in use by the scheduler. + /** Valid only when observation is on. **/ + std::atomic my_busy_count; + +public: + //! Enable or disable observation + /** For local observers the method can be used only when the current thread + has the task scheduler initialized or is attached to an arena. + + Repeated calls with the same state are no-ops. **/ + void __TBB_EXPORTED_METHOD observe( bool state=true ); + + //! Returns true if observation is enabled, false otherwise. + bool is_observing() const {return my_proxy!=NULL;} + + //! Construct observer with observation disabled. + task_scheduler_observer_v3() : my_proxy(NULL) { my_busy_count.store(0); } + + //! Entry notification + /** Invoked from inside observe(true) call and whenever a worker enters the arena + this observer is associated with. If a thread is already in the arena when + the observer is activated, the entry notification is called before it + executes the first stolen task. + + Obsolete semantics. For global observers it is called by a thread before + the first steal since observation became enabled. **/ + virtual void on_scheduler_entry( bool /*is_worker*/ ) {} + + //! Exit notification + /** Invoked from inside observe(false) call and whenever a worker leaves the + arena this observer is associated with. + + Obsolete semantics. For global observers it is called by a thread before + the first steal since observation became enabled. 
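+
+        Editor's note: an illustrative sketch, not part of the upstream header.
+        A typical observer derives from tbb::task_scheduler_observer, enables
+        itself once fully constructed, and overrides both notifications:
+
+            class pinning_observer : public tbb::task_scheduler_observer {
+            public:
+                explicit pinning_observer(tbb::task_arena& a)
+                    : tbb::task_scheduler_observer(a) { observe(true); }
+                void on_scheduler_entry(bool is_worker) override { } // e.g. pin the thread
+                void on_scheduler_exit(bool is_worker) override { }  // undo any pinning
+            };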
**/ + virtual void on_scheduler_exit( bool /*is_worker*/ ) {} + + //! Destructor automatically switches observation off if it is enabled. + virtual ~task_scheduler_observer_v3() { if(my_proxy) observe(false);} +}; + +} // namespace internal + +namespace interface6 { +class task_scheduler_observer : public internal::task_scheduler_observer_v3 { + friend class internal::task_scheduler_observer_v3; + friend class tbb::detail::r1::observer_proxy; + friend class tbb::detail::r1::observer_list; + + /** Negative numbers with the largest absolute value to minimize probability + of coincidence in case of a bug in busy count usage. **/ + // TODO: take more high bits for version number + static const intptr_t v6_trait = (intptr_t)((~(uintptr_t)0 >> 1) + 1); + + //! contains task_arena pointer or tag indicating local or global semantics of the observer + intptr_t my_context_tag; + enum { global_tag = 0, implicit_tag = 1 }; + +public: + //! Construct local or global observer in inactive state (observation disabled). + /** For a local observer entry/exit notifications are invoked whenever a worker + thread joins/leaves the arena of the observer's owner thread. If a thread is + already in the arena when the observer is activated, the entry notification is + called before it executes the first stolen task. **/ + /** TODO: Obsolete. + Global observer semantics is obsolete as it violates master thread isolation + guarantees and is not composable. Thus the current default behavior of the + constructor is obsolete too and will be changed in one of the future versions + of the library. **/ + explicit task_scheduler_observer( bool local = false ) { + my_context_tag = local? implicit_tag : global_tag; + } + + //! Construct local observer for a given arena in inactive state (observation disabled). + /** entry/exit notifications are invoked whenever a thread joins/leaves arena. + If a thread is already in the arena when the observer is activated, the entry notification + is called before it executes the first stolen task. **/ + explicit task_scheduler_observer( task_arena & a) { + my_context_tag = (intptr_t)&a; + } + + /** Destructor protects instance of the observer from concurrent notification. + It is recommended to disable observation before destructor of a derived class starts, + otherwise it can lead to concurrent notification callback on partly destroyed object **/ + virtual ~task_scheduler_observer() { if(my_proxy) observe(false); } + + //! Enable or disable observation + /** Warning: concurrent invocations of this method are not safe. + Repeated calls with the same state are no-ops. **/ + void observe( bool state=true ) { + if( state && !my_proxy ) { + __TBB_ASSERT( !my_busy_count, "Inconsistent state of task_scheduler_observer instance"); + my_busy_count.store(v6_trait); + } + internal::task_scheduler_observer_v3::observe(state); + } +}; + +} //namespace interface6 + +} // namespace tbb + +#endif /* __TBB_task_scheduler_observer_H */ diff --git a/src/tbb/include/oneapi/tbb/tbb_allocator.h b/src/tbb/include/oneapi/tbb/tbb_allocator.h new file mode 100644 index 000000000..5394b8188 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/tbb_allocator.h @@ -0,0 +1,126 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_allocator_H +#define __TBB_tbb_allocator_H + +#include "oneapi/tbb/detail/_utils.h" +#include "detail/_namespace_injection.h" +#include +#include + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include +#endif + +namespace tbb { +namespace detail { + +namespace r1 { +TBB_EXPORT void* __TBB_EXPORTED_FUNC allocate_memory(std::size_t size); +TBB_EXPORT void __TBB_EXPORTED_FUNC deallocate_memory(void* p); +TBB_EXPORT bool __TBB_EXPORTED_FUNC is_tbbmalloc_used(); +} + +namespace d1 { + +template +class tbb_allocator { +public: + using value_type = T; + using propagate_on_container_move_assignment = std::true_type; + + //! Always defined for TBB containers (supported since C++17 for std containers) + using is_always_equal = std::true_type; + + //! Specifies current allocator + enum malloc_type { + scalable, + standard + }; + + tbb_allocator() = default; + template tbb_allocator(const tbb_allocator&) noexcept {} + + //! Allocate space for n objects. + __TBB_nodiscard T* allocate(std::size_t n) { + return static_cast(r1::allocate_memory(n * sizeof(value_type))); + } + + //! Free previously allocated block of memory. + void deallocate(T* p, std::size_t) { + r1::deallocate_memory(p); + } + + //! Returns current allocator + static malloc_type allocator_type() { + return r1::is_tbbmalloc_used() ? standard : scalable; + } + +#if TBB_ALLOCATOR_TRAITS_BROKEN + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using difference_type = std::ptrdiff_t; + using size_type = std::size_t; + template struct rebind { + using other = tbb_allocator; + }; + //! Largest value for which method allocate might succeed. + size_type max_size() const noexcept { + size_type max = ~(std::size_t(0)) / sizeof(value_type); + return (max > 0 ? max : 1); + } + template + void construct(U *p, Args&&... 
args) + { ::new (p) U(std::forward(args)...); } + void destroy( pointer p ) { p->~value_type(); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +#endif // TBB_ALLOCATOR_TRAITS_BROKEN +}; + +#if TBB_ALLOCATOR_TRAITS_BROKEN + template<> + class tbb_allocator { + public: + using pointer = void*; + using const_pointer = const void*; + using value_type = void; + template struct rebind { + using other = tbb_allocator; + }; + }; +#endif + +template +inline bool operator==(const tbb_allocator&, const tbb_allocator&) noexcept { return true; } + +#if !__TBB_CPP20_COMPARISONS_PRESENT +template +inline bool operator!=(const tbb_allocator&, const tbb_allocator&) noexcept { return false; } +#endif + +} // namespace d1 +} // namespace detail + +inline namespace v1 { +using detail::d1::tbb_allocator; +} // namespace v1 +} // namespace tbb + +#endif /* __TBB_tbb_allocator_H */ diff --git a/src/tbb/include/oneapi/tbb/tbbmalloc_proxy.h b/src/tbb/include/oneapi/tbb/tbbmalloc_proxy.h new file mode 100644 index 000000000..0ba38f215 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/tbbmalloc_proxy.h @@ -0,0 +1,65 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +Replacing the standard memory allocation routines in Microsoft* C/C++ RTL +(malloc/free, global new/delete, etc.) with the TBB memory allocator. + +Include the following header to a source of any binary which is loaded during +application startup + +#include "oneapi/tbb/tbbmalloc_proxy.h" + +or add following parameters to the linker options for the binary which is +loaded during application startup. It can be either exe-file or dll. + +For win32 +tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy" +win64 +tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy" +*/ + +#ifndef __TBB_tbbmalloc_proxy_H +#define __TBB_tbbmalloc_proxy_H + +#if _MSC_VER + +#ifdef _DEBUG + #pragma comment(lib, "tbbmalloc_proxy_debug.lib") +#else + #pragma comment(lib, "tbbmalloc_proxy.lib") +#endif + +#if defined(_WIN64) + #pragma comment(linker, "/include:__TBB_malloc_proxy") +#else + #pragma comment(linker, "/include:___TBB_malloc_proxy") +#endif + +#else +/* Primarily to support MinGW */ + +extern "C" void __TBB_malloc_proxy(); +struct __TBB_malloc_proxy_caller { + __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); } +} volatile __TBB_malloc_proxy_helper_object; + +#endif // _MSC_VER + +/* Public Windows API */ +extern "C" int TBB_malloc_replacement_log(char *** function_replacement_log_ptr); + +#endif //__TBB_tbbmalloc_proxy_H diff --git a/src/tbb/include/oneapi/tbb/tick_count.h b/src/tbb/include/oneapi/tbb/tick_count.h new file mode 100644 index 000000000..2caa56ba1 --- /dev/null +++ b/src/tbb/include/oneapi/tbb/tick_count.h @@ -0,0 +1,99 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
diff --git a/src/tbb/include/oneapi/tbb/tick_count.h b/src/tbb/include/oneapi/tbb/tick_count.h
new file mode 100644
index 000000000..2caa56ba1
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/tick_count.h
@@ -0,0 +1,99 @@
+/*
+    Copyright (c) 2005-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_tick_count_H
+#define __TBB_tick_count_H
+
+#include <chrono>
+
+#include "detail/_namespace_injection.h"
+
+namespace tbb {
+namespace detail {
+namespace d1 {
+
+
+//! Absolute timestamp
+/** @ingroup timing */
+class tick_count {
+public:
+    using clock_type = typename std::conditional<std::chrono::high_resolution_clock::is_steady,
+                                                 std::chrono::high_resolution_clock,
+                                                 std::chrono::steady_clock>::type;
+
+    //! Relative time interval.
+    class interval_t : public clock_type::duration {
+    public:
+        //! Construct a time interval representing zero time duration
+        interval_t() : clock_type::duration(clock_type::duration::zero()) {}
+
+        //! Construct a time interval representing sec seconds time duration
+        explicit interval_t( double sec )
+            : clock_type::duration(std::chrono::duration_cast<clock_type::duration>(std::chrono::duration<double>(sec))) {}
+
+        //! Return the length of a time interval in seconds
+        double seconds() const {
+            return std::chrono::duration_cast<std::chrono::duration<double>>(*this).count();
+        }
+
+        //! Extract the intervals from the tick_counts and subtract them.
+        friend interval_t operator-( const tick_count& t1, const tick_count& t0 );
+
+        //! Add two intervals.
+        friend interval_t operator+( const interval_t& i, const interval_t& j ) {
+            return interval_t(std::chrono::operator+(i, j));
+        }
+
+        //! Subtract two intervals.
+        friend interval_t operator-( const interval_t& i, const interval_t& j ) {
+            return interval_t(std::chrono::operator-(i, j));
+        }
+
+    private:
+        explicit interval_t( clock_type::duration value_ ) : clock_type::duration(value_) {}
+    };
+
+    tick_count() = default;
+
+    //! Return current time.
+    static tick_count now() {
+        return clock_type::now();
+    }
+
+    //! Subtract two timestamps to get the time interval between
+    friend interval_t operator-( const tick_count& t1, const tick_count& t0 ) {
+        return tick_count::interval_t(t1.my_time_point - t0.my_time_point);
+    }
+
+    //! Return the resolution of the clock in seconds per tick.
+    static double resolution() {
+        return static_cast<double>(interval_t::period::num) / interval_t::period::den;
+    }
+
+private:
+    clock_type::time_point my_time_point;
+    tick_count( clock_type::time_point tp ) : my_time_point(tp) {}
+};
+
+} // namespace d1
+} // namespace detail
+
+inline namespace v1 {
+    using detail::d1::tick_count;
+} // namespace v1
+
+} // namespace tbb
+
+#endif /* __TBB_tick_count_H */
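For orientation, a minimal timing sketch, not part of the diff, using only the interface declared in the header above:

```cpp
// Illustrative only: wall-clock timing with tbb::tick_count.
#include <iostream>
#include "oneapi/tbb/tick_count.h"

int main() {
    tbb::tick_count t0 = tbb::tick_count::now();
    volatile double sink = 0.0;
    for (int i = 0; i < 1000000; ++i)
        sink += i * 0.5;                     // some work to measure
    tbb::tick_count t1 = tbb::tick_count::now();
    // operator- yields an interval_t; seconds() converts it to double.
    std::cout << "elapsed: " << (t1 - t0).seconds() << " s\n";
    return 0;
}
```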
diff --git a/src/tbb/include/oneapi/tbb/version.h b/src/tbb/include/oneapi/tbb/version.h
new file mode 100644
index 000000000..c8f3ad50e
--- /dev/null
+++ b/src/tbb/include/oneapi/tbb/version.h
@@ -0,0 +1,118 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_version_H
+#define __TBB_version_H
+
+// Exclude all includes during .rc files compilation
+#ifndef RC_INVOKED
+    #include "detail/_config.h"
+    #include "detail/_namespace_injection.h"
+#else
+    #define __TBB_STRING_AUX(x) #x
+    #define __TBB_STRING(x) __TBB_STRING_AUX(x)
+#endif
+
+// Product version
+#define TBB_VERSION_MAJOR 2022
+// Update version
+#define TBB_VERSION_MINOR 0
+// "Patch" version for custom releases
+#define TBB_VERSION_PATCH 0
+// Suffix string
+#define __TBB_VERSION_SUFFIX ""
+// Full official version string
+#define TBB_VERSION_STRING \
+    __TBB_STRING(TBB_VERSION_MAJOR) "." \
+    __TBB_STRING(TBB_VERSION_MINOR) "." \
+    __TBB_STRING(TBB_VERSION_PATCH) \
+    __TBB_VERSION_SUFFIX
+
+// OneAPI oneTBB specification version
+#define ONETBB_SPEC_VERSION "1.0"
+// Full interface version
+#define TBB_INTERFACE_VERSION 12140
+// Major interface version
+#define TBB_INTERFACE_VERSION_MAJOR (TBB_INTERFACE_VERSION/1000)
+// Minor interface version
+#define TBB_INTERFACE_VERSION_MINOR (TBB_INTERFACE_VERSION%1000/10)
+
+// The binary compatibility version
+// To be used in SONAME, manifests, etc.
+#define __TBB_BINARY_VERSION 12
+
+//! TBB_VERSION support
+#ifndef TBB_ENDL
+#define TBB_ENDL "\n"
+#endif
+
+//TBB_REVAMP_TODO: consider enabling version_string.ver generation
+//TBB_REVAMP_TODO: #include "version_string.ver"
+
+#define __TBB_ONETBB_SPEC_VERSION(N) #N ": SPECIFICATION VERSION\t" ONETBB_SPEC_VERSION TBB_ENDL
+#define __TBB_VERSION_NUMBER(N) #N ": VERSION\t\t" TBB_VERSION_STRING TBB_ENDL
+#define __TBB_INTERFACE_VERSION_NUMBER(N) #N ": INTERFACE VERSION\t" __TBB_STRING(TBB_INTERFACE_VERSION) TBB_ENDL
+
+#ifndef TBB_USE_DEBUG
+    #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\tundefined" TBB_ENDL
+#elif TBB_USE_DEBUG==0
+    #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\t0" TBB_ENDL
+#elif TBB_USE_DEBUG==1
+    #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\t1" TBB_ENDL
+#elif TBB_USE_DEBUG==2
+    #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\t2" TBB_ENDL
+#else
+    #error Unexpected value for TBB_USE_DEBUG
+#endif
+
+#ifndef TBB_USE_ASSERT
+    #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\tundefined" TBB_ENDL
+#elif TBB_USE_ASSERT==0
+    #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\t0" TBB_ENDL
+#elif TBB_USE_ASSERT==1
+    #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\t1" TBB_ENDL
+#elif TBB_USE_ASSERT==2
+    #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\t2" TBB_ENDL
+#else
+    #error Unexpected value for TBB_USE_ASSERT
+#endif
+
+#define TBB_VERSION_STRINGS_P(N) \
+    __TBB_ONETBB_SPEC_VERSION(N) \
+    __TBB_VERSION_NUMBER(N) \
+    __TBB_INTERFACE_VERSION_NUMBER(N) \
+    __TBB_VERSION_USE_DEBUG(N) \
+    __TBB_VERSION_USE_ASSERT(N)
+
+#define TBB_VERSION_STRINGS TBB_VERSION_STRINGS_P(oneTBB)
+#define TBBMALLOC_VERSION_STRINGS TBB_VERSION_STRINGS_P(TBBmalloc)
+
+//! The function returns the version string for the Intel(R) oneAPI Threading Building Blocks (oneTBB)
+//! shared library being used.
+/**
+ * The returned pointer is an address of a string in the shared library.
+ * It can be different than the TBB_VERSION_STRING obtained at compile time.
+ */
+extern "C" TBB_EXPORT const char* __TBB_EXPORTED_FUNC TBB_runtime_version();
+
+//! The function returns the interface version of the oneTBB shared library being used.
+/**
+ * The returned version is determined at runtime, not at compile/link time.
+ * It can be different than the value of TBB_INTERFACE_VERSION obtained at compile time.
+ */
+extern "C" TBB_EXPORT int __TBB_EXPORTED_FUNC TBB_runtime_interface_version();
+
+#endif // __TBB_version_H
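A short sketch, not part of the diff, showing why the two runtime functions declared above matter: the compiled-against version and the loaded shared library can differ.

```cpp
// Illustrative only: compile-time vs. runtime TBB version.
#include <iostream>
#include "oneapi/tbb/version.h"

int main() {
    std::cout << "compiled against: " << TBB_VERSION_STRING
              << " (interface " << TBB_INTERFACE_VERSION << ")\n";
    std::cout << "running against:  " << TBB_runtime_version()
              << " (interface " << TBB_runtime_interface_version() << ")\n";
    return 0;
}
```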
diff --git a/src/tbb/include/serial/tbb/parallel_for.h b/src/tbb/include/serial/tbb/parallel_for.h
deleted file mode 100644
index e0bddb713..000000000
--- a/src/tbb/include/serial/tbb/parallel_for.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_SERIAL_parallel_for_H
-#define __TBB_SERIAL_parallel_for_H
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
-    #pragma warning (push)
-    #pragma warning (disable: 4530)
-#endif
-
-#include <stdexcept>
-#include <string> // required to construct std exception classes
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    #pragma warning (pop)
-#endif
-
-#include "tbb_annotate.h"
-
-#ifndef __TBB_NORMAL_EXECUTION
-#include "tbb/blocked_range.h"
-#include "tbb/partitioner.h"
-#endif
-
-namespace tbb {
-namespace serial {
-namespace interface7 {
-
-// parallel_for serial annotated implementation
-
-template< typename Range, typename Body, typename Partitioner >
-class start_for : tbb::internal::no_copy {
-    Range my_range;
-    const Body my_body;
-    typename Partitioner::task_partition_type my_partition;
-    void execute();
-
-    //! Constructor for root task.
-    start_for( const Range& range, const Body& body, Partitioner& partitioner ) :
-        my_range( range ),
-        my_body( body ),
-        my_partition( partitioner )
-    {
-    }
-
-    //! Splitting constructor used to generate children.
-    /** this becomes left child.  Newly constructed object is right child. */
-    start_for( start_for& parent_, typename Partitioner::split_type& split_obj ) :
-        my_range( parent_.my_range, split_obj ),
-        my_body( parent_.my_body ),
-        my_partition( parent_.my_partition, split_obj )
-    {
-    }
-
-public:
-    static void run( const Range& range, const Body& body, Partitioner& partitioner ) {
-        if( !range.empty() ) {
-            ANNOTATE_SITE_BEGIN( tbb_parallel_for );
-            {
-                start_for a( range, body, partitioner );
-                a.execute();
-            }
-            ANNOTATE_SITE_END( tbb_parallel_for );
-        }
-    }
-};
-
-template< typename Range, typename Body, typename Partitioner >
-void start_for< Range, Body, Partitioner >::execute() {
-    if( !my_range.is_divisible() || !my_partition.is_divisible() ) {
-        ANNOTATE_TASK_BEGIN( tbb_parallel_for_range );
-        {
-            my_body( my_range );
-        }
-        ANNOTATE_TASK_END( tbb_parallel_for_range );
-    } else {
-        typename Partitioner::split_type split_obj;
-        start_for b( *this, split_obj );
-        this->execute(); // Execute the left interval first to keep the serial order.
-        b.execute();     // Execute the right interval then.
-    }
-}
-
-//! Parallel iteration over range with default partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body ) {
-    serial::interface7::start_for<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
-}
-
-//! Parallel iteration over range with simple partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {
-    serial::interface7::start_for<Range,Body,const simple_partitioner>::run(range,body,partitioner);
-}
-
-//! Parallel iteration over range with auto_partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {
-    serial::interface7::start_for<Range,Body,const auto_partitioner>::run(range,body,partitioner);
-}
-
-//! Parallel iteration over range with affinity_partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {
-    serial::interface7::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner);
-}
-
-//! Implementation of parallel iteration over stepped range of integers with explicit step and partitioner (ignored)
-template <typename Index, typename Function, typename Partitioner>
-void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& ) {
-    if (step <= 0 )
-        throw std::invalid_argument( "nonpositive_step" );
-    else if (last > first) {
-        // Above "else" avoids "potential divide by zero" warning on some platforms
-        ANNOTATE_SITE_BEGIN( tbb_parallel_for );
-        for( Index i = first; i < last; i = i + step ) {
-            ANNOTATE_TASK_BEGIN( tbb_parallel_for_iteration );
-            { f( i ); }
-            ANNOTATE_TASK_END( tbb_parallel_for_iteration );
-        }
-        ANNOTATE_SITE_END( tbb_parallel_for );
-    }
-}
-
-//! Parallel iteration over a range of integers with explicit step and default partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, auto_partitioner());
-}
-//! Parallel iteration over a range of integers with explicit step and simple partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& p) {
-    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, step, f, p);
-}
-//! Parallel iteration over a range of integers with explicit step and auto partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& p) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, p);
-}
-//! Parallel iteration over a range of integers with explicit step and affinity partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& p) {
-    parallel_for_impl<Index,Function,affinity_partitioner>(first, last, step, f, p);
-}
-
-//! Parallel iteration over a range of integers with default step and default partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, auto_partitioner());
-}
-//! Parallel iteration over a range of integers with default step and simple partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& p) {
-    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, static_cast<Index>(1), f, p);
-}
-//! Parallel iteration over a range of integers with default step and auto partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& p) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, p);
-}
-//! Parallel iteration over a range of integers with default step and affinity_partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& p) {
-    parallel_for_impl<Index,Function,affinity_partitioner>(first, last, static_cast<Index>(1), f, p);
-}
-
-} // namespace interface7
-
-using interface7::parallel_for;
-
-} // namespace serial
-
-#ifndef __TBB_NORMAL_EXECUTION
-using serial::interface7::parallel_for;
-#endif
-
-} // namespace tbb
-
-#endif /* __TBB_SERIAL_parallel_for_H */
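A usage sketch for the serial stub removed above, not part of the diff. The include path and C++11 lambda support are assumptions; the point is that the serial implementation keeps the parallel signature and runs the iterations in order, making it a drop-in stand-in.

```cpp
// Illustrative only: the integer overload of the removed serial
// tbb::parallel_for. With src/tbb/include on the include path, this
// iterates 0..9 serially and in order.
#include <cstdio>
#include "serial/tbb/parallel_for.h"   // hypothetical consumer include path

int main() {
    tbb::parallel_for(0, 10, 1, [](int i) {   // explicit-step overload
        std::printf("%d\n", i);
    });
    return 0;
}
```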
diff --git a/src/tbb/include/serial/tbb/tbb_annotate.h b/src/tbb/include/serial/tbb/tbb_annotate.h
deleted file mode 100644
index 2024cf644..000000000
--- a/src/tbb/include/serial/tbb/tbb_annotate.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_annotate_H
-#define __TBB_annotate_H
-
-// Macros used by the Intel(R) Parallel Advisor.
-#ifdef __TBB_NORMAL_EXECUTION
-    #define ANNOTATE_SITE_BEGIN( site )
-    #define ANNOTATE_SITE_END( site )
-    #define ANNOTATE_TASK_BEGIN( task )
-    #define ANNOTATE_TASK_END( task )
-    #define ANNOTATE_LOCK_ACQUIRE( lock )
-    #define ANNOTATE_LOCK_RELEASE( lock )
-#else
-    #include <advisor-annotate.h>
-#endif
-
-#endif /* __TBB_annotate_H */
diff --git a/src/tbb/include/tbb/aggregator.h b/src/tbb/include/tbb/aggregator.h
deleted file mode 100644
index 8a28ed0d7..000000000
--- a/src/tbb/include/tbb/aggregator.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB__aggregator_H
-#define __TBB__aggregator_H
-
-#if !TBB_PREVIEW_AGGREGATOR
-#error Set TBB_PREVIEW_AGGREGATOR before including aggregator.h
-#endif
-
-#include "atomic.h"
-#include "tbb_profiling.h"
-
-namespace tbb {
-namespace interface6 {
-
-using namespace tbb::internal;
-
-class aggregator_operation {
-    template<typename handler_type> friend class aggregator_ext;
-    uintptr_t status;
-    aggregator_operation* my_next;
-public:
-    enum aggregator_operation_status { agg_waiting=0, agg_finished };
-    aggregator_operation() : status(agg_waiting), my_next(NULL) {}
-    /// Call start before handling this operation
-    void start() { call_itt_notify(acquired, &status); }
-    /// Call finish when done handling this operation
-    /** The operation will be released to its originating thread, and possibly deleted. */
-    void finish() { itt_store_word_with_release(status, uintptr_t(agg_finished)); }
-    aggregator_operation* next() { return itt_hide_load_word(my_next);}
-    void set_next(aggregator_operation* n) { itt_hide_store_word(my_next, n); }
-};
-
-namespace internal {
-
-class basic_operation_base : public aggregator_operation {
-    friend class basic_handler;
-    virtual void apply_body() = 0;
-public:
-    basic_operation_base() : aggregator_operation() {}
-    virtual ~basic_operation_base() {}
-};
-
-template<typename Body>
-class basic_operation : public basic_operation_base, no_assign {
-    const Body& my_body;
-    /*override*/ void apply_body() { my_body(); }
-public:
-    basic_operation(const Body& b) : basic_operation_base(), my_body(b) {}
-};
-
-class basic_handler {
-public:
-    basic_handler() {}
-    void operator()(aggregator_operation* op_list) const {
-        while (op_list) {
-            // ITT note: &(op_list->status) tag is used to cover accesses to the operation data.
-            // The executing thread "acquires" the tag (see start()) and then performs
-            // the associated operation w/o triggering a race condition diagnostics.
-            // A thread that created the operation is waiting for its status (see execute_impl()),
-            // so when this thread is done with the operation, it will "release" the tag
-            // and update the status (see finish()) to give control back to the waiting thread.
-            basic_operation_base& request = static_cast<basic_operation_base&>(*op_list);
-            // IMPORTANT: need to advance op_list to op_list->next() before calling request.finish()
-            op_list = op_list->next();
-            request.start();
-            request.apply_body();
-            request.finish();
-        }
-    }
-};
-
-} // namespace internal
-
-//! Aggregator base class and expert interface
-/** An aggregator for collecting operations coming from multiple sources and executing
-    them serially on a single thread. */
-template <typename handler_type>
-class aggregator_ext : tbb::internal::no_copy {
-public:
-    aggregator_ext(const handler_type& h) : handler_busy(0), handle_operations(h) { mailbox = NULL; }
-
-    //! EXPERT INTERFACE: Enter a user-made operation into the aggregator's mailbox.
-    /** Details of user-made operations must be handled by user-provided handler */
-    void process(aggregator_operation *op) { execute_impl(*op); }
-
- protected:
-    /** Place operation in mailbox, then either handle mailbox or wait for the operation
-        to be completed by a different thread. */
-    void execute_impl(aggregator_operation& op) {
-        aggregator_operation* res;
-
-        // ITT note: &(op.status) tag is used to cover accesses to this operation. This
-        // thread has created the operation, and now releases it so that the handler
-        // thread may handle the associated operation w/o triggering a race condition;
-        // thus this tag will be acquired just before the operation is handled in the
-        // handle_operations functor.
-        call_itt_notify(releasing, &(op.status));
-        // insert the operation in the queue
-        do {
-            // ITT may flag the following line as a race; it is a false positive:
-            // This is an atomic read; we don't provide itt_hide_load_word for atomics
-            op.my_next = res = mailbox; // NOT A RACE
-        } while (mailbox.compare_and_swap(&op, res) != res);
-        if (!res) { // first in the list; handle the operations
-            // ITT note: &mailbox tag covers access to the handler_busy flag, which this
-            // waiting handler thread will try to set before entering handle_operations.
-            call_itt_notify(acquired, &mailbox);
-            start_handle_operations();
-            __TBB_ASSERT(op.status, NULL);
-        }
-        else { // not first; wait for op to be ready
-            call_itt_notify(prepare, &(op.status));
-            spin_wait_while_eq(op.status, uintptr_t(aggregator_operation::agg_waiting));
-            itt_load_word_with_acquire(op.status);
-        }
-    }
-
- private:
-    //! An atomically updated list (aka mailbox) of aggregator_operations
-    atomic<aggregator_operation *> mailbox;
-
-    //! Controls thread access to handle_operations
-    /** Behaves as boolean flag where 0=false, 1=true */
-    uintptr_t handler_busy;
-
-    handler_type handle_operations;
-
-    //! Trigger the handling of operations when the handler is free
-    void start_handle_operations() {
-        aggregator_operation *pending_operations;
-
-        // ITT note: &handler_busy tag covers access to mailbox as it is passed
-        // between active and waiting handlers. Below, the waiting handler waits until
-        // the active handler releases, and the waiting handler acquires &handler_busy as
-        // it becomes the active_handler. The release point is at the end of this
-        // function, when all operations in mailbox have been handled by the
-        // owner of this aggregator.
-        call_itt_notify(prepare, &handler_busy);
-        // get handler_busy: only one thread can possibly spin here at a time
-        spin_wait_until_eq(handler_busy, uintptr_t(0));
-        call_itt_notify(acquired, &handler_busy);
-        // acquire fence not necessary here due to causality rule and surrounding atomics
-        __TBB_store_with_release(handler_busy, uintptr_t(1));
-
-        // ITT note: &mailbox tag covers access to the handler_busy flag itself.
-        // Capturing the state of the mailbox signifies that handler_busy has been
-        // set and a new active handler will now process that list's operations.
-        call_itt_notify(releasing, &mailbox);
-        // grab pending_operations
-        pending_operations = mailbox.fetch_and_store(NULL);
-
-        // handle all the operations
-        handle_operations(pending_operations);
-
-        // release the handler
-        itt_store_word_with_release(handler_busy, uintptr_t(0));
-    }
-};
-
-//! Basic aggregator interface
-class aggregator : private aggregator_ext<internal::basic_handler> {
-public:
-    aggregator() : aggregator_ext<internal::basic_handler>(internal::basic_handler()) {}
-    //! BASIC INTERFACE: Enter a function for exclusive execution by the aggregator.
-    /** The calling thread stores the function object in a basic_operation and
-        places the operation in the aggregator's mailbox */
-    template<typename Body>
-    void execute(const Body& b) {
-        internal::basic_operation<Body> op(b);
-        this->execute_impl(op);
-    }
-};
-
-} // namespace interface6
-
-using interface6::aggregator;
-using interface6::aggregator_ext;
-using interface6::aggregator_operation;
-
-} // namespace tbb
-
-#endif // __TBB__aggregator_H
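A sketch of the basic interface of the preview aggregator removed above, not part of the diff. All threads funnel closures through `execute()`; one thread at a time drains the mailbox, so the closures never run concurrently and the shared state they touch needs no lock of its own. C++11 lambda support is assumed.

```cpp
// Illustrative only: serializing updates through the removed tbb::aggregator.
#define TBB_PREVIEW_AGGREGATOR 1
#include "tbb/aggregator.h"

int main() {
    tbb::aggregator agg;
    int total = 0;                        // protected by the aggregator
    for (int i = 0; i < 10; ++i)
        agg.execute([&total, i] { total += i; });
    return total == 45 ? 0 : 1;
}
```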
diff --git a/src/tbb/include/tbb/aligned_space.h b/src/tbb/include/tbb/aligned_space.h
deleted file mode 100644
index d2015972d..000000000
--- a/src/tbb/include/tbb/aligned_space.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_aligned_space_H
-#define __TBB_aligned_space_H
-
-#include "tbb_stddef.h"
-#include "tbb_machine.h"
-
-namespace tbb {
-
-//! Block of space aligned sufficiently to construct an array T with N elements.
-/** The elements are not constructed or destroyed by this class.
-    @ingroup memory_allocation */
-template<typename T,size_t N>
-class aligned_space {
-private:
-    typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type;
-    element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)];
-public:
-    //! Pointer to beginning of array
-    T* begin() {return internal::punned_cast<T*>(this);}
-
-    //! Pointer to one past last element in array.
-    T* end() {return begin()+N;}
-};
-
-} // namespace tbb
-
-#endif /* __TBB_aligned_space_H */
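For reference, a minimal sketch of the removed `aligned_space`, not part of the diff: it only reserves aligned raw storage, so the caller constructs and destroys elements manually.

```cpp
// Illustrative only: placement-constructing into tbb::aligned_space.
#include <new>
#include "tbb/aligned_space.h"

struct Widget { int id; explicit Widget(int i) : id(i) {} };

int main() {
    tbb::aligned_space<Widget, 4> space;        // uninitialized, aligned storage
    for (int i = 0; i < 4; ++i)
        ::new (space.begin() + i) Widget(i);    // construct in place
    int last = space.begin()[3].id;
    for (Widget* w = space.begin(); w != space.end(); ++w)
        w->~Widget();                           // manual destruction
    return last == 3 ? 0 : 1;
}
```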
diff --git a/src/tbb/include/tbb/atomic.h b/src/tbb/include/tbb/atomic.h
deleted file mode 100644
index 45bf31740..000000000
--- a/src/tbb/include/tbb/atomic.h
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_atomic_H
-#define __TBB_atomic_H
-
-#include <cstddef>
-
-#if _MSC_VER
-#define __TBB_LONG_LONG __int64
-#else
-#define __TBB_LONG_LONG long long
-#endif /* _MSC_VER */
-
-#include "tbb_machine.h"
-
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-    // Workaround for overzealous compiler warnings
-    #pragma warning (push)
-    #pragma warning (disable: 4244 4267 4512)
-#endif
-
-namespace tbb {
-
-//! Specifies memory semantics.
-enum memory_semantics {
-    //! Sequential consistency
-    full_fence,
-    //! Acquire
-    acquire,
-    //! Release
-    release,
-    //! No ordering
-    relaxed
-};
-
-//! @cond INTERNAL
-namespace internal {
-
-#if __TBB_ATTRIBUTE_ALIGNED_PRESENT
-    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a)));
-#elif __TBB_DECLSPEC_ALIGN_PRESENT
-    #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
-#else
-    #error Do not know syntax for forcing alignment.
-#endif
-
-template<size_t S>
-struct atomic_rep;           // Primary template declared, but never defined.
-
-template<>
-struct atomic_rep<1> {       // Specialization
-    typedef int8_t word;
-};
-template<>
-struct atomic_rep<2> {       // Specialization
-    typedef int16_t word;
-};
-template<>
-struct atomic_rep<4> {       // Specialization
-#if _MSC_VER && !_WIN64
-    // Work-around that avoids spurious /Wp64 warnings
-    typedef intptr_t word;
-#else
-    typedef int32_t word;
-#endif
-};
-#if __TBB_64BIT_ATOMICS
-template<>
-struct atomic_rep<8> {       // Specialization
-    typedef int64_t word;
-};
-#endif
-
-template<typename value_type, size_t size>
-struct aligned_storage;
-
-//the specializations are needed to please MSVC syntax of __declspec(align()) which accept _literal_ constants only
-#if __TBB_ATOMIC_CTORS
-    #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S)                  \
-    template<typename value_type>                                     \
-    struct aligned_storage<value_type,S> {                            \
-        __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S)                \
-        aligned_storage() = default ;                                 \
-        constexpr aligned_storage(value_type value):my_value(value){} \
-    };                                                                \
-
-#else
-    #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S)                  \
-    template<typename value_type>                                     \
-    struct aligned_storage<value_type,S> {                            \
-        __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S)                \
-    };                                                                \
-
-#endif
-
-template<typename value_type>
-struct aligned_storage<value_type,1> {
-    value_type my_value;
-#if __TBB_ATOMIC_CTORS
-    aligned_storage() = default ;
-    constexpr aligned_storage(value_type value):my_value(value){}
-#endif
-};
-
-ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2)
-ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4)
-#if __TBB_64BIT_ATOMICS
-ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8)
-#endif
-
-template<size_t S, memory_semantics M>
-struct atomic_traits;        // Primary template declared, but not defined.
-
-#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                                                  \
-    template<> struct atomic_traits<S,M> {                                                        \
-        typedef atomic_rep<S>::word word;                                                         \
-        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
-            return __TBB_machine_cmpswp##S##M(location,new_value,comparand);                      \
-        }                                                                                         \
-        inline static word fetch_and_add( volatile void* location, word addend ) {               \
-            return __TBB_machine_fetchadd##S##M(location,addend);                                 \
-        }                                                                                         \
-        inline static word fetch_and_store( volatile void* location, word value ) {              \
-            return __TBB_machine_fetchstore##S##M(location,value);                                \
-        }                                                                                         \
-    };
-
-#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                                           \
-    template<memory_semantics M>                                                                  \
-    struct atomic_traits<S,M> {                                                                   \
-        typedef atomic_rep<S>::word word;                                                         \
-        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \
-            return __TBB_machine_cmpswp##S(location,new_value,comparand);                         \
-        }                                                                                         \
-        inline static word fetch_and_add( volatile void* location, word addend ) {               \
-            return __TBB_machine_fetchadd##S(location,addend);                                    \
-        }                                                                                         \
-        inline static word fetch_and_store( volatile void* location, word value ) {              \
-            return __TBB_machine_fetchstore##S(location,value);                                   \
-        }                                                                                         \
-    };
-
-template<memory_semantics M>
-struct atomic_load_store_traits;    // Primary template declaration
-
-#define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M)                      \
-    template<> struct atomic_load_store_traits<M> {                     \
-        template <typename T>                                           \
-        inline static T load( const volatile T& location ) {            \
-            return __TBB_load_##M( location );                          \
-        }                                                               \
-        template <typename T>                                           \
-        inline static void store( volatile T& location, T value ) {     \
-            __TBB_store_##M( location, value );                         \
-        }                                                               \
-    }
-
-#if __TBB_USE_FENCED_ATOMICS
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed)
-#if __TBB_64BIT_ATOMICS
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
-__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed)
-#endif
-#else /* !__TBB_USE_FENCED_ATOMICS */
-__TBB_DECL_ATOMIC_PRIMITIVES(1)
-__TBB_DECL_ATOMIC_PRIMITIVES(2)
-__TBB_DECL_ATOMIC_PRIMITIVES(4)
-#if __TBB_64BIT_ATOMICS
-__TBB_DECL_ATOMIC_PRIMITIVES(8)
-#endif
-#endif /* !__TBB_USE_FENCED_ATOMICS */
-
-__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence);
-__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire);
-__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release);
-__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed);
-
-//! Additive inverse of 1 for type T.
-/** Various compilers issue various warnings if -1 is used with various integer types.
-    The baroque expression below avoids all the warnings (we hope). */
-#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
-
-//! Base class that provides basic functionality for atomic without fetch_and_add.
-/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor,
-    and can be copied/compared by memcpy/memcmp. */
-template<typename T>
-struct atomic_impl {
-protected:
-    aligned_storage<T,sizeof(T)> my_storage;
-private:
-    //TODO: rechecks on recent versions of gcc if union is still the _only_ way to do a conversion without warnings
-    //! Union type used to convert type T to underlying integral type.
-    template<typename value_type>
-    union converter {
-        typedef typename atomic_rep<sizeof(value_type)>::word bits_type;
-        converter(){}
-        converter(value_type a_value) : value(a_value) {}
-        value_type value;
-        bits_type bits;
-    };
-
-    template<typename value_t>
-    static typename converter<value_t>::bits_type to_bits(value_t value){
-        return converter<value_t>(value).bits;
-    }
-    template<typename value_t>
-    static value_t to_value(typename converter<value_t>::bits_type bits){
-        converter<value_t> u;
-        u.bits = bits;
-        return u.value;
-    }
-
-    template <typename value_t>
-    union ptr_converter;            //Primary template declared, but never defined.
-
-    template <typename value_t>
-    union ptr_converter<value_t *> {
-        ptr_converter(){}
-        ptr_converter(value_t* a_value) : value(a_value) {}
-        value_t* value;
-        uintptr_t bits;
-    };
-    //TODO: check if making to_bits accepting reference (thus unifying it with to_bits_ref)
-    //does not hurt performance
-    template <typename value_t>
-    static typename converter<value_t>::bits_type & to_bits_ref(value_t& value){
-        //TODO: this #ifdef is temporary workaround, as union conversion seems to fail
-        //on suncc for 64 bit types for 32 bit target
-        #if !__SUNPRO_CC
-            return *(typename converter<value_t>::bits_type*)ptr_converter<value_t*>(&value).bits;
-        #else
-            return *(typename converter<value_t>::bits_type*)(&value);
-        #endif
-    }
-
-
-public:
-    typedef T value_type;
-
-#if __TBB_ATOMIC_CTORS
-    atomic_impl() = default ;
-    constexpr atomic_impl(value_type value):my_storage(value){}
-#endif
-    template<memory_semantics M>
-    value_type fetch_and_store( value_type value ) {
-        return to_value<value_type>(
-            internal::atomic_traits<sizeof(value_type),M>::fetch_and_store( &my_storage.my_value, to_bits(value) )
-        );
-    }
-
-    value_type fetch_and_store( value_type value ) {
-        return fetch_and_store<full_fence>(value);
-    }
-
-    template<memory_semantics M>
-    value_type compare_and_swap( value_type value, value_type comparand ) {
-        return to_value<value_type>(
-            internal::atomic_traits<sizeof(value_type),M>::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) )
-        );
-    }
-
-    value_type compare_and_swap( value_type value, value_type comparand ) {
-        return compare_and_swap<full_fence>(value,comparand);
-    }
-
-    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility
-        return to_value<value_type>(
-            __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) )
-        );
-    }
-
-    template<memory_semantics M>
-    value_type load () const {
-        return to_value<value_type>(
-            internal::atomic_load_store_traits<M>::load( to_bits_ref(my_storage.my_value) )
-        );
-    }
-
-    value_type load () const {
-        return load<acquire>();
-    }
-
-    template<memory_semantics M>
-    void store ( value_type value ) {
-        internal::atomic_load_store_traits<M>::store( to_bits_ref(my_storage.my_value), to_bits(value));
-    }
-
-    void store ( value_type value ) {
-        store<release>( value );
-    }
-
-protected:
-    value_type store_with_release( value_type rhs ) {
-        //TODO: unify with store<release>
-        __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) );
-        return rhs;
-    }
-};
-
-//! Base class that provides basic functionality for atomic with fetch_and_add.
-/** I is the underlying type.
-    D is the difference type.
-    StepType should be char if I is an integral type, and T if I is a T*. */
-template<typename I, typename D, typename StepType>
-struct atomic_impl_with_arithmetic: atomic_impl<I> {
-public:
-    typedef I value_type;
-#if __TBB_ATOMIC_CTORS
-    atomic_impl_with_arithmetic() = default ;
-    constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl<I>(value){}
-#endif
-    template<memory_semantics M>
-    value_type fetch_and_add( D addend ) {
-        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) ));
-    }
-
-    value_type fetch_and_add( D addend ) {
-        return fetch_and_add<full_fence>(addend);
-    }
-
-    template<memory_semantics M>
-    value_type fetch_and_increment() {
-        return fetch_and_add<M>(1);
-    }
-
-    value_type fetch_and_increment() {
-        return fetch_and_add(1);
-    }
-
-    template<memory_semantics M>
-    value_type fetch_and_decrement() {
-        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
-    }
-
-    value_type fetch_and_decrement() {
-        return fetch_and_add(__TBB_MINUS_ONE(D));
-    }
-
-public:
-    value_type operator+=( D value ) {
-        return fetch_and_add(value)+value;
-    }
-
-    value_type operator-=( D value ) {
-        // Additive inverse of value computed using binary minus,
-        // instead of unary minus, for sake of avoiding compiler warnings.
-        return operator+=(D(0)-value);
-    }
-
-    value_type operator++() {
-        return fetch_and_add(1)+1;
-    }
-
-    value_type operator--() {
-        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
-    }
-
-    value_type operator++(int) {
-        return fetch_and_add(1);
-    }
-
-    value_type operator--(int) {
-        return fetch_and_add(__TBB_MINUS_ONE(D));
-    }
-};
-
-} /* Internal */
-//! @endcond
-
-//! Primary template for atomic.
-/** See the Reference for details.
-    @ingroup synchronization */
-template<typename T>
-struct atomic: internal::atomic_impl<T> {
-#if __TBB_ATOMIC_CTORS
-    atomic() = default;
-    constexpr atomic(T arg): internal::atomic_impl<T>(arg) {}
-#endif
-    T operator=( T rhs ) {
-        // "this" required here in strict ISO C++ because store_with_release is a dependent name
-        return this->store_with_release(rhs);
-    }
-    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
-};
-
-#if __TBB_ATOMIC_CTORS
-    #define __TBB_DECL_ATOMIC(T)                                                        \
-        template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
-            atomic() = default;                                                         \
-            constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
-                                                                                        \
-            T operator=( T rhs ) {return store_with_release(rhs);}                      \
-            atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
-        };
-#else
-    #define __TBB_DECL_ATOMIC(T)                                                        \
-        template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
-            T operator=( T rhs ) {return store_with_release(rhs);}                      \
-            atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
-        };
-#endif
-
-#if __TBB_64BIT_ATOMICS
-//TODO: consider adding non-default (and atomic) copy constructor for 32bit platform
-__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
-__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
-#else
-// test_atomic will verify that sizeof(long long)==8
-#endif
-__TBB_DECL_ATOMIC(long)
-__TBB_DECL_ATOMIC(unsigned long)
-
-#if _MSC_VER && !_WIN64
-#if __TBB_ATOMIC_CTORS
-/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
-   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
-   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
-   type synonyms on the platform.  Type U should be the wider variant of T from the
-   perspective of /Wp64. */
-#define __TBB_DECL_ATOMIC_ALT(T,U) \
-    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
-        atomic() = default ;                                                        \
-        constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
-        T operator=( U rhs ) {return store_with_release(T(rhs));}                   \
-        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
-    };
-#else
-#define __TBB_DECL_ATOMIC_ALT(T,U) \
-    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
-        T operator=( U rhs ) {return store_with_release(T(rhs));}                   \
-        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
-    };
-#endif
-__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
-__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
-#else
-__TBB_DECL_ATOMIC(unsigned)
-__TBB_DECL_ATOMIC(int)
-#endif /* _MSC_VER && !_WIN64 */
-
-__TBB_DECL_ATOMIC(unsigned short)
-__TBB_DECL_ATOMIC(short)
-__TBB_DECL_ATOMIC(char)
-__TBB_DECL_ATOMIC(signed char)
-__TBB_DECL_ATOMIC(unsigned char)
-
-#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)
-__TBB_DECL_ATOMIC(wchar_t)
-#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */
-
-//! Specialization for atomic<T*> with arithmetic and operator->.
-template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
-#if __TBB_ATOMIC_CTORS
-    atomic() = default ;
-    constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {}
-#endif
-    T* operator=( T* rhs ) {
-        // "this" required here in strict ISO C++ because store_with_release is a dependent name
-        return this->store_with_release(rhs);
-    }
-    atomic<T*>& operator=( const atomic<T*>& rhs ) {
-        this->store_with_release(rhs); return *this;
-    }
-    T* operator->() const {
-        return (*this);
-    }
-};
-
-//! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
-template<> struct atomic<void*>: internal::atomic_impl<void*> {
-#if __TBB_ATOMIC_CTORS
-    atomic() = default ;
-    constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {}
-#endif
-    void* operator=( void* rhs ) {
-        // "this" required here in strict ISO C++ because store_with_release is a dependent name
-        return this->store_with_release(rhs);
-    }
-    atomic<void*>& operator=( const atomic<void*>& rhs ) {
-        this->store_with_release(rhs); return *this;
-    }
-};
-
-// Helpers to workaround ugly syntax of calling template member function of a
-// template class with template argument dependent on template parameters.
-
-template <memory_semantics M, typename T>
-T load ( const atomic<T>& a ) { return a.template load<M>(); }
-
-template <memory_semantics M, typename T>
-void store ( atomic<T>& a, T value ) { a.template store<M>(value); }
-
-namespace interface6{
-//! Make an atomic for use in an initialization (list), as an alternative to zero-initialization or normal assignment.
-template<typename T>
-atomic<T> make_atomic(T t) {
-    atomic<T> a;
-    store<relaxed>(a,t);
-    return a;
-}
-}
-using interface6::make_atomic;
-
-namespace internal {
-template<memory_semantics M, typename T>
-void swap(atomic<T> & lhs, atomic<T> & rhs){
-    T tmp = load<M>(lhs);
-    store<M>(lhs,load<M>(rhs));
-    store<M>(rhs,tmp);
-}
-
-// only to aid in the gradual conversion of ordinary variables to proper atomics
-template<typename T>
-inline atomic<T>& as_atomic( T& t ) {
-    return (atomic<T>&)t;
-}
-} // namespace tbb::internal
-
-} // namespace tbb
-
-#if _MSC_VER && !__INTEL_COMPILER
-    #pragma warning (pop)
-#endif // warnings 4244, 4267 are back
-
-#endif /* __TBB_atomic_H */
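A sketch of the removed `tbb::atomic` in its most common role, not part of the diff: a shared counter updated without a lock. Per the header above, `fetch_and_add()` defaults to a full fence, assignment stores with release semantics, and a plain read loads with acquire semantics.

```cpp
// Illustrative only: the legacy tbb::atomic counter idiom.
#include "tbb/atomic.h"

int main() {
    tbb::atomic<int> counter;
    counter = 0;                       // store with release semantics
    for (int i = 0; i < 100; ++i)
        counter.fetch_and_add(1);      // atomic read-modify-write, full fence
    int seen = counter;                // load with acquire semantics
    return seen == 100 ? 0 : 1;
}
```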
diff --git a/src/tbb/include/tbb/blocked_range.h b/src/tbb/include/tbb/blocked_range.h
index 4b95bf17c..316ec01ba 100644
--- a/src/tbb/include/tbb/blocked_range.h
+++ b/src/tbb/include/tbb/blocked_range.h
@@ -1,159 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
 
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_blocked_range_H
-#define __TBB_blocked_range_H
-
-#include "tbb_stddef.h"
-
-namespace tbb {
-
-/** \page range_req Requirements on range concept
-    Class \c R implementing the concept of range must define:
-    - \code R::R( const R& ); \endcode               Copy constructor
-    - \code R::~R(); \endcode                        Destructor
-    - \code bool R::is_divisible() const; \endcode   True if range can be partitioned into two subranges
-    - \code bool R::empty() const; \endcode          True if range is empty
-    - \code R::R( R& r, split ); \endcode            Split range \c r into two subranges.
-**/
-
-//! A range over which to iterate.
-/** @ingroup algorithms */
-template<typename Value>
-class blocked_range {
-public:
-    //! Type of a value
-    /** Called a const_iterator for sake of algorithms that need to treat a blocked_range
-        as an STL container. */
-    typedef Value const_iterator;
-
-    //! Type for size of a range
-    typedef std::size_t size_type;
-
-    //! Construct range with default-constructed values for begin and end.
-    /** Requires that Value have a default constructor. */
-    blocked_range() : my_end(), my_begin() {}
-
-    //! Construct range over half-open interval [begin,end), with the given grainsize.
-    blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) :
-        my_end(end_), my_begin(begin_), my_grainsize(grainsize_)
-    {
-        __TBB_ASSERT( my_grainsize>0, "grainsize must be positive" );
-    }
-
-    //! Beginning of range.
-    const_iterator begin() const {return my_begin;}
-
-    //! One past last value in range.
-    const_iterator end() const {return my_end;}
-
-    //! Size of the range
-    /** Unspecified if end()<begin(). */
-    size_type size() const {
-        __TBB_ASSERT( !(end()<begin()), "size() unspecified if end()<begin()" );
-        return size_type(my_end-my_begin);
-    }
-
-    //! The grain size for this range.
-    size_type grainsize() const {return my_grainsize;}
-
-    //------------------------------------------------------------------------
-    // Methods that implement Range concept
-    //------------------------------------------------------------------------
-
-    //! True if range is empty.
-    bool empty() const {return !(my_begin<my_end);}
-
-    //! True if range is divisible.
-    /** Unspecified if end()<begin(). */
-    bool is_divisible() const {return my_grainsize<size();}
-
-    //! Split range.
-    /** The new Range *this has the second part, the old range r has the first part.
-        Unspecified if end()<begin() or !is_divisible(). */
-    blocked_range( blocked_range& r, split ) :
-        my_end(r.my_end),
-        my_begin(do_split(r, split())),
-        my_grainsize(r.my_grainsize)
-    {}
-
-#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES
-    //! Static field to support proportional split
-    static const bool is_divisible_in_proportion = true;
-
-    //! Split range according to the given proportion.
-    /** The new Range *this has the second part, the old range r has the first part.
-        Unspecified if end()<begin() or !is_divisible(). */
-    blocked_range( blocked_range& r, proportional_split& proportion ) :
-        my_end(r.my_end),
-        my_begin(do_split(r, proportion)),
-        my_grainsize(r.my_grainsize)
-    {}
-#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
-
-private:
-    /** NOTE: my_end MUST be declared before my_begin, otherwise the splitting constructor will break. */
-    Value my_end;
-    Value my_begin;
-    size_type my_grainsize;
-
-    //! Auxiliary function used by the splitting constructor.
-    static Value do_split( blocked_range& r, split ) {
-        __TBB_ASSERT( r.is_divisible(), "cannot split blocked_range that is not divisible" );
-        Value middle = r.my_begin + (r.my_end-r.my_begin)/2u;
-        r.my_end = middle;
-        return middle;
-    }
-
-#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES
-    //! Auxiliary function used by the proportional splitting constructor.
-    static Value do_split( blocked_range& r, proportional_split& proportion ) {
-        __TBB_ASSERT( r.is_divisible(), "cannot split blocked_range that is not divisible" );
-        size_type right_part = size_type(float(r.size()) * float(proportion.right()) /
-                                         float(proportion.left() + proportion.right()) + 0.5f);
-        Value middle = r.my_end - right_part;
-        r.my_end = middle;
-        return middle;
-    }
-#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
-
-    template<typename RowValue, typename ColValue>
-    friend class blocked_range2d;
-
-    template<typename RowValue, typename ColValue, typename PageValue>
-    friend class blocked_range3d;
-};
-
-} // namespace tbb
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-
-#endif /* __TBB_blocked_range_H */
+#include "../oneapi/tbb/blocked_range.h"
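A sketch of the Range-concept behavior documented in the removed header, not part of the diff. Note the convention from the splitting constructor: the new object takes the second half, the old object keeps the first.

```cpp
// Illustrative only: splitting a blocked_range in two.
#include "tbb/blocked_range.h"   // also brings in tbb::split via tbb_stddef.h

int main() {
    tbb::blocked_range<int> whole(0, 100, 10);   // [0,100), grainsize 10
    if (!whole.is_divisible()) return 1;
    tbb::blocked_range<int> right(whole, tbb::split());  // whole keeps the left part
    return (whole.end() == right.begin() &&
            whole.size() + right.size() == 100) ? 0 : 1;
}
```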
diff --git a/src/tbb/include/tbb/blocked_range2d.h b/src/tbb/include/tbb/blocked_range2d.h
index 230a94ae7..1e1324078 100644
--- a/src/tbb/include/tbb/blocked_range2d.h
+++ b/src/tbb/include/tbb/blocked_range2d.h
@@ -1,108 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
 
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_blocked_range2d_H
-#define __TBB_blocked_range2d_H
-
-#include "tbb_stddef.h"
-#include "blocked_range.h"
-
-namespace tbb {
-
-//! A 2-dimensional range that models the Range concept.
-/** @ingroup algorithms */
-template<typename RowValue, typename ColValue=RowValue>
-class blocked_range2d {
-public:
-    //! Type for size of an iteration range
-    typedef blocked_range<RowValue> row_range_type;
-    typedef blocked_range<ColValue> col_range_type;
-
-private:
-    row_range_type my_rows;
-    col_range_type my_cols;
-
-public:
-
-    blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize,
-                     ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) :
-        my_rows(row_begin,row_end,row_grainsize),
-        my_cols(col_begin,col_end,col_grainsize)
-    {
-    }
-
-    blocked_range2d( RowValue row_begin, RowValue row_end,
-                     ColValue col_begin, ColValue col_end ) :
-        my_rows(row_begin,row_end),
-        my_cols(col_begin,col_end)
-    {
-    }
+
+        http://www.apache.org/licenses/LICENSE-2.0
-
-    //! True if range is empty
-    bool empty() const {
-        // Yes, it is a logical OR here, not AND.
-        return my_rows.empty() || my_cols.empty();
-    }
-
-    //! True if range is divisible into two pieces.
-    bool is_divisible() const {
-        return my_rows.is_divisible() || my_cols.is_divisible();
-    }
-
-    blocked_range2d( blocked_range2d& r, split ) :
-        my_rows(r.my_rows),
-        my_cols(r.my_cols)
-    {
-        split split_obj;
-        do_split(r, split_obj);
-    }
-
-#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES
-    //! Static field to support proportional split
-    static const bool is_divisible_in_proportion = true;
-
-    blocked_range2d( blocked_range2d& r, proportional_split& proportion ) :
-        my_rows(r.my_rows),
-        my_cols(r.my_cols)
-    {
-        do_split(r, proportion);
-    }
-#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
-
-    template<typename Split>
-    void do_split( blocked_range2d& r, Split& split_obj )
-    {
-        if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {
-            my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);
-        } else {
-            my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj);
-        }
-    }
-
-    //! The rows of the iteration space
-    const row_range_type& rows() const {return my_rows;}
-
-    //! The columns of the iteration space
-    const col_range_type& cols() const {return my_cols;}
-};
-
-} // namespace tbb
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-
-#endif /* __TBB_blocked_range2d_H */
+#include "../oneapi/tbb/blocked_range2d.h"
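A sketch for the 2-D range above, not part of the diff; the defaulted second template parameter is a reconstruction from the damaged source. Emptiness and divisibility each combine the two dimensions with a logical OR, exactly as the comments in the removed header note.

```cpp
// Illustrative only: a 2-D range over a 4096x4096 grid.
#include "tbb/blocked_range2d.h"

int main() {
    tbb::blocked_range2d<size_t> grid(0, 4096, 16,    // rows, grainsize 16
                                      0, 4096, 16);   // cols, grainsize 16
    bool ok = !grid.empty()
           && grid.is_divisible()
           && grid.rows().size() == 4096
           && grid.cols().size() == 4096;
    return ok ? 0 : 1;
}
```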
diff --git a/src/tbb/include/tbb/blocked_range3d.h b/src/tbb/include/tbb/blocked_range3d.h
index 1557d72d0..332197966 100644
--- a/src/tbb/include/tbb/blocked_range3d.h
+++ b/src/tbb/include/tbb/blocked_range3d.h
@@ -1,128 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
 
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_blocked_range3d_H
-#define __TBB_blocked_range3d_H
-
-#include "tbb_stddef.h"
-#include "blocked_range.h"
-
-namespace tbb {
-
-//! A 3-dimensional range that models the Range concept.
-/** @ingroup algorithms */
-template<typename PageValue, typename RowValue=PageValue, typename ColValue=RowValue>
-class blocked_range3d {
-public:
-    //! Type for size of an iteration range
-    typedef blocked_range<PageValue> page_range_type;
-    typedef blocked_range<RowValue> row_range_type;
-    typedef blocked_range<ColValue> col_range_type;
-
-private:
-    page_range_type my_pages;
-    row_range_type my_rows;
-    col_range_type my_cols;
-
-public:
-
-    blocked_range3d( PageValue page_begin, PageValue page_end,
-                     RowValue row_begin, RowValue row_end,
-                     ColValue col_begin, ColValue col_end ) :
-        my_pages(page_begin,page_end),
-        my_rows(row_begin,row_end),
-        my_cols(col_begin,col_end)
-    {
-    }
-
-    blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize,
-                     RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize,
-                     ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) :
-        my_pages(page_begin,page_end,page_grainsize),
-        my_rows(row_begin,row_end,row_grainsize),
-        my_cols(col_begin,col_end,col_grainsize)
-    {
-    }
-
-    //! True if range is empty
-    bool empty() const {
-        // Yes, it is a logical OR here, not AND.
-        return my_pages.empty() || my_rows.empty() || my_cols.empty();
-    }
+
+        http://www.apache.org/licenses/LICENSE-2.0
-
-    //! True if range is divisible into two pieces.
-    bool is_divisible() const {
-        return my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible();
-    }
-
-    blocked_range3d( blocked_range3d& r, split ) :
-        my_pages(r.my_pages),
-        my_rows(r.my_rows),
-        my_cols(r.my_cols)
-    {
-        split split_obj;
-        do_split(r, split_obj);
-    }
-
-#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES
-    //! Static field to support proportional split
-    static const bool is_divisible_in_proportion = true;
-
-    blocked_range3d( blocked_range3d& r, proportional_split& proportion ) :
-        my_pages(r.my_pages),
-        my_rows(r.my_rows),
-        my_cols(r.my_cols)
-    {
-        do_split(r, proportion);
-    }
-#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
-
-    template<typename Split>
-    void do_split( blocked_range3d& r, Split& split_obj)
-    {
-        if ( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) {
-            if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {
-                my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);
-            } else {
-                my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj);
-            }
-        } else {
-            if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) {
-                my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);
-            } else {
-                my_pages.my_begin = page_range_type::do_split(r.my_pages, split_obj);
-            }
-        }
-    }
-
-    //! The pages of the iteration space
-    const page_range_type& pages() const {return my_pages;}
-
-    //! The rows of the iteration space
-    const row_range_type& rows() const {return my_rows;}
-
-    //! The columns of the iteration space
-    const col_range_type& cols() const {return my_cols;}
-
-};
-
-} // namespace tbb
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/ -#endif /* __TBB_blocked_range3d_H */ +#include "../oneapi/tbb/blocked_range3d.h" diff --git a/src/tbb/include/tbb/blocked_rangeNd.h b/src/tbb/include/tbb/blocked_rangeNd.h new file mode 100644 index 000000000..0c0fb7303 --- /dev/null +++ b/src/tbb/include/tbb/blocked_rangeNd.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2017-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../oneapi/tbb/blocked_rangeNd.h" diff --git a/src/tbb/include/tbb/cache_aligned_allocator.h b/src/tbb/include/tbb/cache_aligned_allocator.h index 253ef7b2e..2d3c66a74 100644 --- a/src/tbb/include/tbb/cache_aligned_allocator.h +++ b/src/tbb/include/tbb/cache_aligned_allocator.h @@ -1,137 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_cache_aligned_allocator_H -#define __TBB_cache_aligned_allocator_H - -#include <new> -#include "tbb_stddef.h" -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include <utility> // std::forward -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! Cache/sector line size. - /** @ingroup memory_allocation */ - size_t __TBB_EXPORTED_FUNC NFS_GetLineSize(); - - //! Allocate memory on cache/sector line boundary. - /** @ingroup memory_allocation */ - void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint ); - - //! Free memory allocated by NFS_Allocate. - /** Freeing a NULL pointer is allowed, but has no effect. - @ingroup memory_allocation */ - void __TBB_EXPORTED_FUNC NFS_Free( void* ); -} -//!
@endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. @ingroup memory_allocation */ -template<typename T> -class cache_aligned_allocator { -public: - typedef typename internal::allocator_type<T>::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template<typename U> struct rebind { - typedef cache_aligned_allocator<U> other; - }; - - cache_aligned_allocator() throw() {} - cache_aligned_allocator( const cache_aligned_allocator& ) throw() {} - template<typename U> cache_aligned_allocator(const cache_aligned_allocator<U>&) throw() {} + http://www.apache.org/licenses/LICENSE-2.0 - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects, starting on a cache/sector line. - pointer allocate( size_type n, const void* hint=0 ) { - // The "hint" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt - return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast<void*>(hint) )); - } - - //! Free block of memory that starts on a cache line - void deallocate( pointer p, size_type ) { - internal::NFS_Free(p); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type); - } - - //! Copy-construct value at location pointed to by p. -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - template<typename U, typename... Args> - void construct(U *p, Args&&... args) - { ::new((void *)p) U(std::forward<Args>(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} -#endif - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class cache_aligned_allocator<void> { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template<typename U> struct rebind { - typedef cache_aligned_allocator<U> other; - }; -}; - -template<typename T, typename U> -inline bool operator==( const cache_aligned_allocator<T>&, const cache_aligned_allocator<U>& ) {return true;} - -template<typename T, typename U> -inline bool operator!=( const cache_aligned_allocator<T>&, const cache_aligned_allocator<U>& ) {return false;} - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ -#endif /* __TBB_cache_aligned_allocator_H */ +#include "../oneapi/tbb/cache_aligned_allocator.h" diff --git a/src/tbb/include/tbb/collaborative_call_once.h b/src/tbb/include/tbb/collaborative_call_once.h new file mode 100644 index 000000000..68cbbcddb --- /dev/null +++ b/src/tbb/include/tbb/collaborative_call_once.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../oneapi/tbb/collaborative_call_once.h" diff --git a/src/tbb/include/tbb/combinable.h b/src/tbb/include/tbb/combinable.h index 566606d6f..50295ec72 100644 --- a/src/tbb/include/tbb/combinable.h +++ b/src/tbb/include/tbb/combinable.h @@ -1,72 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_combinable_H -#define __TBB_combinable_H - -#include "enumerable_thread_specific.h" -#include "cache_aligned_allocator.h" - -namespace tbb { -/** \name combinable - **/ -//@{ -//! Thread-local storage with optional reduction -/** @ingroup containers */ - template <typename T> - class combinable { - private: - typedef typename tbb::cache_aligned_allocator<T> my_alloc; - - typedef typename tbb::enumerable_thread_specific<T, my_alloc, ets_no_key> my_ets_type; - my_ets_type my_ets; - - public: - - combinable() { } - - template <typename finit> - combinable( finit _finit) : my_ets(_finit) { } - - //!
destructor - ~combinable() { - } + http://www.apache.org/licenses/LICENSE-2.0 - combinable(const combinable& other) : my_ets(other.my_ets) { } - - combinable & operator=( const combinable & other) { my_ets = other.my_ets; return *this; } - - void clear() { my_ets.clear(); } - - T& local() { return my_ets.local(); } - - T& local(bool & exists) { return my_ets.local(exists); } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); } + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ - }; -} // namespace tbb -#endif /* __TBB_combinable_H */ +#include "../oneapi/tbb/combinable.h" diff --git a/src/tbb/include/tbb/compat/condition_variable b/src/tbb/include/tbb/compat/condition_variable deleted file mode 100644 index 89c2ccf55..000000000 --- a/src/tbb/include/tbb/compat/condition_variable +++ /dev/null @@ -1,457 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_condition_variable_H -#define __TBB_condition_variable_H - -#if _WIN32||_WIN64 -#include "../machine/windows_api.h" - -namespace tbb { -namespace interface5 { -namespace internal { -struct condition_variable_using_event -{ - //! Event for blocking waiting threads. - HANDLE event; - //! Protects invariants involving n_waiters, release_count, and epoch. - CRITICAL_SECTION mutex; - //! Number of threads waiting on this condition variable - int n_waiters; - //! Number of threads remaining that should no longer wait on this condition variable. - int release_count; - //! To keep threads from waking up prematurely with earlier signals. 
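/* The `epoch` member declared just below is what the comment above refers to:
   waiters note the current epoch before blocking so they can tell a fresh
   signal from one that predates their wait. oneTBB requires C++11, so this
   whole event-based emulation is superseded by the standard facility. A
   minimal sketch of the replacement (std::condition_variable is standard
   C++11; the `ready` flag and function names here are illustrative only):

       #include <condition_variable>
       #include <mutex>

       std::mutex m;
       std::condition_variable cv;
       bool ready = false;                     // protected by m

       void consumer() {
           std::unique_lock<std::mutex> lk(m);
           cv.wait(lk, [] { return ready; });  // predicated wait; immune to spurious wakeups
       }

       void producer() {
           { std::lock_guard<std::mutex> lk(m); ready = true; }
           cv.notify_one();
       }
*/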
- unsigned epoch; -}; -}}} // namespace tbb::interface5::internal - -#ifndef CONDITION_VARIABLE_INIT -typedef void* CONDITION_VARIABLE; -typedef CONDITION_VARIABLE* PCONDITION_VARIABLE; -#endif - -#else /* if not _WIN32||_WIN64 */ -#include // some systems need it for ETIMEDOUT -#include -#if __linux__ -#include -#else /* generic Unix */ -#include -#endif -#endif /* _WIN32||_WIN64 */ - -#include "../tbb_stddef.h" -#include "../mutex.h" -#include "../tbb_thread.h" -#include "../tbb_exception.h" -#include "../tbb_profiling.h" - -namespace tbb { - -namespace interface5 { - -// C++0x standard working draft 30.4.3 -// Lock tag types -struct defer_lock_t { }; //! do not acquire ownership of the mutex -struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking -struct adopt_lock_t { }; //! assume the calling thread has already -const defer_lock_t defer_lock = {}; -const try_to_lock_t try_to_lock = {}; -const adopt_lock_t adopt_lock = {}; - -// C++0x standard working draft 30.4.3.1 -//! lock_guard -template -class lock_guard : tbb::internal::no_copy { -public: - //! mutex type - typedef M mutex_type; - - //! Constructor - /** precondition: If mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. */ - explicit lock_guard(mutex_type& m) : pm(m) {m.lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex m. */ - lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {} - - //! Destructor - ~lock_guard() { pm.unlock(); } -private: - mutex_type& pm; -}; - -// C++0x standard working draft 30.4.3.2 -//! unique_lock -template -class unique_lock : tbb::internal::no_copy { - friend class condition_variable; -public: - typedef M mutex_type; - - // 30.4.3.2.1 construct/copy/destroy - // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use. - //! Constructor - /** postcondition: pm==0 && owns==false */ - unique_lock() : pm(NULL), owns(false) {} - - //! Constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==true */ - explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;} - - //! Defer_lock constructor - /** postcondition: pm==&m and owns==false */ - unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {} - - //! Try_to_lock constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==res where res is the value returned by - the call to m.try_lock(). */ - unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail. - postcondition: pm==&m and owns==true */ - unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {} - - //! Timed unique_lock acquisition. - /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that - it uses tbb::tick_count::interval_t to specify the time duration. */ - unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );} - - //! Destructor - ~unique_lock() { if( owns ) pm->unlock(); } - - // 30.4.3.2.2 locking - //! Lock the mutex and own it. 
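/* The timed acquisition above deviates from the draft standard by taking a
   tbb::tick_count::interval_t, since this shim predates std::chrono. The
   C++11 equivalent states the same intent directly; a sketch (the mutex
   name and the 100 ms deadline are illustrative):

       #include <chrono>
       #include <mutex>

       std::timed_mutex tm;
       int work_done = 0;

       void try_work() {
           std::unique_lock<std::timed_mutex> lk(tm, std::chrono::milliseconds(100));
           if (lk.owns_lock())   // acquired within the deadline
               ++work_done;
       }
*/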
- void lock() { - if( pm ) { - if( !owns ) { - pm->lock(); - owns = true; - } else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( owns, NULL ); - } - - //! Try to lock the mutex. - /** If successful, note that this lock owns it. Otherwise, set it false. */ - bool try_lock() { - if( pm ) { - if( !owns ) - owns = pm->try_lock(); - else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - return owns; - } - - //! Try to lock the mutex. - bool try_lock_for( const tick_count::interval_t &i ); - - //! Unlock the mutex - /** And note that this lock no longer owns it. */ - void unlock() { - if( owns ) { - pm->unlock(); - owns = false; - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( !owns, NULL ); - } - - // 30.4.3.2.3 modifiers - //! Swap the two unique locks - void swap(unique_lock& u) { - mutex_type* t_pm = u.pm; u.pm = pm; pm = t_pm; - bool t_owns = u.owns; u.owns = owns; owns = t_owns; - } - - //! Release control over the mutex. - mutex_type* release() { - mutex_type* o_pm = pm; - pm = NULL; - owns = false; - return o_pm; - } - - // 30.4.3.2.4 observers - //! Does this lock own the mutex? - bool owns_lock() const { return owns; } - - // TODO: Un-comment 'explicit' when the last non-C++0x compiler support is dropped - //! Does this lock own the mutex? - /*explicit*/ operator bool() const { return owns; } - - //! Return the mutex that this lock currently has. - mutex_type* mutex() const { return pm; } - -private: - mutex_type* pm; - bool owns; -}; - -template -bool unique_lock::try_lock_for( const tick_count::interval_t &i) -{ - const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */ - // the smallest wait-time is 0.1 milliseconds. - bool res = pm->try_lock(); - int duration_in_micro; - if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) { - tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3 - do { - this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds - duration_in_micro -= unique_lock_tick; - res = pm->try_lock(); - } while( !res && duration_in_micro>unique_lock_tick ); - } - return (owns=res); -} - -//! Swap the two unique locks that have the mutexes of same type -template -void swap(unique_lock& x, unique_lock& y) { x.swap( y ); } - -namespace internal { - -#if _WIN32||_WIN64 -union condvar_impl_t { - condition_variable_using_event cv_event; - CONDITION_VARIABLE cv_native; -}; -void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_destroy_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv ); -bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL ); - -#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */ -typedef pthread_cond_t condvar_impl_t; -#endif - -} // namespace internal - -//! cv_status -/** C++0x standard working draft 30.5 */ -enum cv_status { no_timeout, timeout }; - -//! condition variable -/** C++0x standard working draft 30.5.1 - @ingroup synchronization */ -class condition_variable : tbb::internal::no_copy { -public: - //! 
Constructor - condition_variable() { -#if _WIN32||_WIN64 - internal_initialize_condition_variable( my_cv ); -#else - pthread_cond_init( &my_cv, NULL ); -#endif - } - - //! Destructor - ~condition_variable() { - //precondition: There shall be no thread blocked on *this. -#if _WIN32||_WIN64 - internal_destroy_condition_variable( my_cv ); -#else - pthread_cond_destroy( &my_cv ); -#endif - } - - //! Notify one thread and wake it up - void notify_one() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_one( my_cv ); -#else - pthread_cond_signal( &my_cv ); -#endif - } - - //! Notify all threads - void notify_all() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_all( my_cv ); -#else - pthread_cond_broadcast( &my_cv ); -#endif - } - - //! Release the mutex associated with the lock and wait on this condition variable - void wait(unique_lock& lock); - - //! Wait on this condition variable while pred is false - template - void wait(unique_lock& lock, Predicate pred) { - while( !pred() ) - wait( lock ); - } - - //! Timed version of wait() - cv_status wait_for(unique_lock& lock, const tick_count::interval_t &i ); - - //! Timed version of the predicated wait - /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */ - template - bool wait_for(unique_lock& lock, const tick_count::interval_t &i, Predicate pred) - { - while( !pred() ) { - cv_status st = wait_for( lock, i ); - if( st==timeout ) - return pred(); - } - return true; - } - - // C++0x standard working draft. 30.2.3 - typedef internal::condvar_impl_t* native_handle_type; - - native_handle_type native_handle() { return (native_handle_type) &my_cv; } - -private: - internal::condvar_impl_t my_cv; -}; - - -#if _WIN32||_WIN64 -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) { - int ec = GetLastError(); - // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT - __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait() - if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) { - int ec = GetLastError(); - if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT ) - rc = timeout; - else { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} - -#else /* !(_WIN32||_WIN64) */ -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - // upon successful return, the mutex has been locked and is owned by the calling thread. 
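/* The timed overload that follows converts the relative tbb::tick_count
   interval into the absolute timespec that pthread_cond_timedwait() expects.
   Worked example (clock values illustrative): with i.seconds() == 2.75 and a
   current time of {tv_sec = 100, tv_nsec = 400000000}, the code adds 2 to
   tv_sec and 750000000 to tv_nsec, leaving tv_nsec at 1150000000 >= 1e9; the
   normalization step carries the overflow, yielding {tv_sec = 103,
   tv_nsec = 150000000}. An ETIMEDOUT result from pthread_cond_timedwait() is
   the expected "deadline passed" outcome and maps to the timeout status
   rather than to an error. */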
- lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ -#if __linux__ - struct timespec req; - double sec = i.seconds(); - clock_gettime( CLOCK_REALTIME, &req ); - req.tv_sec += static_cast(sec); - req.tv_nsec += static_cast( (sec - static_cast(sec))*1e9 ); -#else /* generic Unix */ - struct timeval tv; - struct timespec req; - double sec = i.seconds(); - int status = gettimeofday(&tv, NULL); - __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); - req.tv_sec = tv.tv_sec + static_cast(sec); - req.tv_nsec = tv.tv_usec*1000 + static_cast( (sec - static_cast(sec))*1e9 ); -#endif /*(choice of OS) */ - if( req.tv_nsec>=1e9 ) { - req.tv_sec += 1; - req.tv_nsec -= static_cast(1e9); - } - __TBB_ASSERT( 0<=req.tv_nsec && req.tv_nsec<1e9, NULL ); - - int ec; - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) { - if( ec==ETIMEDOUT ) - rc = timeout; - else { - __TBB_ASSERT( lock.try_lock()==false, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} -#endif /* !(_WIN32||_WIN64) */ - -} // namespace interface5 - -__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable) - -} // namespace tbb - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -using tbb::interface5::defer_lock_t; -using tbb::interface5::try_to_lock_t; -using tbb::interface5::adopt_lock_t; -using tbb::interface5::defer_lock; -using tbb::interface5::try_to_lock; -using tbb::interface5::adopt_lock; -using tbb::interface5::lock_guard; -using tbb::interface5::unique_lock; -using tbb::interface5::swap; /* this is for void std::swap(unique_lock&,unique_lock&) */ -using tbb::interface5::condition_variable; -using tbb::interface5::cv_status; -using tbb::interface5::timeout; -using tbb::interface5::no_timeout; - -} // namespace std - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_condition_variable_H */ diff --git a/src/tbb/include/tbb/compat/ppl.h b/src/tbb/include/tbb/compat/ppl.h deleted file mode 100644 index 9012e0acd..000000000 --- a/src/tbb/include/tbb/compat/ppl.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_compat_ppl_H -#define __TBB_compat_ppl_H - -#include "../task_group.h" -#include "../parallel_invoke.h" -#include "../parallel_for_each.h" -#include "../parallel_for.h" -#include "../tbb_exception.h" -#include "../critical_section.h" -#include "../reader_writer_lock.h" -#include "../combinable.h" - -namespace Concurrency { - -#if __TBB_TASK_GROUP_CONTEXT - using tbb::task_handle; - using tbb::task_group_status; - using tbb::task_group; - using tbb::structured_task_group; - using tbb::invalid_multiple_scheduling; - using tbb::missing_wait; - using tbb::make_task; - - using tbb::not_complete; - using tbb::complete; - using tbb::canceled; - - using tbb::is_current_task_group_canceling; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - using tbb::parallel_invoke; - using tbb::strict_ppl::parallel_for; - using tbb::parallel_for_each; - using tbb::critical_section; - using tbb::reader_writer_lock; - using tbb::combinable; - - using tbb::improper_lock; - -} // namespace Concurrency - -#endif /* __TBB_compat_ppl_H */ diff --git a/src/tbb/include/tbb/compat/thread b/src/tbb/include/tbb/compat/thread deleted file mode 100644 index 64197bfc0..000000000 --- a/src/tbb/include/tbb/compat/thread +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_thread_H -#define __TBB_thread_H - -#include "../tbb_thread.h" - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -typedef tbb::tbb_thread thread; - -namespace this_thread { - using tbb::this_tbb_thread::get_id; - using tbb::this_tbb_thread::yield; - - inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { - tbb::internal::thread_sleep_v3( rel_time ); - } - -} - -} - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_thread_H */ diff --git a/src/tbb/include/tbb/compat/tuple b/src/tbb/include/tbb/compat/tuple deleted file mode 100644 index 00b7809ca..000000000 --- a/src/tbb/include/tbb/compat/tuple +++ /dev/null @@ -1,488 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tuple_H -#define __TBB_tuple_H - -#include -#include "../tbb_stddef.h" - -// build preprocessor variables for varying number of arguments -// Need the leading comma so the empty __TBB_T_PACK will not cause a syntax error. -#if __TBB_VARIADIC_MAX <= 5 -#define __TBB_T_PACK -#define __TBB_U_PACK -#define __TBB_TYPENAME_T_PACK -#define __TBB_TYPENAME_U_PACK -#define __TBB_NULL_TYPE_PACK -#define __TBB_REF_T_PARAM_PACK -#define __TBB_CONST_REF_T_PARAM_PACK -#define __TBB_T_PARAM_LIST_PACK -#define __TBB_CONST_NULL_REF_PACK -// -#elif __TBB_VARIADIC_MAX == 6 -#define __TBB_T_PACK ,__T5 -#define __TBB_U_PACK ,__U5 -#define __TBB_TYPENAME_T_PACK , typename __T5 -#define __TBB_TYPENAME_U_PACK , typename __U5 -#define __TBB_NULL_TYPE_PACK , null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5 -#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5 -#define __TBB_T_PARAM_LIST_PACK ,t5 -#define __TBB_CONST_NULL_REF_PACK , const null_type& -// -#elif __TBB_VARIADIC_MAX == 7 -#define __TBB_T_PACK ,__T5, __T6 -#define __TBB_U_PACK ,__U5, __U6 -#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6 -#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6 -#define __TBB_NULL_TYPE_PACK , null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6 -#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5, const __T6& t6 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type& -// -#elif __TBB_VARIADIC_MAX == 8 -#define __TBB_T_PACK ,__T5, __T6, __T7 -#define __TBB_U_PACK ,__U5, __U6, __U7 -#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6, typename __T7 -#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6, typename __U7 -#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7 -#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type& -// -#elif __TBB_VARIADIC_MAX == 9 -#define __TBB_T_PACK ,__T5, __T6, __T7, __T8 -#define __TBB_U_PACK ,__U5, __U6, __U7, __U8 -#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8 -#define __TBB_TYPENAME_U_PACK , 
typename __U5, typename __U6, typename __U7, typename __U8 -#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8 -#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type& -// -#elif __TBB_VARIADIC_MAX >= 10 -#define __TBB_T_PACK ,__T5, __T6, __T7, __T8, __T9 -#define __TBB_U_PACK ,__U5, __U6, __U7, __U8, __U9 -#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8, typename __T9 -#define __TBB_TYPENAME_U_PACK , typename __U5, typename __U6, typename __U7, typename __U8, typename __U9 -#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type, null_type -#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8, __T9& t9 -#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8, const __T9& t9 -#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 ,t9 -#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type&, const null_type& -#endif - - - -namespace tbb { -namespace interface5 { - -namespace internal { -struct null_type { }; -} -using internal::null_type; - -// tuple forward declaration -template = 6 -, typename __T5=null_type -#if __TBB_VARIADIC_MAX >= 7 -, typename __T6=null_type -#if __TBB_VARIADIC_MAX >= 8 -, typename __T7=null_type -#if __TBB_VARIADIC_MAX >= 9 -, typename __T8=null_type -#if __TBB_VARIADIC_MAX >= 10 -, typename __T9=null_type -#endif -#endif -#endif -#endif -#endif -> -class tuple; - -namespace internal { - -// const null_type temp -inline const null_type cnull() { return null_type(); } - -// cons forward declaration -template struct cons; - -// type of a component of the cons -template -struct component { - typedef typename __T::tail_type next; - typedef typename component<__N-1,next>::type type; -}; - -template -struct component<0,__T> { - typedef typename __T::head_type type; -}; - -template<> -struct component<0,null_type> { - typedef null_type type; -}; - -// const version of component - -template -struct component<__N, const __T> -{ - typedef typename __T::tail_type next; - typedef const typename component<__N-1,next>::type type; -}; - -template -struct component<0, const __T> -{ - typedef const typename __T::head_type type; -}; - - -// helper class for getting components of cons -template< int __N> -struct get_helper { -template -inline static typename component<__N, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) { - return get_helper<__N-1>::get(ti.tail); -} -template -inline static typename component<__N, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) { - return get_helper<__N-1>::get(ti.tail); -} -}; - -template<> -struct get_helper<0> { -template -inline static typename component<0, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) { - return ti.head; -} -template -inline static typename component<0, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) { - return ti.head; -} -}; - -// traits adaptor -template -struct tuple_traits { - typedef cons <__T0, typename tuple_traits<__T1, __T2, __T3, __T4 __TBB_T_PACK , null_type>::U > U; -}; - -template -struct tuple_traits<__T0, null_type, null_type, null_type, null_type __TBB_NULL_TYPE_PACK > { - typedef cons<__T0, null_type> U; -}; - 
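/* The specializations above spell out the encoding this compat header relied
   on: a tuple is a right-nested cons list, so tuple<A,B,C> unfolds to
   cons<A, cons<B, cons<C, null_type> > >, and component<N, ...> recovers the
   Nth type by walking the tail N times. oneTBB requires C++11, where
   std::tuple provides the same functionality directly; a sketch of the
   equivalent usage (values illustrative):

       #include <cstddef>
       #include <tuple>

       std::tuple<int, double, const char*> t(1, 2.5, "x");
       int first = std::get<0>(t);                                 // element access
       const std::size_t n = std::tuple_size<decltype(t)>::value;  // 3 elements
*/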
-template<> -struct tuple_traits { - typedef null_type U; -}; - - -// core cons defs -template -struct cons{ - - typedef __HT head_type; - typedef __TT tail_type; - - head_type head; - tail_type tail; - - static const int length = 1 + tail_type::length; - - // default constructors - explicit cons() : head(), tail() { } - - // non-default constructors - cons(head_type& h, const tail_type& t) : head(h), tail(t) { } - - template - cons(const __T0& t0, const __T1& t1, const __T2& t2, const __T3& t3, const __T4& t4 __TBB_CONST_REF_T_PARAM_PACK) : - head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK, cnull()) { } - - template - cons(__T0& t0, __T1& t1, __T2& t2, __T3& t3, __T4& t4 __TBB_REF_T_PARAM_PACK) : - head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK , cnull()) { } - - template - cons(const cons<__HT1,__TT1>& other) : head(other.head), tail(other.tail) { } - - cons& operator=(const cons& other) { head = other.head; tail = other.tail; return *this; } - - friend bool operator==(const cons& me, const cons& other) { - return me.head == other.head && me.tail == other.tail; - } - friend bool operator<(const cons& me, const cons& other) { - return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail); - } - friend bool operator>(const cons& me, const cons& other) { return other=(const cons& me, const cons& other) { return !(meother); } - - template - friend bool operator==(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { - return me.head == other.head && me.tail == other.tail; - } - - template - friend bool operator<(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { - return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail); - } - - template - friend bool operator>(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return other - friend bool operator!=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me==other); } - - template - friend bool operator>=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me - friend bool operator<=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me>other); } - - -}; // cons - - -template -struct cons<__HT,null_type> { - - typedef __HT head_type; - typedef null_type tail_type; - - head_type head; - - static const int length = 1; - - // default constructor - cons() : head() { /*std::cout << "default constructor 1\n";*/ } - - cons(const null_type&, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head() { /*std::cout << "default constructor 2\n";*/ } - - // non-default constructor - template - cons(__T1& t1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t1) { /*std::cout << "non-default a1, t1== " << t1 << "\n";*/} - - cons(head_type& h, const null_type& = null_type() ) : head(h) { } - cons(const head_type& t0, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t0) { } - - // converting constructor - template - cons(__HT1 h1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(h1) { } - - // copy constructor - template - cons( const cons<__HT1, null_type>& other) : head(other.head) { } - - // assignment operator - cons& operator=(const cons& other) { head = other.head; return *this; } - - friend bool operator==(const cons& me, const cons& other) { return me.head == other.head; } - friend bool operator<(const 
cons& me, const cons& other) { return me.head < other.head; } - friend bool operator>(const cons& me, const cons& other) { return otherother); } - friend bool operator>=(const cons& me, const cons& other) {return !(me - friend bool operator==(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { - return me.head == other.head; - } - - template - friend bool operator<(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { - return me.head < other.head; - } - - template - friend bool operator>(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return other - friend bool operator!=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me==other); } - - template - friend bool operator<=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me>other); } - - template - friend bool operator>=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me -struct cons { typedef null_type tail_type; static const int length = 0; }; - -// wrapper for default constructor -template -inline const __T wrap_dcons(__T*) { return __T(); } - -} // namespace internal - -// tuple definition -template -class tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U { - // friends - template friend class tuple_size; - template friend struct tuple_element; - - // stl components - typedef tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > value_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef size_t size_type; - - typedef typename internal::tuple_traits<__T0,__T1,__T2,__T3, __T4 __TBB_T_PACK >::U my_cons; - -public: - tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL) - ,const __T1& t1=internal::wrap_dcons((__T1*)NULL) - ,const __T2& t2=internal::wrap_dcons((__T2*)NULL) - ,const __T3& t3=internal::wrap_dcons((__T3*)NULL) - ,const __T4& t4=internal::wrap_dcons((__T4*)NULL) -#if __TBB_VARIADIC_MAX >= 6 - ,const __T5& t5=internal::wrap_dcons((__T5*)NULL) -#if __TBB_VARIADIC_MAX >= 7 - ,const __T6& t6=internal::wrap_dcons((__T6*)NULL) -#if __TBB_VARIADIC_MAX >= 8 - ,const __T7& t7=internal::wrap_dcons((__T7*)NULL) -#if __TBB_VARIADIC_MAX >= 9 - ,const __T8& t8=internal::wrap_dcons((__T8*)NULL) -#if __TBB_VARIADIC_MAX >= 10 - ,const __T9& t9=internal::wrap_dcons((__T9*)NULL) -#endif -#endif -#endif -#endif -#endif - ) : - my_cons(t0,t1,t2,t3,t4 __TBB_T_PARAM_LIST_PACK) { } - - template - struct internal_tuple_element { - typedef typename internal::component<__N,my_cons>::type type; - }; - - template - typename internal_tuple_element<__N>::type& get() { return internal::get_helper<__N>::get(*this); } - - template - typename internal_tuple_element<__N>::type const& get() const { return internal::get_helper<__N>::get(*this); } - - template - tuple& operator=(const internal::cons<__U1,__U2>& other) { - my_cons::operator=(other); - return *this; - } - - template - tuple& operator=(const std::pair<__U1,__U2>& other) { - // __TBB_ASSERT(tuple_size::value == 2, "Invalid size for pair to tuple assignment"); - this->head = other.first; - this->tail.head = other.second; - return *this; - } - - friend bool operator==(const tuple& me, const tuple& other) {return static_cast(me)==(other);} - friend bool operator<(const tuple& me, const tuple& other) {return static_cast(me)<(other);} - friend bool operator>(const tuple& me, const tuple& other) {return 
static_cast(me)>(other);} - friend bool operator!=(const tuple& me, const tuple& other) {return static_cast(me)!=(other);} - friend bool operator>=(const tuple& me, const tuple& other) {return static_cast(me)>=(other);} - friend bool operator<=(const tuple& me, const tuple& other) {return static_cast(me)<=(other);} - -}; // tuple - -// empty tuple -template<> -class tuple : public null_type { -}; - -// helper classes - -template < typename __T> -class tuple_size { -public: - static const size_t value = 1 + tuple_size::value; -}; - -template <> -class tuple_size > { -public: - static const size_t value = 0; -}; - -template <> -class tuple_size { -public: - static const size_t value = 0; -}; - -template -struct tuple_element { - typedef typename internal::component<__N, typename __T::my_cons>::type type; -}; - -template -inline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type& - get(tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); } - -template -inline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type const& - get(const tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); } - -} // interface5 -} // tbb - -#if !__TBB_CPP11_TUPLE_PRESENT -namespace tbb { - namespace flow { - using tbb::interface5::tuple; - using tbb::interface5::tuple_size; - using tbb::interface5::tuple_element; - using tbb::interface5::get; - } -} -#endif - -#undef __TBB_T_PACK -#undef __TBB_U_PACK -#undef __TBB_TYPENAME_T_PACK -#undef __TBB_TYPENAME_U_PACK -#undef __TBB_NULL_TYPE_PACK -#undef __TBB_REF_T_PARAM_PACK -#undef __TBB_CONST_REF_T_PARAM_PACK -#undef __TBB_T_PARAM_LIST_PACK -#undef __TBB_CONST_NULL_REF_PACK - -#endif /* __TBB_tuple_H */ diff --git a/src/tbb/include/tbb/concurrent_hash_map.h b/src/tbb/include/tbb/concurrent_hash_map.h index 27b710f5a..68652c596 100644 --- a/src/tbb/include/tbb/concurrent_hash_map.h +++ b/src/tbb/include/tbb/concurrent_hash_map.h @@ -1,1417 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_hash_map_H -#define __TBB_concurrent_hash_map_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "cache_aligned_allocator.h" -#include "tbb_allocator.h" -#include "spin_rw_mutex.h" -#include "atomic.h" -#include "tbb_exception.h" -#include "tbb_profiling.h" -#include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher -#if __TBB_INITIALIZER_LISTS_PRESENT -#include -#endif -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS -#include -#endif -#if __TBB_STATISTICS -#include -#endif - -namespace tbb { - -//! hash_compare that is default argument for concurrent_hash_map -template -struct tbb_hash_compare { - static size_t hash( const Key& a ) { return tbb_hasher(a); } - static bool equal( const Key& a, const Key& b ) { return a == b; } -}; - -namespace interface5 { - - template, typename A = tbb_allocator > > - class concurrent_hash_map; - - //! @cond INTERNAL - namespace internal { - using namespace tbb::internal; - - - //! Type of a hash code. - typedef size_t hashcode_t; - //! Node base type - struct hash_map_node_base : tbb::internal::no_copy { - //! Mutex type - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - //! Next node in chain - hash_map_node_base *next; - mutex_t mutex; - }; - //! Incompleteness flag value - static hash_map_node_base *const rehash_req = reinterpret_cast(size_t(3)); - //! Rehashed empty bucket flag - static hash_map_node_base *const empty_rehashed = reinterpret_cast(size_t(0)); - //! base class of concurrent_hash_map - class hash_map_base { - public: - //! Size type - typedef size_t size_type; - //! Type of a hash code. - typedef size_t hashcode_t; - //! Segment index type - typedef size_t segment_index_t; - //! Node base type - typedef hash_map_node_base node_base; - //! Bucket type - struct bucket : tbb::internal::no_copy { - //! Mutex type for buckets - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - mutex_t mutex; - node_base *node_list; - }; - //! Count of segments in the first block - static size_type const embedded_block = 1; - //! Count of segments in the first block - static size_type const embedded_buckets = 1< my_mask; - //! Segment pointers table. Also prevents false sharing between my_mask and my_size - segments_table_t my_table; - //! Size of container in stored items - atomic my_size; // It must be in separate cache line from my_mask due to performance effects - //! Zero segment - bucket my_embedded_segment[embedded_buckets]; -#if __TBB_STATISTICS - atomic my_info_resizes; // concurrent ones - mutable atomic my_info_restarts; // race collisions - atomic my_info_rehashes; // invocations of rehash_bucket -#endif - //! 
Constructor - hash_map_base() { - std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512 - + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8 - + embedded_buckets*sizeof(bucket) ); // n*8 or n*16 - for( size_type i = 0; i < embedded_block; i++ ) // fill the table - my_table[i] = my_embedded_segment + segment_base(i); - my_mask = embedded_buckets - 1; - __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); -#if __TBB_STATISTICS - my_info_resizes = 0; // concurrent ones - my_info_restarts = 0; // race collisions - my_info_rehashes = 0; // invocations of rehash_bucket -#endif - } - - //! @return segment index of given index in the array - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - //! @return the first array index of given segment - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)<(ptr) > uintptr_t(63); - } - - //! Initialize buckets - static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { - if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) ); - else for(size_type i = 0; i < sz; i++, ptr++) { - *reinterpret_cast(&ptr->mutex) = 0; - ptr->node_list = rehash_req; - } - } - - //! Add node @arg n to bucket @arg b - static void add_to_bucket( bucket *b, node_base *n ) { - __TBB_ASSERT(b->node_list != rehash_req, NULL); - n->next = b->node_list; - b->node_list = n; // its under lock and flag is set - } - - //! Exception safety helper - struct enable_segment_failsafe : tbb::internal::no_copy { - segment_ptr_t *my_segment_ptr; - enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {} - ~enable_segment_failsafe() { - if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress - } - }; - - //! Enable segment - void enable_segment( segment_index_t k, bool is_initial = false ) { - __TBB_ASSERT( k, "Zero segment must be embedded" ); - enable_segment_failsafe watchdog( my_table, k ); - cache_aligned_allocator alloc; - size_type sz; - __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); - if( k >= first_block ) { - sz = segment_size( k ); - segment_ptr_t ptr = alloc.allocate( sz ); - init_buckets( ptr, sz, is_initial ); - itt_hide_store_word( my_table[k], ptr ); - sz <<= 1;// double it to get entire capacity of the container - } else { // the first block - __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); - sz = segment_size( first_block ); - segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets ); - init_buckets( ptr, sz - embedded_buckets, is_initial ); - ptr -= segment_base(embedded_block); - for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets - itt_hide_store_word( my_table[i], ptr + segment_base(i) ); - } - itt_store_word_with_release( my_mask, sz-1 ); - watchdog.my_segment_ptr = 0; - } - - //! Get bucket by (masked) hashcode - bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? 
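/* The lookup below leans on the power-of-two segment layout defined above:
   segment_index_of(h) is floor(log2(h|1)), and segment_base(k) == (1<<k) & ~1,
   so segment k >= 1 starts at bucket index 2^k and, per segment_size(), holds
   2^k buckets, while segment 0 is the pair of embedded buckets. Worked
   example (h illustrative): a masked hashcode h = 5 gives
   s = segment_index_of(5) = 2 and segment_base(2) = 4, so the bucket lives at
   slot 5 - 4 = 1 of segment 2. Because growth appends segments instead of
   rehashing the whole table, existing buckets are never relocated. */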
- segment_index_t s = segment_index_of( h ); - h -= segment_base(s); - segment_ptr_t seg = my_table[s]; - __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); - return &seg[h]; - } - - // internal serial rehashing helper - void mark_rehashed_levels( hashcode_t h ) throw () { - segment_index_t s = segment_index_of( h ); - while( segment_ptr_t seg = my_table[++s] ) - if( seg[h].node_list == rehash_req ) { - seg[h].node_list = empty_rehashed; - mark_rehashed_levels( h + ((hashcode_t)1<node_list) != rehash_req ) - { -#if __TBB_STATISTICS - my_info_restarts++; // race collisions -#endif - return true; - } - } - return false; - } - - //! Insert a node and check for load factor. @return segment index to enable. - segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) { - size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted - add_to_bucket( b, n ); - // check load factor - if( sz >= mask ) { // TODO: add custom load_factor - segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of - __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated"); - static const segment_ptr_t is_allocating = (segment_ptr_t)2; - if( !itt_hide_load_word(my_table[new_seg]) - && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL ) - return new_seg; // The value must be processed - } - return 0; - } - - //! Prepare enough segments for number of buckets - void reserve(size_type buckets) { - if( !buckets-- ) return; - bool is_initial = !my_size; - for( size_type m = my_mask; buckets > m; m = my_mask ) - enable_segment( segment_index_of( m+1 ), is_initial ); - } - //! Swap hash_map_bases - void internal_swap(hash_map_base &table) { - using std::swap; - swap(this->my_mask, table.my_mask); - swap(this->my_size, table.my_size); - for(size_type i = 0; i < embedded_buckets; i++) - swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list); - for(size_type i = embedded_block; i < pointers_per_table; i++) - swap(this->my_table[i], table.my_table[i]); - } - }; - - template - class hash_map_range; - - //! Meets requirements of a forward iterator for STL */ - /** Value is either the T or const T type of the container. 
-        @ingroup containers */
-    template<typename Container, typename Value>
-    class hash_map_iterator
-        : public std::iterator<std::forward_iterator_tag,Value>
-    {
-        typedef Container map_type;
-        typedef typename Container::node node;
-        typedef hash_map_base::node_base node_base;
-        typedef hash_map_base::bucket bucket;
-
-        template<typename C, typename T, typename U>
-        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
-
-        template<typename C, typename T, typename U>
-        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
-
-        template<typename C, typename T, typename U>
-        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
-
-        template<typename C, typename U>
-        friend class hash_map_iterator;
-
-        template<typename I>
-        friend class hash_map_range;
-
-        void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
-            size_t k = my_index+1;
-            while( my_bucket && k <= my_map->my_mask ) {
-                // Following test uses 2's-complement wizardry
-                if( k& (k-2) ) // not the beginning of a segment
-                    ++my_bucket;
-                else my_bucket = my_map->get_bucket( k );
-                my_node = static_cast<node*>( my_bucket->node_list );
-                if( hash_map_base::is_valid(my_node) ) {
-                    my_index = k; return;
-                }
-                ++k;
-            }
-            my_bucket = 0; my_node = 0; my_index = k; // the end
-        }
-#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
-        template<typename Key, typename T, typename HashCompare, typename A>
-        friend class interface5::concurrent_hash_map;
-#else
-    public: // workaround
-#endif
-        //! concurrent_hash_map over which we are iterating.
-        const Container *my_map;
-
-        //! Index in hash table for current item
-        size_t my_index;
-
-        //! Pointer to bucket
-        const bucket *my_bucket;
-
-        //! Pointer to node that has current item
-        node *my_node;
-
-        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
-
-    public:
-        //! Construct undefined iterator
-        hash_map_iterator() {}
-        hash_map_iterator( const hash_map_iterator &other ) :
-            my_map(other.my_map),
-            my_index(other.my_index),
-            my_bucket(other.my_bucket),
-            my_node(other.my_node)
-        {}
-        Value& operator*() const {
-            __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
-            return my_node->item;
-        }
-        Value* operator->() const {return &operator*();}
-        hash_map_iterator& operator++();
-
-        //! Post increment
-        hash_map_iterator operator++(int) {
-            hash_map_iterator old(*this);
-            operator++();
-            return old;
-        }
-    };
-
-    template<typename Container, typename Value>
-    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
-        my_map(&map),
-        my_index(index),
-        my_bucket(b),
-        my_node( static_cast<node*>(n) )
-    {
-        if( b && !hash_map_base::is_valid(n) )
-            advance_to_next_bucket();
-    }
-
-    template<typename Container, typename Value>
-    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
-        my_node = static_cast<node*>( my_node->next );
-        if( !my_node ) advance_to_next_bucket();
-        return *this;
-    }
-
-    template<typename Container, typename T, typename U>
-    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
-        return i.my_node == j.my_node && i.my_map == j.my_map;
-    }
-
-    template<typename Container, typename T, typename U>
-    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
-        return i.my_node != j.my_node || i.my_map != j.my_map;
-    }
-
-    //! Range class used with concurrent_hash_map
-    /** @ingroup containers */
-    template<typename Iterator>
-    class hash_map_range {
-        typedef typename Iterator::map_type map_type;
-        Iterator my_begin;
-        Iterator my_end;
-        mutable Iterator my_midpoint;
-        size_t my_grainsize;
-        //! Set my_midpoint to point approximately half way between my_begin and my_end.
-        void set_midpoint() const;
-        template<typename U> friend class hash_map_range;
-    public:
-        //! Type for size of a range
-        typedef std::size_t size_type;
-        typedef typename Iterator::value_type value_type;
-        typedef typename Iterator::reference reference;
-        typedef typename Iterator::difference_type difference_type;
-        typedef Iterator iterator;
-
-        //! True if range is empty.
-        bool empty() const {return my_begin==my_end;}
-
-        //! True if range can be partitioned into two subranges.
-        bool is_divisible() const {
-            return my_midpoint!=my_end;
-        }
-        //! Split range.
-        hash_map_range( hash_map_range& r, split ) :
-            my_end(r.my_end),
-            my_grainsize(r.my_grainsize)
-        {
-            r.my_end = my_begin = r.my_midpoint;
-            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
-            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
-            set_midpoint();
-            r.set_midpoint();
-        }
-        //! type conversion
-        template<typename U>
-        hash_map_range( hash_map_range<U>& r) :
-            my_begin(r.my_begin),
-            my_end(r.my_end),
-            my_midpoint(r.my_midpoint),
-            my_grainsize(r.my_grainsize)
-        {}
-        //! Init range with container and grainsize specified
-        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
-            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
-            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
-            my_grainsize( grainsize_ )
-        {
-            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
-            set_midpoint();
-        }
-        const Iterator& begin() const {return my_begin;}
-        const Iterator& end() const {return my_end;}
-        //! The grain size for this range.
-        size_type grainsize() const {return my_grainsize;}
-    };
-
-    template<typename Iterator>
-    void hash_map_range<Iterator>::set_midpoint() const {
-        // Split by groups of nodes
-        size_t m = my_end.my_index-my_begin.my_index;
-        if( m > my_grainsize ) {
-            m = my_begin.my_index + m/2u;
-            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
-            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
-        } else {
-            my_midpoint = my_end;
-        }
-        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
-            "my_begin is after my_midpoint" );
-        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
-            "my_midpoint is after my_end" );
-        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
-            "[my_begin, my_midpoint) range should not be empty" );
-    }
-
-    } // internal
-//! @endcond
-
-#if _MSC_VER && !defined(__INTEL_COMPILER)
-    // Suppress "conditional expression is constant" warning.
-    #pragma warning( push )
-    #pragma warning( disable: 4127 )
-#endif
-
-//! Unordered map from Key to T.
-/** concurrent_hash_map is associative container with concurrent access.
-
-@par Compatibility
-    The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1).
-
-@par Exception Safety
-    - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors.
-    - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment).
-    - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results.
-
-@par Changes since TBB 2.1
-    - Replaced internal algorithm and data structure. Patent is pending.
- - Added buckets number argument for constructor - -@par Changes since TBB 2.0 - - Fixed exception-safety - - Added template argument for allocator - - Added allocator argument in constructors - - Added constructor from a range of iterators - - Added several new overloaded insert() methods - - Added get_allocator() - - Added swap() - - Added count() - - Added overloaded erase(accessor &) and erase(const_accessor&) - - Added equal_range() [const] - - Added [const_]pointer, [const_]reference, and allocator_type types - - Added global functions: operator==(), operator!=(), and swap() - - @ingroup containers */ -template -class concurrent_hash_map : protected internal::hash_map_base { - template - friend class internal::hash_map_iterator; - - template - friend class internal::hash_map_range; - -public: - typedef Key key_type; - typedef T mapped_type; - typedef std::pair value_type; - typedef hash_map_base::size_type size_type; - typedef ptrdiff_t difference_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef internal::hash_map_iterator iterator; - typedef internal::hash_map_iterator const_iterator; - typedef internal::hash_map_range range_type; - typedef internal::hash_map_range const_range_type; - typedef Allocator allocator_type; - -protected: - friend class const_accessor; - struct node; - typedef typename Allocator::template rebind::other node_allocator_type; - node_allocator_type my_allocator; - HashCompare my_hash_compare; - - struct node : public node_base { - value_type item; - node( const Key &key ) : item(key, T()) {} - node( const Key &key, const T &t ) : item(key, t) {} -#if __TBB_CPP11_RVALUE_REF_PRESENT - node( value_type&& i ) : item(std::move(i)){} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - node( const value_type& i ) : item(i) {} - - // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17 - void *operator new( size_t /*size*/, node_allocator_type &a ) { - void *ptr = a.allocate(1); - if(!ptr) - tbb::internal::throw_exception(tbb::internal::eid_bad_alloc); - return ptr; - } - // match placement-new form above to be called if exception thrown in constructor - void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast(ptr),1); } - }; - - void delete_node( node_base *n ) { - my_allocator.destroy( static_cast(n) ); - my_allocator.deallocate( static_cast(n), 1); - } - - static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){ - return new( allocator ) node(key, *t); - } - - static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){ - return new( allocator ) node(key); - } - - static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){ - __TBB_ASSERT(false,"this dummy function should not be called"); - return NULL; - } - - node *search_bucket( const key_type &key, bucket *b ) const { - node *n = static_cast( b->node_list ); - while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) ) - n = static_cast( n->next ); - __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket"); - return n; - } - - //! bucket accessor is to find, rehash, acquire a lock, and access a bucket - class bucket_accessor : public bucket::scoped_t { - bucket *my_b; - public: - bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); } - //! 
find a bucket by masked hashcode, optionally rehash, and acquire the lock - inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { - my_b = base->get_bucket( h ); - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req - && try_acquire( my_b->mutex, /*write=*/true ) ) - { - if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing - } - else bucket::scoped_t::acquire( my_b->mutex, writer ); - __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL); - } - //! check whether bucket is locked for write - bool is_writer() { return bucket::scoped_t::is_writer; } - //! get bucket pointer - bucket *operator() () { return my_b; } - }; - - // TODO refactor to hash_base - void rehash_bucket( bucket *b_new, const hashcode_t h ) { - __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)"); - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed - hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit -#if __TBB_STATISTICS - my_info_rehashes++; // invocations of rehash_bucket -#endif - - bucket_accessor b_old( this, h & mask ); - - mask = (mask<<1) | 1; // get full mask for new bucket - __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); - restart: - for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first ); -#if TBB_USE_ASSERT - hashcode_t bmask = h & (mask>>1); - bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket - __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" ); -#endif - if( (c & mask) == h ) { - if( !b_old.is_writer() ) - if( !b_old.upgrade_to_writer() ) { - goto restart; // node ptr can be invalid due to concurrent erase - } - *p = n->next; // exclude from b_old - add_to_bucket( b_new, n ); - } else p = &n->next; // iterate to next item - } - } + http://www.apache.org/licenses/LICENSE-2.0 - struct call_clear_on_leave { - concurrent_hash_map* my_ch_map; - call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {} - void dismiss() {my_ch_map = 0;} - ~call_clear_on_leave(){ - if (my_ch_map){ - my_ch_map->clear(); - } - } - }; -public: - - class accessor; - //! Combines data access, locking, and garbage collection. - class const_accessor : private node::scoped_t /*which derived from no_copy*/ { - friend class concurrent_hash_map; - friend class accessor; - public: - //! Type of value - typedef const typename concurrent_hash_map::value_type value_type; - - //! True if result is empty. - bool empty() const { return !my_node; } - - //! Set to null - void release() { - if( my_node ) { - node::scoped_t::release(); - my_node = 0; - } - } - - //! Return reference to associated value in hash table. - const_reference operator*() const { - __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); - return my_node->item; - } - - //! Return pointer to associated value in hash table. - const_pointer operator->() const { - return &operator*(); - } - - //! Create empty result - const_accessor() : my_node(NULL) {} - - //! Destroy result after releasing the underlying reference. 
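            // Scope-based locking in practice (a sketch, not part of this
            // header; 'table' is a hypothetical concurrent_hash_map<int,int>
            // and 'use' a hypothetical function):
            //
            //     {
            //         concurrent_hash_map<int,int>::const_accessor a;
            //         if( table.find(a, 42) )
            //             use( a->second ); // read lock held while 'a' lives
            //     } // ~const_accessor() releases the lock here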
- ~const_accessor() { - my_node = NULL; // scoped lock's release() is called in its destructor - } - protected: - bool is_writer() { return node::scoped_t::is_writer; } - node *my_node; - hashcode_t my_hash; - }; - - //! Allows write access to elements and combines data access, locking, and garbage collection. - class accessor: public const_accessor { - public: - //! Type of value - typedef typename concurrent_hash_map::value_type value_type; - - //! Return reference to associated value in hash table. - reference operator*() const { - __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); - return this->my_node->item; - } - - //! Return pointer to associated value in hash table. - pointer operator->() const { - return &operator*(); - } - }; - - //! Construct empty table. - concurrent_hash_map( const allocator_type &a = allocator_type() ) - : internal::hash_map_base(), my_allocator(a) - {} - - //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. - concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() ) - : my_allocator(a) - { - reserve( n ); - } - - //! Copy constructor - concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a = allocator_type() ) - : internal::hash_map_base(), my_allocator(a) - { - internal_copy(table); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move constructor - concurrent_hash_map( concurrent_hash_map &&table ) - : internal::hash_map_base(), my_allocator(std::move(table.get_allocator())) - { - swap(table); - } - - //! Move constructor - concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a ) - : internal::hash_map_base(), my_allocator(a) - { - if (a == table.get_allocator()){ - this->swap(table); - }else{ - call_clear_on_leave scope_guard(this); - internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end())); - scope_guard.dismiss(); - } - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - //! Construction with copying iteration range and given allocator instance - template - concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() ) - : my_allocator(a) - { - reserve( std::distance(first, last) ); // TODO: load_factor? - internal_copy(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. - concurrent_hash_map( std::initializer_list il, const allocator_type &a = allocator_type() ) - : my_allocator(a) - { - reserve(il.size()); - internal_copy(il.begin(), il.end()); - } - -#endif //__TBB_INITIALIZER_LISTS_PRESENT - - //! Assignment - concurrent_hash_map& operator=( const concurrent_hash_map &table ) { - if( this!=&table ) { - clear(); - internal_copy(table); - } - return *this; - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
Move Assignment - concurrent_hash_map& operator=( concurrent_hash_map &&table ) { - if(this != &table){ - typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; - if(pocma_t::value || this->my_allocator == table.my_allocator) { - concurrent_hash_map trash (std::move(*this)); - //TODO: swapping allocators here may be a problem, replace with single direction moving iff pocma is set - this->swap(table); - } else { - //do per element move - concurrent_hash_map moved_copy(std::move(table), this->my_allocator); - this->swap(moved_copy); - } - } - return *this; - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Assignment - concurrent_hash_map& operator=( std::initializer_list il ) { - clear(); - reserve(il.size()); - internal_copy(il.begin(), il.end()); - return *this; - } -#endif //__TBB_INITIALIZER_LISTS_PRESENT - - - //! Rehashes and optionally resizes the whole table. - /** Useful to optimize performance before or after concurrent operations. - Also enables using of find() and count() concurrent methods in serial context. */ - void rehash(size_type n = 0); - - //! Clear table - void clear(); - - //! Clear table and destroy it. - ~concurrent_hash_map() { clear(); } - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - range_type range( size_type grainsize=1 ) { - return range_type( *this, grainsize ); - } - const_range_type range( size_type grainsize=1 ) const { - return const_range_type( *this, grainsize ); - } - - //------------------------------------------------------------------------ - // STL support - not thread-safe methods - //------------------------------------------------------------------------ - iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); } - iterator end() { return iterator( *this, 0, 0, 0 ); } - const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); } - const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); } - std::pair equal_range( const Key& key ) { return internal_equal_range( key, end() ); } - std::pair equal_range( const Key& key ) const { return internal_equal_range( key, end() ); } - - //! Number of items in table. - size_type size() const { return my_size; } - - //! True if size()==0. - bool empty() const { return my_size == 0; } - - //! Upper bound on size. - size_type max_size() const {return (~size_type(0))/sizeof(node);} - - //! Returns the current number of buckets - size_type bucket_count() const { return my_mask+1; } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! swap two instances. Iterators are invalidated - void swap( concurrent_hash_map &table ); - - //------------------------------------------------------------------------ - // concurrent map operations - //------------------------------------------------------------------------ - - //! Return count of items (0 or 1) - size_type count( const Key &key ) const { - return const_cast(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node ); - } - - //! Find item and acquire a read lock on the item. - /** Return true if item is found, false otherwise. 
*/ - bool find( const_accessor &result, const Key &key ) const { - result.release(); - return const_cast(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node ); - } - - //! Find item and acquire a write lock on the item. - /** Return true if item is found, false otherwise. */ - bool find( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node ); - } - - //! Insert item (if not already present) and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct ); - } - - //! Insert item (if not already present) and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct ); - } - - //! Insert item by copying if there is no such key present already and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct ); - } - - //! Insert item by copying if there is no such key present already and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct ); - } - - //! Insert item by copying if there is no such key present already - /** Returns true if item is inserted. */ - bool insert( const value_type &value ) { - return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct ); - } - - //! Insert range [first, last) - template - void insert( I first, I last ) { - for ( ; first != last; ++first ) - insert( *first ); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Insert initializer list - void insert( std::initializer_list il ) { - insert( il.begin(), il.end() ); - } -#endif //__TBB_INITIALIZER_LISTS_PRESENT - - //! Erase item. - /** Return true if item was erased by particularly this call. */ - bool erase( const Key& key ); - - //! Erase item by const_accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( const_accessor& item_accessor ) { - return exclude( item_accessor ); - } - - //! Erase item by accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( accessor& item_accessor ) { - return exclude( item_accessor ); - } - -protected: - //! Insert or find item and optionally acquire a lock on the item. - bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ) ) ; - - //! delete item by accessor - bool exclude( const_accessor &item_accessor ); - - //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) - template - std::pair internal_equal_range( const Key& key, I end ) const; - - //! Copy "source" to *this, where *this must start out empty. 
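    // Insert-or-update composed from the public operations above (a sketch;
    // 'Counter' and 'bump' are hypothetical, not part of this header):
    //
    //     typedef concurrent_hash_map<std::string,int> Counter;
    //     void bump( Counter &c, const std::string &key ) {
    //         Counter::accessor a;
    //         c.insert( a, key ); // true if inserted; value is T(), i.e. 0
    //         ++a->second;        // write lock held until 'a' is destroyed
    //     }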
- void internal_copy( const concurrent_hash_map& source ); - - template - void internal_copy( I first, I last ); - - //! Fast find when no concurrent erasure is used. For internal use inside TBB only! - /** Return pointer to item with given key, or NULL if no such item exists. - Must not be called concurrently with erasure operations. */ - const_pointer internal_fast_find( const Key& key ) const { - hashcode_t h = my_hash_compare.hash( key ); - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - node *n; - restart: - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); - bucket *b = get_bucket( h & m ); - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req ) - { - bucket::scoped_t lock; - if( lock.try_acquire( b->mutex, /*write=*/true ) ) { - if( b->node_list == internal::rehash_req) - const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing - } - else lock.acquire( b->mutex, /*write=*/false ); - __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL); - } - n = search_bucket( key, b ); - if( n ) - return &n->item; - else if( check_mask_race( h, m ) ) - goto restart; - return 0; - } -}; - -template -bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*) ) { - __TBB_ASSERT( !result || !result->my_node, NULL ); - bool return_value; - hashcode_t const h = my_hash_compare.hash( key ); - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - segment_index_t grow_segment = 0; - node *n, *tmp_n = 0; - restart: - {//lock scope - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); - return_value = false; - // get bucket - bucket_accessor b( this, h & m ); - - // find a node - n = search_bucket( key, b() ); - if( op_insert ) { - // [opt] insert a key - if( !n ) { - if( !tmp_n ) { - tmp_n = allocate_node(my_allocator, key, t); - } - if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion - // Rerun search_list, in case another thread inserted the item during the upgrade. - n = search_bucket( key, b() ); - if( is_valid(n) ) { // unfortunately, it did - b.downgrade_to_reader(); - goto exists; - } - } - if( check_mask_race(h, m) ) - goto restart; // b.release() is done in ~b(). - // insert and set flag to grow the container - grow_segment = insert_new_node( b(), n = tmp_n, m ); - tmp_n = 0; - return_value = true; - } - } else { // find or count - if( !n ) { - if( check_mask_race( h, m ) ) - goto restart; // b.release() is done in ~b(). TODO: replace by continue - return false; - } - return_value = true; - } - exists: - if( !result ) goto check_growth; - // TODO: the following seems as generic/regular operation - // acquire the item - if( !result->try_acquire( n->mutex, write ) ) { - for( tbb::internal::atomic_backoff backoff(true);; ) { - if( result->try_acquire( n->mutex, write ) ) break; - if( !backoff.bounded_pause() ) { - // the wait takes really long, restart the operation - b.release(); - __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" 
); - __TBB_Yield(); - m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - goto restart; - } - } - } - }//lock scope - result->my_node = n; - result->my_hash = h; -check_growth: - // [opt] grow the container - if( grow_segment ) { -#if __TBB_STATISTICS - my_info_resizes++; // concurrent ones -#endif - enable_segment( grow_segment ); - } - if( tmp_n ) // if op_insert only - delete_node( tmp_n ); - return return_value; -} - -template -template -std::pair concurrent_hash_map::internal_equal_range( const Key& key, I end_ ) const { - hashcode_t h = my_hash_compare.hash( key ); - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); - h &= m; - bucket *b = get_bucket( h ); - while( b->node_list == internal::rehash_req ) { - m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b = get_bucket( h &= m ); - } - node *n = search_bucket( key, b ); - if( !n ) - return std::make_pair(end_, end_); - iterator lower(*this, h, b, n), upper(lower); - return std::make_pair(lower, ++upper); -} - -template -bool concurrent_hash_map::exclude( const_accessor &item_accessor ) { - __TBB_ASSERT( item_accessor.my_node, NULL ); - node_base *const n = item_accessor.my_node; - hashcode_t const h = item_accessor.my_hash; - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); - do { - // get bucket - bucket_accessor b( this, h & m, /*writer=*/true ); - node_base **p = &b()->node_list; - while( *p && *p != n ) - p = &(*p)->next; - if( !*p ) { // someone else was first - if( check_mask_race( h, m ) ) - continue; - item_accessor.release(); - return false; - } - __TBB_ASSERT( *p == n, NULL ); - *p = n->next; // remove from container - my_size--; - break; - } while(true); - if( !item_accessor.is_writer() ) // need to get exclusive lock - item_accessor.upgrade_to_writer(); // return value means nothing here - item_accessor.release(); - delete_node( n ); // Only one thread can delete it - return true; -} - -template -bool concurrent_hash_map::erase( const Key &key ) { - node_base *n; - hashcode_t const h = my_hash_compare.hash( key ); - hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); -restart: - {//lock scope - // get bucket - bucket_accessor b( this, h & m ); - search: - node_base **p = &b()->node_list; - n = *p; - while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) { - p = &n->next; - n = *p; - } - if( !n ) { // not found, but mask could be changed - if( check_mask_race( h, m ) ) - goto restart; - return false; - } - else if( !b.is_writer() && !b.upgrade_to_writer() ) { - if( check_mask_race( h, m ) ) // contended upgrade, check mask - goto restart; - goto search; - } - *p = n->next; - my_size--; - } - { - typename node::scoped_t item_locker( n->mutex, /*write=*/true ); - } - // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! - delete_node( n ); // Only one thread can delete it due to write lock on the bucket - return true; -} - -template -void concurrent_hash_map::swap(concurrent_hash_map &table) { - //TODO: respect C++11 allocator_traits
::propogate_on_constainer_swap - using std::swap; - swap(this->my_allocator, table.my_allocator); - swap(this->my_hash_compare, table.my_hash_compare); - internal_swap(table); -} - -template -void concurrent_hash_map::rehash(size_type sz) { - reserve( sz ); // TODO: add reduction of number of buckets as well - hashcode_t mask = my_mask; - hashcode_t b = (mask+1)>>1; // size or first index of the last segment - __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2 - bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing - for(; b <= mask; b++, bp++ ) { - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one - hashcode_t h = b; bucket *b_old = bp; - do { - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b_old = get_bucket( h &= m ); - } while( b_old->node_list == internal::rehash_req ); - // now h - is index of the root rehashed bucket b_old - mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments - for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first ); - if( (c & mask) != h ) { // should be rehashed - *p = q->next; // exclude from b_old - bucket *b_new = get_bucket( c & mask ); - __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" ); - add_to_bucket( b_new, q ); - } else p = &q->next; // iterate to next item - } - } - } -#if TBB_USE_PERFORMANCE_WARNINGS - int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS - for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" ); -#if TBB_USE_PERFORMANCE_WARNINGS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n->next ) overpopulated_buckets++; -#endif -#if TBB_USE_ASSERT - for( ; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask; - __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); - } -#endif - } -#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS -#if TBB_USE_PERFORMANCE_WARNINGS - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? 
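    // Worked example of the warning condition below: with current_size = 1000
    // items in buckets = 1024 buckets, 624 raw empty buckets adjust to
    // 624 - 24 = 600, and 2*600 > 1000 trips the bad-hash-randomness warning;
    // tables with fewer than 512 buckets are never reported.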
- if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -} - -template -void concurrent_hash_map::clear() { - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS - int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif - bucket *bp = 0; - // check consistency - for( segment_index_t b = 0; b <= m; b++ ) { - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" ); -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n == internal::rehash_req ) buckets--; - else if( n->next ) overpopulated_buckets++; -#endif -#if __TBB_EXTRA_DEBUG - for(; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ); - h &= m; - __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); - } -#endif - } -#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS -#if __TBB_STATISTICS - printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d" - " concurrent: resizes=%u rehashes=%u restarts=%u\n", - current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets, - unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) ); - my_info_resizes = 0; // concurrent ones - my_info_restarts = 0; // race collisions - my_info_rehashes = 0; // invocations of rehash_bucket -#endif - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? - if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS - my_size = 0; - segment_index_t s = segment_index_of( m ); - __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); - cache_aligned_allocator alloc; - do { - __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); - segment_ptr_t buckets_ptr = my_table[s]; - size_type sz = segment_size( s ? 
s : 1 ); - for( segment_index_t i = 0; i < sz; i++ ) - for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) { - buckets_ptr[i].node_list = n->next; - delete_node( n ); - } - if( s >= first_block) // the first segment or the next - alloc.deallocate( buckets_ptr, sz ); - else if( s == embedded_block && embedded_block != first_block ) - alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets ); - if( s >= embedded_block ) my_table[s] = 0; - } while(s-- > 0); - my_mask = embedded_buckets - 1; -} - -template -void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { - reserve( source.my_size ); // TODO: load_factor? - hashcode_t mask = source.my_mask; - if( my_mask == mask ) { // optimized version - bucket *dst = 0, *src = 0; - bool rehash_required = false; - for( hashcode_t k = 0; k <= mask; k++ ) { - if( k & (k-2) ) ++dst,src++; // not the beginning of a segment - else { dst = get_bucket( k ); src = source.get_bucket( k ); } - __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = static_cast( src->node_list ); - if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets - rehash_required = true; - dst->node_list = internal::rehash_req; - } else for(; n; n = static_cast( n->next ) ) { - add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) ); - ++my_size; // TODO: replace by non-atomic op - } - } - if( rehash_required ) rehash(); - } else internal_copy( source.begin(), source.end() ); -} - -template -template -void concurrent_hash_map::internal_copy(I first, I last) { - hashcode_t m = my_mask; - for(; first != last; ++first) { - hashcode_t h = my_hash_compare.hash( (*first).first ); - bucket *b = get_bucket( h & m ); - __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = new( my_allocator ) node(*first); - add_to_bucket( b, n ); - ++my_size; // TODO: replace by non-atomic op - } -} - -} // namespace interface5 - -using interface5::concurrent_hash_map; - - -template -inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { - if(a.size() != b.size()) return false; - typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); - typename concurrent_hash_map::const_iterator j, j_end(b.end()); - for(; i != i_end; ++i) { - j = b.equal_range(i->first).first; - if( j == j_end || !(i->second == j->second) ) return false; - } - return true; -} - -template -inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) -{ return !(a == b); } - -template -inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) -{ a.swap( b ); } - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -#endif /* __TBB_concurrent_hash_map_H */ +#include "../oneapi/tbb/concurrent_hash_map.h" diff --git a/src/tbb/include/tbb/concurrent_lru_cache.h b/src/tbb/include/tbb/concurrent_lru_cache.h index dbf0f1f82..2757a234b 100644 --- a/src/tbb/include/tbb/concurrent_lru_cache.h +++ b/src/tbb/include/tbb/concurrent_lru_cache.h @@ -1,235 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_lru_cache_H -#define __TBB_concurrent_lru_cache_H - -#if ! 
TBB_PREVIEW_CONCURRENT_LRU_CACHE - #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h -#endif - -#include -#include - -#include "tbb_stddef.h" -#include "atomic.h" -#include "internal/_aggregator_impl.h" - -namespace tbb{ -namespace interface6 { - - -template -class concurrent_lru_cache : internal::no_assign{ -private: - typedef concurrent_lru_cache self_type; - typedef value_functor_type value_function_type; - typedef std::size_t ref_counter_type; - struct map_value_type; - typedef std::map map_storage_type; - typedef std::list lru_list_type; - struct map_value_type { - value_type my_value; - ref_counter_type my_ref_counter; - typename lru_list_type::iterator my_lru_list_iterator; - bool my_is_ready; - - map_value_type (value_type const& a_value, ref_counter_type a_ref_counter, typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready) - : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator), my_is_ready(a_is_ready) - {} - }; - - class handle_object; + http://www.apache.org/licenses/LICENSE-2.0 - struct aggregator_operation; - typedef aggregator_operation aggregated_operation_type; - typedef tbb::internal::aggregating_functor aggregator_function_type; - friend class tbb::internal::aggregating_functor; - typedef tbb::internal::aggregator aggregator_type; - -private: - value_function_type my_value_function; - std::size_t const my_number_of_lru_history_items; - map_storage_type my_map_storage; - lru_list_type my_lru_list; - aggregator_type my_aggregator; - -public: - typedef handle_object handle; - -public: - concurrent_lru_cache(value_function_type f, std::size_t number_of_lru_history_items) - : my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items) - { - my_aggregator.initialize_handler(aggregator_function_type(this)); - } - - handle_object operator[](key_type k){ - retrieve_aggregator_operation op(k); - my_aggregator.execute(&op); - if (op.is_new_value_needed()){ - op.result().second.my_value = my_value_function(k); - __TBB_store_with_release(op.result().second.my_is_ready, true); - }else{ - tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready,false); - } - return handle_object(*this,op.result()); - } -private: - void signal_end_of_usage(typename map_storage_type::reference value_ref){ - signal_end_of_usage_aggregator_operation op(value_ref); - my_aggregator.execute(&op); - } - -private: - struct handle_move_t:no_assign{ - concurrent_lru_cache & my_cache_ref; - typename map_storage_type::reference my_map_record_ref; - handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {}; - }; - class handle_object { - concurrent_lru_cache * my_cache_pointer; - typename map_storage_type::reference my_map_record_ref; - public: - handle_object(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_pointer(&cache_ref), my_map_record_ref(value_ref) {} - handle_object(handle_move_t m):my_cache_pointer(&m.my_cache_ref), my_map_record_ref(m.my_map_record_ref){} - operator handle_move_t(){ return move(*this);} - value_type& value(){ - __TBB_ASSERT(my_cache_pointer,"get value from moved from object?"); - return my_map_record_ref.second.my_value; - } - ~handle_object(){ - if (my_cache_pointer){ - my_cache_pointer->signal_end_of_usage(my_map_record_ref); - } - } - private: - friend handle_move_t move(handle_object& h){ - return handle_object::move(h); - } - static 
handle_move_t move(handle_object& h){ - __TBB_ASSERT(h.my_cache_pointer,"move from the same object twice ?"); - concurrent_lru_cache * cache_pointer = NULL; - std::swap(cache_pointer,h.my_cache_pointer); - return handle_move_t(*cache_pointer,h.my_map_record_ref); - } - private: - void operator=(handle_object&); -#if __SUNPRO_CC - // Presumably due to a compiler error, private copy constructor - // breaks expressions like handle h = cache[key]; - public: -#endif - handle_object(handle_object &); - }; -private: - //TODO: looks like aggregator_operation is a perfect match for statically typed variant type - struct aggregator_operation : tbb::internal::aggregated_operation{ - enum e_op_type {op_retive, op_signal_end_of_usage}; - //TODO: try to use pointer to function apply_visitor here - //TODO: try virtual functions and measure the difference - e_op_type my_operation_type; - aggregator_operation(e_op_type operation_type): my_operation_type(operation_type) {} - void cast_and_handle(self_type& container ){ - if (my_operation_type==op_retive){ - static_cast(this)->handle(container); - }else{ - static_cast(this)->handle(container); - } - } - }; - struct retrieve_aggregator_operation : aggregator_operation, private internal::no_assign { - key_type my_key; - typename map_storage_type::pointer my_result_map_record_pointer; - bool my_is_new_value_needed; - retrieve_aggregator_operation(key_type key):aggregator_operation(aggregator_operation::op_retive),my_key(key),my_is_new_value_needed(false){} - void handle(self_type& container ){ - my_result_map_record_pointer = & container.retrieve_serial(my_key,my_is_new_value_needed); - } - typename map_storage_type::reference result(){ return * my_result_map_record_pointer; } - bool is_new_value_needed(){return my_is_new_value_needed;} - }; - struct signal_end_of_usage_aggregator_operation : aggregator_operation, private internal::no_assign { - typename map_storage_type::reference my_map_record_ref; - signal_end_of_usage_aggregator_operation(typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){} - void handle(self_type& container ){ - container.signal_end_of_usage_serial(my_map_record_ref); - } - }; - -private: - void handle_operations(aggregator_operation* op_list){ - while(op_list){ - op_list->cast_and_handle(*this); - aggregator_operation* tmp = op_list; - op_list=op_list->next; - tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1)); - } - } - -private: - typename map_storage_type::reference retrieve_serial(key_type k, bool& is_new_value_needed){ - typename map_storage_type::iterator it = my_map_storage.find(k); - if (it == my_map_storage.end()){ - it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),false))); - is_new_value_needed = true; - }else { - typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator; - if (list_it!=my_lru_list.end()) { - __TBB_ASSERT(!it->second.my_ref_counter,"item to be evicted should not have a live references"); - //item is going to be used. Therefore it is not a subject for eviction - //so - remove it from LRU history. 
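            // Invariant kept by retrieve_serial() and
            // signal_end_of_usage_serial(): an entry sits on my_lru_list only
            // while my_ref_counter == 0. Taking a handle erases it from the
            // list below; dropping the last handle pushes it back to the
            // front, evicting the oldest unused entries so the history stays
            // within my_number_of_lru_history_items.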
- my_lru_list.erase(list_it); - it->second.my_lru_list_iterator= my_lru_list.end(); - } - } - ++(it->second.my_ref_counter); - return *it; - } - - void signal_end_of_usage_serial(typename map_storage_type::reference map_record_ref){ - typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first); - __TBB_ASSERT(it!=my_map_storage.end(),"cache should not return past-end iterators to outer world"); - __TBB_ASSERT(&(*it) == &map_record_ref,"dangling reference has been returned to outside world? data race ?"); - __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it), - "object in use should not be in list of unused objects "); - if (! --(it->second.my_ref_counter)){ - //it was the last reference so put it to the LRU history - if (my_lru_list.size()>=my_number_of_lru_history_items){ - //evict items in order to get a space - size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items; - for (size_t i=0; isecond.my_ref_counter,"item to be evicted should not have a live references"); - my_lru_list.pop_back(); - my_map_storage.erase(it_to_evict); - } - } - my_lru_list.push_front(it); - it->second.my_lru_list_iterator = my_lru_list.begin(); - } - } -}; -} // namespace interface6 - -using interface6::concurrent_lru_cache; + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -} // namespace tbb -#endif //__TBB_concurrent_lru_cache_H +#include "../oneapi/tbb/concurrent_lru_cache.h" diff --git a/src/tbb/include/tbb/concurrent_map.h b/src/tbb/include/tbb/concurrent_map.h new file mode 100644 index 000000000..84f59d7e6 --- /dev/null +++ b/src/tbb/include/tbb/concurrent_map.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2019-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../oneapi/tbb/concurrent_map.h" diff --git a/src/tbb/include/tbb/concurrent_priority_queue.h b/src/tbb/include/tbb/concurrent_priority_queue.h index 245034fb2..3b27130b1 100644 --- a/src/tbb/include/tbb/concurrent_priority_queue.h +++ b/src/tbb/include/tbb/concurrent_priority_queue.h @@ -1,457 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at

-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_concurrent_priority_queue_H
-#define __TBB_concurrent_priority_queue_H
-
-#include "atomic.h"
-#include "cache_aligned_allocator.h"
-#include "tbb_exception.h"
-#include "tbb_stddef.h"
-#include "tbb_profiling.h"
-#include "internal/_aggregator_impl.h"
-#include <vector>
-#include <iterator>
-#include <functional>
-
-#if __TBB_INITIALIZER_LISTS_PRESENT
-    #include <initializer_list>
-#endif
-
-namespace tbb {
-namespace interface5 {
-
-using namespace tbb::internal;
-
-//! Concurrent priority queue
-template <typename T, typename Compare=std::less<T>, typename A=cache_aligned_allocator<T> >
-class concurrent_priority_queue {
- public:
-    //! Element type in the queue.
-    typedef T value_type;
-
-    //! Reference type
-    typedef T& reference;
-
-    //! Const reference type
-    typedef const T& const_reference;
-
-    //! Integral type for representing size of the queue.
-    typedef size_t size_type;
-
-    //! Difference type for iterator
-    typedef ptrdiff_t difference_type;
-
-    //! Allocator type
-    typedef A allocator_type;
-
-    //! Constructs a new concurrent_priority_queue with default capacity
-    explicit concurrent_priority_queue(const allocator_type& a = allocator_type()) : mark(0), my_size(0), data(a)
-    {
-        my_aggregator.initialize_handler(my_functor_t(this));
-    }
-
-    //! Constructs a new concurrent_priority_queue with init_sz capacity
-    explicit concurrent_priority_queue(size_type init_capacity, const allocator_type& a = allocator_type()) :
-        mark(0), my_size(0), data(a)
-    {
-        data.reserve(init_capacity);
-        my_aggregator.initialize_handler(my_functor_t(this));
-    }
-
-    //! [begin,end) constructor
-    template<typename InputIterator>
-    concurrent_priority_queue(InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) :
-        mark(0), data(begin, end, a)
-    {
-        my_aggregator.initialize_handler(my_functor_t(this));
-        heapify();
-        my_size = data.size();
-    }
-
-#if __TBB_INITIALIZER_LISTS_PRESENT
-    //! Constructor from std::initializer_list
-    concurrent_priority_queue(std::initializer_list<T> init_list, const allocator_type &a = allocator_type()) :
-        mark(0),data(init_list.begin(), init_list.end(), a)
-    {
-        my_aggregator.initialize_handler(my_functor_t(this));
-        heapify();
-        my_size = data.size();
-    }
-#endif //# __TBB_INITIALIZER_LISTS_PRESENT
-
-    //! Copy constructor
-    /** This operation is unsafe if there are pending concurrent operations on the src queue. */
-    explicit concurrent_priority_queue(const concurrent_priority_queue& src) : mark(src.mark),
-        my_size(src.my_size), data(src.data.begin(), src.data.end(), src.data.get_allocator())
-    {
-        my_aggregator.initialize_handler(my_functor_t(this));
-        heapify();
-    }
-
-    //!
Copy constructor with specific allocator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue(const concurrent_priority_queue& src, const allocator_type& a) : mark(src.mark), - my_size(src.my_size), data(src.data.begin(), src.data.end(), a) - { - my_aggregator.initialize_handler(my_functor_t(this)); - heapify(); - } - - //! Assignment operator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue& operator=(const concurrent_priority_queue& src) { - if (this != &src) { - vector_t(src.data.begin(), src.data.end(), src.data.get_allocator()).swap(data); - mark = src.mark; - my_size = src.my_size; - } - return *this; - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move constructor - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue(concurrent_priority_queue&& src) : mark(src.mark), - my_size(src.my_size), data(std::move(src.data)) - { - my_aggregator.initialize_handler(my_functor_t(this)); - } - - //! Move constructor with specific allocator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue(concurrent_priority_queue&& src, const allocator_type& a) : mark(src.mark), - my_size(src.my_size), -#if __TBB_ALLOCATOR_TRAITS_PRESENT - data(std::move(src.data), a) -#else - // Some early version of C++11 STL vector does not have a constructor of vector(vector&& , allocator). - // It seems that the reason is absence of support of allocator_traits (stateful allocators). - data(a) -#endif //__TBB_ALLOCATOR_TRAITS_PRESENT - { - my_aggregator.initialize_handler(my_functor_t(this)); -#if !__TBB_ALLOCATOR_TRAITS_PRESENT - if (a != src.data.get_allocator()){ - data.reserve(src.data.size()); - data.assign(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end())); - }else{ - data = std::move(src.data); - } -#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT - } - - //! Move assignment operator - /** This operation is unsafe if there are pending concurrent operations on the src queue. */ - concurrent_priority_queue& operator=( concurrent_priority_queue&& src) { - if (this != &src) { - mark = src.mark; - my_size = src.my_size; -#if !__TBB_ALLOCATOR_TRAITS_PRESENT - if (data.get_allocator() != src.data.get_allocator()){ - vector_t(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end()), data.get_allocator()).swap(data); - }else -#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT - { - data = std::move(src.data); - } - } - return *this; - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - //! Assign the queue from [begin,end) range, not thread-safe - template - void assign(InputIterator begin, InputIterator end) { - vector_t(begin, end, data.get_allocator()).swap(data); - mark = 0; - my_size = data.size(); - heapify(); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Assign the queue from std::initializer_list, not thread-safe - void assign(std::initializer_list il) { this->assign(il.begin(), il.end()); } - - //! Assign from std::initializer_list, not thread-safe - concurrent_priority_queue& operator=(std::initializer_list il) { - this->assign(il.begin(), il.end()); - return *this; - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - //! Returns true if empty, false otherwise - /** Returned value may not reflect results of pending operations. - This operation reads shared data and will trigger a race condition. 
*/ - bool empty() const { return size()==0; } + http://www.apache.org/licenses/LICENSE-2.0 - //! Returns the current number of elements contained in the queue - /** Returned value may not reflect results of pending operations. - This operation reads shared data and will trigger a race condition. */ - size_type size() const { return __TBB_load_with_acquire(my_size); } - - //! Pushes elem onto the queue, increasing capacity of queue if necessary - /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - void push(const_reference elem) { - cpq_operation op_data(elem, PUSH_OP); - my_aggregator.execute(&op_data); - if (op_data.status == FAILED) // exception thrown - throw_exception(eid_bad_alloc); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Pushes elem onto the queue, increasing capacity of queue if necessary - /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - void push(value_type &&elem) { - cpq_operation op_data(elem, PUSH_RVALUE_OP); - my_aggregator.execute(&op_data); - if (op_data.status == FAILED) // exception thrown - throw_exception(eid_bad_alloc); - } - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - //! Constructs a new element using args as the arguments for its construction and pushes it onto the queue */ - /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - template - void emplace(Args&&... args) { - push(value_type(std::forward(args)...)); - } -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Gets a reference to and removes highest priority element - /** If a highest priority element was found, sets elem and returns true, - otherwise returns false. - This operation can be safely used concurrently with other push, try_pop or emplace operations. */ - bool try_pop(reference elem) { - cpq_operation op_data(POP_OP); - op_data.elem = &elem; - my_aggregator.execute(&op_data); - return op_data.status==SUCCEEDED; - } - - //! Clear the queue; not thread-safe - /** This operation is unsafe if there are pending concurrent operations on the queue. - Resets size, effectively emptying queue; does not free space. - May not clear elements added in pending operations. */ - void clear() { - data.clear(); - mark = 0; - my_size = 0; - } - - //! Swap this queue with another; not thread-safe - /** This operation is unsafe if there are pending concurrent operations on the queue. */ - void swap(concurrent_priority_queue& q) { - using std::swap; - data.swap(q.data); - swap(mark, q.mark); - swap(my_size, q.my_size); - } - - //! Return allocator object - allocator_type get_allocator() const { return data.get_allocator(); } - - private: - enum operation_type {INVALID_OP, PUSH_OP, POP_OP, PUSH_RVALUE_OP}; - enum operation_status { WAIT=0, SUCCEEDED, FAILED }; - - class cpq_operation : public aggregated_operation { - public: - operation_type type; - union { - value_type *elem; - size_type sz; - }; - cpq_operation(const_reference e, operation_type t) : - type(t), elem(const_cast(&e)) {} - cpq_operation(operation_type t) : type(t) {} - }; - - class my_functor_t { - concurrent_priority_queue *cpq; - public: - my_functor_t() {} - my_functor_t(concurrent_priority_queue *cpq_) : cpq(cpq_) {} - void operator()(cpq_operation* op_list) { - cpq->handle_operations(op_list); - } - }; - - typedef tbb::internal::aggregator< my_functor_t, cpq_operation > aggregator_t; - aggregator_t my_aggregator; - //! 
Padding added to avoid false sharing - char padding1[NFS_MaxLineSize - sizeof(aggregator_t)]; - //! The point at which unsorted elements begin - size_type mark; - __TBB_atomic size_type my_size; - Compare compare; - //! Padding added to avoid false sharing - char padding2[NFS_MaxLineSize - (2*sizeof(size_type)) - sizeof(Compare)]; - //! Storage for the heap of elements in queue, plus unheapified elements - /** data has the following structure: - - binary unheapified - heap elements - ____|_______|____ - | | | - v v v - [_|...|_|_|...|_| |...| ] - 0 ^ ^ ^ - | | |__capacity - | |__my_size - |__mark - - Thus, data stores the binary heap starting at position 0 through - mark-1 (it may be empty). Then there are 0 or more elements - that have not yet been inserted into the heap, in positions - mark through my_size-1. */ - typedef std::vector vector_t; - vector_t data; - - void handle_operations(cpq_operation *op_list) { - cpq_operation *tmp, *pop_list=NULL; - - __TBB_ASSERT(mark == data.size(), NULL); - - // First pass processes all constant (amortized; reallocation may happen) time pushes and pops. - while (op_list) { - // ITT note: &(op_list->status) tag is used to cover accesses to op_list - // node. This thread is going to handle the operation, and so will acquire it - // and perform the associated operation w/o triggering a race condition; the - // thread that created the operation is waiting on the status field, so when - // this thread is done with the operation, it will perform a - // store_with_release to give control back to the waiting thread in - // aggregator::insert_operation. - call_itt_notify(acquired, &(op_list->status)); - __TBB_ASSERT(op_list->type != INVALID_OP, NULL); - tmp = op_list; - op_list = itt_hide_load_word(op_list->next); - if (tmp->type == POP_OP) { - if (mark < data.size() && - compare(data[0], data[data.size()-1])) { - // there are newly pushed elems and the last one - // is higher than top - *(tmp->elem) = move(data[data.size()-1]); - __TBB_store_with_release(my_size, my_size-1); - itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); - data.pop_back(); - __TBB_ASSERT(mark<=data.size(), NULL); - } - else { // no convenient item to pop; postpone - itt_hide_store_word(tmp->next, pop_list); - pop_list = tmp; - } - } else { // PUSH_OP or PUSH_RVALUE_OP - __TBB_ASSERT(tmp->type == PUSH_OP || tmp->type == PUSH_RVALUE_OP, "Unknown operation" ); - __TBB_TRY{ - if (tmp->type == PUSH_OP) { - data.push_back(*(tmp->elem)); - } else { - data.push_back(move(*(tmp->elem))); - } - __TBB_store_with_release(my_size, my_size + 1); - itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); - } __TBB_CATCH(...) 
{ - itt_store_word_with_release(tmp->status, uintptr_t(FAILED)); - } - } - } - - // second pass processes pop operations - while (pop_list) { - tmp = pop_list; - pop_list = itt_hide_load_word(pop_list->next); - __TBB_ASSERT(tmp->type == POP_OP, NULL); - if (data.empty()) { - itt_store_word_with_release(tmp->status, uintptr_t(FAILED)); - } - else { - __TBB_ASSERT(mark<=data.size(), NULL); - if (mark < data.size() && - compare(data[0], data[data.size()-1])) { - // there are newly pushed elems and the last one is - // higher than top - *(tmp->elem) = move(data[data.size()-1]); - __TBB_store_with_release(my_size, my_size-1); - itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); - data.pop_back(); - } - else { // extract top and push last element down heap - *(tmp->elem) = move(data[0]); - __TBB_store_with_release(my_size, my_size-1); - itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); - reheap(); - } - } - } - - // heapify any leftover pushed elements before doing the next - // batch of operations - if (mark0) mark = 1; - for (; mark>1; - if (!compare(data[parent], to_place)) break; - data[cur_pos] = move(data[parent]); - cur_pos = parent; - } while( cur_pos ); - data[cur_pos] = move(to_place); - } - } - - //! Re-heapify after an extraction - /** Re-heapify by pushing last element down the heap from the root. */ - void reheap() { - size_type cur_pos=0, child=1; - - while (child < mark) { - size_type target = child; - if (child+1 < mark && compare(data[child], data[child+1])) - ++target; - // target now has the higher priority child - if (compare(data[target], data[data.size()-1])) break; - data[cur_pos] = move(data[target]); - cur_pos = target; - child = (cur_pos<<1)+1; - } - if (cur_pos != data.size()-1) - data[cur_pos] = move(data[data.size()-1]); - data.pop_back(); - if (mark > data.size()) mark = data.size(); - } -}; - -} // namespace interface5 - -using interface5::concurrent_priority_queue; - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_concurrent_priority_queue_H */ +#include "../oneapi/tbb/concurrent_priority_queue.h" diff --git a/src/tbb/include/tbb/concurrent_queue.h b/src/tbb/include/tbb/concurrent_queue.h index 2cead237c..d81a58b88 100644 --- a/src/tbb/include/tbb/concurrent_queue.h +++ b/src/tbb/include/tbb/concurrent_queue.h @@ -1,462 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
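The hunk above removes the GPL-licensed aggregator-based implementation, but the user-facing surface of concurrent_priority_queue is the same in the oneTBB header the file now forwards to, so client code builds against either. A minimal usage sketch, not part of the patch, assuming the TBB headers are on the include path and the program links against TBB; the thread and element counts are arbitrary:

    #include <tbb/concurrent_priority_queue.h>

    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
        tbb::concurrent_priority_queue<int> q;

        // push() and try_pop() may be called concurrently with one another.
        std::vector<std::thread> producers;
        for (int t = 0; t < 4; ++t)
            producers.emplace_back([&q, t] {
                for (int i = 0; i < 1000; ++i)
                    q.push(t * 1000 + i);
            });
        for (auto& th : producers)
            th.join();

        int top;
        if (q.try_pop(top)) // highest element first under the default std::less
            std::printf("top: %d\n", top); // prints 3999 here
        return 0;
    }

As the doc comments above warn, size() and empty() are only advisory snapshots under concurrency, which is why the sketch pops only after all producers have joined.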
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_H -#define __TBB_concurrent_queue_H - -#include "internal/_concurrent_queue_impl.h" - -namespace tbb { - -namespace strict_ppl { - -//! A high-performance thread-safe non-blocking concurrent queue. -/** Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_queue: public internal::concurrent_queue_base_v3 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - //! Allocates a block of size n (bytes) - /*override*/ virtual void *allocate_block( size_t n ) { - void *b = reinterpret_cast(my_allocator.allocate( n )); - if( !b ) - internal::throw_exception(internal::eid_bad_alloc); - return b; - } - - //! Deallocates block created by allocate_block. - /*override*/ virtual void deallocate_block( void *b, size_t n ) { - my_allocator.deallocate( reinterpret_cast(b), n ); - } - - static void copy_construct_item(T* location, const void* src){ - new (location) T(*static_cast(src)); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - static void move_construct_item(T* location, const void* src) { - new (location) T( std::move(*static_cast(const_cast(src))) ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - typedef size_t size_type; - - //! Difference type for iterator - typedef ptrdiff_t difference_type; - - //! Allocator type - typedef A allocator_type; - - //! Construct empty queue - explicit concurrent_queue(const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - } - - //! [begin,end) constructor - template - concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - for( ; begin != end; ++begin ) - this->push(*begin); - } - - //! Copy constructor - concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) : - internal::concurrent_queue_base_v3(), my_allocator( a ) - { - this->assign( src, copy_construct_item ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
Move constructors - concurrent_queue( concurrent_queue&& src ) : - internal::concurrent_queue_base_v3(), my_allocator( std::move(src.my_allocator) ) - { - this->internal_swap( src ); - } - - concurrent_queue( concurrent_queue&& src, const allocator_type& a ) : - internal::concurrent_queue_base_v3(), my_allocator( a ) - { - // checking that memory allocated by one instance of allocator can be deallocated - // with another - if( my_allocator == src.my_allocator) { - this->internal_swap( src ); - } else { - // allocators are different => performing per-element move - this->assign( src, move_construct_item ); - src.clear(); - } - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Destroy queue - ~concurrent_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - this->internal_push( &source, copy_construct_item ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - void push( T&& source ) { - this->internal_push( &source, move_construct_item ); - } - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - void emplace( Arguments&&... args ) { - push( T(std::forward( args )...) ); - } -#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& result ) { - return this->internal_try_pop( &result ); - } - - //! Return the number of items in the queue; thread unsafe - size_type unsafe_size() const {return this->internal_size();} - - //! Equivalent to size()==0. - bool empty() const {return this->internal_empty();} - - //! Clear the queue. not thread-safe. - void clear() ; - - //! Return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} -} ; - -template -concurrent_queue::~concurrent_queue() { - clear(); - this->internal_finish_clear(); -} - -template -void concurrent_queue::clear() { - while( !empty() ) { - T value; - this->internal_try_pop(&value); - } -} - -} // namespace strict_ppl - -//! A high-performance thread-safe blocking concurrent bounded queue. -/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics. - Note that method names agree with the PPL-style concurrent queue. - Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_bounded_queue: public internal::concurrent_queue_base_v8 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - typedef typename concurrent_queue_base_v3::padded_page padded_page; - typedef typename concurrent_queue_base_v3::copy_specifics copy_specifics; - - //! 
Class used to ensure exception-safety of method "pop" - class destroyer: internal::no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - T& get_ref( page& p, size_t index ) { - __TBB_ASSERT( index(static_cast(&p))->last)[index]; - } + http://www.apache.org/licenses/LICENSE-2.0 - /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - /*override*/ virtual void move_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T( std::move(*static_cast(const_cast(src))) ); - } -#else - /*override*/ virtual void move_item( page&, size_t, const void* ) { - __TBB_ASSERT( false, "Unreachable code" ); - } -#endif - - /*override*/ virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( get_ref( const_cast(src), sindex ) ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - /*override*/ virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( std::move(get_ref( const_cast(src), sindex )) ); - } -#else - /*override*/ virtual void move_page_item( page&, size_t, const page&, size_t ) { - __TBB_ASSERT( false, "Unreachable code" ); - } -#endif - - /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = tbb::internal::move( from ); - } - - /*override*/ virtual page *allocate_page() { - size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); - page *p = reinterpret_cast(my_allocator.allocate( n )); - if( !p ) - internal::throw_exception(internal::eid_bad_alloc); - return p; - } - - /*override*/ virtual void deallocate_page( page *p ) { - size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); - my_allocator.deallocate( reinterpret_cast(p), n ); - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Allocator type - typedef A allocator_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - /** Note that the size_type is a signed integral type. - This is because the size can be negative if there are pending pops without corresponding pushes. */ - typedef std::ptrdiff_t size_type; - - //! Difference type for iterator - typedef std::ptrdiff_t difference_type; - - //! Construct empty queue - explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : - concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - } - - //! Copy constructor - concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type()) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - assign( src ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
Move constructors - concurrent_bounded_queue( concurrent_bounded_queue&& src ) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( std::move(src.my_allocator) ) - { - internal_swap( src ); - } - - concurrent_bounded_queue( concurrent_bounded_queue&& src, const allocator_type& a ) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - // checking that memory allocated by one instance of allocator can be deallocated - // with another - if( my_allocator == src.my_allocator) { - this->internal_swap( src ); - } else { - // allocators are different => performing per-element move - this->move_content( src ); - src.clear(); - } - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! [begin,end) constructor - template - concurrent_bounded_queue( InputIterator begin, InputIterator end, - const allocator_type& a = allocator_type()) - : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) - { - for( ; begin != end; ++begin ) - internal_push_if_not_full(&*begin); - } - - //! Destroy queue - ~concurrent_bounded_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move an item at tail of queue. - void push( T&& source ) { - internal_push_move( &source ); - } - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - void emplace( Arguments&&... args ) { - push( T(std::forward( args )...) ); - } -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Dequeue item from head of queue. - /** Block until an item becomes available, and then dequeue it. */ - void pop( T& destination ) { - internal_pop( &destination ); - } - -#if TBB_USE_EXCEPTIONS - //! Abort all pending queue operations - void abort() { - internal_abort(); - } -#endif - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool try_push( const T& source ) { - return internal_push_if_not_full( &source ); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! Move an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool try_push( T&& source ) { - return internal_push_move_if_not_full( &source ); - } -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - bool try_emplace( Arguments&&... args ) { - return try_push( T(std::forward( args )...) ); - } -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& destination ) { - return internal_pop_if_present( &destination ); - } - - //! Return number of pushes minus number of pops. - /** Note that the result can be negative if there are pops waiting for the - corresponding pushes. The result can also exceed capacity() if there - are push operations in flight. */ - size_type size() const {return internal_size();} - - //! Equivalent to size()<=0. - bool empty() const {return internal_empty();} - - //! Maximum number of allowed elements - size_type capacity() const { - return my_capacity; - } - - //! Set the capacity - /** Setting the capacity to 0 causes subsequent try_push operations to always fail, - and subsequent push operations to block forever. 
*/
-    void set_capacity( size_type new_capacity ) {
-        internal_set_capacity( new_capacity, sizeof(T) );
-    }
-
-    //! return allocator object
-    allocator_type get_allocator() const { return this->my_allocator; }
-
-    //! clear the queue. not thread-safe.
-    void clear() ;
-
-    typedef internal::concurrent_queue_iterator<concurrent_bounded_queue,T> iterator;
-    typedef internal::concurrent_queue_iterator<concurrent_bounded_queue,const T> const_iterator;
-
-    //------------------------------------------------------------------------
-    // The iterators are intended only for debugging.  They are slow and not thread safe.
-    //------------------------------------------------------------------------
-    iterator unsafe_begin() {return iterator(*this);}
-    iterator unsafe_end() {return iterator();}
-    const_iterator unsafe_begin() const {return const_iterator(*this);}
-    const_iterator unsafe_end() const {return const_iterator();}
-
-};
-
-template<typename T, class A>
-concurrent_bounded_queue<T,A>::~concurrent_bounded_queue() {
-    clear();
-    internal_finish_clear();
-}
-
-template<typename T, class A>
-void concurrent_bounded_queue<T,A>::clear() {
-    while( !empty() ) {
-        T value;
-        internal_pop_if_present(&value);
-    }
-}
-
-using strict_ppl::concurrent_queue;
-
-} // namespace tbb
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/

-#endif /* __TBB_concurrent_queue_H */
+#include "../oneapi/tbb/concurrent_queue.h"
diff --git a/src/tbb/include/tbb/concurrent_set.h b/src/tbb/include/tbb/concurrent_set.h
new file mode 100644
index 000000000..cf4652f59
--- /dev/null
+++ b/src/tbb/include/tbb/concurrent_set.h
@@ -0,0 +1,17 @@
+/*
+    Copyright (c) 2019-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "../oneapi/tbb/concurrent_set.h"
diff --git a/src/tbb/include/tbb/concurrent_unordered_map.h b/src/tbb/include/tbb/concurrent_unordered_map.h
index b2f54174a..9475c06cf 100644
--- a/src/tbb/include/tbb/concurrent_unordered_map.h
+++ b/src/tbb/include/tbb/concurrent_unordered_map.h
@@ -1,326 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation

-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
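In contrast to the non-blocking strict_ppl::concurrent_queue deleted earlier in this hunk, concurrent_bounded_queue couples a capacity with blocking semantics: push() blocks while the queue is full, pop() blocks while it is empty, and per the comment above, a capacity of 0 makes try_push() always fail and push() block forever. A producer/consumer sketch under the same build assumptions as the previous example; the capacity of 8 and the -1 sentinel are arbitrary choices, not part of the API:

    #include <tbb/concurrent_queue.h>

    #include <cstdio>
    #include <thread>

    int main() {
        tbb::concurrent_bounded_queue<int> q;
        q.set_capacity(8); // push() now blocks once 8 items are in flight

        std::thread producer([&q] {
            for (int i = 0; i < 100; ++i)
                q.push(i);  // blocks while the queue is full
            q.push(-1);     // sentinel: tells the consumer to stop
        });

        std::thread consumer([&q] {
            for (;;) {
                int item;
                q.pop(item); // blocks while the queue is empty
                if (item == -1)
                    break;
                std::printf("%d\n", item);
            }
        });

        producer.join();
        consumer.join();
        return 0;
    }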
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. */ - -#ifndef __TBB_concurrent_unordered_map_H -#define __TBB_concurrent_unordered_map_H - -#include "internal/_concurrent_unordered_impl.h" - -namespace tbb -{ - -namespace interface5 { - -// Template class for hash map traits -template -class concurrent_unordered_map_traits -{ -protected: - typedef std::pair value_type; - typedef Key key_type; - typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; - enum { allow_multimapping = Allow_multimapping }; - - concurrent_unordered_map_traits() : my_hash_compare() {} - concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {} - - class value_compare : public std::binary_function - { - friend class concurrent_unordered_map_traits; - - public: - bool operator()(const value_type& left, const value_type& right) const - { - return (my_hash_compare(left.first, right.first)); - } - - value_compare(const hash_compare& comparator) : my_hash_compare(comparator) {} - - protected: - hash_compare my_hash_compare; // the comparator predicate for keys - }; - - template - static const Key& get_key(const std::pair& value) { - return (value.first); - } - - hash_compare my_hash_compare; // the comparator predicate for keys -}; - -template , typename Key_equality = std::equal_to, - typename Allocator = tbb::tbb_allocator > > -class concurrent_unordered_map : - public internal::concurrent_unordered_base< concurrent_unordered_map_traits, Allocator, false> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef concurrent_unordered_map_traits traits_type; - typedef internal::concurrent_unordered_base< traits_type > base_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::end; - using base_type::find; - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef T mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator 
iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_map(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - } - - concurrent_unordered_map(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_map(const concurrent_unordered_map& table) - : base_type(table) - { - } - - concurrent_unordered_map& operator=(const concurrent_unordered_map& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_map(concurrent_unordered_map&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_map& operator=(concurrent_unordered_map&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN + http://www.apache.org/licenses/LICENSE-2.0 - concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_map(concurrent_unordered_map&& table, const Allocator& a) : base_type(std::move(table), a) - { - } -#endif - // Observers - mapped_type& operator[](const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - where = insert(std::pair(key, mapped_type())).first; - } - - return ((*where).second); - } - - mapped_type& at(const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } - - const mapped_type& at(const key_type& key) const - { - const_iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } -}; - -template < typename Key, typename T, typename Hasher = tbb::tbb_hash, typename Key_equality = std::equal_to, - typename Allocator = tbb::tbb_allocator > > -class concurrent_unordered_multimap : - public internal::concurrent_unordered_base< concurrent_unordered_map_traits< Key, T, - internal::hash_compare, Allocator, true> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef 
concurrent_unordered_map_traits traits_type; - typedef internal::concurrent_unordered_base base_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef T mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_multimap(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - } - - concurrent_unordered_multimap(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets,key_compare(_Hasher,_Key_equality), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! 
Constructor from initializer_list - concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_multimap(const concurrent_unordered_multimap& table) - : base_type(table) - { - } - - concurrent_unordered_multimap& operator=(const concurrent_unordered_multimap& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_multimap(concurrent_unordered_multimap&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_multimap& operator=(concurrent_unordered_multimap&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_multimap(concurrent_unordered_multimap&& table, const Allocator& a) : base_type(std::move(table), a) - { - } -#endif -}; -} // namespace interface5 - -using interface5::concurrent_unordered_map; -using interface5::concurrent_unordered_multimap; - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif// __TBB_concurrent_unordered_map_H +#include "../oneapi/tbb/concurrent_unordered_map.h" diff --git a/src/tbb/include/tbb/concurrent_unordered_set.h b/src/tbb/include/tbb/concurrent_unordered_set.h index 846351869..81a8f9c37 100644 --- a/src/tbb/include/tbb/concurrent_unordered_set.h +++ b/src/tbb/include/tbb/concurrent_unordered_set.h @@ -1,269 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. 
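The operator[] and at() accessors deleted above mirror their std::unordered_map counterparts: operator[] inserts a default-constructed mapped value for a missing key, while at() throws when the key is absent. A brief sketch under the same build assumptions; the key names are arbitrary:

    #include <tbb/concurrent_unordered_map.h>

    #include <cstdio>
    #include <exception>
    #include <string>

    int main() {
        tbb::concurrent_unordered_map<std::string, int> counts;

        counts["apples"] += 1;       // inserts {"apples", 0}, then increments
        counts.insert({"pears", 2}); // insert() is safe alongside concurrent finds

        try {
            std::printf("%d\n", counts.at("bananas"));
        } catch (const std::exception&) {
            std::puts("no bananas"); // at() threw: the key was never inserted
        }
        return 0;
    }

Note that only the find-or-insert step of operator[] is concurrency-safe; the subsequent += on the mapped value is an ordinary data race if two threads update the same key.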
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. */ - -#ifndef __TBB_concurrent_unordered_set_H -#define __TBB_concurrent_unordered_set_H - -#include "internal/_concurrent_unordered_impl.h" - -namespace tbb -{ - -namespace interface5 { - -// Template class for hash set traits -template -class concurrent_unordered_set_traits -{ -protected: - typedef Key value_type; - typedef Key key_type; - typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; - enum { allow_multimapping = Allow_multimapping }; - - concurrent_unordered_set_traits() : my_hash_compare() {} - concurrent_unordered_set_traits(const hash_compare& hc) : my_hash_compare(hc) {} - - typedef hash_compare value_compare; - - static const Key& get_key(const value_type& value) { - return value; - } - - hash_compare my_hash_compare; // the comparator predicate for keys -}; - -template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > -class concurrent_unordered_set : public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, false> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef internal::concurrent_unordered_base< concurrent_unordered_set_traits > base_type; - typedef concurrent_unordered_set_traits, Allocator, false> traits_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef Key mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_set(size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - } - - concurrent_unordered_set(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, 
key_compare(a_hasher, a_keyeq), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_set(const concurrent_unordered_set& table) - : base_type(table) - { - } - - concurrent_unordered_set& operator=(const concurrent_unordered_set& table) - { - return static_cast(base_type::operator=(table)); - } + http://www.apache.org/licenses/LICENSE-2.0 - concurrent_unordered_set(concurrent_unordered_set&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_set& operator=(concurrent_unordered_set&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_set(concurrent_unordered_set&& table, const Allocator& a) - : base_type(std::move(table), a) - { - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -}; - -template , typename Key_equality = std::equal_to, - typename Allocator = tbb::tbb_allocator > -class concurrent_unordered_multiset : - public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, true> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef concurrent_unordered_set_traits traits_type; - typedef internal::concurrent_unordered_base< traits_type > base_type; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef Key mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_multiset(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - } - - concurrent_unordered_multiset(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) - { - } - - template - concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets = 
base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), - const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) - { - insert(first, last); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! Constructor from initializer_list - concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - this->insert(il.begin(),il.end()); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - concurrent_unordered_multiset(const concurrent_unordered_multiset& table) - : base_type(table) - { - } - - concurrent_unordered_multiset& operator=(const concurrent_unordered_multiset& table) - { - return static_cast(base_type::operator=(table)); - } - - concurrent_unordered_multiset(concurrent_unordered_multiset&& table) - : base_type(std::move(table)) - { - } - - concurrent_unordered_multiset& operator=(concurrent_unordered_multiset&& table) - { - return static_cast(base_type::operator=(std::move(table))); - } -#endif //__TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN - - concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a) - : base_type(table, a) - { - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_multiset(concurrent_unordered_multiset&& table, const Allocator& a) - : base_type(std::move(table), a) - { - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT -}; -} // namespace interface5 - -using interface5::concurrent_unordered_set; -using interface5::concurrent_unordered_multiset; - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif// __TBB_concurrent_unordered_set_H +#include "../oneapi/tbb/concurrent_unordered_set.h" diff --git a/src/tbb/include/tbb/concurrent_vector.h b/src/tbb/include/tbb/concurrent_vector.h index 3b6ad3203..c1fc97c62 100644 --- a/src/tbb/include/tbb/concurrent_vector.h +++ b/src/tbb/include/tbb/concurrent_vector.h @@ -1,1334 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
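The set and multiset deleted above differ only in the allow_multimapping flag of their traits class: the set rejects duplicate keys, the multiset retains them. A sketch under the same build assumptions; the 4-thread, 100-element workload is arbitrary:

    #include <tbb/concurrent_unordered_set.h>

    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
        tbb::concurrent_unordered_set<int> unique_keys;
        tbb::concurrent_unordered_multiset<int> all_keys;

        std::vector<std::thread> workers;
        for (int t = 0; t < 4; ++t)
            workers.emplace_back([&] {
                for (int i = 0; i < 100; ++i) {
                    unique_keys.insert(i); // duplicate keys are rejected
                    all_keys.insert(i);    // duplicate keys are retained
                }
            });
        for (auto& th : workers)
            th.join();

        std::printf("unique: %zu, total: %zu\n",
                    unique_keys.size(), all_keys.size());
        return 0;
    }

Every thread inserts the same 100 keys, so the set ends at 100 elements and the multiset at 400.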
+ You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_vector_H -#define __TBB_concurrent_vector_H - -#include "tbb_stddef.h" -#include "tbb_exception.h" -#include "atomic.h" -#include "cache_aligned_allocator.h" -#include "blocked_range.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" -#include -#include // for memset() - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if _MSC_VER==1500 && !__INTEL_COMPILER - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include /* std::numeric_limits */ -#if _MSC_VER==1500 && !__INTEL_COMPILER - #pragma warning( pop ) -#endif - -#if __TBB_INITIALIZER_LISTS_PRESENT - #include -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) -#if defined(_Wp64) - #pragma warning (disable: 4267) -#endif - #pragma warning (disable: 4127) //warning C4127: conditional expression is constant -#endif - -namespace tbb { - -template > -class concurrent_vector; - -template -class vector_iterator; - -//! @cond INTERNAL -namespace internal { - - //! Bad allocation marker - static void *const vector_allocation_error_flag = reinterpret_cast(size_t(63)); - - //! Exception helper function - template - void handle_unconstructed_elements(T* array, size_t n_of_elements){ - std::memset(array, 0, n_of_elements * sizeof(T)); - } - - //! Base class of concurrent vector implementation. - /** @ingroup containers */ - class concurrent_vector_base_v3 { - protected: - - // Basic types declarations - typedef size_t segment_index_t; - typedef size_t size_type; - - // Using enumerations due to Mac linking problems of static const variables - enum { - // Size constants - default_initial_segments = 1, // 2 initial items - //! Number of slots for segment pointers inside the class - pointers_per_short_table = 3, // to fit into 8 words of entire structure - pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit - }; - - struct segment_not_used {}; - struct segment_allocated {}; - struct segment_allocation_failed {}; - - class segment_t; - class segment_value_t { - void* array; - private: - //TODO: More elegant way to grant access to selected functions _only_? 
-    [... legacy implementation elided; this span was flattened in extraction
-         (newlines and template angle brackets lost) and is summarized here:
-         the segment_value_t / segment_t helpers and concurrent_vector_base_v3
-         (segment table, allocator hook, size bookkeeping, log2-based segment
-         indexing); the random-access vector_iterator with its cached element
-         pointer; the concurrent_vector<T, A> container itself (STL-compatible
-         constructors and assignment, concurrent grow_by() / grow_to_at_least()
-         / push_back() / emplace_back(), operator[] / at(), range() support for
-         parallel algorithms, capacity and iterator methods, assign / swap /
-         clear and the destructor); the out-of-line helpers (shrink_to_fit,
-         internal_free_segments, internal_subscript and
-         internal_subscript_with_exceptions, internal_assign_iterators, the
-         initialize / copy / move / assign / destroy array routines,
-         copy_range); and the relational operators, free swap(), the close of
-         namespace tbb, and the MSVC warning-pragma epilogue ...]
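For context on what this removal drops from the bundled headers: the old concurrent_vector documentation above promises growth that never relocates existing segments, so concurrent push_back()/grow_by() leave prior indices and iterators valid. A minimal sketch of that legacy API, illustrative only and not part of this patch, assuming TBB 4.x-era headers on the include path and -ltbb at link time:

    // Legacy tbb::concurrent_vector: concurrent growth without relocation.
    #include <tbb/concurrent_vector.h>
    #include <tbb/parallel_for.h>

    int main() {
        tbb::concurrent_vector<int> v;

        // Many threads may append at once; earlier elements never move.
        tbb::parallel_for(0, 1000, [&](int i) {
            v.push_back(i);            // returns an iterator in TBB 4.x
        });

        // grow_by() appends a run of default-constructed elements and
        // returns an iterator to the first new one.
        auto it = v.grow_by(10);
        for (int k = 0; k < 10; ++k, ++it)
            *it = k;

        return v.size() == 1010 ? 0 : 1;
    }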
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
-#endif /* __TBB_concurrent_vector_H */
+#include "../oneapi/tbb/concurrent_vector.h"
diff --git a/src/tbb/include/tbb/critical_section.h b/src/tbb/include/tbb/critical_section.h
deleted file mode 100644
index b12cdcd8d..000000000
--- a/src/tbb/include/tbb/critical_section.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    [... GPL v2 license header with the runtime linking exception, identical
-         to the other deleted TBB headers ...]
-*/
-    [... deleted implementation elided (flattened in extraction): the
-         critical_section_v4 class, a non-recursive, fair mutex over
-         CRITICAL_SECTION (Windows) or pthread_mutex_t (POSIX) with
-         lock() / try_lock() / unlock(), owner-thread tracking that throws
-         eid_improper_lock on re-entry, an RAII scoped_lock, the public
-         typedef tbb::critical_section, and its profiling registration ...]
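The deleted header above wrapped a platform mutex behind a non-recursive, fair lock with a scoped guard. A hedged sketch of how that legacy class was typically used, assuming the pre-oneTBB tbb/critical_section.h is still available:

    // Legacy tbb::critical_section usage; re-locking from the owning
    // thread throws (is_recursive_mutex == false), so keep sections short.
    #include <tbb/critical_section.h>

    static tbb::critical_section cs;
    static long shared_counter = 0;

    void bump() {
        tbb::critical_section::scoped_lock guard(cs);  // released at scope exit
        ++shared_counter;
    }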
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_enumerable_thread_specific_H -#define __TBB_enumerable_thread_specific_H - -#include "concurrent_vector.h" -#include "tbb_thread.h" -#include "tbb_allocator.h" -#include "tbb_profiling.h" -#include "cache_aligned_allocator.h" -#include "aligned_space.h" -#include // for memcpy - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif - -namespace tbb { - -//! enum for selecting between single key and key-per-instance versions -enum ets_key_usage_type { ets_key_per_instance, ets_no_key }; - -namespace interface6 { - - //! @cond - namespace internal { - - using namespace tbb::internal; - - template - class ets_base: tbb::internal::no_copy { - protected: -#if _WIN32||_WIN64 - typedef DWORD key_type; -#else - typedef pthread_t key_type; -#endif -#if __TBB_PROTECTED_NESTED_CLASS_BROKEN - public: -#endif - struct slot; - - struct array { - array* next; - size_t lg_size; - slot& at( size_t k ) { - return ((slot*)(void*)(this+1))[k]; - } - size_t size() const {return (size_t)1<>(8*sizeof(size_t)-lg_size); - } - }; - struct slot { - key_type key; - void* ptr; - bool empty() const {return !key;} - bool match( key_type k ) const {return key==k;} - bool claim( key_type k ) { - __TBB_ASSERT(sizeof(tbb::atomic)==sizeof(key_type), NULL); - return tbb::internal::punned_cast*>(&key)->compare_and_swap(k,0)==0; - } - }; -#if __TBB_PROTECTED_NESTED_CLASS_BROKEN - protected: -#endif - - static key_type key_of_current_thread() { - tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id(); - key_type k; - memcpy( &k, &id, sizeof(k) ); - return k; - } - - //! Root of linked list of arrays of decreasing size. - /** NULL if and only if my_count==0. - Each array in the list is half the size of its predecessor. */ - atomic my_root; - atomic my_count; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes - array* allocate( size_t lg_size ) { - size_t n = 1<(create_array( sizeof(array)+n*sizeof(slot) )); - a->lg_size = lg_size; - std::memset( a+1, 0, n*sizeof(slot) ); - return a; - } - void free(array* a) { - size_t n = 1<<(a->lg_size); - free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) ); - } - static size_t hash( key_type k ) { - // Multiplicative hashing. 
Client should use *upper* bits. - // casts required for Mac gcc4.* compiler - return uintptr_t(k)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value; - } - - ets_base() {my_root=NULL; my_count=0;} - virtual ~ets_base(); // g++ complains if this is not virtual... - void* table_lookup( bool& exists ); - void table_clear(); - // table_find is used in copying ETS, so is not used in concurrent context. So - // we don't need itt annotations for it. - slot& table_find( key_type k ) { - size_t h = hash(k); - array* r = my_root; - size_t mask = r->mask(); - for(size_t i = r->start(h);;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() || s.match(k) ) - return s; - } - } - void table_reserve_for_copy( const ets_base& other ) { - __TBB_ASSERT(!my_root,NULL); - __TBB_ASSERT(!my_count,NULL); - if( other.my_root ) { - array* a = allocate(other.my_root->lg_size); - a->next = NULL; - my_root = a; - my_count = other.my_count; - } - } - }; - - template - ets_base::~ets_base() { - __TBB_ASSERT(!my_root, NULL); - } - - template - void ets_base::table_clear() { - while( array* r = my_root ) { - my_root = r->next; - free(r); - } - my_count = 0; - } - - template - void* ets_base::table_lookup( bool& exists ) { - const key_type k = key_of_current_thread(); - - __TBB_ASSERT(k!=0,NULL); - void* found; - size_t h = hash(k); - for( array* r=my_root; r; r=r->next ) { - call_itt_notify(acquired,r); - size_t mask=r->mask(); - for(size_t i = r->start(h); ;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() ) break; - if( s.match(k) ) { - if( r==my_root ) { - // Success at top level - exists = true; - return s.ptr; - } else { - // Success at some other level. Need to insert at top level. - exists = true; - found = s.ptr; - goto insert; - } - } - } - } - // Key does not yet exist. The density of slots in the table does not exceed 0.5, - // for if this will occur a new table is allocated with double the current table - // size, which is swapped in as the new root table. So an empty slot is guaranteed. - exists = false; - found = create_local(); - { - size_t c = ++my_count; - array* r = my_root; - call_itt_notify(acquired,r); - if( !r || c>r->size()/2 ) { - size_t s = r ? r->lg_size : 2; - while( c>size_t(1)<<(s-1) ) ++s; - array* a = allocate(s); - for(;;) { - a->next = r; - call_itt_notify(releasing,a); - array* new_r = my_root.compare_and_swap(a,r); - if( new_r==r ) break; - call_itt_notify(acquired, new_r); - if( new_r->lg_size>=s ) { - // Another thread inserted an equal or bigger array, so our array is superfluous. - free(a); - break; - } - r = new_r; - } - } - } - insert: - // Whether a slot has been found in an older table, or if it has been inserted at this level, - // it has already been accounted for in the total. Guaranteed to be room for it, and it is - // not present, so search for empty slot and use it. - array* ir = my_root; - call_itt_notify(acquired, ir); - size_t mask = ir->mask(); - for(size_t i = ir->start(h);;i=(i+1)&mask) { - slot& s = ir->at(i); - if( s.empty() ) { - if( s.claim(k) ) { - s.ptr = found; - return found; - } - } - } - } - - //! 
Specialization that exploits native TLS - template <> - class ets_base: protected ets_base { - typedef ets_base super; -#if _WIN32||_WIN64 -#if __TBB_WIN8UI_SUPPORT - typedef DWORD tls_key_t; - void create_key() { my_key = FlsAlloc(NULL); } - void destroy_key() { FlsFree(my_key); } - void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); } - void* get_tls() { return (void *)FlsGetValue(my_key); } -#else - typedef DWORD tls_key_t; - void create_key() { my_key = TlsAlloc(); } - void destroy_key() { TlsFree(my_key); } - void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); } - void* get_tls() { return (void *)TlsGetValue(my_key); } -#endif -#else - typedef pthread_key_t tls_key_t; - void create_key() { pthread_key_create(&my_key, NULL); } - void destroy_key() { pthread_key_delete(my_key); } - void set_tls( void * value ) const { pthread_setspecific(my_key, value); } - void* get_tls() const { return pthread_getspecific(my_key); } -#endif - tls_key_t my_key; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes - public: - ets_base() {create_key();} - ~ets_base() {destroy_key();} - void* table_lookup( bool& exists ) { - void* found = get_tls(); - if( found ) { - exists=true; - } else { - found = super::table_lookup(exists); - set_tls(found); - } - return found; - } - void table_clear() { - destroy_key(); - create_key(); - super::table_clear(); - } - }; - - //! Random access iterator for traversing the thread local copies. - template< typename Container, typename Value > - class enumerable_thread_specific_iterator -#if defined(_WIN64) && defined(_MSC_VER) - // Ensure that Microsoft's internal template function _Val_type works correctly. - : public std::iterator -#endif /* defined(_WIN64) && defined(_MSC_VER) */ - { - //! current position in the concurrent_vector - - Container *my_container; - typename Container::size_type my_index; - mutable Value *my_value; - - template - friend enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ); - - template - friend bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, const enumerable_thread_specific_iterator& j ); - - template - friend class enumerable_thread_specific_iterator; - - public: - - enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : - my_container(&const_cast(container)), my_index(index), my_value(NULL) {} - - //! 
-    // enumerable_thread_specific_iterator: a random-access iterator over the concurrent
-    // vector of thread-local slots. It caches a pointer to the current value and
-    // invalidates that cache on every increment, decrement, or offset adjustment; the
-    // usual full set of arithmetic and comparison operators is provided as free functions.
-    [~120 lines removed: iterator constructors, operator*/->/[]/++/--, the offset
-     arithmetic operators, and the comparison/difference free functions]
-
-    // segmented_iterator: an input iterator that walks all elements of all thread-local
-    // containers in turn. Assigning an outer iterator to it skips past empty inner
-    // containers; advance_me() steps the inner iterator and rolls over to the next
-    // non-empty container whenever it reaches an inner end().
-    [~170 lines removed: segmented_iterator and its comparison operators, plus the helper
-     templates destruct_only, construct_by_default, construct_by_exemplar,
-     construct_by_finit, callback_base/callback_leaf (type-erased per-thread value
-     constructors allocated through tbb::tbb_allocator), and ets_element, which pads
-     each slot out to the cache-line size to avoid false sharing]
-
-    } // namespace internal
-    //! @endcond
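For orientation, the public face of this iterator machinery is flatten2d(), defined near the end of this header: it presents every element of every thread-local container as one flat sequence. A minimal usage sketch, assuming the classic <tbb/enumerable_thread_specific.h> include path (oneTBB keeps the same API):

    #include <tbb/enumerable_thread_specific.h>
    #include <tbb/parallel_for.h>
    #include <cstdio>
    #include <vector>

    int main() {
        // one std::vector<int> per thread, lazily default-constructed on first local()
        tbb::enumerable_thread_specific<std::vector<int>> ets;

        tbb::parallel_for(0, 1000, [&](int i) {
            ets.local().push_back(i);           // no locking: per-thread storage
        });

        // flatten2d() walks every element of every per-thread vector in turn
        long sum = 0;
        for (int v : tbb::flatten2d(ets))
            sum += v;
        std::printf("%ld\n", sum);              // 499500, regardless of thread count
    }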
-//! The enumerable_thread_specific container
-/** enumerable_thread_specific has the following properties:
-    - thread-local copies are lazily created, with default, exemplar or function initialization.
-    - thread-local copies do not move (during lifetime, and excepting clear()), so the
-      address of a copy is invariant.
-    - the contained objects need not have operator=() defined if combine is not used.
-    - enumerable_thread_specific containers may be copy-constructed or assigned.
-    - thread-local copies can be managed by hash-table, or can be accessed via TLS storage
-      for speed.
-    - outside of parallel contexts, the contents of all thread-local copies are accessible
-      by iterator or using the combine or combine_each methods.
-
-    @par Segmented iterator
-    When the thread-local objects are containers with input_iterators defined, a segmented
-    iterator may be used to iterate over all the elements of all thread-local copies.
-
-    @par combine and combine_each
-    - Both methods are defined for enumerable_thread_specific.
-    - combine() requires that the type T have operator=() defined.
-    - neither method modifies the contents of the object (though there is no guarantee
-      that the applied methods do not modify the object.)
-    - Both are evaluated in serial context (the methods are assumed to be non-benign.)
-
-    @ingroup containers */
-template <typename T, typename Allocator = cache_aligned_allocator<T>,
-          ets_key_usage_type ETS_key_type = ets_no_key>
-class enumerable_thread_specific : internal::ets_base<ETS_key_type> {
-    [~280 lines removed: the container body — basic and iterator typedefs, the
-     generic_range_type used for parallel ranges, default/functor/exemplar constructors,
-     the destructor (destroys the construction callback, then clear()s), local() with
-     and without the 'exists' flag, size()/empty(), begin()/end(), range()/const range(),
-     clear(), copy construction and assignment via internal_copy()/internal_assign(),
-     and the combine()/combine_each() serial reductions; internal_copy() clones the
-     construction callback first so it stays valid if the rest throws, reserves the
-     lookup table, and copy-constructs each non-empty slot from the source]
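As the doc comment above spells out, combine() folds all thread-local copies with a binary functor (T must be assignable) and combine_each() visits each copy; both run serially, outside the parallel region. A usage sketch under the same classic include:

    #include <tbb/enumerable_thread_specific.h>
    #include <tbb/parallel_for.h>
    #include <cstdio>

    int main() {
        // exemplar constructor: every thread's counter starts as a copy of 0L
        tbb::enumerable_thread_specific<long> counters(0L);

        tbb::parallel_for(0, 1000000, [&](int) {
            ++counters.local();                 // race-free per-thread increment
        });

        // serial reduction over however many copies were actually created
        long total = counters.combine([](long a, long b) { return a + b; });
        counters.combine_each([](long c) { std::printf("per-thread: %ld\n", c); });
        std::printf("total: %ld\n", total);     // 1000000
    }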
-    // flattened2d: an adaptor over a container of containers (e.g. an
-    // enumerable_thread_specific of vectors) exposing segmented_iterator begin()/end()
-    // and a size() that sums the sizes of all inner containers.
-    [~60 lines removed: flattened2d, the flatten2d() factory overloads (whole container
-     or iterator subrange), and the closing using-declarations and namespace braces of
-     interface6 / tbb]
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-
-#endif
+
+#include "../oneapi/tbb/enumerable_thread_specific.h"
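The net effect of this hunk: the entire TBB 4.x implementation above is deleted and the legacy include path becomes a one-line forward to the bundled oneTBB header, so client code written against the old path keeps compiling. Illustrative:

    // Pre-existing client code is unaffected by the relocation:
    #include <tbb/enumerable_thread_specific.h>   // now forwards to oneapi/tbb/enumerable_thread_specific.h

    tbb::enumerable_thread_specific<int> per_thread_counter;   // same type, oneTBB implementation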
diff --git a/src/tbb/include/tbb/flow_graph.h b/src/tbb/include/tbb/flow_graph.h
index 46087e706..40da468fe 100644
--- a/src/tbb/include/tbb/flow_graph.h
+++ b/src/tbb/include/tbb/flow_graph.h
@@ -1,3237 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
-    [removed: GPL-2 license text and the runtime-library linking exception]
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
 */
-
-#ifndef __TBB_flow_graph_H
-#define __TBB_flow_graph_H
-
-    [removed: includes — tbb_stddef.h, atomic.h, spin_mutex.h, null_mutex.h,
-     spin_rw_mutex.h, null_rw_mutex.h, task.h, cache_aligned_allocator.h,
-     tbb_exception.h, internal/_aggregator_impl.h, tbb_profiling.h — plus the
-     FLOW_SPAWN(a) macro (task::enqueue under TBB_DEPRECATED_FLOW_ENQUEUE, otherwise
-     task::spawn) and the selection of std::tuple or compat/tuple for tbb::flow::tuple]
-
-/** @file
-    \brief The graph related classes and functions
-
-    There are some applications that best express dependencies as messages passed
-    between nodes in a graph. These messages may contain data or simply act as signals
-    that a predecessor has completed. The graph class and its associated node classes
-    can be used to express such applications.
-*/
-
-namespace tbb {
-namespace flow {
-
-//! An enumeration that provides the two most common concurrency levels: unlimited and serial
-enum concurrency { unlimited = 0, serial = 1 };
-
-namespace interface7 {
-
-    [removed: forward declarations (successor_cache, broadcast_cache,
-     round_robin_cache, continue_msg, sender/receiver/continue_receiver) and the
-     abstract sender<T> interface: pure virtual register_successor() and
-     remove_successor(), plus try_get(), try_reserve(), try_release() and
-     try_consume(), all defaulting to false]
-    [removed: the TBB_PREVIEW_FLOW_GRAPH_FEATURES edge-recording hooks on sender<T>
-     (internal_add/delete_built_successor, copy_successors, successor_count)]
-
-template< typename T > class limiter_node;    // needed for resetting decrementer
-template< typename R, typename B > class run_and_put_task;
-
-static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1;
-
-    [removed: the preview reset_flags enum (rf_reset_protocol, rf_reset_bodies,
-     rf_extract) and the __TBB_PFG_RESET_ARG / __TBB_COMMA helper macros]
-
-    // combine_tasks(): given two candidate tasks, spawn one and return the other, so a
-    // single task can still be handed back for scheduler bypass; NULL and
-    // SUCCESSFULLY_ENQUEUED pass through untouched.
-    [~15 lines removed]
-
-    [removed: the abstract receiver<T> interface — the public try_put() wraps the
-     protected try_put_task() and spawns any task it returns;
-     register_predecessor()/remove_predecessor() default to false; reset_receiver()
-     and the preview edge-recording hooks are pure virtual]
-    [removed: edge_container<C>, a preview-only vector of built edges with
-     add_edge()/delete_edge()/copy_edges()/edge_count()/clear() and the
-     sender_extract()/receiver_extract() hooks]
-
-//! Base class for receivers of completion messages
-/** These receivers automatically reset, but cannot be explicitly waited on */
-class continue_receiver : public receiver< continue_msg > {
-    [removed: a constructor taking the initial predecessor count, a copy constructor
-     that re-arms that count, and register_predecessor()/remove_predecessor(), which
-     increment/decrement the trigger threshold under a spin_mutex — removing a
-     predecessor while the graph is active can therefore cause unexpected results —
-     plus the preview edge-recording overrides]
-    // try_put_task(): counts arriving continue_msgs under the mutex; when the count
-    // reaches the predecessor threshold it resets to zero and calls execute(), which
-    // must be very fast or else spawn a task, since the sender is blocked in try_put().
-    [~50 lines removed: try_put_task(), reset_receiver(), the my_predecessor_count /
-     my_current_count state, and the pure virtual execute() hook]
-}; // continue_receiver
-
-    [removed: graph_iterator<GraphContainerType, GraphNodeType> — a forward iterator
-     over a graph's intrusive node list, with the usual copy/assign/compare/increment
-     operators and a private (graph, begin) constructor used by begin()/end()]
-//! The graph class
-/** This class serves as a handle to the graph */
-class graph : tbb::internal::no_copy {
-    [removed: the private run_task / run_and_put_task wrappers used by run()]
-    [removed: constructors — the default one owns a fresh task_group_context, the other
-     borrows a caller-supplied context; both allocate a root empty_task with ref_count 1
-     and register the graph for tracing. The destructor calls wait_for_all(), then
-     destroys the root task and, when owned, the context]
-
-    //! Used to register that an external entity may still interact with the graph.
-    /** The graph will not return from wait_for_all() until a matching number of
-        decrement_wait_count() calls is made. */
-    [removed: increment_wait_count()/decrement_wait_count(), which adjust the root
-     task's reference count, and the two run() overloads, which spawn a body
-     (optionally feeding its result to a receiver) as an additional child of the root
-     task so that wait_for_all() blocks on it — useful for one-off sources]
-    //! Wait until the graph is idle and the decrement_wait_count() calls equal the
-    //! increment_wait_count() calls.
-    /** The waiting thread will go off and steal work while it is blocked in wait_for_all(). */
-    [removed: wait_for_all() — waits on the root task, records whether the context was
-     cancelled, and resets the context; on exception it also re-arms the root ref
-     count, marks caught_exception, and rethrows]
-    [removed: root_task() (NULL when the preview set_active(false) is in effect),
-     set_active()/is_active(), the begin()/end()/cbegin()/cend() node iterators,
-     is_cancelled()/exception_thrown(), the thread-unsafe reset(), and the private
-     state: root task, context, status flags, and the intrusive node list guarded by
-     nodelist_mutex]
-}; // class graph
-
-    [removed: out-of-line graph_iterator definitions — the (graph, begin) constructor,
-     operator*() (asserts it is not at end), operator->(), and internal_forward(),
-     which follows the node's next pointer]
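The run()/wait_for_all() pairing removed above is the basic graph lifecycle: run() spawns a body as a child of the graph's root task, so wait_for_all() cannot return before the body finishes. A sketch against the classic API deleted here (oneTBB later reworked some of these entry points):

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;

        // spawned as an additional child of the graph's root task
        g.run([] { std::printf("background work\n"); });

        g.wait_for_all();   // steals work while blocked; clears cancel/exception state
    }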
-//! The base of all graph nodes.
-class graph_node : tbb::internal::no_assign {
-    [removed: registers itself with its graph on construction and removes itself on
-     destruction; the preview extract() deactivates the graph, calls
-     reset(f | rf_extract), and restores the previous activity state; reset() is pure
-     virtual]
-};
-
-    [removed: graph::register_node()/remove_node(), which splice nodes in and out of
-     the doubly linked list under nodelist_mutex, and graph::reset(), which temporarily
-     nulls the root task, resets the context and status flags, then resets every
-     registered node]
-
-#include "internal/_flow_graph_node_impl.h"
-
-//! An executable node that acts as a source, i.e. it has no predecessors
-template < typename Output >
-class source_node : public graph_node, public sender< Output > {
-    [removed: the (graph, body, is_active = true) constructor and the copy
-     constructor; both clone the user body into a source_body_leaf and register the
-     node for tracing; the destructor deletes the body]
-    [removed: register_successor() (spawns a put if the node is active),
-     remove_successor(), and the preview edge-recording overrides]
-    // try_get() returns the cached item if one is present, otherwise spawns a task to
-    // produce one and reports failure; try_reserve() hands out the cached item without
-    // consuming it; try_release() keeps the reserved item and re-spawns; try_consume()
-    // drops it. activate() enables a node constructed inactive and spawns the first put.
-    [~90 lines removed: the reservation protocol, activate(), and
-     copy_function_object(), which recovers the user body via dynamic_cast]
-    //! Resets the source_node to its initial state
-    [removed: reset() restores the initial active flag and drops any reservation and
-     cached item; the private machinery comprises try_reserve_apply_body(), which
-     invokes the body at most once to refill the cache, spawn_put(), which spawns a
-     source_task_bypass as an additional child of the graph's root task, and
-     apply_body_bypass(), which forwards the reserved item to the successors, then
-     consumes it on success or releases it on failure]
-}; // source_node
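A sketch of the source_node protocol summarized above, using the classic API this hunk deletes (oneTBB later renamed the concept to input_node with a different body signature): the body fills its output parameter and returns true while items remain, and a node constructed inactive produces nothing until activate().

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;

        int next = 0;
        tbb::flow::source_node<int> src(g,
            [&](int &out) -> bool { out = next++; return next <= 5; },
            /*is_active=*/false);                       // start inactive

        tbb::flow::function_node<int, int> sink(g, tbb::flow::serial,
            [](int v) { std::printf("got %d\n", v); return v; });

        tbb::flow::make_edge(src, sink);
        src.activate();                                 // now production begins
        g.wait_for_all();
    }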
-//! Implements a function node that supports Input -> Output
-template < typename Input, typename Output = continue_msg,
-           graph_buffer_policy = queueing, typename Allocator = cache_aligned_allocator<Input> >
-class function_node : public graph_node,
-                      public internal::function_input< Input, Output, Allocator >,
-                      public internal::function_output< Output > {
-    [removed: the (graph, concurrency, body) constructor, the copy constructor,
-     tracing hooks, and reset(), which resets the function input and, under the
-     preview features, the successor cache. This primary template carries no input
-     queue, so it serves the rejecting policy: inputs beyond the concurrency limit
-     are refused]
-};
-
-    [removed: the queueing specialization function_node<Input, Output, queueing,
-     Allocator> — same surface, but the constructor and copy constructor pass a new
-     internal::function_input_queue so that inputs arriving while the node is at its
-     concurrency limit are buffered instead of rejected]
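The policy difference matters at the concurrency limit: the queueing variant buffers excess inputs, while the rejecting variant refuses them (leaving reservable predecessors to hold the data instead). A sketch, assuming the classic header:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;

        // at most 2 bodies in flight; further inputs are buffered by the queueing policy
        tbb::flow::function_node<int, int, tbb::flow::queueing> square(
            g, 2, [](int v) { return v * v; });

        tbb::flow::function_node<int, int> print(
            g, tbb::flow::serial, [](int v) { std::printf("%d\n", v); return v; });

        tbb::flow::make_edge(square, print);
        for (int i = 0; i < 8; ++i) square.try_put(i);  // none of these puts is lost
        g.wait_for_all();
    }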
-//! Implements a function node that supports Input -> (set of outputs)
-// Output is a tuple of output types; wrap_tuple_elements wraps each element in an
-// internal::multifunction_output port, so the body receives (input, output_ports_type&)
-// and may put to any subset of the ports.
-    [~80 lines removed: both multifunction_node variants — the rejecting primary
-     template and the queueing specialization, which buffers excess inputs through an
-     internal::function_input_queue; each provides the (graph, concurrency, body)
-     constructor, a copy constructor, tracing hooks, and a reset() forwarding to
-     multifunction_input]
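A sketch of the port mechanism, assuming C++11 std::tuple backing tbb::flow::tuple and the classic output_port<N>() accessor; the body may put to any subset of ports per input:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        using mf_node = tbb::flow::multifunction_node<int, tbb::flow::tuple<int, int>>;

        // route evens to port 0 and odds to port 1
        mf_node splitter(g, tbb::flow::unlimited,
            [](const int &v, mf_node::output_ports_type &ports) {
                if (v % 2 == 0) std::get<0>(ports).try_put(v);
                else            std::get<1>(ports).try_put(v);
            });

        tbb::flow::function_node<int, int> evens(g, tbb::flow::serial,
            [](int v) { std::printf("even %d\n", v); return v; });
        tbb::flow::function_node<int, int> odds(g, tbb::flow::serial,
            [](int v) { std::printf("odd  %d\n", v); return v; });

        tbb::flow::make_edge(tbb::flow::output_port<0>(splitter), evens);
        tbb::flow::make_edge(tbb::flow::output_port<1>(splitter), odds);

        for (int i = 0; i < 6; ++i) splitter.try_put(i);
        g.wait_for_all();
    }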
-//! split_node: accepts a tuple as input and forwards each element of the tuple to the
-// successors of the matching output port. The node has unlimited concurrency, so
-// although it is marked "rejecting" it never actually rejects inputs.
-template <typename TupleType, typename Allocator = cache_aligned_allocator<TupleType> >
-class split_node : public multifunction_node< TupleType, TupleType, rejecting, Allocator > {
-    [removed: the private splitting_body, whose operator() calls
-     internal::emit_element<N>::emit_this(t, p) to push tuple element k out of port k,
-     plus the (graph) constructor, copy constructor, and tracing hooks]
-};
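split_node is the special case where the routing is fixed: tuple element k always leaves through port k. A sketch under the same assumptions as above:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        using pair_t = tbb::flow::tuple<int, double>;

        tbb::flow::split_node<pair_t> split(g);     // unlimited concurrency, never rejects

        tbb::flow::function_node<int, int> ints(g, tbb::flow::serial,
            [](int v) { std::printf("int %d\n", v); return v; });
        tbb::flow::function_node<double, double> reals(g, tbb::flow::serial,
            [](double v) { std::printf("double %g\n", v); return v; });

        tbb::flow::make_edge(tbb::flow::output_port<0>(split), ints);
        tbb::flow::make_edge(tbb::flow::output_port<1>(split), reals);

        split.try_put(pair_t(42, 3.14));
        g.wait_for_all();
    }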
-//! Implements an executable node that supports continue_msg -> Output
-template <typename Output>
-class continue_node : public graph_node,
-                      public internal::continue_input< Output >,
-                      public internal::function_output< Output > {
-    [removed: constructors taking (graph, body) or (graph, number_of_predecessors,
-     body), the copy constructor, tracing hooks, and reset(), which resets the
-     continue_input receiver and, under the preview features, the successor cache]
-}; // continue_node
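continue_node is the dependency-graph workhorse built on continue_receiver: each incoming edge raises its trigger threshold, and the body runs only once all predecessors have signaled. A sketch against the classic header:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        using msg = tbb::flow::continue_msg;
        using cnode = tbb::flow::continue_node<msg>;

        cnode a(g, [](const msg &) { std::printf("a\n"); return msg(); });
        cnode b(g, [](const msg &) { std::printf("b\n"); return msg(); });
        cnode c(g, [](const msg &) { std::printf("c\n"); return msg(); });

        // two edges into c => c fires only after both a and b have completed
        tbb::flow::make_edge(a, c);
        tbb::flow::make_edge(b, c);

        a.try_put(msg());
        b.try_put(msg());
        g.wait_for_all();
    }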
-//! A buffering node that overwrites its single stored item with each new message and
-// broadcasts the new value to all successors.
-template< typename T >
-class overwrite_node : public graph_node, public receiver<T>, public sender<T> {
-    [removed: constructor and copy constructor (the copy takes nothing from its source)]
-    // register_successor(): when the buffer already holds a valid value it is forwarded
-    // immediately; the successor is added if it accepts the put, or if it rejects the
-    // put but also refuses to let this node become a predecessor. Otherwise this node
-    // becomes the successor's predecessor instead and the registration fails.
-    [~120 lines removed: remove_successor(), the preview edge bookkeeping, try_get()
-     (a non-destructive read of the buffer), is_valid(), clear(), try_put_task()
-     (stores the value, marks the buffer valid, and broadcasts it), and reset()]
-}; // overwrite_node
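The buffer semantics in brief, as a sketch (classic header assumed):

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        tbb::flow::overwrite_node<int> latest(g);

        latest.try_put(1);
        latest.try_put(2);          // overwrites: the buffer holds the newest value

        int v = 0;
        if (latest.try_get(v))      // non-destructive read
            std::printf("latest = %d\n", v);            // prints 2

        latest.clear();             // invalidate the buffer
        std::printf("valid: %d\n", (int) latest.is_valid());   // 0
        g.wait_for_all();
    }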
successor_type; - - //! Constructor - write_once_node(graph& g) : overwrite_node(g) { - tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Copy constructor: call base class copy constructor - write_once_node( const write_once_node& src ) : overwrite_node(src) { - tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /* override */ task *try_put_task( const T &v ) { - spin_mutex::scoped_lock l( this->my_mutex ); - if ( this->my_buffer_is_valid ) { - return NULL; - } else { - this->my_buffer = v; - this->my_buffer_is_valid = true; - task *res = this->my_successors.try_put_task(v); - if(!res) res = SUCCESSFULLY_ENQUEUED; - return res; - } - } -}; - -//! Forwards messages of type T to all successors -template -class broadcast_node : public graph_node, public receiver, public sender { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - typedef std::vector successor_vector_type; -#endif -private: - internal::broadcast_cache my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; - spin_mutex pred_mutex; -#endif -public: - - broadcast_node(graph& g) : graph_node(g) { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - // Copy constructor - broadcast_node( const broadcast_node& src ) : - graph_node(src.my_graph), receiver(), sender() - { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - //! Adds a successor - virtual bool register_successor( receiver &r ) { - my_successors.register_successor( r ); - return true; - } - - //! 
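
write_once_node differs from its overwrite_node base only in try_put_task(): once the buffer is valid, further puts return NULL and are ignored until clear() is called. A sketch of that behavior, again assuming the classic API:

    #include "tbb/flow_graph.h"
    #include <cassert>

    int main() {
        tbb::flow::graph g;
        tbb::flow::write_once_node<int> node(g);

        node.try_put(1);     // accepted: the buffer was empty
        node.try_put(2);     // ignored: try_put_task() returns NULL

        int v = 0;
        assert(node.try_get(v) && v == 1);
        node.clear();        // only clear() re-arms the node
        node.try_put(3);     // accepted again
        g.wait_for_all();
        return 0;
    }
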
Removes s as a successor - virtual bool remove_successor( receiver &r ) { - my_successors.remove_successor( r ); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void internal_add_built_successor(successor_type &r) { - my_successors.internal_add_built_successor(r); - } - - /*override*/ void internal_delete_built_successor(successor_type &r) { - my_successors.internal_delete_built_successor(r); - } - - /*override*/ size_t successor_count() { - return my_successors.successor_count(); - } - - /*override*/ void copy_successors(successor_vector_type &v) { - my_successors.copy_successors(v); - } - - /*override*/ void internal_add_built_predecessor( predecessor_type &p) { - my_built_predecessors.add_edge(p); - } - - /*override*/ void internal_delete_built_predecessor( predecessor_type &p) { - my_built_predecessors.delete_edge(p); - } - - /*override*/ size_t predecessor_count() { - return my_built_predecessors.edge_count(); - } - - /*override*/ void copy_predecessors(predecessor_vector_type &v) { - my_built_predecessors.copy_edges(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - //! build a task to run the successor if possible. Default is old behavior. - /*override*/ task *try_put_task(const T& t) { - task *new_task = my_successors.try_put_task(t); - if(!new_task) new_task = SUCCESSFULLY_ENQUEUED; - return new_task; - } - - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - if (f&rf_extract) { - my_built_predecessors.receiver_extract(*this); - } - __TBB_ASSERT(!(f & rf_extract) || my_successors.empty(), "Error resetting broadcast_node"); -#endif - } - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {} -}; // broadcast_node - -//! 
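
broadcast_node buffers nothing: each incoming message is pushed to every registered successor and then dropped, and try_put_task() reports success even if no successor accepts. A small illustrative sketch (classic API assumed):

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        tbb::flow::broadcast_node<int> bcast(g);
        tbb::flow::function_node<int, int> f1(g, tbb::flow::unlimited,
            [](int v) { std::cout << "f1: " << v << "\n"; return v; });
        tbb::flow::function_node<int, int> f2(g, tbb::flow::unlimited,
            [](int v) { std::cout << "f2: " << v << "\n"; return v; });

        tbb::flow::make_edge(bcast, f1);
        tbb::flow::make_edge(bcast, f2);
        bcast.try_put(42);   // both f1 and f2 receive 42; nothing is stored
        g.wait_for_all();
        return 0;
    }
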
Forwards messages in arbitrary order -template > -class buffer_node : public graph_node, public internal::reservable_item_buffer, public receiver, public sender { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - typedef buffer_node my_class; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - typedef std::vector successor_vector_type; -#endif -protected: - typedef size_t size_type; - internal::round_robin_cache< T, null_rw_mutex > my_successors; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - - friend class internal::forward_task_bypass< buffer_node< T, A > >; - - enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_succ, del_blt_succ, - add_blt_pred, del_blt_pred, - blt_succ_cnt, blt_pred_cnt, - blt_succ_cpy, blt_pred_cpy // create vector copies of preds and succs -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - - // implements the aggregator_operation concept - class buffer_operation : public internal::aggregated_operation< buffer_operation > { - public: - char type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - task * ltask; - union { - input_type *elem; - successor_type *r; - predecessor_type *p; - size_t cnt_val; - successor_vector_type *svec; - predecessor_vector_type *pvec; - }; -#else - T *elem; - task * ltask; - successor_type *r; -#endif - buffer_operation(const T& e, op_type t) : type(char(t)) - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , ltask(NULL), elem(const_cast(&e)) -#else - , elem(const_cast(&e)) , ltask(NULL) -#endif - {} - buffer_operation(op_type t) : type(char(t)), ltask(NULL) {} - }; - - bool forwarder_busy; - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - internal::aggregator< my_handler, buffer_operation> my_aggregator; - - virtual void handle_operations(buffer_operation *op_list) { - buffer_operation *tmp = NULL; - bool try_forwarding=false; - while (op_list) { - tmp = op_list; - op_list = op_list->next; - switch (tmp->type) { - case reg_succ: internal_reg_succ(tmp); try_forwarding = true; break; - case rem_succ: internal_rem_succ(tmp); break; - case req_item: internal_pop(tmp); break; - case res_item: internal_reserve(tmp); break; - case rel_res: internal_release(tmp); try_forwarding = true; break; - case con_res: internal_consume(tmp); try_forwarding = true; break; - case put_item: internal_push(tmp); try_forwarding = (tmp->status == SUCCEEDED); break; - case try_fwd_task: internal_forward_task(tmp); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // edge recording - case add_blt_succ: internal_add_built_succ(tmp); break; - case del_blt_succ: internal_del_built_succ(tmp); break; - case add_blt_pred: internal_add_built_pred(tmp); break; - case del_blt_pred: internal_del_built_pred(tmp); break; - case blt_succ_cnt: internal_succ_cnt(tmp); break; - case blt_pred_cnt: internal_pred_cnt(tmp); break; - case blt_succ_cpy: internal_copy_succs(tmp); break; - case blt_pred_cpy: internal_copy_preds(tmp); break; -#endif - } - } - if (try_forwarding && !forwarder_busy) { - task* tp = this->my_graph.root_task(); - if(tp) { - forwarder_busy = true; - task *new_task = new(task::allocate_additional_child_of(*tp)) internal:: - forward_task_bypass - < buffer_node >(*this); - // tmp should point to the last item handled 
by the aggregator. This is the operation - // the handling thread enqueued. So modifying that record will be okay. - tbb::task *z = tmp->ltask; - tmp->ltask = combine_tasks(z, new_task); // in case the op generated a task - } - } - } - - inline task *grab_forwarding_task( buffer_operation &op_data) { - return op_data.ltask; - } - - inline bool enqueue_forwarding_task(buffer_operation &op_data) { - task *ft = grab_forwarding_task(op_data); - if(ft) { - FLOW_SPAWN(*ft); - return true; - } - return false; - } - - //! This is executed by an enqueued task, the "forwarder" - virtual task *forward_task() { - buffer_operation op_data(try_fwd_task); - task *last_task = NULL; - do { - op_data.status = WAIT; - op_data.ltask = NULL; - my_aggregator.execute(&op_data); - tbb::task *xtask = op_data.ltask; - last_task = combine_tasks(last_task, xtask); - } while (op_data.status == SUCCEEDED); - return last_task; - } - - //! Register successor - virtual void internal_reg_succ(buffer_operation *op) { - my_successors.register_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - //! Remove successor - virtual void internal_rem_succ(buffer_operation *op) { - my_successors.remove_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void internal_add_built_succ(buffer_operation *op) { - my_successors.internal_add_built_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_del_built_succ(buffer_operation *op) { - my_successors.internal_delete_built_successor(*(op->r)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_add_built_pred(buffer_operation *op) { - my_built_predecessors.add_edge(*(op->p)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_del_built_pred(buffer_operation *op) { - my_built_predecessors.delete_edge(*(op->p)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_succ_cnt(buffer_operation *op) { - op->cnt_val = my_successors.successor_count(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_pred_cnt(buffer_operation *op) { - op->cnt_val = my_built_predecessors.edge_count(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_copy_succs(buffer_operation *op) { - my_successors.copy_successors(*(op->svec)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_copy_preds(buffer_operation *op) { - my_built_predecessors.copy_edges(*(op->pvec)); - __TBB_store_with_release(op->status, SUCCEEDED); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - //! 
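
The handle_operations() dispatch above is the consumer half of TBB's aggregator pattern: callers append a buffer_operation to a shared list, exactly one caller becomes the handler, and that handler applies the whole pending batch serially before releasing the role. The sketch below is a deliberately simplified, mutex-based analogue of that protocol; the toy_op and toy_aggregator names are invented for illustration, and the real implementation (deleted from internal/_aggregator_impl.h later in this diff) is lock-free:

    #include <atomic>
    #include <mutex>
    #include <vector>

    struct toy_op { int type; std::atomic<int> status; explicit toy_op(int t) : type(t), status(0) {} };

    template <typename Handler>
    class toy_aggregator {
        std::mutex mtx;                  // guards the pending list
        std::vector<toy_op*> pending;
        bool handler_busy;
        Handler handle;                  // applies one op and sets status != 0
    public:
        explicit toy_aggregator(Handler h) : handler_busy(false), handle(h) {}

        void execute(toy_op* op) {
            bool is_handler = false;
            {
                std::lock_guard<std::mutex> lock(mtx);
                pending.push_back(op);
                if (!handler_busy) { handler_busy = true; is_handler = true; }
            }
            if (!is_handler) {
                while (op->status.load() == 0) {}   // spin until handled
                return;
            }
            for (;;) {                              // drain batches serially
                std::vector<toy_op*> batch;
                {
                    std::lock_guard<std::mutex> lock(mtx);
                    if (pending.empty()) { handler_busy = false; return; }
                    batch.swap(pending);
                }
                for (toy_op* o : batch) handle(o);  // one thread, whole batch
            }
        }
    };
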
Tries to forward valid items to successors - virtual void internal_forward_task(buffer_operation *op) { - if (this->my_reserved || !this->my_item_valid(this->my_tail-1)) { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - return; - } - T i_copy; - task * last_task = NULL; - size_type counter = my_successors.size(); - // Try forwarding, giving each successor a chance - while (counter>0 && !this->buffer_empty() && this->my_item_valid(this->my_tail-1)) { - this->copy_back(i_copy); - task *new_task = my_successors.try_put_task(i_copy); - if(new_task) { - last_task = combine_tasks(last_task, new_task); - this->destroy_back(); - } - --counter; - } - op->ltask = last_task; // return task - if (last_task && !counter) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - __TBB_store_with_release(op->status, FAILED); - forwarder_busy = false; - } - } - - virtual void internal_push(buffer_operation *op) { - this->push_back(*(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_pop(buffer_operation *op) { - if(this->pop_back(*(op->elem))) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - __TBB_store_with_release(op->status, FAILED); - } - } - - virtual void internal_reserve(buffer_operation *op) { - if(this->reserve_front(*(op->elem))) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - __TBB_store_with_release(op->status, FAILED); - } - } - - virtual void internal_consume(buffer_operation *op) { - this->consume_front(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - virtual void internal_release(buffer_operation *op) { - this->release_front(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - -public: - //! Constructor - buffer_node( graph &g ) : graph_node(g), internal::reservable_item_buffer(), - forwarder_busy(false) { - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - //! Copy constructor - buffer_node( const buffer_node& src ) : graph_node(src.my_graph), - internal::reservable_item_buffer(), receiver(), sender() { - forwarder_busy = false; - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - virtual ~buffer_node() {} - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - // - // message sender implementation - // - - //! Adds a new successor. - /** Adds successor r to the list of successors; may forward tasks. 
*/ - /* override */ bool register_successor( successor_type &r ) { - buffer_operation op_data(reg_succ); - op_data.r = &r; - my_aggregator.execute(&op_data); - (void)enqueue_forwarding_task(op_data); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void internal_add_built_successor( successor_type &r) { - buffer_operation op_data(add_blt_succ); - op_data.r = &r; - my_aggregator.execute(&op_data); - } - - /*override*/ void internal_delete_built_successor( successor_type &r) { - buffer_operation op_data(del_blt_succ); - op_data.r = &r; - my_aggregator.execute(&op_data); - } - - /*override*/ void internal_add_built_predecessor( predecessor_type &p) { - buffer_operation op_data(add_blt_pred); - op_data.p = &p; - my_aggregator.execute(&op_data); - } - - /*override*/ void internal_delete_built_predecessor( predecessor_type &p) { - buffer_operation op_data(del_blt_pred); - op_data.p = &p; - my_aggregator.execute(&op_data); - } - - /*override*/ size_t predecessor_count() { - buffer_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/ size_t successor_count() { - buffer_operation op_data(blt_succ_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/ void copy_predecessors( predecessor_vector_type &v ) { - buffer_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } - - /*override*/ void copy_successors( successor_vector_type &v ) { - buffer_operation op_data(blt_succ_cpy); - op_data.svec = &v; - my_aggregator.execute(&op_data); - } -#endif - - //! Removes a successor. - /** Removes successor r from the list of successors. - It also calls r.remove_predecessor(*this) to remove this node as a predecessor. */ - /* override */ bool remove_successor( successor_type &r ) { - r.remove_predecessor(*this); - buffer_operation op_data(rem_succ); - op_data.r = &r; - my_aggregator.execute(&op_data); - // even though this operation does not cause a forward, if we are the handler, and - // a forward is scheduled, we may be the first to reach this point after the aggregator, - // and so should check for the task. - (void)enqueue_forwarding_task(op_data); - return true; - } - - //! Request an item from the buffer_node - /** true = v contains the returned item
- false = no item has been returned */ - /* override */ bool try_get( T &v ) { - buffer_operation op_data(req_item); - op_data.elem = &v; - my_aggregator.execute(&op_data); - (void)enqueue_forwarding_task(op_data); - return (op_data.status==SUCCEEDED); - } - - //! Reserves an item. - /** false = no item can be reserved
- true = an item is reserved */ - /* override */ bool try_reserve( T &v ) { - buffer_operation op_data(res_item); - op_data.elem = &v; - my_aggregator.execute(&op_data); - (void)enqueue_forwarding_task(op_data); - return (op_data.status==SUCCEEDED); - } - - //! Release a reserved item. - /** true = item has been released and so remains in sender */ - /* override */ bool try_release() { - buffer_operation op_data(rel_res); - my_aggregator.execute(&op_data); - (void)enqueue_forwarding_task(op_data); - return true; - } - - //! Consumes a reserved item. - /** true = item is removed from sender and reservation removed */ - /* override */ bool try_consume() { - buffer_operation op_data(con_res); - my_aggregator.execute(&op_data); - (void)enqueue_forwarding_task(op_data); - return true; - } - -protected: - - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - //! receive an item, return a task *if possible - /* override */ task *try_put_task(const T &t) { - buffer_operation op_data(t, put_item); - my_aggregator.execute(&op_data); - task *ft = grab_forwarding_task(op_data); - // sequencer_nodes can return failure (if an item has been previously inserted) - // We have to spawn the returned task if our own operation fails. - - if(ft && op_data.status == FAILED) { - // we haven't succeeded queueing the item, but for some reason the - // call returned a task (if another request resulted in a successful - // forward this could happen.) Queue the task and reset the pointer. - FLOW_SPAWN(*ft); ft = NULL; - } - else if(!ft && op_data.status == SUCCEEDED) { - ft = SUCCESSFULLY_ENQUEUED; - } - return ft; - } - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - internal::reservable_item_buffer::reset(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - if (f&rf_extract) { - my_built_predecessors.receiver_extract(*this); - } -#endif - forwarder_busy = false; - } - - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { } - -}; // buffer_node - -//! 
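
Taken together, the buffer_node interface above offers both a plain pull path (try_get) and the two-phase reservation path (try_reserve, then try_consume or try_release) that reserving join_node ports rely on. A usage sketch, assuming the classic API:

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        tbb::flow::buffer_node<int> buf(g);
        buf.try_put(10);
        buf.try_put(20);            // buffer_node forwards in arbitrary order

        int v = 0;
        if (buf.try_reserve(v)) {   // phase 1: lock one item for this consumer
            if (v == 10)
                buf.try_consume();  // phase 2a: commit; the item leaves the buffer
            else
                buf.try_release(); // phase 2b: roll back; the item stays buffered
        }
        while (buf.try_get(v))      // drain whatever remains
            std::cout << v << "\n";
        g.wait_for_all();
        return 0;
    }
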
Forwards messages in FIFO order -template > -class queue_node : public buffer_node { -protected: - typedef buffer_node base_type; - typedef typename base_type::size_type size_type; - typedef typename base_type::buffer_operation queue_operation; - - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - - /* override */ void internal_forward_task(queue_operation *op) { - if (this->my_reserved || !this->my_item_valid(this->my_head)) { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - return; - } - T i_copy; - task *last_task = NULL; - size_type counter = this->my_successors.size(); - // Keep trying to send items while there is at least one accepting successor - while (counter>0 && this->my_item_valid(this->my_head)) { - this->copy_front(i_copy); - task *new_task = this->my_successors.try_put_task(i_copy); - if(new_task) { - this->destroy_front(); - last_task = combine_tasks(last_task, new_task); - } - --counter; - } - op->ltask = last_task; - if (last_task && !counter) - __TBB_store_with_release(op->status, SUCCEEDED); - else { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - } - } - - /* override */ void internal_pop(queue_operation *op) { - if ( this->my_reserved || !this->my_item_valid(this->my_head)){ - __TBB_store_with_release(op->status, FAILED); - } - else { - this->pop_front(*(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - } - /* override */ void internal_reserve(queue_operation *op) { - if (this->my_reserved || !this->my_item_valid(this->my_head)) { - __TBB_store_with_release(op->status, FAILED); - } - else { - this->reserve_front(*(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - } - } - /* override */ void internal_consume(queue_operation *op) { - this->consume_front(); - __TBB_store_with_release(op->status, SUCCEEDED); - } - -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - - //! Constructor - queue_node( graph &g ) : base_type(g) { - tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Copy constructor - queue_node( const queue_node& src) : base_type(src) { - tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - base_type::reset(__TBB_PFG_RESET_ARG(f)); - } -}; // queue_node - -//! Forwards messages in sequence order -template< typename T, typename A=cache_aligned_allocator > -class sequencer_node : public queue_node { - internal::function_body< T, size_t > *my_sequencer; - // my_sequencer should be a benign function and must be callable - // from a parallel context. Does this mean it needn't be reset? -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - - //! Constructor - template< typename Sequencer > - sequencer_node( graph &g, const Sequencer& s ) : queue_node(g), - my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) { - tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! 
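
queue_node specializes buffer_node only in its internal_* operations, replacing arbitrary-order forwarding with strict FIFO on my_head. A quick sketch of the observable difference (classic API assumed):

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        tbb::flow::queue_node<int> q(g);
        q.try_put(1);
        q.try_put(2);
        q.try_put(3);

        int v = 0;
        while (q.try_get(v))
            std::cout << v << "\n";   // FIFO: prints 1, 2, 3 in that order
        g.wait_for_all();
        return 0;
    }
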
Copy constructor - sequencer_node( const sequencer_node& src ) : queue_node(src), - my_sequencer( src.my_sequencer->clone() ) { - tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Destructor - ~sequencer_node() { delete my_sequencer; } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - typedef typename buffer_node::size_type size_type; - typedef typename buffer_node::buffer_operation sequencer_operation; - - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - -private: - /* override */ void internal_push(sequencer_operation *op) { - size_type tag = (*my_sequencer)(*(op->elem)); -#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES - if(tag < this->my_head) { - // have already emitted a message with this tag - __TBB_store_with_release(op->status, FAILED); - return; - } -#endif - // cannot modify this->my_tail now; the buffer would be inconsistent. - size_t new_tail = (tag+1 > this->my_tail) ? tag+1 : this->my_tail; - - if(this->size(new_tail) > this->capacity()) { - this->grow_my_array(this->size(new_tail)); - } - this->my_tail = new_tail; - if(this->place_item(tag,*(op->elem))) { - __TBB_store_with_release(op->status, SUCCEEDED); - } - else { - // already have a message with this tag - __TBB_store_with_release(op->status, FAILED); - } - } -}; // sequencer_node - -//! Forwards messages in priority order -template< typename T, typename Compare = std::less, typename A=cache_aligned_allocator > -class priority_queue_node : public buffer_node { -public: - typedef T input_type; - typedef T output_type; - typedef buffer_node base_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; - - //! Constructor - priority_queue_node( graph &g ) : buffer_node(g), mark(0) { - tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! 
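
A sequencer_node is constructed with a user functor that maps each message to its sequence number; internal_push() then places items by that tag and releases them in 0, 1, 2, ... order regardless of arrival order. A sketch under the same classic-API assumption (the msg type and its fields are invented for the example):

    #include "tbb/flow_graph.h"
    #include <cstddef>
    #include <iostream>

    struct msg { std::size_t id; int payload; };

    int main() {
        tbb::flow::graph g;
        tbb::flow::sequencer_node<msg> seq(g,
            [](const msg& m) { return m.id; });   // tag = sequence number
        tbb::flow::function_node<msg, int> sink(g, tbb::flow::serial,
            [](const msg& m) { std::cout << m.payload << "\n"; return 0; });
        tbb::flow::make_edge(seq, sink);

        seq.try_put(msg{2, 30});   // held: tags 0 and 1 not yet seen
        seq.try_put(msg{0, 10});   // forwarded immediately
        seq.try_put(msg{1, 20});   // releases 1, then the buffered 2
        g.wait_for_all();          // sink prints 10, 20, 30
        return 0;
    }
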
Copy constructor - priority_queue_node( const priority_queue_node &src ) : buffer_node(src), mark(0) { - tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - -protected: - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - mark = 0; - base_type::reset(__TBB_PFG_RESET_ARG(f)); - } - - typedef typename buffer_node::size_type size_type; - typedef typename buffer_node::item_type item_type; - typedef typename buffer_node::buffer_operation prio_operation; - - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - - /* override */ void handle_operations(prio_operation *op_list) { - prio_operation *tmp = op_list /*, *pop_list*/ ; - bool try_forwarding=false; - while (op_list) { - tmp = op_list; - op_list = op_list->next; - switch (tmp->type) { - case buffer_node::reg_succ: this->internal_reg_succ(tmp); try_forwarding = true; break; - case buffer_node::rem_succ: this->internal_rem_succ(tmp); break; - case buffer_node::put_item: internal_push(tmp); try_forwarding = true; break; - case buffer_node::try_fwd_task: internal_forward_task(tmp); break; - case buffer_node::rel_res: internal_release(tmp); try_forwarding = true; break; - case buffer_node::con_res: internal_consume(tmp); try_forwarding = true; break; - case buffer_node::req_item: internal_pop(tmp); break; - case buffer_node::res_item: internal_reserve(tmp); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case buffer_node::add_blt_succ: this->internal_add_built_succ(tmp); break; - case buffer_node::del_blt_succ: this->internal_del_built_succ(tmp); break; - case buffer_node::add_blt_pred: this->internal_add_built_pred(tmp); break; - case buffer_node::del_blt_pred: this->internal_del_built_pred(tmp); break; - case buffer_node::blt_succ_cnt: this->internal_succ_cnt(tmp); break; - case buffer_node::blt_pred_cnt: this->internal_pred_cnt(tmp); break; - case buffer_node::blt_succ_cpy: this->internal_copy_succs(tmp); break; - case buffer_node::blt_pred_cpy: this->internal_copy_preds(tmp); break; -#endif - } - } - // process pops! for now, no special pop processing - if (markmy_tail) heapify(); - if (try_forwarding && !this->forwarder_busy) { - task* tp = this->my_graph.root_task(); - if(tp) { - this->forwarder_busy = true; - task *new_task = new(task::allocate_additional_child_of(*tp)) internal:: - forward_task_bypass - < buffer_node >(*this); - // tmp should point to the last item handled by the aggregator. This is the operation - // the handling thread enqueued. So modifying that record will be okay. - tbb::task *tmp1 = tmp->ltask; - tmp->ltask = combine_tasks(tmp1, new_task); - } - } - } - - //! 
Tries to forward valid items to successors - /* override */ void internal_forward_task(prio_operation *op) { - T i_copy; - task * last_task = NULL; // flagged when a successor accepts - size_type counter = this->my_successors.size(); - - if (this->my_reserved || this->my_tail == 0) { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - return; - } - // Keep trying to send while there exists an accepting successor - while (counter>0 && this->my_tail > 0) { - i_copy = this->get_my_item(0); - task * new_task = this->my_successors.try_put_task(i_copy); - if ( new_task ) { - last_task = combine_tasks(last_task, new_task); - this->destroy_item(0); // we've forwarded this item - if (mark == this->my_tail) --mark; - if(--(this->my_tail)) { // didn't consume last item on heap - this->move_item(0,this->my_tail); - } - if (this->my_tail > 1) // don't reheap for heap of size 1 - reheap(); - } - --counter; - } - op->ltask = last_task; - if (last_task && !counter) - __TBB_store_with_release(op->status, SUCCEEDED); - else { - __TBB_store_with_release(op->status, FAILED); - this->forwarder_busy = false; - } - } - - /* override */ void internal_push(prio_operation *op) { - if ( this->my_tail >= this->my_array_size ) - this->grow_my_array( this->my_tail + 1 ); - (void) this->place_item(this->my_tail, *(op->elem)); - ++(this->my_tail); - __TBB_store_with_release(op->status, SUCCEEDED); - } - - /* override */ void internal_pop(prio_operation *op) { - // if empty or already reserved, don't pop - if ( this->my_reserved == true || this->my_tail == 0 ) { - __TBB_store_with_release(op->status, FAILED); - return; - } - if (markmy_tail && // item pushed, no re-heap - compare(this->get_my_item(0), - this->get_my_item(this->my_tail-1))) { - // there are newly pushed elems; last one higher than top - // copy the data - this->fetch_item(this->my_tail-1, *(op->elem)); - __TBB_store_with_release(op->status, SUCCEEDED); - --(this->my_tail); - return; - } - // extract and push the last element down heap - *(op->elem) = this->get_my_item(0); // copy the data, item 0 still valid - __TBB_store_with_release(op->status, SUCCEEDED); - if (mark == this->my_tail) --mark; - __TBB_ASSERT(this->my_item_valid(this->my_tail - 1), NULL); - if(--(this->my_tail)) { - // there were two or more items in heap. 
Move the - // last item to the top of the heap - this->set_my_item(0,this->get_my_item(this->my_tail)); - } - this->destroy_item(this->my_tail); - if (this->my_tail > 1) // don't reheap for heap of size 1 - reheap(); - } - - /* override */ void internal_reserve(prio_operation *op) { - if (this->my_reserved == true || this->my_tail == 0) { - __TBB_store_with_release(op->status, FAILED); - return; - } - this->my_reserved = true; - *(op->elem) = reserved_item = this->get_my_item(0); - if (mark == this->my_tail) --mark; - --(this->my_tail); - __TBB_store_with_release(op->status, SUCCEEDED); - this->set_my_item(0, this->get_my_item(this->my_tail)); - this->destroy_item(this->my_tail); - if (this->my_tail > 1) - reheap(); - } - - /* override */ void internal_consume(prio_operation *op) { - this->my_reserved = false; - __TBB_store_with_release(op->status, SUCCEEDED); - } - /* override */ void internal_release(prio_operation *op) { - if (this->my_tail >= this->my_array_size) - this->grow_my_array( this->my_tail + 1 ); - this->set_my_item(this->my_tail, reserved_item); - ++(this->my_tail); - this->my_reserved = false; - __TBB_store_with_release(op->status, SUCCEEDED); - heapify(); - } -private: - Compare compare; - size_type mark; - input_type reserved_item; - - // turn array into heap - void heapify() { - if (!mark) mark = 1; - for (; markmy_tail; ++mark) { // for each unheaped element - size_type cur_pos = mark; - input_type to_place; - this->fetch_item(mark,to_place); - do { // push to_place up the heap - size_type parent = (cur_pos-1)>>1; - if (!compare(this->get_my_item(parent), to_place)) - break; - this->move_item(cur_pos, parent); - cur_pos = parent; - } while( cur_pos ); - (void) this->place_item(cur_pos, to_place); - } - } - - // otherwise heapified array with new root element; rearrange to heap - void reheap() { - size_type cur_pos=0, child=1; - while (child < mark) { - size_type target = child; - if (child+1get_my_item(child), - this->get_my_item(child+1))) - ++target; - // target now has the higher priority child - if (compare(this->get_my_item(target), - this->get_my_item(cur_pos))) - break; - // swap - this->swap_items(cur_pos, target); - cur_pos = target; - child = (cur_pos<<1)+1; - } - } -}; // priority_queue_node - -//! Forwards messages only if the threshold has not been reached -/** This node forwards items until its threshold is reached. - It contains no buffering. If the downstream node rejects, the - message is dropped. 
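
heapify() above incrementally pushes any unheaped tail elements (those at or past mark) into the heap, while reheap() sifts a new root down after the top item is removed; together they keep every get/reserve operation working on the highest-priority element. The observable behavior, sketched against the classic API:

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        // Compare defaults to std::less<int>, so the largest value has the
        // highest priority and sits at the top of the internal heap.
        tbb::flow::priority_queue_node<int> pq(g);
        pq.try_put(3);
        pq.try_put(9);
        pq.try_put(5);

        int v = 0;
        while (pq.try_get(v))
            std::cout << v << "\n";   // prints 9, 5, 3
        g.wait_for_all();
        return 0;
    }
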
*/ -template< typename T > -class limiter_node : public graph_node, public receiver< T >, public sender< T > { -protected: - using graph_node::my_graph; -public: - typedef T input_type; - typedef T output_type; - typedef sender< input_type > predecessor_type; - typedef receiver< output_type > successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; - typedef std::vector predecessor_vector_type; -#endif - -private: - size_t my_threshold; - size_t my_count; //number of successful puts - size_t my_tries; //number of active put attempts - internal::reservable_predecessor_cache< T, spin_mutex > my_predecessors; - spin_mutex my_mutex; - internal::broadcast_cache< T > my_successors; - int init_decrement_predecessors; - - friend class internal::forward_task_bypass< limiter_node >; - - // Let decrementer call decrement_counter() - friend class internal::decrementer< limiter_node >; - - bool check_conditions() { // always called under lock - return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() ); - } - - // only returns a valid task pointer or NULL, never SUCCESSFULLY_ENQUEUED - task *forward_task() { - input_type v; - task *rval = NULL; - bool reserved = false; - { - spin_mutex::scoped_lock lock(my_mutex); - if ( check_conditions() ) - ++my_tries; - else - return NULL; - } - - //SUCCESS - // if we can reserve and can put, we consume the reservation - // we increment the count and decrement the tries - if ( (my_predecessors.try_reserve(v)) == true ){ - reserved=true; - if ( (rval = my_successors.try_put_task(v)) != NULL ){ - { - spin_mutex::scoped_lock lock(my_mutex); - ++my_count; - --my_tries; - my_predecessors.try_consume(); - if ( check_conditions() ) { - task* tp = this->my_graph.root_task(); - if ( tp ) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass< limiter_node >( *this ); - FLOW_SPAWN (*rtask); - } - } - } - return rval; - } - } - //FAILURE - //if we can't reserve, we decrement the tries - //if we can reserve but can't put, we decrement the tries and release the reservation - { - spin_mutex::scoped_lock lock(my_mutex); - --my_tries; - if (reserved) my_predecessors.try_release(); - if ( check_conditions() ) { - task* tp = this->my_graph.root_task(); - if ( tp ) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass< limiter_node >( *this ); - __TBB_ASSERT(!rval, "Have two tasks to handle"); - return rtask; - } - } - return rval; - } - } - - void forward() { - __TBB_ASSERT(false, "Should never be called"); - return; - } - - task * decrement_counter() { - { - spin_mutex::scoped_lock lock(my_mutex); - if(my_count) --my_count; - } - return forward_task(); - } - -public: - //! The internal receiver< continue_msg > that decrements the count - internal::decrementer< limiter_node > decrement; - - //! Constructor - limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) : - graph_node(g), my_threshold(threshold), my_count(0), my_tries(0), - init_decrement_predecessors(num_decrement_predecessors), - decrement(num_decrement_predecessors) - { - my_predecessors.set_owner(this); - my_successors.set_owner(this); - decrement.set_owner(this); - tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(&decrement), - static_cast *>(this) ); - } - - //! 
Copy constructor - limiter_node( const limiter_node& src ) : - graph_node(src.my_graph), receiver(), sender(), - my_threshold(src.my_threshold), my_count(0), my_tries(0), - init_decrement_predecessors(src.init_decrement_predecessors), - decrement(src.init_decrement_predecessors) - { - my_predecessors.set_owner(this); - my_successors.set_owner(this); - decrement.set_owner(this); - tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(&decrement), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - //! Replace the current successor with this new successor - /* override */ bool register_successor( receiver &r ) { - spin_mutex::scoped_lock lock(my_mutex); - bool was_empty = my_successors.empty(); - my_successors.register_successor(r); - //spawn a forward task if this is the only successor - if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) { - task* tp = this->my_graph.root_task(); - if ( tp ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass < limiter_node >( *this ) ) ); - } - } - return true; - } - - //! Removes a successor from this node - /** r.remove_predecessor(*this) is also called. */ - /* override */ bool remove_successor( receiver &r ) { - r.remove_predecessor(*this); - my_successors.remove_successor(r); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor(receiver &src) { - my_successors.internal_add_built_successor(src); - } - - /*override*/void internal_delete_built_successor(receiver &src) { - my_successors.internal_delete_built_successor(src); - } - - /*override*/size_t successor_count() { return my_successors.successor_count(); } - - /*override*/ void copy_successors(successor_vector_type &v) { - my_successors.copy_successors(v); - } - - /*override*/void internal_add_built_predecessor(sender &src) { - my_predecessors.internal_add_built_predecessor(src); - } - - /*override*/void internal_delete_built_predecessor(sender &src) { - my_predecessors.internal_delete_built_predecessor(src); - } - - /*override*/size_t predecessor_count() { return my_predecessors.predecessor_count(); } - - /*override*/ void copy_predecessors(predecessor_vector_type &v) { - my_predecessors.copy_predecessors(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - //! Adds src to the list of cached predecessors. - /* override */ bool register_predecessor( predecessor_type &src ) { - spin_mutex::scoped_lock lock(my_mutex); - my_predecessors.add( src ); - task* tp = this->my_graph.root_task(); - if ( my_count + my_tries < my_threshold && !my_successors.empty() && tp ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass < limiter_node >( *this ) ) ); - } - return true; - } - - //! Removes src from the list of cached predecessors. - /* override */ bool remove_predecessor( predecessor_type &src ) { - my_predecessors.remove( src ); - return true; - } - -protected: - - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - //! 
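
In practice the decrement member above is wired to the completion side of the pipeline: each continue_msg arriving there lowers my_count and re-triggers forwarding. A typical throttling arrangement, sketched with the classic API; an upstream buffer_node is used so the limiter can pull items through its reservable predecessor cache instead of dropping them:

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        tbb::flow::limiter_node<int> limiter(g, 2);   // at most 2 in flight
        tbb::flow::function_node<int, tbb::flow::continue_msg> work(
            g, tbb::flow::serial, [](int v) {
                std::cout << "processing " << v << "\n";
                return tbb::flow::continue_msg();
            });

        tbb::flow::make_edge(limiter, work);
        tbb::flow::make_edge(work, limiter.decrement);  // completion re-opens

        tbb::flow::buffer_node<int> source(g);          // reservable upstream
        tbb::flow::make_edge(source, limiter);
        for (int i = 0; i < 10; ++i)
            source.try_put(i);
        g.wait_for_all();
        return 0;
    }
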
Puts an item to this receiver - /* override */ task *try_put_task( const T &t ) { - { - spin_mutex::scoped_lock lock(my_mutex); - if ( my_count + my_tries >= my_threshold ) - return NULL; - else - ++my_tries; - } - - task * rtask = my_successors.try_put_task(t); - - if ( !rtask ) { // try_put_task failed. - spin_mutex::scoped_lock lock(my_mutex); - --my_tries; - task* tp = this->my_graph.root_task(); - if ( check_conditions() && tp ) { - rtask = new ( task::allocate_additional_child_of( *tp ) ) - internal::forward_task_bypass< limiter_node >( *this ); - } - } - else { - spin_mutex::scoped_lock lock(my_mutex); - ++my_count; - --my_tries; - } - return rtask; - } - - /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_count = 0; - my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); - decrement.reset_receiver(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); -#endif - } - - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); } -}; // limiter_node - -#include "internal/_flow_graph_join_impl.h" - -using internal::reserving_port; -using internal::queueing_port; -using internal::tag_matching_port; -using internal::input_port; -using internal::tag_value; -using internal::NO_TAG; - -template class join_node; - -template -class join_node: public internal::unfolded_join_node::value, reserving_port, OutputTuple, reserving> { -private: - static const int N = tbb::flow::tuple_size::value; - typedef typename internal::unfolded_join_node unfolded_type; -public: - typedef OutputTuple output_type; - typedef typename unfolded_type::input_ports_type input_ports_type; - join_node(graph &g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -}; - -template -class join_node: public internal::unfolded_join_node::value, queueing_port, OutputTuple, queueing> { -private: - static const int N = tbb::flow::tuple_size::value; - typedef typename internal::unfolded_join_node unfolded_type; -public: - typedef OutputTuple output_type; - typedef typename unfolded_type::input_ports_type input_ports_type; - join_node(graph &g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -}; - -// template for tag_matching join_node -template -class join_node : public internal::unfolded_join_node::value, - tag_matching_port, OutputTuple, tag_matching> { -private: - static const int N = tbb::flow::tuple_size::value; - typedef typename internal::unfolded_join_node unfolded_type; -public: - typedef OutputTuple 
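
The reserving and queueing join_node specializations above differ in how inputs wait: queueing ports buffer arrivals internally, while reserving ports leave items in upstream reservable buffers and consume them only once every port can reserve. A queueing example, assuming tbb::flow::tuple aliases std::tuple (true when C++11 is available):

    #include "tbb/flow_graph.h"
    #include <iostream>
    #include <tuple>

    int main() {
        tbb::flow::graph g;
        typedef tbb::flow::tuple<int, float> pair_type;

        tbb::flow::join_node<pair_type, tbb::flow::queueing> join(g);
        tbb::flow::function_node<pair_type, int> sink(g, tbb::flow::serial,
            [](const pair_type& t) {
                std::cout << std::get<0>(t) << ", "
                          << std::get<1>(t) << "\n";
                return 0;
            });
        tbb::flow::make_edge(join, sink);

        tbb::flow::input_port<0>(join).try_put(1);
        tbb::flow::input_port<1>(join).try_put(2.5f);  // emits tuple (1, 2.5)
        g.wait_for_all();
        return 0;
    }
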
output_type; - typedef typename unfolded_type::input_ports_type input_ports_type; - - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) : - unfolded_type(g, b0, b1, b2, b3, b4) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#if __TBB_VARIADIC_MAX >= 6 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) : - unfolded_type(g, b0, b1, b2, b3, b4, b5) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 7 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) : - unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 8 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, - __TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 9 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, - __TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif -#if __TBB_VARIADIC_MAX >= 10 - template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, - __TBB_B7 b7, __TBB_B8 b8, __TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } -#endif - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const 
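
Each tag_matching constructor above takes one body per input port; a body extracts a tag_value from its incoming message, and a tuple is emitted only when every port holds an item with the same tag. A sketch under the same assumptions (the left/right record types are invented for illustration):

    #include "tbb/flow_graph.h"
    #include <iostream>
    #include <tuple>

    struct left  { int key; int value; };
    struct right { int key; int value; };

    int main() {
        tbb::flow::graph g;
        typedef tbb::flow::tuple<left, right> pair_type;

        // one tag-extraction body per input port
        tbb::flow::join_node<pair_type, tbb::flow::tag_matching> join(g,
            [](const left&  l) -> tbb::flow::tag_value { return l.key; },
            [](const right& r) -> tbb::flow::tag_value { return r.key; });
        tbb::flow::function_node<pair_type, int> sink(g, tbb::flow::serial,
            [](const pair_type& t) {
                std::cout << std::get<0>(t).value << " + "
                          << std::get<1>(t).value << "\n";
                return 0;
            });
        tbb::flow::make_edge(join, sink);

        tbb::flow::input_port<0>(join).try_put(left{7, 10});
        tbb::flow::input_port<1>(join).try_put(right{7, 20}); // tag 7 matches
        g.wait_for_all();
        return 0;
    }
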
char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -}; - -// indexer node -#include "internal/_flow_graph_indexer_impl.h" - -struct indexer_null_type {}; - -template class indexer_node; - -//indexer node specializations -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 1; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 2; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 3; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 4; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< 
output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 5; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; - -#if __TBB_VARIADIC_MAX >= 6 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 6; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 6 - -#if __TBB_VARIADIC_MAX >= 7 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 7; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 7 - -#if __TBB_VARIADIC_MAX >= 8 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 8; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - 
tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 8 - -#if __TBB_VARIADIC_MAX >= 9 -template -class indexer_node : public internal::unfolded_indexer_node > { -private: - static const int N = 9; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 9 - -#if __TBB_VARIADIC_MAX >= 10 -template -class indexer_node/*default*/ : public internal::unfolded_indexer_node > { -private: - static const int N = 10; -public: - typedef tuple InputTuple; - typedef typename internal::tagged_msg output_type; - typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, - this->input_ports(), static_cast< sender< output_type > *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - /* override */ void set_name( const char *name ) { - tbb::internal::fgt_node_desc( this, name ); - } -#endif -}; -#endif //variadic max 10 - -//! Makes an edge between a single predecessor and a single successor -template< typename T > -inline void make_edge( sender &p, receiver &s ) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - s.internal_add_built_predecessor(p); - p.internal_add_built_successor(s); -#endif - p.register_successor( s ); - tbb::internal::fgt_make_edge( &p, &s ); -} - -//! Makes an edge between a single predecessor and a single successor -template< typename T > -inline void remove_edge( sender &p, receiver &s ) { - p.remove_successor( s ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // TODO: should we try to remove p from the predecessor list of s, in case the edge is reversed? - p.internal_delete_built_successor(s); - s.internal_delete_built_predecessor(p); -#endif - tbb::internal::fgt_remove_edge( &p, &s ); -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -template -template< typename S > -void edge_container::sender_extract( S &s ) { - edge_vector e = built_edges; - for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) { - remove_edge(s, **i); - } -} - -template -template< typename R > -void edge_container::receiver_extract( R &r ) { - edge_vector e = built_edges; - for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) { - remove_edge(**i, r); - } -} -#endif - -//! 
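
indexer_node is the counterpart to join_node: rather than waiting to combine inputs, it forwards each arrival immediately as a tagged_msg that records which port it came from, to be unpacked with cast_to/is_a. Together with make_edge(), a sketch of the complete wiring (classic API assumed):

    #include "tbb/flow_graph.h"
    #include <iostream>

    int main() {
        tbb::flow::graph g;
        typedef tbb::flow::indexer_node<int, float> indexer_type;
        typedef indexer_type::output_type tagged_type;

        indexer_type indexer(g);
        tbb::flow::function_node<tagged_type, int> sink(
            g, tbb::flow::serial, [](const tagged_type& m) {
                if (m.tag() == 0)   // came in on input_port<0>
                    std::cout << "int: "   << tbb::flow::cast_to<int>(m)   << "\n";
                else
                    std::cout << "float: " << tbb::flow::cast_to<float>(m) << "\n";
                return 0;
            });
        tbb::flow::make_edge(indexer, sink);   // registers successor + trace

        tbb::flow::input_port<0>(indexer).try_put(3);
        tbb::flow::input_port<1>(indexer).try_put(4.5f);
        g.wait_for_all();
        return 0;
    }
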
Returns a copy of the body from a function or continue node -template< typename Body, typename Node > -Body copy_body( Node &n ) { - return n.template copy_function_object(); -} - -} // interface7 - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - using interface7::reset_flags; - using interface7::rf_reset_protocol; - using interface7::rf_reset_bodies; - using interface7::rf_extract; -#endif - - using interface7::graph; - using interface7::graph_node; - using interface7::continue_msg; - using interface7::sender; - using interface7::receiver; - using interface7::continue_receiver; - - using interface7::source_node; - using interface7::function_node; - using interface7::multifunction_node; - using interface7::split_node; - using interface7::internal::output_port; - using interface7::indexer_node; - using interface7::internal::tagged_msg; - using interface7::internal::cast_to; - using interface7::internal::is_a; - using interface7::continue_node; - using interface7::overwrite_node; - using interface7::write_once_node; - using interface7::broadcast_node; - using interface7::buffer_node; - using interface7::queue_node; - using interface7::sequencer_node; - using interface7::priority_queue_node; - using interface7::limiter_node; - using namespace interface7::internal::graph_policy_namespace; - using interface7::join_node; - using interface7::input_port; - using interface7::copy_body; - using interface7::make_edge; - using interface7::remove_edge; - using interface7::internal::NO_TAG; - using interface7::internal::tag_value; - -} // flow -} // tbb - -#undef __TBB_PFG_RESET_ARG -#undef __TBB_COMMA - -#endif // __TBB_flow_graph_H +#include "../oneapi/tbb/flow_graph.h" diff --git a/src/tbb/include/tbb/flow_graph_abstractions.h b/src/tbb/include/tbb/flow_graph_abstractions.h new file mode 100644 index 000000000..cd9dc2967 --- /dev/null +++ b/src/tbb/include/tbb/flow_graph_abstractions.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../oneapi/tbb/flow_graph_abstractions.h" diff --git a/src/tbb/include/tbb/global_control.h b/src/tbb/include/tbb/global_control.h new file mode 100644 index 000000000..2688996ec --- /dev/null +++ b/src/tbb/include/tbb/global_control.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "../oneapi/tbb/global_control.h" diff --git a/src/tbb/include/tbb/index.html b/src/tbb/include/tbb/index.html deleted file mode 100644 index 6ceb5da61..000000000 --- a/src/tbb/include/tbb/index.html +++ /dev/null @@ -1,29 +0,0 @@ - - - -

-<H2>Overview</H2>
-Include files for Intel&reg; Threading Building Blocks classes and functions.
-
-<BR><A HREF=".">Click here</A> to see all files in the directory.
-
-<H2>Directories</H2>
-<DL>
-<DT><A HREF="compat">compat</A>
-<DD>Include files for source level compatibility with other frameworks.
-<DT><A HREF="internal">internal</A>
-<DD>Include files with implementation details; not for direct use.
-<DT><A HREF="machine">machine</A>
-<DD>Include files for low-level architecture specific functionality; not for direct use.
-</DL>
-
-<HR>
-<A HREF="../index.html">Up to parent directory</A>
-<P></P>
-Copyright &copy; 2005-2014 Intel Corporation. All Rights Reserved.
-<P></P>
-Intel is a registered trademark or trademark of Intel Corporation
-or its subsidiaries in the United States and other countries.

-* Other names and brands may be claimed as the property of others. - - diff --git a/src/tbb/include/tbb/info.h b/src/tbb/include/tbb/info.h new file mode 100644 index 000000000..02d331650 --- /dev/null +++ b/src/tbb/include/tbb/info.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2019-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../oneapi/tbb/info.h" diff --git a/src/tbb/include/tbb/internal/_aggregator_impl.h b/src/tbb/include/tbb/internal/_aggregator_impl.h deleted file mode 100644 index 854cb4eef..000000000 --- a/src/tbb/include/tbb/internal/_aggregator_impl.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__aggregator_impl_H -#define __TBB__aggregator_impl_H - -#include "../atomic.h" -#if !__TBBMALLOC_BUILD -#include "../tbb_profiling.h" -#endif - -namespace tbb { -namespace interface6 { -namespace internal { - -using namespace tbb::internal; - -//! aggregated_operation base class -template -class aggregated_operation { - public: - uintptr_t status; - Derived *next; - aggregated_operation() : status(0), next(NULL) {} -}; - -//! Aggregator base class -/** An aggregator for collecting operations coming from multiple sources and executing - them serially on a single thread. operation_type must be derived from - aggregated_operation. The parameter handler_type is a functor that will be passed the - list of operations and is expected to handle each operation appropriately, setting the - status of each operation to non-zero.*/ -template < typename operation_type > -class aggregator_generic { -public: - aggregator_generic() : handler_busy(false) { pending_operations = NULL; } - - //! 
Place operation in list - /** Place operation in list and either handle list or wait for operation to - complete. - long_life_time specifies life time of an operation inserting in an aggregator. - "Long" (long_life_time == true) life time operation can be accessed - even after executing it. - "Short" (long_life_time == false) life time operations can be destroyed - during executing so any access to it after executing is invalid.*/ - template < typename handler_type > - void execute(operation_type *op, handler_type &handle_operations, bool long_life_time = true) { - operation_type *res; - // op->status should be read before inserting the operation in the - // aggregator queue since it can become invalid after executing a - // handler (if the operation has 'short' life time.) - const uintptr_t status = op->status; - - // ITT note: &(op->status) tag is used to cover accesses to this op node. This - // thread has created the operation, and now releases it so that the handler - // thread may handle the associated operation w/o triggering a race condition; - // thus this tag will be acquired just before the operation is handled in the - // handle_operations functor. - call_itt_notify(releasing, &(op->status)); - // insert the operation in the queue. - do { - // ITT may flag the following line as a race; it is a false positive: - // This is an atomic read; we don't provide itt_hide_load_word for atomics - op->next = res = pending_operations; // NOT A RACE - } while (pending_operations.compare_and_swap(op, res) != res); - if (!res) { // first in the list; handle the operations. - // ITT note: &pending_operations tag covers access to the handler_busy flag, - // which this waiting handler thread will try to set before entering - // handle_operations. - call_itt_notify(acquired, &pending_operations); - start_handle_operations(handle_operations); - // The operation with 'short' life time can already be destroyed. - if (long_life_time) - __TBB_ASSERT(op->status, NULL); - } - // not first; wait for op to be ready. - else if (!status) { // operation is blocking here. - __TBB_ASSERT(long_life_time, "The blocking operation cannot have 'short' life time. Since it can already be destroyed."); - call_itt_notify(prepare, &(op->status)); - spin_wait_while_eq(op->status, uintptr_t(0)); - itt_load_word_with_acquire(op->status); - } - } - - private: - //! An atomically updated list (aka mailbox) of pending operations - atomic pending_operations; - //! Controls thread access to handle_operations - uintptr_t handler_busy; - - //! Trigger the handling of operations when the handler is free - template < typename handler_type > - void start_handle_operations( handler_type &handle_operations ) { - operation_type *op_list; - - // ITT note: &handler_busy tag covers access to pending_operations as it is passed - // between active and waiting handlers. Below, the waiting handler waits until - // the active handler releases, and the waiting handler acquires &handler_busy as - // it becomes the active_handler. The release point is at the end of this - // function, when all operations in pending_operations have been handled by the - // owner of this aggregator. 
- call_itt_notify(prepare, &handler_busy); - // get the handler_busy: - // only one thread can possibly spin here at a time - spin_wait_until_eq(handler_busy, uintptr_t(0)); - call_itt_notify(acquired, &handler_busy); - // acquire fence not necessary here due to causality rule and surrounding atomics - __TBB_store_with_release(handler_busy, uintptr_t(1)); - - // ITT note: &pending_operations tag covers access to the handler_busy flag - // itself. Capturing the state of the pending_operations signifies that - // handler_busy has been set and a new active handler will now process that list's - // operations. - call_itt_notify(releasing, &pending_operations); - // grab pending_operations - op_list = pending_operations.fetch_and_store(NULL); - - // handle all the operations - handle_operations(op_list); - - // release the handler - itt_store_word_with_release(handler_busy, uintptr_t(0)); - } -}; - -template < typename handler_type, typename operation_type > -class aggregator : public aggregator_generic { - handler_type handle_operations; -public: - aggregator() {} - explicit aggregator(handler_type h) : handle_operations(h) {} - - void initialize_handler(handler_type h) { handle_operations = h; } - - void execute(operation_type *op) { - aggregator_generic::execute(op, handle_operations); - } -}; - -// the most-compatible friend declaration (vs, gcc, icc) is -// template friend class aggregating_functor; -template -class aggregating_functor { - aggregating_class *fi; -public: - aggregating_functor() {} - aggregating_functor(aggregating_class *fi_) : fi(fi_) {} - void operator()(operation_list* op_list) { fi->handle_operations(op_list); } -}; - -} // namespace internal -} // namespace interface6 - -namespace internal { - using interface6::internal::aggregated_operation; - using interface6::internal::aggregator_generic; - using interface6::internal::aggregator; - using interface6::internal::aggregating_functor; -} // namespace internal - -} // namespace tbb - -#endif // __TBB__aggregator_impl_H diff --git a/src/tbb/include/tbb/internal/_concurrent_queue_impl.h b/src/tbb/include/tbb/internal/_concurrent_queue_impl.h deleted file mode 100644 index 41248baff..000000000 --- a/src/tbb/include/tbb/internal/_concurrent_queue_impl.h +++ /dev/null @@ -1,1082 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
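The `_aggregator_impl.h` header deleted above implements a multi-producer aggregator: each thread CASes its operation onto a shared pending list, and the thread that installs the first node becomes the handler, draining the accumulated batch serially while the others spin on their operation's `status`. Below is a simplified stand-alone sketch of that idea using `std::atomic`; unlike the deleted code it omits the `handler_busy` flag, so successive batches may be handled by different threads concurrently, and it omits the ITT instrumentation entirely. `mini_aggregator` and `op_node` are illustrative names, not TBB's:

```cpp
// Simplified aggregator sketch: lock-free LIFO hand-off of operations to a
// single draining thread per batch.
#include <atomic>

struct op_node {
    op_node* next = nullptr;
    std::atomic<int> status{0};  // set non-zero by the handler once done
};

class mini_aggregator {
    std::atomic<op_node*> pending_{nullptr};

public:
    template <typename Handler>
    void execute(op_node* op, Handler handle_batch) {
        // Publish our operation at the head of the pending list.
        op_node* head = pending_.load(std::memory_order_relaxed);
        do {
            op->next = head;
        } while (!pending_.compare_exchange_weak(
            head, op, std::memory_order_release, std::memory_order_relaxed));

        if (head == nullptr) {
            // We were first in: take the whole batch and handle it serially.
            op_node* batch = pending_.exchange(nullptr, std::memory_order_acquire);
            handle_batch(batch);  // must set status on every node in 'batch'
        } else {
            // Another thread is the handler; wait until it marks us done.
            while (op->status.load(std::memory_order_acquire) == 0) {
                /* spin; a real implementation would back off */
            }
        }
    }
};
```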
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__concurrent_queue_impl_H -#define __TBB__concurrent_queue_impl_H - -#ifndef __TBB_concurrent_queue_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "../tbb_stddef.h" -#include "../tbb_machine.h" -#include "../atomic.h" -#include "../spin_mutex.h" -#include "../cache_aligned_allocator.h" -#include "../tbb_exception.h" -#include "../tbb_profiling.h" -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - -// forward declaration -namespace strict_ppl { -template class concurrent_queue; -} - -template class concurrent_bounded_queue; - -#endif - -//! For internal use only. -namespace strict_ppl { - -//! @cond INTERNAL -namespace internal { - -using namespace tbb::internal; - -typedef size_t ticket; - -template class micro_queue ; -template class micro_queue_pop_finalizer ; -template class concurrent_queue_base_v3; -template struct concurrent_queue_rep; - -//! parts of concurrent_queue_rep that do not have references to micro_queue -/** - * For internal use only. - */ -struct concurrent_queue_rep_base : no_copy { - template friend class micro_queue; - template friend class concurrent_queue_base_v3; - -protected: - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - // must be power of 2 - static const size_t n_queue = 8; - - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - atomic head_counter; - char pad1[NFS_MaxLineSize-sizeof(atomic)]; - atomic tail_counter; - char pad2[NFS_MaxLineSize-sizeof(atomic)]; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - - //! number of invalid entries in the queue - atomic n_invalid_entries; - - char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic)]; -} ; - -inline bool is_valid_page(const concurrent_queue_rep_base::page* p) { - return uintptr_t(p)>1; -} - -//! Abstract class to define interface for page allocation/deallocation -/** - * For internal use only. - */ -class concurrent_queue_page_allocator -{ - template friend class micro_queue ; - template friend class micro_queue_pop_finalizer ; -protected: - virtual ~concurrent_queue_page_allocator() {} -private: - virtual concurrent_queue_rep_base::page* allocate_page() = 0; - virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0; -} ; - -#if _MSC_VER && !defined(__INTEL_COMPILER) -// unary minus operator applied to unsigned type, result still unsigned -#pragma warning( push ) -#pragma warning( disable: 4146 ) -#endif - -//! A queue using simple locking. -/** For efficiency, this class has no constructor. - The caller is expected to zero-initialize it. */ -template -class micro_queue : no_copy { -public: - typedef void (*item_constructor_t)(T* location, const void* src); -private: - typedef concurrent_queue_rep_base::page page; - - //! 
Class used to ensure exception-safety of method "pop" - class destroyer: no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - void copy_item( page& dst, size_t dindex, const void* src, item_constructor_t construct_item ) { - construct_item( &get_ref(dst, dindex), src ); - } - - void copy_item( page& dst, size_t dindex, const page& src, size_t sindex, - item_constructor_t construct_item ) - { - T& src_item = get_ref( const_cast(src), sindex ); - construct_item( &get_ref(dst, dindex), static_cast(&src_item) ); - } - - void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = tbb::internal::move( from ); - } - - void spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const ; - -public: - friend class micro_queue_pop_finalizer; - - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - - static T& get_ref( page& p, size_t index ) { - return (&static_cast(static_cast(&p))->last)[index]; - } - - atomic head_page; - atomic head_counter; - - atomic tail_page; - atomic tail_counter; - - spin_mutex page_mutex; - - void push( const void* item, ticket k, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) ; - - bool pop( void* dst, ticket k, concurrent_queue_base_v3& base ) ; - - micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) ; - - page* make_copy( concurrent_queue_base_v3& base, const page* src_page, size_t begin_in_page, - size_t end_in_page, ticket& g_index, item_constructor_t construct_item ) ; - - void invalidate_page_and_rethrow( ticket k ) ; -}; - -template -void micro_queue::spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const { - for( atomic_backoff b(true);;b.pause() ) { - ticket c = counter; - if( c==k ) return; - else if( c&1 ) { - ++rb.n_invalid_entries; - throw_exception( eid_bad_last_alloc ); - } - } -} - -template -void micro_queue::push( const void* item, ticket k, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) -{ - k &= -concurrent_queue_rep_base::n_queue; - page* p = NULL; - size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page); - if( !index ) { - __TBB_TRY { - concurrent_queue_page_allocator& pa = base; - p = pa.allocate_page(); - } __TBB_CATCH (...) { - ++base.my_rep->n_invalid_entries; - invalidate_page_and_rethrow( k ); - } - p->mask = 0; - p->next = NULL; - } - - if( tail_counter != k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep ); - call_itt_notify(acquired, &tail_counter); - - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - page* q = tail_page; - if( is_valid_page(q) ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - - __TBB_TRY { - copy_item( *p, index, item, construct_item ); - // If no exception was thrown, mark item as present. 
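The `destroyer` class at the top of the hunk above is a small RAII guard used by `pop`: it runs the element's destructor when it leaves scope, so the vacated slot is cleaned up even if moving the value out throws. A stand-alone sketch of the same pattern, with hypothetical names `destroy_guard` and `move_out`:

```cpp
// RAII guard mirroring micro_queue's 'destroyer': the source object is
// destroyed on scope exit whether or not the assignment below throws.
#include <utility>

template <typename T>
class destroy_guard {
    T& ref_;

public:
    explicit destroy_guard(T& r) : ref_(r) {}
    destroy_guard(const destroy_guard&) = delete;
    destroy_guard& operator=(const destroy_guard&) = delete;
    ~destroy_guard() { ref_.~T(); }  // always runs, even during unwinding
};

// Move 'src' into '*dst'; 'src' is destroyed in every case, which is what
// assign_and_destroy_item in the deleted code relies on.
template <typename T>
void move_out(T* dst, T& src) {
    destroy_guard<T> g(src);
    *dst = std::move(src);  // may throw; the guard still destroys 'src'
}
```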
- itt_hide_store_word(p->mask, p->mask | uintptr_t(1)<n_invalid_entries; - call_itt_notify(releasing, &tail_counter); - tail_counter += concurrent_queue_rep_base::n_queue; - __TBB_RETHROW(); - } -} - -template -bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base_v3& base ) { - k &= -concurrent_queue_rep_base::n_queue; - if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); - call_itt_notify(acquired, &head_counter); - if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); - call_itt_notify(acquired, &tail_counter); - page& p = *head_page; - __TBB_ASSERT( &p, NULL ); - size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); - bool success = false; - { - micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); - if( p.mask & uintptr_t(1)<n_invalid_entries; - } - } - return success; -} - -template -micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base_v3& base, - item_constructor_t construct_item ) -{ - head_counter = src.head_counter; - tail_counter = src.tail_counter; - - const page* srcp = src.head_page; - if( is_valid_page(srcp) ) { - ticket g_index = head_counter; - __TBB_TRY { - size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue; - size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); - size_t end_in_first_page = (index+n_itemsitems_per_page)?(index+n_items):base.my_rep->items_per_page; - - head_page = make_copy( base, srcp, index, end_in_first_page, g_index, construct_item ); - page* cur_page = head_page; - - if( srcp != src.tail_page ) { - for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { - cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index, construct_item ); - cur_page = cur_page->next; - } - - __TBB_ASSERT( srcp==src.tail_page, NULL ); - size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); - if( last_index==0 ) last_index = base.my_rep->items_per_page; - - cur_page->next = make_copy( base, srcp, 0, last_index, g_index, construct_item ); - cur_page = cur_page->next; - } - tail_page = cur_page; - } __TBB_CATCH (...) { - invalidate_page_and_rethrow( g_index ); - } - } else { - head_page = tail_page = NULL; - } - return *this; -} - -template -void micro_queue::invalidate_page_and_rethrow( ticket k ) { - // Append an invalid page at address 1 so that no more pushes are allowed. 
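In `push` and `pop` above, every operation claims a monotonically increasing ticket `k`. The ticket selects one of the `n_queue == 8` interleaved micro-queues through `index(k) = k*phi % n_queue` with `phi == 3` (declared earlier in this hunk), and `k / n_queue`, reduced modulo the page capacity, selects the slot within the current page. A small sketch of that arithmetic; the `items_per_page` value of 32 matches the deleted sizing rule for items of at most 8 bytes:

```cpp
// Ticket-to-slot arithmetic of the deleted concurrent_queue internals.
#include <cstddef>
#include <cstdio>

int main() {
    const std::size_t n_queue        = 8;   // number of micro-queues (power of 2)
    const std::size_t phi            = 3;   // ~ n_queue / golden ratio
    const std::size_t items_per_page = 32;  // page capacity for <= 8-byte items

    for (std::size_t k = 0; k < 12; ++k) {
        std::size_t queue = (k * phi) % n_queue;             // which micro-queue
        std::size_t slot  = (k / n_queue) % items_per_page;  // slot in its page
        std::printf("ticket %2zu -> micro-queue %zu, slot %zu\n", k, queue, slot);
    }
    return 0;
}
```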
- page* invalid_page = (page*)uintptr_t(1); - { - spin_mutex::scoped_lock lock( page_mutex ); - itt_store_word_with_release(tail_counter, k+concurrent_queue_rep_base::n_queue+1); - page* q = tail_page; - if( is_valid_page(q) ) - q->next = invalid_page; - else - head_page = invalid_page; - tail_page = invalid_page; - } - __TBB_RETHROW(); -} - -template -concurrent_queue_rep_base::page* micro_queue::make_copy( concurrent_queue_base_v3& base, - const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, - ticket& g_index, item_constructor_t construct_item ) -{ - concurrent_queue_page_allocator& pa = base; - page* new_page = pa.allocate_page(); - new_page->next = NULL; - new_page->mask = src_page->mask; - for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) - if( new_page->mask & uintptr_t(1)< -class micro_queue_pop_finalizer: no_copy { - typedef concurrent_queue_rep_base::page page; - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - concurrent_queue_page_allocator& allocator; -public: - micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base_v3& b, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p), allocator(b) - {} - ~micro_queue_pop_finalizer() ; -}; - -template -micro_queue_pop_finalizer::~micro_queue_pop_finalizer() { - page* p = my_page; - if( is_valid_page(p) ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !is_valid_page(q) ) { - my_queue.tail_page = NULL; - } - } - itt_store_word_with_release(my_queue.head_counter, my_ticket); - if( is_valid_page(p) ) { - allocator.deallocate_page( p ); - } -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif // warning 4146 is back - -template class concurrent_queue_iterator_rep ; -template class concurrent_queue_iterator_base_v3; - -//! representation of concurrent_queue_base -/** - * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue's - */ -template -struct concurrent_queue_rep : public concurrent_queue_rep_base { - micro_queue array[n_queue]; - - //! Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } -}; - -//! base class of concurrent_queue -/** - * The class implements the interface defined by concurrent_queue_page_allocator - * and has a pointer to an instance of concurrent_queue_rep. - */ -template -class concurrent_queue_base_v3: public concurrent_queue_page_allocator { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend struct concurrent_queue_rep; - friend class micro_queue; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; - -protected: - typedef typename concurrent_queue_rep::page page; - -private: - typedef typename micro_queue::padded_page padded_page; - typedef typename micro_queue::item_constructor_t item_constructor_t; - - /* override */ virtual page *allocate_page() { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - return reinterpret_cast(allocate_block ( n )); - } - - /* override */ virtual void deallocate_page( concurrent_queue_rep_base::page *p ) { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - deallocate_block( reinterpret_cast(p), n ); - } - - //! 
custom allocator - virtual void *allocate_block( size_t n ) = 0; - - //! custom de-allocator - virtual void deallocate_block( void *p, size_t n ) = 0; - -protected: - concurrent_queue_base_v3(); - - /* override */ virtual ~concurrent_queue_base_v3() { -#if TBB_USE_ASSERT - size_t nq = my_rep->n_queue; - for( size_t i=0; iarray[i].tail_page==NULL, "pages were not freed properly" ); -#endif /* TBB_USE_ASSERT */ - cache_aligned_allocator >().deallocate(my_rep,1); - } - - //! Enqueue item at tail of queue - void internal_push( const void* src, item_constructor_t construct_item ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter++; - r.choose(k).push( src, k, *this, construct_item ); - } - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool internal_try_pop( void* dst ) ; - - //! Get size of queue; result may be invalid if queue is modified concurrently - size_t internal_size() const ; - - //! check if the queue is empty; thread safe - bool internal_empty() const ; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void internal_finish_clear() ; - - //! Obsolete - void internal_throw_exception() const { - throw_exception( eid_bad_alloc ); - } - - //! copy or move internal representation - void assign( const concurrent_queue_base_v3& src, item_constructor_t construct_item ) ; - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! swap internal representation - void internal_swap( concurrent_queue_base_v3& src ) { - std::swap( my_rep, src.my_rep ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -}; - -template -concurrent_queue_base_v3::concurrent_queue_base_v3() { - const size_t item_size = sizeof(T); - my_rep = cache_aligned_allocator >().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - my_rep->item_size = item_size; - my_rep->items_per_page = item_size<= 8 ? 32 : - item_size<= 16 ? 16 : - item_size<= 32 ? 8 : - item_size<= 64 ? 4 : - item_size<=128 ? 2 : - 1; -} - -template -bool concurrent_queue_base_v3::internal_try_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - do { - k = r.head_counter; - for(;;) { - if( (ptrdiff_t)(r.tail_counter-k)<=0 ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. - ticket tk=k; -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (push) - #pragma warning (disable: 4267) -#endif - k = r.head_counter.compare_and_swap( tk+1, tk ); -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (pop) -#endif - if( k==tk ) - break; - // Another thread snatched the item, retry. - } - } while( !r.choose( k ).pop( dst, k, *this ) ); - return true; -} - -template -size_t concurrent_queue_base_v3::internal_size() const { - concurrent_queue_rep& r = *my_rep; - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - ticket hc = r.head_counter; - size_t nie = r.n_invalid_entries; - ticket tc = r.tail_counter; - __TBB_ASSERT( hc!=tc || !nie, NULL ); - ptrdiff_t sz = tc-hc-nie; - return sz<0 ? 
0 : size_t(sz); -} - -template -bool concurrent_queue_base_v3::internal_empty() const { - concurrent_queue_rep& r = *my_rep; - ticket tc = r.tail_counter; - ticket hc = r.head_counter; - // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. - return tc==r.tail_counter && tc==hc+r.n_invalid_entries ; -} - -template -void concurrent_queue_base_v3::internal_finish_clear() { - concurrent_queue_rep& r = *my_rep; - size_t nq = r.n_queue; - for( size_t i=0; i -void concurrent_queue_base_v3::assign( const concurrent_queue_base_v3& src, - item_constructor_t construct_item ) -{ - concurrent_queue_rep& r = *my_rep; - r.items_per_page = src.my_rep->items_per_page; - - // copy concurrent_queue_rep data - r.head_counter = src.my_rep->head_counter; - r.tail_counter = src.my_rep->tail_counter; - r.n_invalid_entries = src.my_rep->n_invalid_entries; - - // copy or move micro_queues - for( size_t i = 0; i < r.n_queue; ++i ) - r.array[i].assign( src.my_rep->array[i], *this, construct_item); - - __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter, - "the source concurrent queue should not be concurrently modified." ); -} - -template class concurrent_queue_iterator; - -template -class concurrent_queue_iterator_rep: no_assign { - typedef typename micro_queue::padded_page padded_page; -public: - ticket head_counter; - const concurrent_queue_base_v3& my_queue; - typename concurrent_queue_base_v3::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base_v3& queue ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue) - { - for( size_t k=0; k::n_queue; ++k ) - array[k] = queue.my_rep->array[k].head_page; - } - - //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. - bool get_item( T*& item, size_t k ) ; -}; - -template -bool concurrent_queue_iterator_rep::get_item( T*& item, size_t k ) { - if( k==my_queue.my_rep->tail_counter ) { - item = NULL; - return true; - } else { - typename concurrent_queue_base_v3::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, my_queue.my_rep->items_per_page ); - item = µ_queue::get_ref(*p,i); - return (p->mask & uintptr_t(1)< -class concurrent_queue_iterator_base_v3 : no_assign { - //! Represents concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); -protected: - //! Pointer to current item - Value* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) { -#if __TBB_GCC_OPTIMIZER_ORDERING_BROKEN - __TBB_compiler_fence(); -#endif - } - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) - : no_assign(), my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) ; - - //! Assignment - void assign( const concurrent_queue_iterator_base_v3& other ) ; - - //! Advance iterator one step towards tail of queue. - void advance() ; - - //! 
Destructor - ~concurrent_queue_iterator_base_v3() { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } -}; - -template -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep(queue); - size_t k = my_rep->head_counter; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -template -void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base_v3& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -template -void concurrent_queue_iterator_base_v3::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base_v3& queue = my_rep->my_queue; -#if TBB_USE_ASSERT - Value* tmp; - my_rep->get_item(tmp,k); - __TBB_ASSERT( my_item==tmp, NULL ); -#endif /* TBB_USE_ASSERT */ - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, queue.my_rep->items_per_page ); - if( i==queue.my_rep->items_per_page-1 ) { - typename concurrent_queue_base_v3::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - // advance k - my_rep->head_counter = ++k; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -//! Similar to C++0x std::remove_cv -/** "tbb_" prefix added to avoid overload confusion with C++0x implementations. */ -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3::type>, - public std::iterator { -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - template - friend class ::tbb::strict_ppl::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3::type>(queue) - { - } - -public: - concurrent_queue_iterator() {} - - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3::type>(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - this->assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(this->my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - this->advance(); - return *this; - } - - //! 
Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal - -//! @endcond - -} // namespace strict_ppl - -//! @cond INTERNAL -namespace internal { - -class concurrent_queue_rep; -class concurrent_queue_iterator_rep; -class concurrent_queue_iterator_base_v3; -template class concurrent_queue_iterator; - -//! For internal use only. -/** Type-independent portion of concurrent_queue. - @ingroup containers */ -class concurrent_queue_base_v3: no_copy { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend class concurrent_queue_rep; - friend struct micro_queue; - friend class micro_queue_pop_finalizer; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; -protected: - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - //! Capacity of the queue - ptrdiff_t my_capacity; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - - enum copy_specifics { copy, move }; - -#if __TBB_PROTECTED_NESTED_CLASS_BROKEN -public: -#endif - template - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - -private: - virtual void copy_item( page& dst, size_t index, const void* src ) = 0; - virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; -protected: - __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size ); - virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3(); - - //! Enqueue item at tail of queue using copy operation - void __TBB_EXPORTED_METHOD internal_push( const void* src ); - - //! Dequeue item from head of queue - void __TBB_EXPORTED_METHOD internal_pop( void* dst ); - - //! Abort all pending queue operations - void __TBB_EXPORTED_METHOD internal_abort(); - - //! Attempt to enqueue item onto queue using copy operation - bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); - - //! Get size of queue - ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - - //! Check if the queue is emtpy - bool __TBB_EXPORTED_METHOD internal_empty() const; - - //! Set the queue capacity - void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); - - //! custom allocator - virtual page *allocate_page() = 0; - - //! custom de-allocator - virtual void deallocate_page( page *p ) = 0; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void __TBB_EXPORTED_METHOD internal_finish_clear() ; - - //! throw an exception - void __TBB_EXPORTED_METHOD internal_throw_exception() const; - - //! copy internal representation - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ; - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //! 
swap queues - void internal_swap( concurrent_queue_base_v3& src ) { - std::swap( my_capacity, src.my_capacity ); - std::swap( items_per_page, src.items_per_page ); - std::swap( item_size, src.item_size ); - std::swap( my_rep, src.my_rep ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - //! Enqueues item at tail of queue using specified operation (copy or move) - void internal_insert_item( const void* src, copy_specifics op_type ); - - //! Attempts to enqueue at tail of queue using specified operation (copy or move) - bool internal_insert_if_not_full( const void* src, copy_specifics op_type ); - - //! Assigns one queue to another using specified operation (copy or move) - void internal_assign( const concurrent_queue_base_v3& src, copy_specifics op_type ); -private: - virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; -}; - -//! For internal use only. -/** Backward compatible modification of concurrent_queue_base_v3 - @ingroup containers */ -class concurrent_queue_base_v8: public concurrent_queue_base_v3 { -protected: - concurrent_queue_base_v8( size_t item_sz ) : concurrent_queue_base_v3( item_sz ) {} - - //! move items - void __TBB_EXPORTED_METHOD move_content( concurrent_queue_base_v8& src ) ; - - //! Attempt to enqueue item onto queue using move operation - bool __TBB_EXPORTED_METHOD internal_push_move_if_not_full( const void* src ); - - //! Enqueue item at tail of queue using move operation - void __TBB_EXPORTED_METHOD internal_push_move( const void* src ); -private: - friend struct micro_queue; - virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; - virtual void move_item( page& dst, size_t index, const void* src ) = 0; -}; - -//! Type-independent portion of concurrent_queue_iterator. -/** @ingroup containers */ -class concurrent_queue_iterator_base_v3 { - //! concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data ); -protected: - //! Pointer to current item - void* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {} - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Obsolete entry point for constructing iterator pointing to head of queue. - /** Does not work correctly for SSE types. */ - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ); - - //! Construct iterator pointing to head of queue. - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data ); - - //! Assignment - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i ); - - //! Advance iterator one step towards tail of queue. - void __TBB_EXPORTED_METHOD advance(); - - //! Destructor - __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3(); -}; - -typedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. 
- @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base, - public std::iterator { - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class ::tbb::concurrent_bounded_queue; -#else -public: // workaround for MSVC -#endif - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page,last)) - { - } - -public: - concurrent_queue_iterator() {} - - /** If Value==Container::value_type, then this routine is the copy constructor. - If Value==const Container::value_type, then this routine is a conversion constructor. */ - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal; - -//! @endcond - -} // namespace tbb - -#endif /* __TBB__concurrent_queue_impl_H */ diff --git a/src/tbb/include/tbb/internal/_concurrent_unordered_impl.h b/src/tbb/include/tbb/internal/_concurrent_unordered_impl.h deleted file mode 100644 index 77a86394f..000000000 --- a/src/tbb/include/tbb/internal/_concurrent_unordered_impl.h +++ /dev/null @@ -1,1565 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. 
*/ - -#ifndef __TBB__concurrent_unordered_impl_H -#define __TBB__concurrent_unordered_impl_H -#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "../tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include // Need std::equal_to (in ../concurrent_unordered_*.h) -#include // For tbb_hasher -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "../atomic.h" -#include "../tbb_exception.h" -#include "../tbb_allocator.h" - -#if __TBB_INITIALIZER_LISTS_PRESENT - #include -#endif - -namespace tbb { -namespace interface5 { -//! @cond INTERNAL -namespace internal { - -template -class split_ordered_list; -template -class concurrent_unordered_base; - -// Forward list iterators (without skipping dummy elements) -template -class flist_iterator : public std::iterator -{ - template - friend class split_ordered_list; - template - friend class concurrent_unordered_base; - template - friend class flist_iterator; - - typedef typename Solist::nodeptr_t nodeptr_t; -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - flist_iterator() : my_node_ptr(0) {} - flist_iterator( const flist_iterator &other ) - : my_node_ptr(other.my_node_ptr) {} - - reference operator*() const { return my_node_ptr->my_element; } - pointer operator->() const { return &**this; } - - flist_iterator& operator++() { - my_node_ptr = my_node_ptr->my_next; - return *this; - } - - flist_iterator operator++(int) { - flist_iterator tmp = *this; - ++*this; - return tmp; - } - -protected: - flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {} - nodeptr_t get_node_ptr() const { return my_node_ptr; } - - nodeptr_t my_node_ptr; - - template - friend bool operator==( const flist_iterator &i, const flist_iterator &j ); - template - friend bool operator!=( const flist_iterator& i, const flist_iterator& j ); -}; - -template -bool operator==( const flist_iterator &i, const flist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr; -} -template -bool operator!=( const flist_iterator& i, const flist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr; -} - -// Split-order list iterators, needed to skip dummy elements -template -class solist_iterator : public flist_iterator -{ - typedef flist_iterator base_type; - typedef typename Solist::nodeptr_t nodeptr_t; - using base_type::get_node_ptr; - template - friend class split_ordered_list; - template - friend class solist_iterator; - template - friend bool operator==( const solist_iterator &i, const solist_iterator &j ); - template - friend bool operator!=( const solist_iterator& i, const solist_iterator& j ); - - const Solist *my_list_ptr; - solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {} - -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - solist_iterator() {} - 
solist_iterator(const solist_iterator &other ) - : base_type(other), my_list_ptr(other.my_list_ptr) {} - - reference operator*() const { - return this->base_type::operator*(); - } - - pointer operator->() const { - return (&**this); - } - - solist_iterator& operator++() { - do ++(*(base_type *)this); - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (*this); - } - - solist_iterator operator++(int) { - solist_iterator tmp = *this; - do ++*this; - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (tmp); - } -}; - -template -bool operator==( const solist_iterator &i, const solist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr; -} -template -bool operator!=( const solist_iterator& i, const solist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr; -} - -// Forward type and class definitions -typedef size_t sokey_t; - - -// Forward list in which elements are sorted in a split-order -template -class split_ordered_list -{ -public: - typedef split_ordered_list self_type; - typedef typename Allocator::template rebind::other allocator_type; - struct node; - typedef node *nodeptr_t; - - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::value_type value_type; - - typedef solist_iterator const_iterator; - typedef solist_iterator iterator; - typedef flist_iterator raw_const_iterator; - typedef flist_iterator raw_iterator; - - // Node that holds the element in a split-ordered list - struct node : tbb::internal::no_assign - { - private: - // for compilers that try to generate default constructors though they are not needed. - node(); // VS 2008, 2010, 2012 - public: - // Initialize the node with the given order key - void init(sokey_t order_key) { - my_order_key = order_key; - my_next = NULL; - } - - // Return the order key (needed for hashing) - sokey_t get_order_key() const { // TODO: remove - return my_order_key; - } - - // Inserts the new element in the list in an atomic fashion - nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node) - { - // Try to change the next pointer on the current element to a new element, only if it still points to the cached next - nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node); - - if (exchange_node == current_node) // TODO: why this branch? - { - // Operation succeeded, return the new node - return new_node; - } - else - { - // Operation failed, return the "interfering" node - return exchange_node; - } - } - - // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets - // in the hash table to quickly index into the right subsection of the split-ordered list. 
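The `is_dummy()` test just below this point treats any node whose order key has a clear low bit as a dummy. That reflects the split-ordered-list scheme (Shalev and Shavit) this container follows: order keys are bit-reversed hashes, bucket dummies keep the low bit at 0, and real elements set it to 1, so every bucket's dummy sorts immediately before its elements. A sketch of that key construction; the actual key-building helpers live in parts of the original sources not shown here, so the names below are illustrative:

```cpp
// Split-order key construction: bit-reverse the hash, then use the low bit
// to distinguish bucket dummies (0) from real elements (1).
#include <cstdint>
#include <cstdio>

using sokey_t = std::uint64_t;

sokey_t reverse_bits(sokey_t x) {
    sokey_t r = 0;
    for (int i = 0; i < 64; ++i) {
        r = (r << 1) | (x & 1);
        x >>= 1;
    }
    return r;
}

sokey_t dummy_key(sokey_t hash)   { return reverse_bits(hash) & ~sokey_t(1); }
sokey_t element_key(sokey_t hash) { return reverse_bits(hash) |  sokey_t(1); }

int main() {
    sokey_t h = 0x2A;
    std::printf("dummy:   %016llx\n", (unsigned long long)dummy_key(h));
    std::printf("element: %016llx\n", (unsigned long long)element_key(h));
    return 0;
}
```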
- bool is_dummy() const { - return (my_order_key & 0x1) == 0; - } - - - nodeptr_t my_next; // Next element in the list - value_type my_element; // Element storage - sokey_t my_order_key; // Order key for this element - }; - - // Allocate a new node with the given order key and value - nodeptr_t create_node(sokey_t order_key, const T &value) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(value); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - //TODO: try to combine both implementations using poor man forward - //TODO: use RAII scoped guard instead of explicit catch - // Allocate a new node with the given order key and value - nodeptr_t create_node(sokey_t order_key, T &&value) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(std::move(value)); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - // Allocate a new node with the given order key; used to allocate dummy nodes - nodeptr_t create_node(sokey_t order_key) { - nodeptr_t pnode = my_node_allocator.allocate(1); - pnode->init(order_key); - return (pnode); - } - - split_ordered_list(allocator_type a = allocator_type()) - : my_node_allocator(a), my_element_count(0) - { - // Immediately allocate a dummy node with order key of 0. This node - // will always be the head of the list. - my_head = create_node(0); - } - - ~split_ordered_list() - { - // Clear the list - clear(); - - // Remove the head element which is not cleared by clear() - nodeptr_t pnode = my_head; - my_head = NULL; - - __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node"); - - destroy_node(pnode); - } - - // Common forward list functions - - allocator_type get_allocator() const { - return (my_node_allocator); - } - - void clear() { - nodeptr_t pnext; - nodeptr_t pnode = my_head; - - __TBB_ASSERT(my_head != NULL, "Invalid head list node"); - pnext = pnode->my_next; - pnode->my_next = NULL; - pnode = pnext; - - while (pnode != NULL) - { - pnext = pnode->my_next; - destroy_node(pnode); - pnode = pnext; - } - - my_element_count = 0; - } - - // Returns a first non-dummy element in the SOL - iterator begin() { - return first_real_iterator(raw_begin()); - } - - // Returns a first non-dummy element in the SOL - const_iterator begin() const { - return first_real_iterator(raw_begin()); - } - - iterator end() { - return (iterator(0, this)); - } - - const_iterator end() const { - return (const_iterator(0, this)); - } - - const_iterator cbegin() const { - return (((const self_type *)this)->begin()); - } - - const_iterator cend() const { - return (((const self_type *)this)->end()); - } - - // Checks if the number of elements (non-dummy) is 0 - bool empty() const { - return (my_element_count == 0); - } - - // Returns the number of non-dummy elements in the list - size_type size() const { - return my_element_count; - } - - // Returns the maximum size of the list, determined by the allocator - size_type max_size() const { - return my_node_allocator.max_size(); - } - - // Swaps 'this' list with the passed in one - void swap(self_type& other) - { - if (this == &other) - { - // Nothing to do - return; - } - - std::swap(my_element_count, other.my_element_count); - std::swap(my_head, 
other.my_head); - } - - // Split-order list functions - - // Returns a first element in the SOL, which is always a dummy - raw_iterator raw_begin() { - return raw_iterator(my_head); - } - - // Returns a first element in the SOL, which is always a dummy - raw_const_iterator raw_begin() const { - return raw_const_iterator(my_head); - } - - raw_iterator raw_end() { - return raw_iterator(0); - } - - raw_const_iterator raw_end() const { - return raw_const_iterator(0); - } - - static sokey_t get_order_key(const raw_const_iterator& it) { - return it.get_node_ptr()->get_order_key(); - } - - static sokey_t get_safe_order_key(const raw_const_iterator& it) { - if( !it.get_node_ptr() ) return ~sokey_t(0); - return it.get_node_ptr()->get_order_key(); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. - iterator get_iterator(raw_iterator it) { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. - const_iterator get_iterator(raw_const_iterator it) const { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return const_iterator(it.get_node_ptr(), this); - } - - // Returns a non-const version of the raw_iterator - raw_iterator get_iterator(raw_const_iterator it) { - return raw_iterator(it.get_node_ptr()); - } - - // Returns a non-const version of the iterator - static iterator get_iterator(const_iterator it) { - return iterator(it.my_node_ptr, it.my_list_ptr); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - iterator first_real_iterator(raw_iterator it) - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - const_iterator first_real_iterator(raw_const_iterator it) const - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return const_iterator(it.get_node_ptr(), this); - } - - // Erase an element using the allocator - void destroy_node(nodeptr_t pnode) { - if (!pnode->is_dummy()) my_node_allocator.destroy(pnode); - my_node_allocator.deallocate(pnode, 1); - } - - // Try to insert a new element in the list. If insert fails, return the node that - // was inserted instead. 
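The `try_insert` overloads that follow link a node into the forward list with a single compare-and-swap on the predecessor's `my_next`; on failure the interfering node is returned so the caller can rescan from a known position instead of restarting. A stand-alone sketch of that link-in step using `std::atomic` (`try_link` is a hypothetical name):

```cpp
// One-CAS insertion into a singly linked list, as in try_insert below.
#include <atomic>

struct node {
    std::atomic<node*> next{nullptr};
    int value = 0;
};

// Returns 'new_node' on success; otherwise returns the node actually linked
// after 'prev' (the "interfering" node) so the caller can retry.
node* try_link(node* prev, node* new_node, node* expected_next) {
    new_node->next.store(expected_next, std::memory_order_relaxed);
    node* observed = expected_next;
    if (prev->next.compare_exchange_strong(observed, new_node,
                                           std::memory_order_release,
                                           std::memory_order_acquire)) {
        return new_node;
    }
    return observed;
}
```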
- nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) { - new_node->my_next = current_node; - return previous->atomic_set_next(new_node, current_node); - } - - // Insert a new element between passed in iterators - std::pair try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count) - { - nodeptr_t pnode = create_node(order_key, value); - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr()); - - if (inserted_node == pnode) - { - // If the insert succeeded, check that the order is correct and increment the element count - check_range(); - *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1)); - return std::pair(iterator(pnode, this), true); - } - else - { - // If the insert failed (element already there), then delete the new one - destroy_node(pnode); - return std::pair(end(), false); - } - } - - // Insert a new dummy element, starting search at a parent dummy element - raw_iterator insert_dummy(raw_iterator it, sokey_t order_key) - { - raw_iterator last = raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - ++where; - - // Create a dummy element up front, even though it may be discarded (due to concurrent insertion) - nodeptr_t dummy_node = create_node(order_key); - - for (;;) - { - __TBB_ASSERT(it != last, "Invalid head list node"); - - // If the head iterator is at the end of the list, or past the point where this dummy - // node needs to be inserted, then try to insert it. - if (where == last || get_order_key(where) > order_key) - { - __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list"); - - // Try to insert it in the right place - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr()); - - if (inserted_node == dummy_node) - { - // Insertion succeeded, check the list for order violations - check_range(); - return raw_iterator(dummy_node); - } - else - { - // Insertion failed: either dummy node was inserted by another thread, or - // a real element was inserted at exactly the same place as dummy node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). - where = it; - ++where; - continue; - } - } - else if (get_order_key(where) == order_key) - { - // Another dummy node with the same value found, discard the new one. 
-                destroy_node(dummy_node);
-                return where;
-            }
-
-            // Move the iterator forward
-            it = where;
-            ++where;
-        }
-
-    }
-
-    // This erase function can handle both real and dummy nodes
-    void erase_node(raw_iterator previous, raw_const_iterator& where)
-    {
-        nodeptr_t pnode = (where++).get_node_ptr();
-        nodeptr_t prevnode = previous.get_node_ptr();
-        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
-        prevnode->my_next = pnode->my_next;
-
-        destroy_node(pnode);
-    }
-
-    // Erase the element (previous node needs to be passed because this is a forward only list)
-    iterator erase_node(raw_iterator previous, const_iterator where)
-    {
-        raw_const_iterator it = where;
-        erase_node(previous, it);
-        my_element_count--;
-
-        return get_iterator(first_real_iterator(it));
-    }
-
-    // Move all elements from the passed in split-ordered list to this one
-    void move_all(self_type& source)
-    {
-        raw_const_iterator first = source.raw_begin();
-        raw_const_iterator last = source.raw_end();
-
-        if (first == last)
-            return;
-
-        nodeptr_t previous_node = my_head;
-        raw_const_iterator begin_iterator = first++;
-
-        // Move all elements one by one, including dummy ones
-        for (raw_const_iterator it = first; it != last;)
-        {
-            nodeptr_t pnode = it.get_node_ptr();
-
-            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
-            previous_node = try_insert(previous_node, dummy_node, NULL);
-            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
-            raw_const_iterator where = it++;
-            source.erase_node(get_iterator(begin_iterator), where);
-        }
-        check_range();
-    }
-
-
-private:
-    //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
-    template<typename Traits>
-    friend class concurrent_unordered_base;
-
-    // Check the list for order violations
-    void check_range()
-    {
-#if TBB_USE_ASSERT
-        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
-        {
-            raw_iterator next_iterator = it;
-            ++next_iterator;
-
-            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
-        }
-#endif
-    }
-
-    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes
-    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
-    nodeptr_t                                             my_head;            // pointer to head node
-};
-
-// Template class for hash compare
-template<typename Key, typename Hasher, typename Key_equality>
-class hash_compare
-{
-public:
-    typedef Hasher hasher;
-    typedef Key_equality key_equal;
-
-    hash_compare() {}
-
-    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
-
-    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
-
-    size_t operator()(const Key& key) const {
-        return ((size_t)my_hash_object(key));
-    }
-
-    bool operator()(const Key& key1, const Key& key2) const {
-        return (!my_key_compare_object(key1, key2));
-    }
-
-    Hasher       my_hash_object;         // The hash object
-    Key_equality my_key_compare_object;  // The equality comparator object
-};
-
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-#pragma warning(push)
-#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
-#endif
-
-template <typename Traits>
-class concurrent_unordered_base : public Traits
-{
-protected:
-    // Type definitions
-    typedef concurrent_unordered_base<Traits> self_type;
-    typedef typename Traits::value_type value_type;
-    typedef typename Traits::key_type key_type;
-    typedef typename Traits::hash_compare hash_compare;
-    typedef typename Traits::value_compare value_compare;
-    typedef typename Traits::allocator_type allocator_type;
-    typedef typename hash_compare::hasher hasher;
-    typedef typename hash_compare::key_equal key_equal;
-    typedef typename allocator_type::pointer pointer;
-    typedef typename allocator_type::const_pointer const_pointer;
-    typedef typename allocator_type::reference reference;
-    typedef typename allocator_type::const_reference const_reference;
-    typedef typename allocator_type::size_type size_type;
-    typedef typename allocator_type::difference_type difference_type;
-    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
-    typedef typename solist_t::nodeptr_t nodeptr_t;
-    // Iterators that walk the entire split-order list, including dummy nodes
-    typedef typename solist_t::raw_iterator raw_iterator;
-    typedef typename solist_t::raw_const_iterator raw_const_iterator;
-    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
-    typedef typename solist_t::const_iterator const_iterator;
-    typedef iterator local_iterator;
-    typedef const_iterator const_local_iterator;
-    using Traits::my_hash_compare;
-    using Traits::get_key;
-    using Traits::allow_multimapping;
-
-    static const size_type initial_bucket_number = 8;               // Initial number of buckets
-private:
-    typedef std::pair<iterator, iterator> pairii_t;
-    typedef std::pair<const_iterator, const_iterator> paircc_t;
-
-    static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
-    static const size_type initial_bucket_load = 4;                 // Initial maximum number of elements per bucket
-
-    struct call_internal_clear_on_exit{
-        concurrent_unordered_base* my_instance;
-        call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
-        void dismiss(){ my_instance = NULL;}
-        ~call_internal_clear_on_exit(){
-            if (my_instance){
-                my_instance->internal_clear();
-            }
-        }
-    };
-protected:
-    // Constructors/Destructors
-    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
-        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
-        : Traits(hc), my_solist(a),
-          my_allocator(a),
my_maximum_bucket_size((float) initial_bucket_load) - { - if( n_of_buckets == 0) ++n_of_buckets; - my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2 - internal_init(); - } - - concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a) - : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) - { - internal_init(); - internal_copy(right); - } - - concurrent_unordered_base(const concurrent_unordered_base& right) - : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) - { - //FIXME:exception safety seems to be broken here - internal_init(); - internal_copy(right); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_base(concurrent_unordered_base&& right) - : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) - { - internal_init(); - swap(right); - } - - concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a) - : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) - { - call_internal_clear_on_exit clear_buckets_on_exception(this); - - internal_init(); - if (a == right.get_allocator()){ - this->swap(right); - }else{ - my_maximum_bucket_size = right.my_maximum_bucket_size; - my_number_of_buckets = right.my_number_of_buckets; - my_solist.my_element_count = right.my_solist.my_element_count; - - if (! right.my_solist.empty()){ - nodeptr_t previous_node = my_solist.my_head; - - // Move all elements one by one, including dummy ones - for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it) - { - const nodeptr_t pnode = it.get_node_ptr(); - nodeptr_t node; - if (pnode->is_dummy()) { - node = my_solist.create_node(pnode->get_order_key()); - size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets; - set_bucket(bucket, node); - }else{ - node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element)); - } - - previous_node = my_solist.try_insert(previous_node, node, NULL); - __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?"); - } - my_solist.check_range(); - } - } - - clear_buckets_on_exception.dismiss(); - } - -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - concurrent_unordered_base& operator=(const concurrent_unordered_base& right) { - if (this != &right) - internal_copy(right); - return (*this); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - concurrent_unordered_base& operator=(concurrent_unordered_base&& other) - { - if(this != &other){ - typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; - if(pocma_t::value || this->my_allocator == other.my_allocator) { - concurrent_unordered_base trash (std::move(*this)); - swap(other); - if (pocma_t::value) { - using std::swap; - //TODO: swapping allocators here may be a problem, replace with single direction moving - swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator); - swap(this->my_allocator, other.my_allocator); - } - } else { - concurrent_unordered_base moved_copy(std::move(other),this->my_allocator); - this->swap(moved_copy); - } - } - return *this; - } - -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! 
assignment operator from initializer_list - concurrent_unordered_base& operator=(std::initializer_list il) - { - this->clear(); - this->insert(il.begin(),il.end()); - return (*this); - } -#endif //# __TBB_INITIALIZER_LISTS_PRESENT - - - ~concurrent_unordered_base() { - // Delete all node segments - internal_clear(); - } - -public: - allocator_type get_allocator() const { - return my_solist.get_allocator(); - } - - // Size and capacity function - bool empty() const { - return my_solist.empty(); - } - - size_type size() const { - return my_solist.size(); - } - - size_type max_size() const { - return my_solist.max_size(); - } - - // Iterators - iterator begin() { - return my_solist.begin(); - } - - const_iterator begin() const { - return my_solist.begin(); - } - - iterator end() { - return my_solist.end(); - } - - const_iterator end() const { - return my_solist.end(); - } - - const_iterator cbegin() const { - return my_solist.cbegin(); - } - - const_iterator cend() const { - return my_solist.cend(); - } - - // Parallel traversal support - class const_range_type : tbb::internal::no_assign { - const concurrent_unordered_base &my_table; - raw_const_iterator my_begin_node; - raw_const_iterator my_end_node; - mutable raw_const_iterator my_midpoint_node; - public: - //! Type for size of a range - typedef typename concurrent_unordered_base::size_type size_type; - typedef typename concurrent_unordered_base::value_type value_type; - typedef typename concurrent_unordered_base::reference reference; - typedef typename concurrent_unordered_base::difference_type difference_type; - typedef typename concurrent_unordered_base::const_iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin_node == my_end_node;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint_node != my_end_node; - } - //! Split range. - const_range_type( const_range_type &r, split ) : - my_table(r.my_table), my_end_node(r.my_end_node) - { - r.my_end_node = my_begin_node = r.my_midpoint_node; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! Init range with container and grainsize specified - const_range_type( const concurrent_unordered_base &a_table ) : - my_table(a_table), my_begin_node(a_table.my_solist.begin()), - my_end_node(a_table.my_solist.end()) - { - set_midpoint(); - } - iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); } - iterator end() const { return my_table.my_solist.get_iterator(my_end_node); } - //! The grain size for this range. - size_type grainsize() const { return 1; } - - //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node. 
- void set_midpoint() const { - if( my_begin_node == my_end_node ) // not divisible - my_midpoint_node = my_end_node; - else { - sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node); - sokey_t end_key = solist_t::get_safe_order_key(my_end_node); - size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets; - while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket); - if(__TBB_ReverseBits(mid_bucket) > begin_key) { - // found a dummy_node between begin and end - my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket )); - } - else { - // didn't find a dummy node between begin and end. - my_midpoint_node = my_end_node; - } -#if TBB_USE_ASSERT - { - sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node); - __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" ); - __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" ); - } -#endif // TBB_USE_ASSERT - } - } - }; - - class range_type : public const_range_type { - public: - typedef typename concurrent_unordered_base::iterator iterator; - //! Split range. - range_type( range_type &r, split ) : const_range_type( r, split() ) {} - //! Init range with container and grainsize specified - range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {} - - iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); } - iterator end() const { return solist_t::get_iterator( const_range_type::end() ); } - }; - - range_type range() { - return range_type( *this ); - } - - const_range_type range() const { - return const_range_type( *this ); - } - - // Modifiers - std::pair insert(const value_type& value) { - return internal_insert(value); - } - - iterator insert(const_iterator, const value_type& value) { - // Ignore hint - return insert(value).first; - } - - template - void insert(Iterator first, Iterator last) { - for (Iterator it = first; it != last; ++it) - insert(*it); - } - -#if __TBB_INITIALIZER_LISTS_PRESENT - //! 
Insert initializer list - void insert(std::initializer_list il) { - insert(il.begin(), il.end()); - } -#endif - - iterator unsafe_erase(const_iterator where) { - return internal_erase(where); - } - - iterator unsafe_erase(const_iterator first, const_iterator last) { - while (first != last) - unsafe_erase(first++); - return my_solist.get_iterator(first); - } - - size_type unsafe_erase(const key_type& key) { - pairii_t where = equal_range(key); - size_type item_count = internal_distance(where.first, where.second); - unsafe_erase(where.first, where.second); - return item_count; - } - - void swap(concurrent_unordered_base& right) { - if (this != &right) { - std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here - my_solist.swap(right.my_solist); - internal_swap_buckets(right); - std::swap(my_number_of_buckets, right.my_number_of_buckets); - std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size); - } - } - - // Observers - hasher hash_function() const { - return my_hash_compare.my_hash_object; - } - - key_equal key_eq() const { - return my_hash_compare.my_key_compare_object; - } - - void clear() { - // Clear list - my_solist.clear(); - - // Clear buckets - internal_clear(); - - // Initialize bucket 0 - __TBB_ASSERT(my_buckets[0] == NULL, NULL); - raw_iterator dummy_node = my_solist.raw_begin(); - set_bucket(0, dummy_node); - } - - // Lookup - iterator find(const key_type& key) { - return internal_find(key); - } - - const_iterator find(const key_type& key) const { - return const_cast(this)->internal_find(key); - } - - size_type count(const key_type& key) const { - if(allow_multimapping) { - paircc_t answer = equal_range(key); - size_type item_count = internal_distance(answer.first, answer.second); - return item_count; - } else { - return const_cast(this)->internal_find(key) == end()?0:1; - } - } - - std::pair equal_range(const key_type& key) { - return internal_equal_range(key); - } - - std::pair equal_range(const key_type& key) const { - return const_cast(this)->internal_equal_range(key); - } - - // Bucket interface - for debugging - size_type unsafe_bucket_count() const { - return my_number_of_buckets; - } - - size_type unsafe_max_bucket_count() const { - return segment_size(pointers_per_table-1); - } - - size_type unsafe_bucket_size(size_type bucket) { - size_type item_count = 0; - if (is_initialized(bucket)) { - raw_iterator it = get_bucket(bucket); - ++it; - for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it) - ++item_count; - } - return item_count; - } - - size_type unsafe_bucket(const key_type& key) const { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - return bucket; - } - - // If the bucket is initialized, return a first non-dummy element in it - local_iterator unsafe_begin(size_type bucket) { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // If the bucket is initialized, return a first non-dummy element in it - const_local_iterator unsafe_begin(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - local_iterator unsafe_end(size_type bucket) - { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - - // Find the end of 
the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - const_local_iterator unsafe_end(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - - // Find the end of the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - const_local_iterator unsafe_cbegin(size_type bucket) const { - return ((const self_type *) this)->unsafe_begin(bucket); - } - - const_local_iterator unsafe_cend(size_type bucket) const { - return ((const self_type *) this)->unsafe_end(bucket); - } - - // Hash policy - float load_factor() const { - return (float) size() / (float) unsafe_bucket_count(); - } - - float max_load_factor() const { - return my_maximum_bucket_size; - } - - void max_load_factor(float newmax) { - if (newmax != newmax || newmax < 0) - tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor); - my_maximum_bucket_size = newmax; - } - - // This function is a noop, because the underlying split-ordered list - // is already sorted, so an increase in the bucket number will be - // reflected next time this bucket is touched. - void rehash(size_type buckets) { - size_type current_buckets = my_number_of_buckets; - if (current_buckets >= buckets) - return; - my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2 - } - -private: - - // Initialize the hash and keep the first bucket open - void internal_init() { - // Allocate an array of segment pointers - memset(my_buckets, 0, pointers_per_table * sizeof(void *)); - - // Initialize bucket 0 - raw_iterator dummy_node = my_solist.raw_begin(); - set_bucket(0, dummy_node); - } - - void internal_clear() { - for (size_type index = 0; index < pointers_per_table; ++index) { - if (my_buckets[index] != NULL) { - size_type sz = segment_size(index); - for (size_type index2 = 0; index2 < sz; ++index2) - my_allocator.destroy(&my_buckets[index][index2]); - my_allocator.deallocate(my_buckets[index], sz); - my_buckets[index] = 0; - } - } - } - - void internal_copy(const self_type& right) { - clear(); - - my_maximum_bucket_size = right.my_maximum_bucket_size; - my_number_of_buckets = right.my_number_of_buckets; - - __TBB_TRY { - insert(right.begin(), right.end()); - my_hash_compare = right.my_hash_compare; - } __TBB_CATCH(...) { - my_solist.clear(); - __TBB_RETHROW(); - } - } - - void internal_swap_buckets(concurrent_unordered_base& right) - { - // Swap all node segments - for (size_type index = 0; index < pointers_per_table; ++index) - { - raw_iterator * iterator_pointer = my_buckets[index]; - my_buckets[index] = right.my_buckets[index]; - right.my_buckets[index] = iterator_pointer; - } - } - - //TODO: why not use std::distance? 
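The deleted container above maps a hash to a bucket with plain modulo, but orders nodes in its single linked list by the bit-reversed hash ("split order" keys), so that every bucket's dummy head sorts immediately before the elements that hash into it. Below is a minimal, self-contained sketch of that key scheme, assuming 64-bit keys; `reverse_bits` and the two helper functions are illustrative stand-ins for the header's `__TBB_ReverseBits`, `split_order_key_regular`, and `split_order_key_dummy`, not TBB code.

```cpp
// Sketch of split-order keys: regular elements reverse the hash and set the
// low bit; dummy bucket heads reverse the bucket index and clear it.
#include <cstdint>
#include <cstdio>

using sokey_t = std::uint64_t;

// Naive bit reversal; the real header uses the __TBB_ReverseBits intrinsic.
sokey_t reverse_bits(sokey_t v) {
    sokey_t r = 0;
    for (int i = 0; i < 64; ++i, v >>= 1)
        r = (r << 1) | (v & 1);
    return r;
}

sokey_t split_order_key_regular(sokey_t hash)  { return reverse_bits(hash) | 0x1; }
sokey_t split_order_key_dummy(sokey_t bucket)  { return reverse_bits(bucket) & ~sokey_t(0x1); }

int main() {
    sokey_t hash   = 0x2Au;      // some element's hash value
    sokey_t bucket = hash % 8;   // 8 buckets, as in initial_bucket_number
    // The bucket's dummy key always precedes the element's regular key,
    // because both share their leading (reversed low-order) bits and the
    // dummy key is zero everywhere after them.
    std::printf("dummy=%llx regular=%llx\n",
                (unsigned long long)split_order_key_dummy(bucket),
                (unsigned long long)split_order_key_regular(hash));
}
```

Because a dummy key has its low bit clear and all trailing bits zero, it can never collide with a regular key, which is what lets `init_bucket()` splice a new dummy head into the sorted list without disturbing existing elements.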
- // Hash APIs - size_type internal_distance(const_iterator first, const_iterator last) const - { - size_type num = 0; - - for (const_iterator it = first; it != last; ++it) - ++num; - - return num; - } - - // Insert an element in the hash given its value - std::pair internal_insert(const value_type& value) - { - sokey_t order_key = (sokey_t) my_hash_compare(get_key(value)); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - size_type new_count = 0; - order_key = split_order_key_regular(order_key); - raw_iterator it = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) - { - if (where == last || solist_t::get_order_key(where) > order_key) - { - // Try to insert it in the right place - std::pair result = my_solist.try_insert(it, where, value, order_key, &new_count); - - if (result.second) - { - // Insertion succeeded, adjust the table size, if needed - adjust_table_size(new_count, my_number_of_buckets); - return result; - } - else - { - // Insertion failed: either the same node was inserted by another thread, or - // another element was inserted at exactly the same place as this node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). - where = it; - ++where; - continue; - } - } - else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0) - { - // Element already in the list, return it - return std::pair(my_solist.get_iterator(where), false); - } - - // Move the iterator forward - it = where; - ++where; - } - } - - // Find the element in the split-ordered list - iterator internal_find(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator last = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != last; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // If the order key is smaller than the current order key, the element - // is not in the hash. - return end(); - } - else if (solist_t::get_order_key(it) == order_key) - { - // The fact that order keys match does not mean that the element is found. - // Key function comparison has to be performed to check whether this is the - // right element. If not, keep searching while order key is the same. - if (!my_hash_compare(get_key(*it), key)) - return my_solist.get_iterator(it); - } - } - - return end(); - } - - // Erase an element from the list. This is not a concurrency safe function. 
- iterator internal_erase(const_iterator it) - { - key_type key = get_key(*it); - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - - raw_iterator previous = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = previous; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) { - if (where == last) - return end(); - else if (my_solist.get_iterator(where) == it) - return my_solist.erase_node(previous, it); - - // Move the iterator forward - previous = where; - ++where; - } - } - - // Return the [begin, end) pair of iterators with the same key values. - // This operation makes sense only if mapping is many-to-one. - pairii_t internal_equal_range(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator end_it = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != end_it; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // There is no element with the given key - return pairii_t(end(), end()); - } - else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key)) - { - iterator first = my_solist.get_iterator(it); - iterator last = first; - do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) ); - return pairii_t(first, last); - } - } - - return pairii_t(end(), end()); - } - - // Bucket APIs - void init_bucket(size_type bucket) - { - // Bucket 0 has no parent. - __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized"); - - size_type parent_bucket = get_parent(bucket); - - // All parent_bucket buckets have to be initialized before this bucket is - if (!is_initialized(parent_bucket)) - init_bucket(parent_bucket); - - raw_iterator parent = get_bucket(parent_bucket); - - // Create a dummy first node in this bucket - raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket)); - set_bucket(bucket, dummy_node); - } - - void adjust_table_size(size_type total_elements, size_type current_size) - { - // Grow the table by a factor of 2 if possible and needed - if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size ) - { - // Double the size of the hash only if size has not changed in between loads - my_number_of_buckets.compare_and_swap(2u*current_size, current_size); - //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8 - //due to overzealous compiler warnings in /Wp64 mode - } - } - - size_type get_parent(size_type bucket) const - { - // Unsets bucket's most significant turned-on bit - size_type msb = __TBB_Log2((uintptr_t)bucket); - return bucket & ~(size_type(1) << msb); - } - - - // Dynamic sized array (segments) - //! @return segment index of given index in the array - static size_type segment_index_of( size_type index ) { - return size_type( __TBB_Log2( uintptr_t(index|1) ) ); - } - - //! 
@return the first array index of given segment
-    static size_type segment_base( size_type k ) {
-        return (size_type(1)<<k & ~size_type(1));
-    }
-
-    //! @return segment size
-    static size_type segment_size( size_type k ) {
-        return k ? size_type(1)<<k : 2;
-    }
-
-    raw_iterator get_bucket(size_type bucket) const {
-        size_type segment = segment_index_of(bucket);
-        bucket -= segment_base(segment);
-        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
-        return my_buckets[segment][bucket];
-    }
-
-    void set_bucket(size_type bucket, raw_iterator dummy_head) {
-        size_type segment = segment_index_of(bucket);
-        bucket -= segment_base(segment);
-
-        if (my_buckets[segment] == NULL) {
-            size_type sz = segment_size(segment);
-            raw_iterator * new_segment = my_allocator.allocate(sz);
-            std::memset(new_segment, 0, sz*sizeof(raw_iterator));
-
-            // If another thread allocated this segment concurrently, free ours
-            if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
-                my_allocator.deallocate(new_segment, sz);
-        }
-
-        my_buckets[segment][bucket] = dummy_head;
-    }
-
-    bool is_initialized(size_type bucket) const {
-        size_type segment = segment_index_of(bucket);
-        bucket -= segment_base(segment);
-
-        if (my_buckets[segment] == NULL)
-            return false;
-
-        raw_iterator it = my_buckets[segment][bucket];
-        return (it.get_node_ptr() != NULL);
-    }
-
-    // Utilities for keys
-
-    // A regular order key has its original hash value reversed and the last bit set
-    sokey_t split_order_key_regular(sokey_t order_key) const {
-        return __TBB_ReverseBits(order_key) | 0x1;
-    }
-
-    // A dummy order key has its original hash value reversed and the last bit unset
-    sokey_t split_order_key_dummy(sokey_t order_key) const {
-        return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
-    }
-
-    // Shared variables
-    atomic<size_type>                                             my_number_of_buckets;           // Current table size
-    solist_t                                                      my_solist;                      // List where all the elements are kept
-    typename allocator_type::template rebind<raw_iterator>::other my_allocator;                   // Allocator object for segments
-    float                                                         my_maximum_bucket_size;         // Maximum size of the bucket
-    atomic<raw_iterator*>                                         my_buckets[pointers_per_table]; // The segment table
-};
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-#pragma warning(pop) // warning 4127 is back
-#endif
-
-//! Hash multiplier
-static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value;
-} // namespace internal
-//! @endcond
-//! Hasher functions
-template<typename T>
-inline size_t tbb_hasher( const T& t ) {
-    return static_cast<size_t>( t ) * internal::hash_multiplier;
-}
-template<typename P>
-inline size_t tbb_hasher( P* ptr ) {
-    size_t const h = reinterpret_cast<size_t>( ptr );
-    return (h >> 3) ^ h;
-}
-template<typename E, typename S, typename A>
-inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
-    size_t h = 0;
-    for( const E* c = s.c_str(); *c; ++c )
-        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
-    return h;
-}
-template<typename F, typename S>
-inline size_t tbb_hasher( const std::pair<F,S>& p ) {
-    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
-}
-} // namespace interface5
-using interface5::tbb_hasher;
-
-
-// Template class for hash compare
-template<typename Key>
-class tbb_hash
-{
-public:
-    tbb_hash() {}
-
-    size_t operator()(const Key& key) const
-    {
-        return tbb_hasher(key);
-    }
-};
-
-} // namespace tbb
-#endif// __TBB__concurrent_unordered_impl_H
diff --git a/src/tbb/include/tbb/internal/_flow_graph_impl.h b/src/tbb/include/tbb/internal/_flow_graph_impl.h
deleted file mode 100644
index 97da56df7..000000000
--- a/src/tbb/include/tbb/internal/_flow_graph_impl.h
+++ /dev/null
@@ -1,757 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB__flow_graph_impl_H
-#define __TBB__flow_graph_impl_H
-
-#ifndef __TBB_flow_graph_H
-#error Do not #include this internal file directly; use public TBB headers instead.
-#endif - -namespace internal { - - namespace graph_policy_namespace { - enum graph_buffer_policy { rejecting, reserving, queueing, tag_matching }; - } - -// -------------- function_body containers ---------------------- - - //! A functor that takes no input and generates a value of type Output - template< typename Output > - class source_body : tbb::internal::no_assign { - public: - virtual ~source_body() {} - virtual bool operator()(Output &output) = 0; - virtual source_body* clone() = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void reset_body() = 0; -#endif - }; - - //! The leaf for source_body - template< typename Output, typename Body> - class source_body_leaf : public source_body { - public: - source_body_leaf( const Body &_body ) : body(_body), init_body(_body) { } - /*override*/ bool operator()(Output &output) { return body( output ); } - /*override*/ source_body_leaf* clone() { - return new source_body_leaf< Output, Body >(init_body); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - Body get_body() { return body; } - private: - Body body; - Body init_body; - }; - - //! A functor that takes an Input and generates an Output - template< typename Input, typename Output > - class function_body : tbb::internal::no_assign { - public: - virtual ~function_body() {} - virtual Output operator()(const Input &input) = 0; - virtual function_body* clone() = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void reset_body() = 0; -#endif - }; - - //! the leaf for function_body - template - class function_body_leaf : public function_body< Input, Output > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - Output operator()(const Input &i) { return body(i); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< Input, Output, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! the leaf for function_body specialized for Input and output of continue_msg - template - class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - continue_msg operator()( const continue_msg &i ) { - body(i); - return i; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< continue_msg, continue_msg, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! the leaf for function_body specialized for Output of continue_msg - template - class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - continue_msg operator()(const Input &i) { - body(i); - return continue_msg(); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< Input, continue_msg, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! 
the leaf for function_body specialized for Input of continue_msg - template - class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > { - public: - function_body_leaf( const B &_body ) : body(_body), init_body(_body) { } - Output operator()(const continue_msg &i) { - return body(i); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ function_body_leaf* clone() { - return new function_body_leaf< continue_msg, Output, B >(init_body); - } - private: - B body; - B init_body; - }; - - //! function_body that takes an Input and a set of output ports - template - class multifunction_body : tbb::internal::no_assign { - public: - virtual ~multifunction_body () {} - virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0; - virtual multifunction_body* clone() = 0; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void reset_body() = 0; -#endif - }; - - //! leaf for multifunction. OutputSet can be a std::tuple or a vector. - template - class multifunction_body_leaf : public multifunction_body { - public: - multifunction_body_leaf(const B &_body) : body(_body), init_body(_body) { } - void operator()(const Input &input, OutputSet &oset) { - body(input, oset); // body may explicitly put() to one or more of oset. - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void reset_body() { - body = init_body; - } -#endif - B get_body() { return body; } - /*override*/ multifunction_body_leaf* clone() { - return new multifunction_body_leaf(init_body); - } - private: - B body; - B init_body; - }; - -// --------------------------- end of function_body containers ------------------------ - -// --------------------------- node task bodies --------------------------------------- - - //! A task that calls a node's forward_task function - template< typename NodeType > - class forward_task_bypass : public task { - - NodeType &my_node; - - public: - - forward_task_bypass( NodeType &n ) : my_node(n) {} - - task *execute() { - task * new_task = my_node.forward_task(); - if (new_task == SUCCESSFULLY_ENQUEUED) new_task = NULL; - return new_task; - } - }; - - //! A task that calls a node's apply_body_bypass function, passing in an input of type Input - // return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return NULL - template< typename NodeType, typename Input > - class apply_body_task_bypass : public task { - - NodeType &my_node; - Input my_input; - - public: - - apply_body_task_bypass( NodeType &n, const Input &i ) : my_node(n), my_input(i) {} - - task *execute() { - task * next_task = my_node.apply_body_bypass( my_input ); - if(next_task == SUCCESSFULLY_ENQUEUED) next_task = NULL; - return next_task; - } - }; - - //! A task that calls a node's apply_body function with no input - template< typename NodeType > - class source_task_bypass : public task { - - NodeType &my_node; - - public: - - source_task_bypass( NodeType &n ) : my_node(n) {} - - task *execute() { - task *new_task = my_node.apply_body_bypass( ); - if(new_task == SUCCESSFULLY_ENQUEUED) return NULL; - return new_task; - } - }; - -// ------------------------ end of node task bodies ----------------------------------- - - //! An empty functor that takes an Input and returns a default constructed Output - template< typename Input, typename Output > - struct empty_body { - Output operator()( const Input & ) const { return Output(); } - }; - - //! 
A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock. - template< typename T, typename M=spin_mutex > - class node_cache { - public: - - typedef size_t size_type; - - bool empty() { - typename my_mutex_type::scoped_lock lock( my_mutex ); - return internal_empty(); - } - - void add( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - internal_push(n); - } - - void remove( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - for ( size_t i = internal_size(); i != 0; --i ) { - T &s = internal_pop(); - if ( &s == &n ) return; // only remove one predecessor per request - internal_push(s); - } - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; - void internal_add_built_predecessor( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.add_edge(n); - } - - void internal_delete_built_predecessor( T &n ) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.delete_edge(n); - } - - void copy_predecessors( predecessor_vector_type &v) { - typename my_mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.copy_edges(v); - } - - size_t predecessor_count() { - typename my_mutex_type::scoped_lock lock(my_mutex); - return (size_t)(my_built_predecessors.edge_count()); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - protected: - - typedef M my_mutex_type; - my_mutex_type my_mutex; - std::queue< T * > my_q; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - - // Assumes lock is held - inline bool internal_empty( ) { - return my_q.empty(); - } - - // Assumes lock is held - inline size_type internal_size( ) { - return my_q.size(); - } - - // Assumes lock is held - inline void internal_push( T &n ) { - my_q.push(&n); - } - - // Assumes lock is held - inline T &internal_pop() { - T *v = my_q.front(); - my_q.pop(); - return *v; - } - - }; - - //! A cache of predecessors that only supports try_get - template< typename T, typename M=spin_mutex > - class predecessor_cache : public node_cache< sender, M > { - public: - typedef M my_mutex_type; - typedef T output_type; - typedef sender predecessor_type; - typedef receiver successor_type; - - predecessor_cache( ) : my_owner( NULL ) { } - - void set_owner( successor_type *owner ) { my_owner = owner; } - - bool get_item( output_type &v ) { - - bool msg = false; - - do { - predecessor_type *src; - { - typename my_mutex_type::scoped_lock lock(this->my_mutex); - if ( this->internal_empty() ) { - break; - } - src = &this->internal_pop(); - } - - // Try to get from this sender - msg = src->try_get( v ); - - if (msg == false) { - // Relinquish ownership of the edge - if ( my_owner) - src->register_successor( *my_owner ); - } else { - // Retain ownership of the edge - this->add(*src); - } - } while ( msg == false ); - return msg; - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - if(my_owner) { - for(;;) { - predecessor_type *src; - { - if(this->internal_empty()) break; - src = &this->internal_pop(); - } - src->register_successor( *my_owner); - } - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if (f&rf_extract && my_owner) - my_built_predecessors.receiver_extract(*my_owner); - __TBB_ASSERT(!(f&rf_extract) || this->internal_empty(), "predecessor cache not empty"); -#endif - } - - protected: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - using node_cache< sender, M >::my_built_predecessors; -#endif - successor_type *my_owner; - }; - - //! 
An cache of predecessors that supports requests and reservations - template< typename T, typename M=spin_mutex > - class reservable_predecessor_cache : public predecessor_cache< T, M > { - public: - typedef M my_mutex_type; - typedef T output_type; - typedef sender predecessor_type; - typedef receiver successor_type; - - reservable_predecessor_cache( ) : reserved_src(NULL) { } - - bool - try_reserve( output_type &v ) { - bool msg = false; - - do { - { - typename my_mutex_type::scoped_lock lock(this->my_mutex); - if ( reserved_src || this->internal_empty() ) - return false; - - reserved_src = &this->internal_pop(); - } - - // Try to get from this sender - msg = reserved_src->try_reserve( v ); - - if (msg == false) { - typename my_mutex_type::scoped_lock lock(this->my_mutex); - // Relinquish ownership of the edge - reserved_src->register_successor( *this->my_owner ); - reserved_src = NULL; - } else { - // Retain ownership of the edge - this->add( *reserved_src ); - } - } while ( msg == false ); - - return msg; - } - - bool - try_release( ) { - reserved_src->try_release( ); - reserved_src = NULL; - return true; - } - - bool - try_consume( ) { - reserved_src->try_consume( ); - reserved_src = NULL; - return true; - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - reserved_src = NULL; - predecessor_cache::reset(__TBB_PFG_RESET_ARG(f)); - } - - private: - predecessor_type *reserved_src; - }; - - - //! An abstract cache of successors - template - class successor_cache : tbb::internal::no_copy { - protected: - - typedef M my_mutex_type; - my_mutex_type my_mutex; - - typedef receiver *pointer_type; - typedef std::list< pointer_type > my_successors_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_successors; -#endif - my_successors_type my_successors; - - sender *my_owner; - - public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; - void internal_add_built_successor( receiver &r) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.add_edge( r ); - } - - void internal_delete_built_successor( receiver &r) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.delete_edge(r); - } - - void copy_successors( successor_vector_type &v) { - typename my_mutex_type::scoped_lock l(my_mutex, false); - my_built_successors.copy_edges(v); - } - - size_t successor_count() { - typename my_mutex_type::scoped_lock l(my_mutex,false); - return my_built_successors.edge_count(); - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - if (f&rf_extract && my_owner) - my_built_successors.sender_extract(*my_owner); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - successor_cache( ) : my_owner(NULL) {} - - void set_owner( sender *owner ) { my_owner = owner; } - - virtual ~successor_cache() {} - - void register_successor( receiver &r ) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - my_successors.push_back( &r ); - } - - void remove_successor( receiver &r ) { - typename my_mutex_type::scoped_lock l(my_mutex, true); - for ( typename my_successors_type::iterator i = my_successors.begin(); - i != my_successors.end(); ++i ) { - if ( *i == & r ) { - my_successors.erase(i); - break; - } - } - } - - bool empty() { - typename my_mutex_type::scoped_lock l(my_mutex, false); - return my_successors.empty(); - } - - void clear() { - my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_successors.clear(); -#endif - } - - virtual task * try_put_task( const T &t ) = 0; - }; - - //! 
An abstract cache of successors, specialized to continue_msg - template<> - class successor_cache< continue_msg > : tbb::internal::no_copy { - protected: - - typedef spin_rw_mutex my_mutex_type; - my_mutex_type my_mutex; - - typedef receiver *pointer_type; - typedef std::list< pointer_type > my_successors_type; - my_successors_type my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_successors; -#endif - - sender *my_owner; - - public: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; - void internal_add_built_successor( receiver &r) { - my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.add_edge( r ); - } - - void internal_delete_built_successor( receiver &r) { - my_mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.delete_edge(r); - } - - void copy_successors( successor_vector_type &v) { - my_mutex_type::scoped_lock l(my_mutex, false); - my_built_successors.copy_edges(v); - } - - size_t successor_count() { - my_mutex_type::scoped_lock l(my_mutex,false); - return my_built_successors.edge_count(); - } - - void reset( __TBB_PFG_RESET_ARG(reset_flags f)) { - if (f&rf_extract && my_owner) - my_built_successors.sender_extract(*my_owner); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - successor_cache( ) : my_owner(NULL) {} - - void set_owner( sender *owner ) { my_owner = owner; } - - virtual ~successor_cache() {} - - void register_successor( receiver &r ) { - my_mutex_type::scoped_lock l(my_mutex, true); - my_successors.push_back( &r ); - if ( my_owner && r.is_continue_receiver() ) { - r.register_predecessor( *my_owner ); - } - } - - void remove_successor( receiver &r ) { - my_mutex_type::scoped_lock l(my_mutex, true); - for ( my_successors_type::iterator i = my_successors.begin(); - i != my_successors.end(); ++i ) { - if ( *i == & r ) { - // TODO: Check if we need to test for continue_receiver before - // removing from r. - if ( my_owner ) - r.remove_predecessor( *my_owner ); - my_successors.erase(i); - break; - } - } - } - - bool empty() { - my_mutex_type::scoped_lock l(my_mutex, false); - return my_successors.empty(); - } - - void clear() { - my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_successors.clear(); -#endif - } - - virtual task * try_put_task( const continue_msg &t ) = 0; - - }; - - //! A cache of successors that are broadcast to - template - class broadcast_cache : public successor_cache { - typedef M my_mutex_type; - typedef std::list< receiver * > my_successors_type; - - public: - - broadcast_cache( ) {} - - // as above, but call try_put_task instead, and return the last task we received (if any) - /*override*/ task * try_put_task( const T &t ) { - task * last_task = NULL; - bool upgraded = true; - typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded); - typename my_successors_type::iterator i = this->my_successors.begin(); - while ( i != this->my_successors.end() ) { - task *new_task = (*i)->try_put_task(t); - last_task = combine_tasks(last_task, new_task); // enqueue if necessary - if(new_task) { - ++i; - } - else { // failed - if ( (*i)->register_predecessor(*this->my_owner) ) { - if (!upgraded) { - l.upgrade_to_writer(); - upgraded = true; - } - i = this->my_successors.erase(i); - } else { - ++i; - } - } - } - return last_task; - } - - }; - - //! 
A cache of successors that are put in a round-robin fashion - template - class round_robin_cache : public successor_cache { - typedef size_t size_type; - typedef M my_mutex_type; - typedef std::list< receiver * > my_successors_type; - - public: - - round_robin_cache( ) {} - - size_type size() { - typename my_mutex_type::scoped_lock l(this->my_mutex, false); - return this->my_successors.size(); - } - - /*override*/task *try_put_task( const T &t ) { - bool upgraded = true; - typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded); - typename my_successors_type::iterator i = this->my_successors.begin(); - while ( i != this->my_successors.end() ) { - task *new_task = (*i)->try_put_task(t); - if ( new_task ) { - return new_task; - } else { - if ( (*i)->register_predecessor(*this->my_owner) ) { - if (!upgraded) { - l.upgrade_to_writer(); - upgraded = true; - } - i = this->my_successors.erase(i); - } - else { - ++i; - } - } - } - return NULL; - } - }; - - template - class decrementer : public continue_receiver, tbb::internal::no_copy { - - T *my_node; - - task *execute() { - return my_node->decrement_counter(); - } - - public: - - typedef continue_msg input_type; - typedef continue_msg output_type; - decrementer( int number_of_predecessors = 0 ) : continue_receiver( number_of_predecessors ) { } - void set_owner( T *node ) { my_node = node; } - }; - -} - -#endif // __TBB__flow_graph_impl_H - diff --git a/src/tbb/include/tbb/internal/_flow_graph_indexer_impl.h b/src/tbb/include/tbb/internal/_flow_graph_indexer_impl.h deleted file mode 100644 index 947e1d414..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_indexer_impl.h +++ /dev/null @@ -1,453 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_indexer_impl_H -#define __TBB__flow_graph_indexer_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "tbb/internal/_flow_graph_types_impl.h" - -namespace internal { - - // Output of the indexer_node is a tbb::flow::tagged_msg, and will be of - // the form tagged_msg - // where the value of tag will indicate which result was put to the - // successor. 
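The comment above describes the indexer's output protocol: each input port stamps its value with the index of the port it arrived on before forwarding it, which is what `do_try_put<K>` below implements by binding the port number as a template argument. A rough modern analogue of that dispatch, using `std::variant` instead of `tagged_msg` (the names `TaggedValue` and `do_put` are illustrative, not TBB APIs):

```cpp
// Sketch only: tag-dispatch in the style of the indexer's do_try_put<K>.
#include <cstddef>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

struct TaggedValue {
    std::size_t tag;                      // which input port produced the value
    std::variant<int, std::string> body;  // the value itself
};

// Bind the port index K at compile time, so every port can forward into the
// same sink through one function pointer while preserving its identity.
template <std::size_t K, typename T>
void do_put(const T& v, std::vector<TaggedValue>& sink) {
    sink.push_back(TaggedValue{K, v});
}

int main() {
    std::vector<TaggedValue> sink;
    do_put<0>(42, sink);                 // arrived on port 0
    do_put<1>(std::string("hi"), sink);  // arrived on port 1
    for (const auto& m : sink)
        std::cout << "port " << m.tag << '\n';
}
```

The compile-time binding is the key design point: the indexer stores one plain function pointer per port, yet each pointer already knows which tag to emit, so no runtime lookup is needed on the hot path.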
- - template - task* do_try_put(const T &v, void *p) { - typename IndexerNodeBaseType::output_type o(K, v); - return reinterpret_cast(p)->try_put_task(&o); - } - - template - struct indexer_helper { - template - static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) { - typedef typename tuple_element::type T; - task *(*indexer_node_put_task)(const T&, void *) = do_try_put; - tbb::flow::get(my_input).set_up(p, indexer_node_put_task); - indexer_helper::template set_indexer_node_pointer(my_input, p); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - template - static inline void reset_inputs(InputTuple &my_input, reset_flags f) { - join_helper::reset_inputs(my_input, f); - tbb::flow::get(my_input).reset_receiver(f); - } -#endif - }; - - template - struct indexer_helper { - template - static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) { - typedef typename tuple_element<0, TupleTypes>::type T; - task *(*indexer_node_put_task)(const T&, void *) = do_try_put; - tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - template - static inline void reset_inputs(InputTuple &my_input, reset_flags f) { - tbb::flow::get<0>(my_input).reset_receiver(f); - } -#endif - }; - - template - class indexer_input_port : public receiver { - private: - void* my_indexer_ptr; - typedef task* (* forward_function_ptr)(T const &, void* ); - forward_function_ptr my_try_put_task; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - spin_mutex my_pred_mutex; - edge_container > my_built_predecessors; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - indexer_input_port() : my_pred_mutex() {} - indexer_input_port( const indexer_input_port & /*other*/ ) : receiver(), my_pred_mutex() { - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - void set_up(void *p, forward_function_ptr f) { - my_indexer_ptr = p; - my_try_put_task = f; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector *> predecessor_vector_type; - /*override*/size_t predecessor_count() { - spin_mutex::scoped_lock l(my_pred_mutex); - return my_built_predecessors.edge_count(); - } - /*override*/void internal_add_built_predecessor(sender &p) { - spin_mutex::scoped_lock l(my_pred_mutex); - my_built_predecessors.add_edge(p); - } - /*override*/void internal_delete_built_predecessor(sender &p) { - spin_mutex::scoped_lock l(my_pred_mutex); - my_built_predecessors.delete_edge(p); - } - /*override*/void copy_predecessors( predecessor_vector_type &v) { - spin_mutex::scoped_lock l(my_pred_mutex); - return my_built_predecessors.copy_edges(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - task *try_put_task(const T &v) { - return my_try_put_task(v, my_indexer_ptr); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - public: - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { - if(f&rf_extract) my_built_predecessors.receiver_extract(*this); - } -#else - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { } -#endif - - }; - - template - class indexer_node_FE { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputType output_type; - typedef InputTuple input_type; - - input_type &input_ports() { return my_inputs; } - protected: - input_type my_inputs; - }; - - //! 
indexer_node_base - template - class indexer_node_base : public graph_node, public indexer_node_FE, - public sender { - protected: - using graph_node::my_graph; - public: - static const size_t N = tbb::flow::tuple_size::value; - typedef OutputType output_type; - typedef StructTypes tuple_types; - typedef receiver successor_type; - typedef indexer_node_FE input_ports_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; -#endif - - private: - // ----------- Aggregator ------------ - enum op_type { reg_succ, rem_succ, try__put_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_succ, del_blt_succ, - blt_succ_cnt, blt_succ_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef indexer_node_base my_class; - - class indexer_node_base_operation : public aggregated_operation { - public: - char type; - union { - output_type const *my_arg; - successor_type *my_succ; - task *bypass_t; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - successor_vector_type *succv; -#endif - }; - indexer_node_base_operation(const output_type* e, op_type t) : - type(char(t)), my_arg(e) {} - indexer_node_base_operation(const successor_type &s, op_type t) : type(char(t)), - my_succ(const_cast(&s)) {} - indexer_node_base_operation(op_type t) : type(char(t)) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - void handle_operations(indexer_node_base_operation* op_list) { - indexer_node_base_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - - case reg_succ: - my_successors.register_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - - case rem_succ: - my_successors.remove_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case try__put_task: { - current->bypass_t = my_successors.try_put_task(*(current->my_arg)); - __TBB_store_with_release(current->status, SUCCEEDED); // return of try_put_task actual return value - } - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_succ: - my_successors.internal_add_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_succ: - my_successors.internal_delete_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cnt: - current->cnt_val = my_successors.successor_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cpy: - my_successors.copy_successors(*(current->succv)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - // ---------- end aggregator ----------- - public: - indexer_node_base(graph& g) : graph_node(g), input_ports_type() { - indexer_helper::set_indexer_node_pointer(this->my_inputs, this); - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - indexer_node_base(const indexer_node_base& other) : graph_node(other.my_graph), input_ports_type(), sender() { - indexer_helper::set_indexer_node_pointer(this->my_inputs, this); - my_successors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - bool register_successor(successor_type &r) { - indexer_node_base_operation op_data(r, reg_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - bool 
remove_successor( successor_type &r) { - indexer_node_base_operation op_data(r, rem_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - task * try_put_task(output_type const *v) { - indexer_node_base_operation op_data(v, try__put_task); - my_aggregator.execute(&op_data); - return op_data.bypass_t; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void internal_add_built_successor( successor_type &r) { - indexer_node_base_operation op_data(r, add_blt_succ); - my_aggregator.execute(&op_data); - } - - void internal_delete_built_successor( successor_type &r) { - indexer_node_base_operation op_data(r, del_blt_succ); - my_aggregator.execute(&op_data); - } - - size_t successor_count() { - indexer_node_base_operation op_data(blt_succ_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - void copy_successors( successor_vector_type &v) { - indexer_node_base_operation op_data(blt_succ_cpy); - op_data.succv = &v; - my_aggregator.execute(&op_data); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - protected: - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); - indexer_helper::reset_inputs(this->my_inputs, f); -#endif - } - - private: - broadcast_cache my_successors; - }; //indexer_node_base - - - template struct input_types; - - template - struct input_types<1, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<2, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<3, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<4, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<5, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<6, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<7, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - 
typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename internal::tagged_msg type; - }; - - - template - struct input_types<8, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename tuple_element<7, InputTuple>::type eighth_type; - typedef typename internal::tagged_msg type; - }; - - - template - struct input_types<9, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename tuple_element<7, InputTuple>::type eighth_type; - typedef typename tuple_element<8, InputTuple>::type nineth_type; - typedef typename internal::tagged_msg type; - }; - - template - struct input_types<10, InputTuple> { - typedef typename tuple_element<0, InputTuple>::type first_type; - typedef typename tuple_element<1, InputTuple>::type second_type; - typedef typename tuple_element<2, InputTuple>::type third_type; - typedef typename tuple_element<3, InputTuple>::type fourth_type; - typedef typename tuple_element<4, InputTuple>::type fifth_type; - typedef typename tuple_element<5, InputTuple>::type sixth_type; - typedef typename tuple_element<6, InputTuple>::type seventh_type; - typedef typename tuple_element<7, InputTuple>::type eighth_type; - typedef typename tuple_element<8, InputTuple>::type nineth_type; - typedef typename tuple_element<9, InputTuple>::type tenth_type; - typedef typename internal::tagged_msg type; - }; - - // type generators - template - struct indexer_types : public input_types::value, OutputTuple> { - static const int N = tbb::flow::tuple_size::value; - typedef typename input_types::type output_type; - typedef typename wrap_tuple_elements::type input_ports_type; - typedef internal::indexer_node_FE indexer_FE_type; - typedef internal::indexer_node_base indexer_base_type; - }; - - template - class unfolded_indexer_node : public indexer_types::indexer_base_type { - public: - typedef typename indexer_types::input_ports_type input_ports_type; - typedef OutputTuple tuple_types; - typedef typename indexer_types::output_type output_type; - private: - typedef typename indexer_types::indexer_base_type base_type; - public: - unfolded_indexer_node(graph& g) : base_type(g) {} - unfolded_indexer_node(const unfolded_indexer_node &other) : base_type(other) {} - }; - -} /* namespace internal */ - -#endif /* __TBB__flow_graph_indexer_impl_H */ diff --git a/src/tbb/include/tbb/internal/_flow_graph_item_buffer_impl.h 
b/src/tbb/include/tbb/internal/_flow_graph_item_buffer_impl.h deleted file mode 100644 index f6c0a820e..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_item_buffer_impl.h +++ /dev/null @@ -1,279 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_item_buffer_impl_H -#define __TBB__flow_graph_item_buffer_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "tbb/internal/_flow_graph_types_impl.h" // for aligned_pair - -// in namespace tbb::flow::interface7 (included in _flow_graph_node_impl.h) - - //! Expandable buffer of items. The possible operations are push, pop, - //* tests for empty and so forth. No mutual exclusion is built in. - //* objects are constructed into and explicitly-destroyed. get_my_item gives - // a read-only reference to the item in the buffer. set_my_item may be called - // with either an empty or occupied slot. 
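The item_buffer described above pairs raw storage with a state flag and manages object lifetime by hand: values are placement-new'ed into a slot and destroyed explicitly. A minimal sketch of one such slot, assuming C++11 alignas in place of TBB's aligned_pair:

#include <new>
#include <cassert>
#include <string>

template <typename T>
struct slot {
    alignas(T) unsigned char storage[sizeof(T)];      // uninitialized raw storage
    enum state_t { no_item, has_item } state = no_item;

    void set(const T& v) {                 // may be called on an occupied slot
        if (state == has_item) destroy();
        new (storage) T(v);                // construct in place
        state = has_item;
    }
    const T& get() const {
        assert(state == has_item);
        return *reinterpret_cast<const T*>(storage);
    }
    void destroy() {                       // explicit destruction, as in the buffer
        assert(state == has_item);
        reinterpret_cast<T*>(storage)->~T();
        state = no_item;
    }
};

int main() {
    slot<std::string> s;
    s.set("hello");
    assert(s.get() == "hello");
    s.set("world");   // overwrite: destroys the old value, then reconstructs
    s.destroy();
}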
- - using internal::aligned_pair; - using internal::alignment_of; - -namespace internal { - - template > - class item_buffer { - public: - typedef T item_type; - enum buffer_item_state { no_item=0, has_item=1, reserved_item=2 }; - protected: - typedef size_t size_type; - typedef typename aligned_pair::type buffer_item_type; - typedef typename A::template rebind::other allocator_type; - - buffer_item_type *my_array; - size_type my_array_size; - static const size_type initial_buffer_size = 4; - size_type my_head; - size_type my_tail; - - bool buffer_empty() { return my_head == my_tail; } - - buffer_item_type &item(size_type i) { - __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].second))%alignment_of::value),NULL); - __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].first))%alignment_of::value), NULL); - return my_array[i & (my_array_size - 1) ]; - } - - bool my_item_valid(size_type i) { return item(i).second != no_item; } - bool my_item_reserved(size_type i) { return item(i).second == reserved_item; } - - // object management in buffer - const item_type &get_my_item(size_t i) { - __TBB_ASSERT(my_item_valid(i),"attempt to get invalid item"); - item_type *itm = (tbb::internal::punned_cast(&(item(i).first))); - return *(const item_type *)itm; - } - - // may be called with an empty slot or a slot that has already been constructed into. - void set_my_item(size_t i, const item_type &o) { - if(item(i).second != no_item) { - destroy_item(i); - } - new(&(item(i).first)) item_type(o); - item(i).second = has_item; - } - - // destructively-fetch an object from the buffer - void fetch_item(size_t i, item_type &o) { - __TBB_ASSERT(my_item_valid(i), "Trying to fetch an empty slot"); - o = get_my_item(i); // could have std::move assign semantics - destroy_item(i); - } - - // move an existing item from one slot to another. The moved-to slot must be unoccupied, - // the moved-from slot must exist and not be reserved. The after, from will be empty, - // to will be occupied but not reserved - void move_item(size_t to, size_t from) { - __TBB_ASSERT(!my_item_valid(to), "Trying to move to a non-empty slot"); - __TBB_ASSERT(my_item_valid(from), "Trying to move from an empty slot"); - set_my_item(to, get_my_item(from)); // could have std::move semantics - destroy_item(from); - - } - - // put an item in an empty slot. Return true if successful, else false - bool place_item(size_t here, const item_type &me) { -#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES - if(my_item_valid(here)) return false; -#endif - set_my_item(here, me); - return true; - } - - // could be implemented with std::move semantics - void swap_items(size_t i, size_t j) { - __TBB_ASSERT(my_item_valid(i) && my_item_valid(j), "attempt to swap invalid item(s)"); - item_type temp = get_my_item(i); - set_my_item(i, get_my_item(j)); - set_my_item(j, temp); - } - - void destroy_item(size_type i) { - __TBB_ASSERT(my_item_valid(i), "destruction of invalid item"); - (tbb::internal::punned_cast(&(item(i).first)))->~item_type(); - item(i).second = no_item; - } - - // returns a copy of the front - void copy_front(item_type &v) { - __TBB_ASSERT(my_item_valid(my_head), "attempt to fetch head non-item"); - v = get_my_item(my_head); - } - // returns a copy of the back - void copy_back(item_type &v) { - __TBB_ASSERT(my_item_valid(my_tail-1), "attempt to fetch head non-item"); - v = get_my_item(my_tail-1); - } - - // following methods are for reservation of the front of a bufffer. 
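Note the indexing scheme used throughout this buffer: my_head and my_tail grow monotonically and are reduced modulo a power-of-two capacity with a bitmask, so size() is simply tail minus head. A standalone sketch with a fixed capacity standing in for grow_my_array:

#include <cstddef>
#include <cassert>

struct ring {
    static const std::size_t capacity = 8;       // must stay a power of two
    int data[capacity];
    std::size_t head = 0, tail = 0;              // monotonic counters, never wrapped

    int&        at(std::size_t i) { return data[i & (capacity - 1)]; }
    std::size_t size() const      { return tail - head; }
    bool        full() const      { return size() >= capacity; }

    void push_back(int v) { assert(!full());     at(tail++) = v; }
    int  pop_front()      { assert(size() > 0);  return at(head++); }
};

int main() {
    ring r;
    for (int i = 0; i < 8; ++i) r.push_back(i);
    assert(r.full());
    assert(r.pop_front() == 0);   // head advances; the slot index wraps via the mask
    r.push_back(8);               // tail wraps around the array
    assert(r.size() == 8);
}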
- void reserve_item(size_type i) { __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), "item cannot be reserved"); item(i).second = reserved_item; } - void release_item(size_type i) { __TBB_ASSERT(my_item_reserved(i), "item is not reserved"); item(i).second = has_item; } - - void destroy_front() { destroy_item(my_head); ++my_head; } - void destroy_back() { destroy_item(my_tail-1); --my_tail; } - - // we have to be able to test against a new tail value without changing my_tail - // grow_array doesn't work if we change my_tail when the old array is too small - size_type size(size_t new_tail = 0) { return (new_tail ? new_tail : my_tail) - my_head; } - size_type capacity() { return my_array_size; } - // sequencer_node does not use this method, so we don't - // need a version that passes in the new_tail value. - bool buffer_full() { return size() >= capacity(); } - - //! Grows the internal array. - void grow_my_array( size_t minimum_size ) { - // test that we haven't made the structure inconsistent. - __TBB_ASSERT(capacity() >= my_tail - my_head, "total items exceed capacity"); - size_type new_size = my_array_size ? 2*my_array_size : initial_buffer_size; - while( new_size > - class reservable_item_buffer : public item_buffer { - protected: - using item_buffer::my_item_valid; - using item_buffer::my_head; - - public: - reservable_item_buffer() : item_buffer(), my_reserved(false) {} - void reset() {my_reserved = false; item_buffer::reset(); } - protected: - - bool reserve_front(T &v) { - if(my_reserved || !my_item_valid(my_head)) return false; - my_reserved = true; - // reserving the head - this->copy_front(v); - this->reserve_item(this->my_head); - return true; - } - - void consume_front() { - __TBB_ASSERT(my_reserved, "Attempt to consume a non-reserved item"); - this->destroy_front(); - my_reserved = false; - } - - void release_front() { - __TBB_ASSERT(my_reserved, "Attempt to release a non-reserved item"); - this->release_item(this->my_head); - my_reserved = false; - } - - bool my_reserved; - }; - -} // namespace internal - -#endif // __TBB__flow_graph_item_buffer_impl_H diff --git a/src/tbb/include/tbb/internal/_flow_graph_join_impl.h b/src/tbb/include/tbb/internal/_flow_graph_join_impl.h deleted file mode 100644 index 4ef893ef9..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_join_impl.h +++ /dev/null @@ -1,1739 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_join_impl_H -#define __TBB__flow_graph_join_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "_flow_graph_types_impl.h" - -namespace internal { - - typedef size_t tag_value; - static const tag_value NO_TAG = tag_value(-1); - - struct forwarding_base { - forwarding_base(graph &g) : my_graph_ptr(&g), current_tag(NO_TAG) {} - virtual ~forwarding_base() {} - // decrement_port_count may create a forwarding task. If we cannot handle the task - // ourselves, ask decrement_port_count to deal with it. - virtual task * decrement_port_count(bool handle_task) = 0; - virtual void increment_port_count() = 0; - virtual task * increment_tag_count(tag_value /*t*/, bool /*handle_task*/) {return NULL;} - // moved here so input ports can queue tasks - graph* my_graph_ptr; - tag_value current_tag; // so ports can refer to FE's desired items - }; - - template< int N > - struct join_helper { - - template< typename TupleType, typename PortType > - static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { - tbb::flow::get( my_input ).set_join_node_pointer(port); - join_helper::set_join_node_pointer( my_input, port ); - } - template< typename TupleType > - static inline void consume_reservations( TupleType &my_input ) { - tbb::flow::get( my_input ).consume(); - join_helper::consume_reservations( my_input ); - } - - template< typename TupleType > - static inline void release_my_reservation( TupleType &my_input ) { - tbb::flow::get( my_input ).release(); - } - - template - static inline void release_reservations( TupleType &my_input) { - join_helper::release_reservations(my_input); - release_my_reservation(my_input); - } - - template< typename InputTuple, typename OutputTuple > - static inline bool reserve( InputTuple &my_input, OutputTuple &out) { - if ( !tbb::flow::get( my_input ).reserve( tbb::flow::get( out ) ) ) return false; - if ( !join_helper::reserve( my_input, out ) ) { - release_my_reservation( my_input ); - return false; - } - return true; - } - - template - static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { - bool res = tbb::flow::get(my_input).get_item(tbb::flow::get(out) ); // may fail - return join_helper::get_my_item(my_input, out) && res; // do get on other inputs before returning - } - - template - static inline bool get_items(InputTuple &my_input, OutputTuple &out) { - return get_my_item(my_input, out); - } - - template - static inline void reset_my_port(InputTuple &my_input) { - join_helper::reset_my_port(my_input); - tbb::flow::get(my_input).reset_port(); - } - - template - static inline void reset_ports(InputTuple& my_input) { - reset_my_port(my_input); - } - - template - static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) { - tbb::flow::get(my_input).set_my_original_tag_func(tbb::flow::get(my_tag_funcs)); - tbb::flow::get(my_input).set_my_tag_func(tbb::flow::get(my_input).my_original_func()->clone()); - tbb::flow::get(my_tag_funcs) = NULL; - 
join_helper::set_tag_func(my_input, my_tag_funcs); - } - - template< typename TagFuncTuple1, typename TagFuncTuple2> - static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { - if(tbb::flow::get(other_inputs).my_original_func()) { - tbb::flow::get(my_inputs).set_my_tag_func(tbb::flow::get(other_inputs).my_original_func()->clone()); - tbb::flow::get(my_inputs).set_my_original_tag_func(tbb::flow::get(other_inputs).my_original_func()->clone()); - } - join_helper::copy_tag_functors(my_inputs, other_inputs); - } - - template - static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) { - join_helper::reset_inputs(my_input __TBB_PFG_RESET_ARG(__TBB_COMMA f)); - tbb::flow::get(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f)); - } - }; - - template< > - struct join_helper<1> { - - template< typename TupleType, typename PortType > - static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { - tbb::flow::get<0>( my_input ).set_join_node_pointer(port); - } - - template< typename TupleType > - static inline void consume_reservations( TupleType &my_input ) { - tbb::flow::get<0>( my_input ).consume(); - } - - template< typename TupleType > - static inline void release_my_reservation( TupleType &my_input ) { - tbb::flow::get<0>( my_input ).release(); - } - - template - static inline void release_reservations( TupleType &my_input) { - release_my_reservation(my_input); - } - - template< typename InputTuple, typename OutputTuple > - static inline bool reserve( InputTuple &my_input, OutputTuple &out) { - return tbb::flow::get<0>( my_input ).reserve( tbb::flow::get<0>( out ) ); - } - - template - static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { - return tbb::flow::get<0>(my_input).get_item(tbb::flow::get<0>(out)); - } - - template - static inline bool get_items(InputTuple &my_input, OutputTuple &out) { - return get_my_item(my_input, out); - } - - template - static inline void reset_my_port(InputTuple &my_input) { - tbb::flow::get<0>(my_input).reset_port(); - } - - template - static inline void reset_ports(InputTuple& my_input) { - reset_my_port(my_input); - } - - template - static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) { - tbb::flow::get<0>(my_input).set_my_original_tag_func(tbb::flow::get<0>(my_tag_funcs)); - tbb::flow::get<0>(my_input).set_my_tag_func(tbb::flow::get<0>(my_input).my_original_func()->clone()); - tbb::flow::get<0>(my_tag_funcs) = NULL; - } - - template< typename TagFuncTuple1, typename TagFuncTuple2> - static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) { - if(tbb::flow::get<0>(other_inputs).my_original_func()) { - tbb::flow::get<0>(my_inputs).set_my_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone()); - tbb::flow::get<0>(my_inputs).set_my_original_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone()); - } - } - template - static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) { - tbb::flow::get<0>(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f)); - } - }; - - //! 
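join_helper<N> above unrolls per-port work at compile time: each operation recurses from port N-1 down to the join_helper<1> base case instead of looping at runtime. A minimal sketch of the same recursion over a tuple of ports (helper and port are illustrative names):

#include <tuple>
#include <cstdio>

template <int N>
struct helper {
    template <typename Tuple>
    static void reset_ports(Tuple& t) {
        std::get<N - 1>(t).reset();        // handle the highest remaining port
        helper<N - 1>::reset_ports(t);     // recurse toward the base case
    }
};

template <>
struct helper<1> {                         // base case: port 0 only
    template <typename Tuple>
    static void reset_ports(Tuple& t) { std::get<0>(t).reset(); }
};

struct port { int id; void reset() { std::printf("reset port %d\n", id); } };

int main() {
    std::tuple<port, port, port> ports(port{0}, port{1}, port{2});
    helper<std::tuple_size<decltype(ports)>::value>::reset_ports(ports); // 2, 1, 0
}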
The two-phase join port - template< typename T > - class reserving_port : public receiver { - public: - typedef T input_type; - typedef sender predecessor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - private: - // ----------- Aggregator ------------ - enum op_type { reg_pred, rem_pred, res_item, rel_res, con_res -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef reserving_port my_class; - - class reserving_port_operation : public aggregated_operation { - public: - char type; - union { - T *my_arg; - predecessor_type *my_pred; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - predecessor_vector_type *pvec; -#endif - }; - reserving_port_operation(const T& e, op_type t) : - type(char(t)), my_arg(const_cast(&e)) {} - reserving_port_operation(const predecessor_type &s, op_type t) : type(char(t)), - my_pred(const_cast(&s)) {} - reserving_port_operation(op_type t) : type(char(t)) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - void handle_operations(reserving_port_operation* op_list) { - reserving_port_operation *current; - bool no_predecessors; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case reg_pred: - no_predecessors = my_predecessors.empty(); - my_predecessors.add(*(current->my_pred)); - if ( no_predecessors ) { - (void) my_join->decrement_port_count(true); // may try to forward - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case rem_pred: - my_predecessors.remove(*(current->my_pred)); - if(my_predecessors.empty()) my_join->increment_port_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case res_item: - if ( reserved ) { - __TBB_store_with_release(current->status, FAILED); - } - else if ( my_predecessors.try_reserve( *(current->my_arg) ) ) { - reserved = true; - __TBB_store_with_release(current->status, SUCCEEDED); - } else { - if ( my_predecessors.empty() ) { - my_join->increment_port_count(); - } - __TBB_store_with_release(current->status, FAILED); - } - break; - case rel_res: - reserved = false; - my_predecessors.try_release( ); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case con_res: - reserved = false; - my_predecessors.try_consume( ); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: - my_predecessors.internal_add_built_predecessor(*(current->my_pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_pred: - my_predecessors.internal_delete_built_predecessor(*(current->my_pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cnt: - current->cnt_val = my_predecessors.predecessor_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_predecessors.copy_predecessors(*(current->pvec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - task *try_put_task( const T & ) { - return NULL; - } - - public: - - //! 
Constructor - reserving_port() : reserved(false) { - my_join = NULL; - my_predecessors.set_owner( this ); - my_aggregator.initialize_handler(my_handler(this)); - } - - // copy constructor - reserving_port(const reserving_port& /* other */) : receiver() { - reserved = false; - my_join = NULL; - my_predecessors.set_owner( this ); - my_aggregator.initialize_handler(my_handler(this)); - } - - void set_join_node_pointer(forwarding_base *join) { - my_join = join; - } - - //! Add a predecessor - bool register_predecessor( sender &src ) { - reserving_port_operation op_data(src, reg_pred); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - //! Remove a predecessor - bool remove_predecessor( sender &src ) { - reserving_port_operation op_data(src, rem_pred); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - //! Reserve an item from the port - bool reserve( T &v ) { - reserving_port_operation op_data(v, res_item); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - //! Release the port - void release( ) { - reserving_port_operation op_data(rel_res); - my_aggregator.execute(&op_data); - } - - //! Complete use of the port - void consume( ) { - reserving_port_operation op_data(con_res); - my_aggregator.execute(&op_data); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(predecessor_type &src) { - reserving_port_operation op_data(src, add_blt_pred); - my_aggregator.execute(&op_data); - } - - /*override*/void internal_delete_built_predecessor(predecessor_type &src) { - reserving_port_operation op_data(src, del_blt_pred); - my_aggregator.execute(&op_data); - } - - /*override*/size_t predecessor_count() { - reserving_port_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - reserving_port_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); - reserved = false; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - __TBB_ASSERT(!(f&rf_extract) || my_predecessors.empty(), "port edges not removed"); -#endif - } - - private: - forwarding_base *my_join; - reservable_predecessor_cache< T, null_mutex > my_predecessors; - bool reserved; - }; - - //! 
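reserving_port implements the pull side of a reserving join: nothing is forwarded until every port can reserve an item from a reservable predecessor such as buffer_node. A hedged usage sketch against the classic TBB 4.x public API (values and node layout are illustrative):

#include "tbb/flow_graph.h"
#include <cstdio>

int main() {
    using namespace tbb::flow;
    graph g;

    // Reserving joins need reservable predecessors; buffer_node supports reserve.
    buffer_node<int>   ints(g);
    buffer_node<float> floats(g);
    join_node< tuple<int, float>, reserving > j(g);

    function_node< tuple<int, float> > sink(g, serial,
        [](const tuple<int, float>& t) {
            std::printf("(%d, %f)\n", get<0>(t), get<1>(t));
            return continue_msg();
        });

    make_edge(ints,   input_port<0>(j));
    make_edge(floats, input_port<1>(j));
    make_edge(j, sink);

    ints.try_put(1);
    floats.try_put(2.5f);   // both ports can now reserve -> (1, 2.5) is forwarded
    g.wait_for_all();
}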
queueing join_port - template - class queueing_port : public receiver, public item_buffer { - public: - typedef T input_type; - typedef sender predecessor_type; - typedef queueing_port my_node_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - - // ----------- Aggregator ------------ - private: - enum op_type { get__item, res_port, try__put_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef queueing_port my_class; - - class queueing_port_operation : public aggregated_operation { - public: - char type; - T my_val; - T *my_arg; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - sender *pred; - size_t cnt_val; - predecessor_vector_type *pvec; -#endif - task * bypass_t; - // constructor for value parameter - queueing_port_operation(const T& e, op_type t) : - type(char(t)), my_val(e) - , bypass_t(NULL) - {} - // constructor for pointer parameter - queueing_port_operation(const T* p, op_type t) : - type(char(t)), my_arg(const_cast(p)) - , bypass_t(NULL) - {} - // constructor with no parameter - queueing_port_operation(op_type t) : type(char(t)) - , bypass_t(NULL) - {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - void handle_operations(queueing_port_operation* op_list) { - queueing_port_operation *current; - bool was_empty; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case try__put_task: { - task *rtask = NULL; - was_empty = this->buffer_empty(); - this->push_back(current->my_val); - if (was_empty) rtask = my_join->decrement_port_count(false); - else - rtask = SUCCESSFULLY_ENQUEUED; - current->bypass_t = rtask; - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - case get__item: - if(!this->buffer_empty()) { - this->copy_front(*(current->my_arg)); - __TBB_store_with_release(current->status, SUCCEEDED); - } - else { - __TBB_store_with_release(current->status, FAILED); - } - break; - case res_port: - __TBB_ASSERT(this->my_item_valid(this->my_head), "No item to reset"); - this->destroy_front(); - if(this->my_item_valid(this->my_head)) { - (void)my_join->decrement_port_count(true); - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: - my_built_predecessors.add_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_pred: - my_built_predecessors.delete_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cnt: - current->cnt_val = my_built_predecessors.edge_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_built_predecessors.copy_edges(*(current->pvec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - // ------------ End Aggregator --------------- - - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /*override*/task *try_put_task(const T &v) { - queueing_port_operation op_data(v, try__put_task); - my_aggregator.execute(&op_data); - __TBB_ASSERT(op_data.status == SUCCEEDED || !op_data.bypass_t, "inconsistent return from aggregator"); - if(!op_data.bypass_t) return 
SUCCESSFULLY_ENQUEUED; - return op_data.bypass_t; - } - - public: - - //! Constructor - queueing_port() : item_buffer() { - my_join = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - //! copy constructor - queueing_port(const queueing_port& /* other */) : receiver(), item_buffer() { - my_join = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - //! record parent for tallying available items - void set_join_node_pointer(forwarding_base *join) { - my_join = join; - } - - bool get_item( T &v ) { - queueing_port_operation op_data(&v, get__item); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - // reset_port is called when item is accepted by successor, but - // is initiated by join_node. - void reset_port() { - queueing_port_operation op_data(res_port); - my_aggregator.execute(&op_data); - return; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(sender &p) { - queueing_port_operation op_data(add_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/void internal_delete_built_predecessor(sender &p) { - queueing_port_operation op_data(del_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/size_t predecessor_count() { - queueing_port_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - queueing_port_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } - - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { - item_buffer::reset(); - if (f & rf_extract) - my_built_predecessors.receiver_extract(*this); - } -#else - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { item_buffer::reset(); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - private: - forwarding_base *my_join; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container > my_built_predecessors; -#endif - }; - -#include "_flow_graph_tagged_buffer_impl.h" - - template< typename T > - class tag_matching_port : public receiver, public tagged_buffer< tag_value, T, NO_TAG > { - public: - typedef T input_type; - typedef sender predecessor_type; - typedef tag_matching_port my_node_type; // for forwarding, if needed - typedef function_body my_tag_func_type; - typedef tagged_buffer my_buffer_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - private: -// ----------- Aggregator ------------ - private: - enum op_type { try__put, get__item, res_port, - add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef tag_matching_port my_class; - - class tag_matching_port_operation : public aggregated_operation { - public: - char type; - T my_val; - T *my_arg; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - predecessor_type *pred; - size_t cnt_val; - predecessor_vector_type *pvec; -#endif - tag_value my_tag_value; - // constructor for value parameter - tag_matching_port_operation(const T& e, op_type t) : - type(char(t)), my_val(e) {} - // constructor for pointer parameter - tag_matching_port_operation(const T* p, op_type t) : - type(char(t)), my_arg(const_cast(p)) {} - // constructor with no parameter - tag_matching_port_operation(op_type t) : type(char(t)) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - 
void handle_operations(tag_matching_port_operation* op_list) { - tag_matching_port_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case try__put: { - bool was_inserted = this->tagged_insert(current->my_tag_value, current->my_val); - // return failure if a duplicate insertion occurs - __TBB_store_with_release(current->status, was_inserted ? SUCCEEDED : FAILED); - } - break; - case get__item: - // use current_tag from FE for item - if(!this->tagged_find(my_join->current_tag, *(current->my_arg))) { - __TBB_ASSERT(false, "Failed to find item corresponding to current_tag."); - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case res_port: - // use current_tag from FE for item - this->tagged_delete(my_join->current_tag); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: - my_built_predecessors.add_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_pred: - my_built_predecessors.delete_edge(*(current->pred)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cnt: - current->cnt_val = my_built_predecessors.edge_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_built_predecessors.copy_edges(*(current->pvec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif - } - } - } -// ------------ End Aggregator --------------- - protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - /*override*/task *try_put_task(const T& v) { - tag_matching_port_operation op_data(v, try__put); - op_data.my_tag_value = (*my_tag_func)(v); - task *rtask = NULL; - my_aggregator.execute(&op_data); - if(op_data.status == SUCCEEDED) { - rtask = my_join->increment_tag_count(op_data.my_tag_value, false); // may spawn - // rtask has to reflect the return status of the try_put - if(!rtask) rtask = SUCCESSFULLY_ENQUEUED; - } - return rtask; - } - - public: - - tag_matching_port() : receiver(), tagged_buffer() { - my_join = NULL; - my_tag_func = NULL; - my_original_tag_func = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - // copy constructor - tag_matching_port(const tag_matching_port& /*other*/) : receiver(), tagged_buffer() { - my_join = NULL; - // setting the tag methods is done in the copy-constructor for the front-end. 
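As the comment notes, the tag functions are heap-allocated function_body objects owned by the port and duplicated via clone() when the front end is copied. A standalone sketch of that ownership pattern (tag_func, modulo_tag, and port are illustrative names):

#include <cstdio>
#include <cstddef>

struct tag_func {
    virtual ~tag_func() {}
    virtual std::size_t operator()(int v) const = 0;
    virtual tag_func* clone() const = 0;   // deep copy for the copy constructor
};

struct modulo_tag : tag_func {
    std::size_t m;
    explicit modulo_tag(std::size_t m_) : m(m_) {}
    std::size_t operator()(int v) const { return std::size_t(v) % m; }
    tag_func* clone() const { return new modulo_tag(m); }
};

struct port {
    tag_func* f;
    explicit port(tag_func* fn) : f(fn) {}
    port(const port& other) : f(other.f ? other.f->clone() : 0) {} // own a clone
    ~port() { delete f; }                                          // owner deletes
    std::size_t tag_of(int v) const { return (*f)(v); }
};

int main() {
    port a(new modulo_tag(4));
    port b(a);                                 // b owns its own clone of the functor
    std::printf("%zu %zu\n", a.tag_of(10), b.tag_of(10));
}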
- my_tag_func = NULL; - my_original_tag_func = NULL; - my_aggregator.initialize_handler(my_handler(this)); - } - - ~tag_matching_port() { - if (my_tag_func) delete my_tag_func; - if (my_original_tag_func) delete my_original_tag_func; - } - - void set_join_node_pointer(forwarding_base *join) { - my_join = join; - } - - void set_my_original_tag_func(my_tag_func_type *f) { - my_original_tag_func = f; - } - - void set_my_tag_func(my_tag_func_type *f) { - my_tag_func = f; - } - - bool get_item( T &v ) { - tag_matching_port_operation op_data(&v, get__item); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(sender &p) { - tag_matching_port_operation op_data(add_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/void internal_delete_built_predecessor(sender &p) { - tag_matching_port_operation op_data(del_blt_pred); - op_data.pred = &p; - my_aggregator.execute(&op_data); - } - - /*override*/size_t predecessor_count() { - tag_matching_port_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/void copy_predecessors(predecessor_vector_type &v) { - tag_matching_port_operation op_data(blt_pred_cpy); - op_data.pvec = &v; - my_aggregator.execute(&op_data); - } -#endif - - // reset_port is called when item is accepted by successor, but - // is initiated by join_node. - void reset_port() { - tag_matching_port_operation op_data(res_port); - my_aggregator.execute(&op_data); - return; - } - - my_tag_func_type *my_func() { return my_tag_func; } - my_tag_func_type *my_original_func() { return my_original_tag_func; } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { - my_buffer_type::reset(); - if (f & rf_extract) - my_built_predecessors.receiver_extract(*this); - } -#else - /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { my_buffer_type::reset(); } -#endif - - private: - // need map of tags to values - forwarding_base *my_join; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_predecessors; -#endif - my_tag_func_type *my_tag_func; - my_tag_func_type *my_original_tag_func; - }; // tag_matching_port - - using namespace graph_policy_namespace; - - template - class join_node_base; - - //! 
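tag_matching_port feeds the public tag_matching join policy: each port computes a tag for every incoming item, and items carrying equal tags across all ports are combined into one output tuple. A hedged usage sketch against the classic TBB 4.x public API (the tag functions and values are illustrative):

#include "tbb/flow_graph.h"
#include <cstdio>
#include <cstddef>

int main() {
    using namespace tbb::flow;
    graph g;

    // One tag function per port; matching tags complete a tuple.
    join_node< tuple<int, float>, tag_matching > j(g,
        [](int v)   -> std::size_t { return static_cast<std::size_t>(v); },
        [](float v) -> std::size_t { return static_cast<std::size_t>(int(v)); });

    function_node< tuple<int, float> > sink(g, serial,
        [](const tuple<int, float>& t) {
            std::printf("matched (%d, %f)\n", get<0>(t), get<1>(t));
            return continue_msg();
        });
    make_edge(j, sink);

    input_port<0>(j).try_put(3);      // tag 3 waits in the port's tagged buffer
    input_port<1>(j).try_put(7.0f);   // tag 7: no match yet
    input_port<1>(j).try_put(3.0f);   // tag 3 completes -> (3, 3.0) is emitted
    g.wait_for_all();
}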
join_node_FE : implements input port policy - template - class join_node_FE; - - template - class join_node_FE : public forwarding_base { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputTuple output_type; - typedef InputTuple input_type; - typedef join_node_base my_node_type; // for forwarding - - join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) { - ports_with_no_inputs = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) { - ports_with_no_inputs = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; } - - void increment_port_count() { - ++ports_with_no_inputs; - } - - // if all input_ports have predecessors, spawn forward to try and consume tuples - task * decrement_port_count(bool handle_task) { - if(ports_with_no_inputs.fetch_and_decrement() == 1) { - task* tp = this->my_graph_ptr->root_task(); - if(tp) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - forward_task_bypass(*my_node); - if(!handle_task) return rtask; - FLOW_SPAWN(*rtask); - } - } - return NULL; - } - - input_type &input_ports() { return my_inputs; } - - protected: - - void reset( __TBB_PFG_RESET_ARG( reset_flags f)) { - // called outside of parallel contexts - ports_with_no_inputs = N; - join_helper::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f)); - } - - // all methods on input ports should be called under mutual exclusion from join_node_base. - - bool tuple_build_may_succeed() { - return !ports_with_no_inputs; - } - - bool try_to_make_tuple(output_type &out) { - if(ports_with_no_inputs) return false; - return join_helper::reserve(my_inputs, out); - } - - void tuple_accepted() { - join_helper::consume_reservations(my_inputs); - } - void tuple_rejected() { - join_helper::release_reservations(my_inputs); - } - - input_type my_inputs; - my_node_type *my_node; - atomic ports_with_no_inputs; - }; - - template - class join_node_FE : public forwarding_base { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputTuple output_type; - typedef InputTuple input_type; - typedef join_node_base my_node_type; // for forwarding - - join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) { - ports_with_no_items = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) { - ports_with_no_items = N; - join_helper::set_join_node_pointer(my_inputs, this); - } - - // needed for forwarding - void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; } - - void reset_port_count() { - ports_with_no_items = N; - } - - // if all input_ports have items, spawn forward to try and consume tuples - task * decrement_port_count(bool handle_task) - { - if(ports_with_no_items.fetch_and_decrement() == 1) { - task* tp = this->my_graph_ptr->root_task(); - if(tp) { - task *rtask = new ( task::allocate_additional_child_of( *tp ) ) - forward_task_bypass (*my_node); - if(!handle_task) return rtask; - FLOW_SPAWN( *rtask); - } - } - return NULL; - } - - void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called - - input_type &input_ports() { return my_inputs; } - - protected: - - void reset( __TBB_PFG_RESET_ARG( reset_flags f)) { - reset_port_count(); - join_helper::reset_inputs(my_inputs 
__TBB_PFG_RESET_ARG( __TBB_COMMA f) ); - } - - // all methods on input ports should be called under mutual exclusion from join_node_base. - - bool tuple_build_may_succeed() { - return !ports_with_no_items; - } - - bool try_to_make_tuple(output_type &out) { - if(ports_with_no_items) return false; - return join_helper::get_items(my_inputs, out); - } - - void tuple_accepted() { - reset_port_count(); - join_helper::reset_ports(my_inputs); - } - void tuple_rejected() { - // nothing to do. - } - - input_type my_inputs; - my_node_type *my_node; - atomic ports_with_no_items; - }; - - // tag_matching join input port. - template - class join_node_FE : public forwarding_base, - // buffer of tag value counts buffer of output items - public tagged_buffer, public item_buffer { - public: - static const int N = tbb::flow::tuple_size::value; - typedef OutputTuple output_type; - typedef InputTuple input_type; - typedef tagged_buffer my_tag_buffer; - typedef item_buffer output_buffer_type; - typedef join_node_base my_node_type; // for forwarding - -// ----------- Aggregator ------------ - // the aggregator is only needed to serialize the access to the hash table. - // and the output_buffer_type base class - private: - enum op_type { res_count, inc_count, may_succeed, try_make }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef join_node_FE my_class; - - class tag_matching_FE_operation : public aggregated_operation { - public: - char type; - union { - tag_value my_val; - output_type* my_output; - }; - task *bypass_t; - bool enqueue_task; - // constructor for value parameter - tag_matching_FE_operation(const tag_value& e , bool q_task , op_type t) : type(char(t)), my_val(e), - bypass_t(NULL), enqueue_task(q_task) {} - tag_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(NULL), - enqueue_task(true) {} - // constructor with no parameter - tag_matching_FE_operation(op_type t) : type(char(t)), bypass_t(NULL), enqueue_task(true) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator my_aggregator; - - // called from aggregator, so serialized - // construct as many output objects as possible. - // returns a task pointer if the a task would have been enqueued but we asked that - // it be returned. Otherwise returns NULL. 
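This is the scheduler-bypass convention used throughout these internals: instead of spawning, a routine may hand back a task* for the caller to run directly, saving a round trip through the scheduler's queues. A simplified, TBB-free sketch of the idea:

#include <cstdio>

struct task {
    virtual ~task() {}
    virtual task* execute() = 0;   // non-null return means "run this one next"
};

struct print_task : task {
    const char* msg;
    task* next;
    print_task(const char* m, task* n) : msg(m), next(n) {}
    task* execute() { std::printf("%s\n", msg); return next; }  // bypass into next
};

// The worker keeps executing returned tasks until the chain yields null.
void run(task* t) {
    while (t) {
        task* next = t->execute();  // bypassed task skips the queue entirely
        delete t;
        t = next;
    }
}

int main() {
    run(new print_task("first", new print_task("bypassed second", 0)));
}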
- task * fill_output_buffer(tag_value t, bool should_enqueue, bool handle_task) { - output_type l_out; - task *rtask = NULL; - task* tp = this->my_graph_ptr->root_task(); - bool do_fwd = should_enqueue && this->buffer_empty() && tp; - this->current_tag = t; - this->tagged_delete(this->current_tag); // remove the tag - if(join_helper::get_items(my_inputs, l_out)) { // <== call back - this->push_back(l_out); - if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item - rtask = new ( task::allocate_additional_child_of( *tp ) ) - forward_task_bypass(*my_node); - if(handle_task) { - FLOW_SPAWN(*rtask); - rtask = NULL; - } - do_fwd = false; - } - // retire the input values - join_helper::reset_ports(my_inputs); // <== call back - this->current_tag = NO_TAG; - } - else { - __TBB_ASSERT(false, "should have had something to push"); - } - return rtask; - } - - void handle_operations(tag_matching_FE_operation* op_list) { - tag_matching_FE_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case res_count: // called from BE - { - this->destroy_front(); - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - case inc_count: { // called from input ports - size_t *p = 0; - tag_value t = current->my_val; - bool do_enqueue = current->enqueue_task; - if(!(this->tagged_find_ref(t,p))) { - this->tagged_insert(t, 0); - if(!(this->tagged_find_ref(t,p))) { - __TBB_ASSERT(false, "should find tag after inserting it"); - } - } - if(++(*p) == size_t(N)) { - task *rtask = fill_output_buffer(t, true, do_enqueue); - __TBB_ASSERT(!rtask || !do_enqueue, "task should not be returned"); - current->bypass_t = rtask; - } - } - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case may_succeed: // called from BE - __TBB_store_with_release(current->status, this->buffer_empty() ? FAILED : SUCCEEDED); - break; - case try_make: // called from BE - if(this->buffer_empty()) { - __TBB_store_with_release(current->status, FAILED); - } - else { - this->copy_front(*(current->my_output)); - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - } - } - } -// ------------ End Aggregator --------------- - - public: - template - join_node_FE(graph &g, FunctionTuple tag_funcs) : forwarding_base(g), my_node(NULL) { - join_helper::set_join_node_pointer(my_inputs, this); - join_helper::set_tag_func(my_inputs, tag_funcs); - my_aggregator.initialize_handler(my_handler(this)); - } - - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_tag_buffer(), - output_buffer_type() { - my_node = NULL; - join_helper::set_join_node_pointer(my_inputs, this); - join_helper::copy_tag_functors(my_inputs, const_cast(other.my_inputs)); - my_aggregator.initialize_handler(my_handler(this)); - } - - // needed for forwarding - void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; } - - void reset_port_count() { // called from BE - tag_matching_FE_operation op_data(res_count); - my_aggregator.execute(&op_data); - return; - } - - // if all input_ports have items, spawn forward to try and consume tuples - // return a task if we are asked and did create one. 
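increment_tag_count keeps one counter per tag in the tagged_buffer and triggers fill_output_buffer once all N ports have seen that tag. A simplified sketch of the bookkeeping, with a std::map standing in for the hash buffer:

#include <map>
#include <cstdio>
#include <cstddef>

int main() {
    const std::size_t N = 2;                 // number of join ports
    std::map<std::size_t, std::size_t> counts;

    std::size_t arrivals[] = { 3, 7, 3 };    // tags as items reach the ports
    for (std::size_t tag : arrivals) {
        if (++counts[tag] == N) {            // Nth arrival completes this tag
            std::printf("tag %zu complete -> fill output buffer\n", tag);
            counts.erase(tag);               // mirrors tagged_delete(current_tag)
        }
    }
}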
- task *increment_tag_count(tag_value t, bool handle_task) { // called from input_ports - tag_matching_FE_operation op_data(t, handle_task, inc_count); - my_aggregator.execute(&op_data); - return op_data.bypass_t; - } - - /*override*/ task *decrement_port_count(bool /*handle_task*/) { __TBB_ASSERT(false, NULL); return NULL; } - - void increment_port_count() { __TBB_ASSERT(false, NULL); } // should never be called - - input_type &input_ports() { return my_inputs; } - - protected: - - void reset( __TBB_PFG_RESET_ARG( reset_flags f )) { - // called outside of parallel contexts - join_helper::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f)); - - my_tag_buffer::reset(); // have to reset the tag counts - output_buffer_type::reset(); // also the queue of outputs - my_node->current_tag = NO_TAG; - } - - // all methods on input ports should be called under mutual exclusion from join_node_base. - - bool tuple_build_may_succeed() { // called from back-end - tag_matching_FE_operation op_data(may_succeed); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - // cannot lock while calling back to input_ports. current_tag will only be set - // and reset under the aggregator, so it will remain consistent. - bool try_to_make_tuple(output_type &out) { - tag_matching_FE_operation op_data(&out,try_make); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - void tuple_accepted() { - reset_port_count(); // reset current_tag after ports reset. - } - - void tuple_rejected() { - // nothing to do. - } - - input_type my_inputs; // input ports - my_node_type *my_node; - }; // join_node_FE - - //! join_node_base - template - class join_node_base : public graph_node, public join_node_FE, - public sender { - protected: - using graph_node::my_graph; - public: - typedef OutputTuple output_type; - - typedef receiver successor_type; - typedef join_node_FE input_ports_type; - using input_ports_type::tuple_build_may_succeed; - using input_ports_type::try_to_make_tuple; - using input_ports_type::tuple_accepted; - using input_ports_type::tuple_rejected; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; -#endif - - private: - // ----------- Aggregator ------------ - enum op_type { reg_succ, rem_succ, try__get, do_fwrd, do_fwrd_bypass -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy -#endif - }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - typedef join_node_base my_class; - - class join_node_base_operation : public aggregated_operation { - public: - char type; - union { - output_type *my_arg; - successor_type *my_succ; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - successor_vector_type *svec; -#endif - }; - task *bypass_t; - join_node_base_operation(const output_type& e, op_type t) : type(char(t)), - my_arg(const_cast(&e)), bypass_t(NULL) {} - join_node_base_operation(const successor_type &s, op_type t) : type(char(t)), - my_succ(const_cast(&s)), bypass_t(NULL) {} - join_node_base_operation(op_type t) : type(char(t)), bypass_t(NULL) {} - }; - - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - bool forwarder_busy; - aggregator my_aggregator; - - void handle_operations(join_node_base_operation* op_list) { - join_node_base_operation *current; - while(op_list) { - current = op_list; - op_list = op_list->next; - switch(current->type) { - case reg_succ: { - my_successors.register_successor(*(current->my_succ)); - task* tp = 
this->graph_node::my_graph.root_task(); - if(tuple_build_may_succeed() && !forwarder_busy && tp) { - task *rtask = new ( task::allocate_additional_child_of(*tp) ) - forward_task_bypass - >(*this); - FLOW_SPAWN(*rtask); - forwarder_busy = true; - } - __TBB_store_with_release(current->status, SUCCEEDED); - } - break; - case rem_succ: - my_successors.remove_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case try__get: - if(tuple_build_may_succeed()) { - if(try_to_make_tuple(*(current->my_arg))) { - tuple_accepted(); - __TBB_store_with_release(current->status, SUCCEEDED); - } - else __TBB_store_with_release(current->status, FAILED); - } - else __TBB_store_with_release(current->status, FAILED); - break; - case do_fwrd_bypass: { - bool build_succeeded; - task *last_task = NULL; - output_type out; - if(tuple_build_may_succeed()) { - do { - build_succeeded = try_to_make_tuple(out); - if(build_succeeded) { - task *new_task = my_successors.try_put_task(out); - last_task = combine_tasks(last_task, new_task); - if(new_task) { - tuple_accepted(); - } - else { - tuple_rejected(); - build_succeeded = false; - } - } - } while(build_succeeded); - } - current->bypass_t = last_task; - __TBB_store_with_release(current->status, SUCCEEDED); - forwarder_busy = false; - } - break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_succ: - my_successors.internal_add_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case del_blt_succ: - my_successors.internal_delete_built_successor(*(current->my_succ)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cnt: - current->cnt_val = my_successors.successor_count(); - __TBB_store_with_release(current->status, SUCCEEDED); - break; - case blt_succ_cpy: - my_successors.copy_successors(*(current->svec)); - __TBB_store_with_release(current->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - // ---------- end aggregator ----------- - public: - join_node_base(graph &g) : graph_node(g), input_ports_type(g), forwarder_busy(false) { - my_successors.set_owner(this); - input_ports_type::set_my_node(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - join_node_base(const join_node_base& other) : - graph_node(other.graph_node::my_graph), input_ports_type(other), - sender(), forwarder_busy(false), my_successors() { - my_successors.set_owner(this); - input_ports_type::set_my_node(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - template - join_node_base(graph &g, FunctionTuple f) : graph_node(g), input_ports_type(g, f), forwarder_busy(false) { - my_successors.set_owner(this); - input_ports_type::set_my_node(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - bool register_successor(successor_type &r) { - join_node_base_operation op_data(r, reg_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - bool remove_successor( successor_type &r) { - join_node_base_operation op_data(r, rem_succ); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - - bool try_get( output_type &v) { - join_node_base_operation op_data(v, try__get); - my_aggregator.execute(&op_data); - return op_data.status == SUCCEEDED; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor( successor_type &r) { - join_node_base_operation op_data(r, add_blt_succ); - my_aggregator.execute(&op_data); - } - - 
/*override*/void internal_delete_built_successor( successor_type &r) { - join_node_base_operation op_data(r, del_blt_succ); - my_aggregator.execute(&op_data); - } - - /*override*/size_t successor_count() { - join_node_base_operation op_data(blt_succ_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/ void copy_successors(successor_vector_type &v) { - join_node_base_operation op_data(blt_succ_cpy); - op_data.svec = &v; - my_aggregator.execute(&op_data); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - protected: - - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { - input_ports_type::reset(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_successors.reset(f); -#endif - } - - private: - broadcast_cache my_successors; - - friend class forward_task_bypass< join_node_base >; - task *forward_task() { - join_node_base_operation op_data(do_fwrd_bypass); - my_aggregator.execute(&op_data); - return op_data.bypass_t; - } - - }; - - // join base class type generator - template class PT, typename OutputTuple, graph_buffer_policy JP> - struct join_base { - typedef typename internal::join_node_base::type, OutputTuple> type; - }; - - //! unfolded_join_node : passes input_ports_type to join_node_base. We build the input port type - // using tuple_element. The class PT is the port type (reserving_port, queueing_port, tag_matching_port) - // and should match the graph_buffer_policy. - - template class PT, typename OutputTuple, graph_buffer_policy JP> - class unfolded_join_node : public join_base::type { - public: - typedef typename wrap_tuple_elements::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - public: - unfolded_join_node(graph &g) : base_type(g) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - // tag_matching unfolded_join_node. This must be a separate specialization because the constructors - // differ. 
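The specializations that follow enumerate every arity from 2 up to __TBB_VARIADIC_MAX by hand because this generation of TBB could not assume variadic templates. As a point of comparison only, here is a hedged C++14 sketch of how the repeated func_initializer_type construction collapses into one pack expansion; tag_value, tag_extractor, and make_tag_funcs are illustrative stand-ins, not TBB names:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <tuple>
    #include <utility>

    using tag_value = std::uint64_t;

    // Stand-in for the internal function_body interface.
    template <typename T>
    struct tag_extractor {
        virtual tag_value operator()(const T&) const = 0;
        virtual ~tag_extractor() = default;
    };

    // Stand-in for function_body_leaf: wraps one user-supplied body.
    template <typename T, typename Body>
    struct tag_extractor_leaf : tag_extractor<T> {
        Body body;
        explicit tag_extractor_leaf(Body b) : body(std::move(b)) {}
        tag_value operator()(const T& t) const override { return body(t); }
    };

    // One definition covers every arity: the pack of user bodies is
    // expanded against the pack of tuple element indices.
    template <typename OutputTuple, typename... Bodies, std::size_t... I>
    auto make_tag_funcs(std::index_sequence<I...>, Bodies... bodies) {
        return std::make_tuple(
            std::shared_ptr<tag_extractor<std::tuple_element_t<I, OutputTuple>>>(
                new tag_extractor_leaf<std::tuple_element_t<I, OutputTuple>, Bodies>(bodies))...);
    }

A constructor would call make_tag_funcs<OutputTuple>(std::make_index_sequence<sizeof...(Bodies)>{}, b0, b1, ...) once, instead of providing one hand-written body per arity as below.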
- - template - class unfolded_join_node<2,tag_matching_port,OutputTuple,tag_matching> : public - join_base<2,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - public: - typedef typename wrap_tuple_elements<2,tag_matching_port,OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename tbb::flow::tuple< f0_p, f1_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - template - class unfolded_join_node<3,tag_matching_port,OutputTuple,tag_matching> : public - join_base<3,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - public: - typedef typename wrap_tuple_elements<3, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - template - class unfolded_join_node<4,tag_matching_port,OutputTuple,tag_matching> : public - join_base<4,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - public: - typedef typename wrap_tuple_elements<4, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - - template - class unfolded_join_node<5,tag_matching_port,OutputTuple,tag_matching> : public - join_base<5,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, 
OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - public: - typedef typename wrap_tuple_elements<5, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; - -#if __TBB_VARIADIC_MAX >= 6 - template - class unfolded_join_node<6,tag_matching_port,OutputTuple,tag_matching> : public - join_base<6,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - public: - typedef typename wrap_tuple_elements<6, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 7 - template - class unfolded_join_node<7,tag_matching_port,OutputTuple,tag_matching> : public - join_base<7,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename 
tbb::flow::tuple_element<6, OutputTuple>::type T6; - public: - typedef typename wrap_tuple_elements<7, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 8 - template - class unfolded_join_node<8,tag_matching_port,OutputTuple,tag_matching> : public - join_base<8,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; - typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; - public: - typedef typename wrap_tuple_elements<8, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename internal::function_body *f7_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6), - new internal::function_body_leaf(b7) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 9 - template - class unfolded_join_node<9,tag_matching_port,OutputTuple,tag_matching> : public - join_base<9,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type 
T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; - typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; - typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8; - public: - typedef typename wrap_tuple_elements<9, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename internal::function_body *f7_p; - typedef typename internal::function_body *f8_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p > func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6), - new internal::function_body_leaf(b7), - new internal::function_body_leaf(b8) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - -#if __TBB_VARIADIC_MAX >= 10 - template - class unfolded_join_node<10,tag_matching_port,OutputTuple,tag_matching> : public - join_base<10,tag_matching_port,OutputTuple,tag_matching>::type { - typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; - typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; - typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; - typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; - typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; - typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; - typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; - typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; - typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8; - typedef typename tbb::flow::tuple_element<9, OutputTuple>::type T9; - public: - typedef typename wrap_tuple_elements<10, tag_matching_port, OutputTuple>::type input_ports_type; - typedef OutputTuple output_type; - private: - typedef join_node_base base_type; - typedef typename internal::function_body *f0_p; - typedef typename internal::function_body *f1_p; - typedef typename internal::function_body *f2_p; - typedef typename internal::function_body *f3_p; - typedef typename internal::function_body *f4_p; - typedef typename internal::function_body *f5_p; - typedef typename internal::function_body *f6_p; - typedef typename internal::function_body *f7_p; - typedef typename internal::function_body *f8_p; - typedef typename internal::function_body *f9_p; - typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p, f9_p > 
func_initializer_type; - public: - template - unfolded_join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8, B9 b9) : base_type(g, - func_initializer_type( - new internal::function_body_leaf(b0), - new internal::function_body_leaf(b1), - new internal::function_body_leaf(b2), - new internal::function_body_leaf(b3), - new internal::function_body_leaf(b4), - new internal::function_body_leaf(b5), - new internal::function_body_leaf(b6), - new internal::function_body_leaf(b7), - new internal::function_body_leaf(b8), - new internal::function_body_leaf(b9) - ) ) {} - unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} - }; -#endif - - //! templated function to refer to input ports of the join node - template - typename tbb::flow::tuple_element::type &input_port(JNT &jn) { - return tbb::flow::get(jn.input_ports()); - } - -} -#endif // __TBB__flow_graph_join_impl_H - diff --git a/src/tbb/include/tbb/internal/_flow_graph_node_impl.h b/src/tbb/include/tbb/internal/_flow_graph_node_impl.h deleted file mode 100644 index 837d83449..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_node_impl.h +++ /dev/null @@ -1,742 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_node_impl_H -#define __TBB__flow_graph_node_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#include "_flow_graph_item_buffer_impl.h" - -//! @cond INTERNAL -namespace internal { - - using tbb::internal::aggregated_operation; - using tbb::internal::aggregating_functor; - using tbb::internal::aggregator; - - template< typename T, typename A > - class function_input_queue : public item_buffer { - public: - bool pop( T& t ) { - return this->pop_front( t ); - } - - bool push( T& t ) { - return this->push_back( t ); - } - }; - - //! Input and scheduling for a function node that takes a type Input as input - // The only up-ref is apply_body_impl, which should implement the function - // call and any handling of the result. 
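function_input_base, defined next, is the scheduling core shared by the function-style nodes: my_max_concurrency caps in-flight body executions (zero meaning unlimited), the optional function_input_queue buffers overflow for queueing nodes, and a rejected put makes the sender register itself as a predecessor to be pulled from later. A hedged sketch of just that admission decision, assuming a plain mutex and std::function where the real code uses the aggregator and spawned tasks:

    #include <cstddef>
    #include <deque>
    #include <functional>
    #include <mutex>

    template <typename Input>
    class admission_control {
        std::mutex m_;
        const std::size_t max_concurrency_;   // 0 means unlimited, as in TBB
        std::size_t concurrency_ = 0;
        std::deque<Input>* queue_;            // non-null only for queueing nodes
    public:
        admission_control(std::size_t max_c, std::deque<Input>* q)
            : max_concurrency_(max_c), queue_(q) {}

        // Mirrors internal_try_put_task: true means the item was accepted
        // (executed or buffered); false tells the sender to hold the item
        // and retry when capacity frees up.
        bool try_put(const Input& item, const std::function<void(const Input&)>& body) {
            {
                std::lock_guard<std::mutex> lock(m_);
                if (max_concurrency_ != 0 && concurrency_ >= max_concurrency_) {
                    if (queue_) { queue_->push_back(item); return true; }
                    return false;
                }
                ++concurrency_;
            }
            body(item);                       // TBB spawns an apply_body task instead
            std::lock_guard<std::mutex> lock(m_);
            --concurrency_;                   // TBB also pulls queued/predecessor work here
            return true;
        }
    };

The real implementation routes all of this through the same aggregator idiom sketched earlier, so the counter and queue are never touched concurrently.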
- template< typename Input, typename A, typename ImplType > - class function_input_base : public receiver, tbb::internal::no_assign { - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; - enum op_type {reg_pred, rem_pred, app_body, try_fwd, tryput_bypass, app_body_bypass -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - , add_blt_pred, del_blt_pred, - blt_pred_cnt, blt_pred_cpy // create vector copies of preds and succs -#endif - }; - typedef function_input_base my_class; - - public: - - //! The input type of this receiver - typedef Input input_type; - typedef sender predecessor_type; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector predecessor_vector_type; -#endif - - //! Constructor for function_input_base - function_input_base( graph &g, size_t max_concurrency, function_input_queue *q = NULL ) - : my_graph(g), my_max_concurrency(max_concurrency), my_concurrency(0), - my_queue(q), forwarder_busy(false) { - my_predecessors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - //! Copy constructor - function_input_base( const function_input_base& src, function_input_queue *q = NULL ) : - receiver(), tbb::internal::no_assign(), - my_graph(src.my_graph), my_max_concurrency(src.my_max_concurrency), - my_concurrency(0), my_queue(q), forwarder_busy(false) - { - my_predecessors.set_owner(this); - my_aggregator.initialize_handler(my_handler(this)); - } - - //! Destructor - virtual ~function_input_base() { - if ( my_queue ) delete my_queue; - } - - //! Put to the node, returning a task if available - virtual task * try_put_task( const input_type &t ) { - if ( my_max_concurrency == 0 ) { - return create_body_task( t ); - } else { - my_operation op_data(t, tryput_bypass); - my_aggregator.execute(&op_data); - if(op_data.status == SUCCEEDED ) { - return op_data.bypass_t; - } - return NULL; - } - } - - //! Adds src to the list of cached predecessors. - /* override */ bool register_predecessor( predecessor_type &src ) { - my_operation op_data(reg_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - return true; - } - - //! Removes src from the list of cached predecessors. - /* override */ bool remove_predecessor( predecessor_type &src ) { - my_operation op_data(rem_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - //! Adds to list of predecessors added by make_edge - /*override*/ void internal_add_built_predecessor( predecessor_type &src) { - my_operation op_data(add_blt_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - } - - //! 
removes from to list of predecessors (used by remove_edge) - /*override*/ void internal_delete_built_predecessor( predecessor_type &src) { - my_operation op_data(del_blt_pred); - op_data.r = &src; - my_aggregator.execute(&op_data); - } - - /*override*/ size_t predecessor_count() { - my_operation op_data(blt_pred_cnt); - my_aggregator.execute(&op_data); - return op_data.cnt_val; - } - - /*override*/ void copy_predecessors(predecessor_vector_type &v) { - my_operation op_data(blt_pred_cpy); - op_data.predv = &v; - my_aggregator.execute(&op_data); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - protected: - - void reset_function_input_base( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_concurrency = 0; - if(my_queue) { - my_queue->reset(); - } - reset_receiver(__TBB_PFG_RESET_ARG(f)); - forwarder_busy = false; - } - - graph& my_graph; - const size_t my_max_concurrency; - size_t my_concurrency; - function_input_queue *my_queue; - predecessor_cache my_predecessors; - - /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) { - my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - __TBB_ASSERT(!(f & rf_extract) || my_predecessors.empty(), "function_input_base reset failed"); -#endif - } - - private: - - friend class apply_body_task_bypass< my_class, input_type >; - friend class forward_task_bypass< my_class >; - - class my_operation : public aggregated_operation< my_operation > { - public: - char type; - union { - input_type *elem; - predecessor_type *r; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - size_t cnt_val; - predecessor_vector_type *predv; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - }; - tbb::task *bypass_t; - my_operation(const input_type& e, op_type t) : - type(char(t)), elem(const_cast(&e)) {} - my_operation(op_type t) : type(char(t)), r(NULL) {} - }; - - bool forwarder_busy; - typedef internal::aggregating_functor my_handler; - friend class internal::aggregating_functor; - aggregator< my_handler, my_operation > my_aggregator; - - void handle_operations(my_operation *op_list) { - my_operation *tmp; - while (op_list) { - tmp = op_list; - op_list = op_list->next; - switch (tmp->type) { - case reg_pred: - my_predecessors.add(*(tmp->r)); - __TBB_store_with_release(tmp->status, SUCCEEDED); - if (!forwarder_busy) { - forwarder_busy = true; - spawn_forward_task(); - } - break; - case rem_pred: - my_predecessors.remove(*(tmp->r)); - __TBB_store_with_release(tmp->status, SUCCEEDED); - break; - case app_body: - __TBB_ASSERT(my_max_concurrency != 0, NULL); - --my_concurrency; - __TBB_store_with_release(tmp->status, SUCCEEDED); - if (my_concurrencypop(i); - else - item_was_retrieved = my_predecessors.get_item(i); - if (item_was_retrieved) { - ++my_concurrency; - spawn_body_task(i); - } - } - break; - case app_body_bypass: { - task * new_task = NULL; - __TBB_ASSERT(my_max_concurrency != 0, NULL); - --my_concurrency; - if (my_concurrencypop(i); - else - item_was_retrieved = my_predecessors.get_item(i); - if (item_was_retrieved) { - ++my_concurrency; - new_task = create_body_task(i); - } - } - tmp->bypass_t = new_task; - __TBB_store_with_release(tmp->status, SUCCEEDED); - } - break; - case tryput_bypass: internal_try_put_task(tmp); break; - case try_fwd: internal_forward(tmp); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - case add_blt_pred: { - my_predecessors.internal_add_built_predecessor(*(tmp->r)); - __TBB_store_with_release(tmp->status, SUCCEEDED); - } - break; - case del_blt_pred: - my_predecessors.internal_delete_built_predecessor(*(tmp->r)); - 
__TBB_store_with_release(tmp->status, SUCCEEDED); - break; - case blt_pred_cnt: - tmp->cnt_val = my_predecessors.predecessor_count(); - __TBB_store_with_release(tmp->status, SUCCEEDED); - break; - case blt_pred_cpy: - my_predecessors.copy_predecessors( *(tmp->predv) ); - __TBB_store_with_release(tmp->status, SUCCEEDED); - break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - } - } - } - - //! Put to the node, but return the task instead of enqueueing it - void internal_try_put_task(my_operation *op) { - __TBB_ASSERT(my_max_concurrency != 0, NULL); - if (my_concurrency < my_max_concurrency) { - ++my_concurrency; - task * new_task = create_body_task(*(op->elem)); - op->bypass_t = new_task; - __TBB_store_with_release(op->status, SUCCEEDED); - } else if ( my_queue && my_queue->push(*(op->elem)) ) { - op->bypass_t = SUCCESSFULLY_ENQUEUED; - __TBB_store_with_release(op->status, SUCCEEDED); - } else { - op->bypass_t = NULL; - __TBB_store_with_release(op->status, FAILED); - } - } - - //! Tries to spawn bodies if available and if concurrency allows - void internal_forward(my_operation *op) { - op->bypass_t = NULL; - if (my_concurrencypop(i); - else - item_was_retrieved = my_predecessors.get_item(i); - if (item_was_retrieved) { - ++my_concurrency; - op->bypass_t = create_body_task(i); - __TBB_store_with_release(op->status, SUCCEEDED); - return; - } - } - __TBB_store_with_release(op->status, FAILED); - forwarder_busy = false; - } - - //! Applies the body to the provided input - // then decides if more work is available - void apply_body( input_type &i ) { - task *new_task = apply_body_bypass(i); - if(!new_task) return; - if(new_task == SUCCESSFULLY_ENQUEUED) return; - FLOW_SPAWN(*new_task); - return; - } - - //! Applies the body to the provided input - // then decides if more work is available - task * apply_body_bypass( input_type &i ) { - task * new_task = static_cast(this)->apply_body_impl_bypass(i); - if ( my_max_concurrency != 0 ) { - my_operation op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body - my_aggregator.execute(&op_data); - tbb::task *ttask = op_data.bypass_t; - new_task = combine_tasks(new_task, ttask); - } - return new_task; - } - - //! allocates a task to call apply_body( input ) - inline task * create_body_task( const input_type &input ) { - - task* tp = my_graph.root_task(); - return (tp) ? - new(task::allocate_additional_child_of(*tp)) - apply_body_task_bypass < my_class, input_type >(*this, input) : - NULL; - } - - //! Spawns a task that calls apply_body( input ) - inline void spawn_body_task( const input_type &input ) { - task* tp = create_body_task(input); - // tp == NULL => g.reset(), which shouldn't occur in concurrent context - if(tp) { - FLOW_SPAWN(*tp); - } - } - - //! This is executed by an enqueued task, the "forwarder" - task *forward_task() { - my_operation op_data(try_fwd); - task *rval = NULL; - do { - op_data.status = WAIT; - my_aggregator.execute(&op_data); - if(op_data.status == SUCCEEDED) { - tbb::task *ttask = op_data.bypass_t; - rval = combine_tasks(rval, ttask); - } - } while (op_data.status == SUCCEEDED); - return rval; - } - - inline task *create_forward_task() { - task* tp = my_graph.root_task(); - return (tp) ? - new(task::allocate_additional_child_of(*tp)) forward_task_bypass< my_class >(*this) : - NULL; - } - - //! Spawns a task that calls forward() - inline void spawn_forward_task() { - task* tp = create_forward_task(); - if(tp) { - FLOW_SPAWN(*tp); - } - } - }; // function_input_base - - //! 
Implements methods for a function node that takes a type Input as input and sends - // a type Output to its successors. - template< typename Input, typename Output, typename A> - class function_input : public function_input_base > { - public: - typedef Input input_type; - typedef Output output_type; - typedef function_input my_class; - typedef function_input_base base_type; - typedef function_input_queue input_queue_type; - - - // constructor - template - function_input( graph &g, size_t max_concurrency, Body& body, function_input_queue *q = NULL ) : - base_type(g, max_concurrency, q), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { - } - - //! Copy constructor - function_input( const function_input& src, input_queue_type *q = NULL ) : - base_type(src, q), - my_body( src.my_body->clone() ) { - } - - ~function_input() { - delete my_body; - } - - template< typename Body > - Body copy_function_object() { - internal::function_body &body_ref = *this->my_body; - return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); - } - - task * apply_body_impl_bypass( const input_type &i) { -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - // There is an extra copied needed to capture the - // body execution without the try_put - tbb::internal::fgt_begin_body( my_body ); - output_type v = (*my_body)(i); - tbb::internal::fgt_end_body( my_body ); - task * new_task = successors().try_put_task( v ); -#else - task * new_task = successors().try_put_task( (*my_body)(i) ); -#endif - return new_task; - } - - protected: - - void reset_function_input(__TBB_PFG_RESET_ARG(reset_flags f)) { - base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if(f & rf_reset_bodies) my_body->reset_body(); -#endif - } - - function_body *my_body; - virtual broadcast_cache &successors() = 0; - - }; // function_input - - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // helper templates to reset the successor edges of the output ports of an multifunction_node - template - struct reset_element { - template - static void reset_this(P &p, reset_flags f) { - (void)tbb::flow::get(p).successors().reset(f); - reset_element::reset_this(p, f); - } - template - static bool this_empty(P &p) { - if(tbb::flow::get(p).successors().empty()) - return reset_element::this_empty(p); - return false; - } - }; - - template<> - struct reset_element<1> { - template - static void reset_this(P &p, reset_flags f) { - (void)tbb::flow::get<0>(p).successors().reset(f); - } - template - static bool this_empty(P &p) { - return tbb::flow::get<0>(p).successors().empty(); - } - }; -#endif - - //! Implements methods for a function node that takes a type Input as input - // and has a tuple of output ports specified. - template< typename Input, typename OutputPortSet, typename A> - class multifunction_input : public function_input_base > { - public: - static const int N = tbb::flow::tuple_size::value; - typedef Input input_type; - typedef OutputPortSet output_ports_type; - typedef multifunction_input my_class; - typedef function_input_base base_type; - typedef function_input_queue input_queue_type; - - - // constructor - template - multifunction_input( - graph &g, - size_t max_concurrency, - Body& body, - function_input_queue *q = NULL ) : - base_type(g, max_concurrency, q), - my_body( new internal::multifunction_body_leaf(body) ) { - } - - //! 
Copy constructor - multifunction_input( const multifunction_input& src, input_queue_type *q = NULL ) : - base_type(src, q), - my_body( src.my_body->clone() ) { - } - - ~multifunction_input() { - delete my_body; - } - - template< typename Body > - Body copy_function_object() { - internal::multifunction_body &body_ref = *this->my_body; - return dynamic_cast< internal::multifunction_body_leaf & >(body_ref).get_body(); - } - - // for multifunction nodes we do not have a single successor as such. So we just tell - // the task we were successful. - task * apply_body_impl_bypass( const input_type &i) { - tbb::internal::fgt_begin_body( my_body ); - (*my_body)(i, my_output_ports); - tbb::internal::fgt_end_body( my_body ); - task * new_task = SUCCESSFULLY_ENQUEUED; - return new_task; - } - - output_ports_type &output_ports(){ return my_output_ports; } - - protected: - - /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { - base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - reset_element::reset_this(my_output_ports, f); - if(f & rf_reset_bodies) my_body->reset_body(); - __TBB_ASSERT(!(f & rf_extract) || reset_element::this_empty(my_output_ports), "multifunction_node reset failed"); -#endif - } - - multifunction_body *my_body; - output_ports_type my_output_ports; - - }; // multifunction_input - - // template to refer to an output port of a multifunction_node - template - typename tbb::flow::tuple_element::type &output_port(MOP &op) { - return tbb::flow::get(op.output_ports()); - } - -// helper structs for split_node - template - struct emit_element { - template - static void emit_this(const T &t, P &p) { - (void)tbb::flow::get(p).try_put(tbb::flow::get(t)); - emit_element::emit_this(t,p); - } - }; - - template<> - struct emit_element<1> { - template - static void emit_this(const T &t, P &p) { - (void)tbb::flow::get<0>(p).try_put(tbb::flow::get<0>(t)); - } - }; - - //! Implements methods for an executable node that takes continue_msg as input - template< typename Output > - class continue_input : public continue_receiver { - public: - - //! The input type of this receiver - typedef continue_msg input_type; - - //! The output type of this receiver - typedef Output output_type; - - template< typename Body > - continue_input( graph &g, Body& body ) - : my_graph_ptr(&g), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } - - template< typename Body > - continue_input( graph &g, int number_of_predecessors, Body& body ) - : continue_receiver( number_of_predecessors ), my_graph_ptr(&g), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } - - continue_input( const continue_input& src ) : continue_receiver(src), - my_graph_ptr(src.my_graph_ptr), my_body( src.my_body->clone() ) {} - - ~continue_input() { - delete my_body; - } - - template< typename Body > - Body copy_function_object() { - internal::function_body &body_ref = *my_body; - return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); - } - - /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) { - continue_receiver::reset_receiver(__TBB_PFG_RESET_ARG(f)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if(f & rf_reset_bodies) my_body->reset_body(); -#endif - } - - protected: - - graph* my_graph_ptr; - function_body *my_body; - - virtual broadcast_cache &successors() = 0; - - friend class apply_body_task_bypass< continue_input< Output >, continue_msg >; - - //! 
Applies the body to the provided input - /* override */ task *apply_body_bypass( input_type ) { -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - // There is an extra copied needed to capture the - // body execution without the try_put - tbb::internal::fgt_begin_body( my_body ); - output_type v = (*my_body)( continue_msg() ); - tbb::internal::fgt_end_body( my_body ); - return successors().try_put_task( v ); -#else - return successors().try_put_task( (*my_body)( continue_msg() ) ); -#endif - } - - //! Spawns a task that applies the body - /* override */ task *execute( ) { - task* tp = my_graph_ptr->root_task(); - return (tp) ? - new ( task::allocate_additional_child_of( *tp ) ) - apply_body_task_bypass< continue_input< Output >, continue_msg >( *this, continue_msg() ) : - NULL; - } - - }; // continue_input - - //! Implements methods for both executable and function nodes that puts Output to its successors - template< typename Output > - class function_output : public sender { - public: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - template friend struct reset_element; -#endif - typedef Output output_type; - typedef receiver successor_type; - typedef broadcast_cache broadcast_cache_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef std::vector successor_vector_type; -#endif - - function_output() { my_successors.set_owner(this); } - function_output(const function_output & /*other*/) : sender() { - my_successors.set_owner(this); - } - - //! Adds a new successor to this node - /* override */ bool register_successor( receiver &r ) { - successors().register_successor( r ); - return true; - } - - //! Removes a successor from this node - /* override */ bool remove_successor( receiver &r ) { - successors().remove_successor( r ); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/ void internal_add_built_successor( receiver &r) { - successors().internal_add_built_successor( r ); - } - - /*override*/ void internal_delete_built_successor( receiver &r) { - successors().internal_delete_built_successor( r ); - } - - /*override*/ size_t successor_count() { - return successors().successor_count(); - } - - /*override*/ void copy_successors( successor_vector_type &v) { - successors().copy_successors(v); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - // for multifunction_node. The function_body that implements - // the node will have an input and an output tuple of ports. To put - // an item to a successor, the body should - // - // get(output_ports).try_put(output_value); - // - // return value will be bool returned from successors.try_put. 
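For orientation, the contract that comment describes looks like this from the user's side of the public API (modern oneTBB spelling; the header being deleted here is the older interface7 implementation behind the same surface):

    #include <cstdio>
    #include <tuple>
    #include <tbb/flow_graph.h>

    int main() {
        using namespace tbb::flow;
        using splitter_t = multifunction_node<int, std::tuple<int, int>>;

        graph g;
        splitter_t splitter(g, unlimited,
            [](const int& v, splitter_t::output_ports_type& ports) {
                // Exactly the pattern the comment above describes:
                // get<N>(output_ports).try_put(value).
                if (v % 2 == 0) std::get<0>(ports).try_put(v);
                else            std::get<1>(ports).try_put(v);
            });

        queue_node<int> evens(g), odds(g);
        make_edge(output_port<0>(splitter), evens);
        make_edge(output_port<1>(splitter), odds);

        for (int i = 0; i < 10; ++i) splitter.try_put(i);
        g.wait_for_all();

        int v;
        while (evens.try_get(v)) std::printf("even %d\n", v);
        while (odds.try_get(v))  std::printf("odd  %d\n", v);
    }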
- task *try_put_task(const output_type &i) { return my_successors.try_put_task(i); } - - protected: - broadcast_cache_type my_successors; - broadcast_cache_type &successors() { return my_successors; } - - }; // function_output - - template< typename Output > - class multifunction_output : public function_output { - public: - typedef Output output_type; - typedef function_output base_type; - using base_type::my_successors; - - multifunction_output() : base_type() {my_successors.set_owner(this);} - multifunction_output( const multifunction_output &/*other*/) : base_type() { my_successors.set_owner(this); } - - bool try_put(const output_type &i) { - task *res = my_successors.try_put_task(i); - if(!res) return false; - if(res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res); - return true; - } - }; // multifunction_output - -} // internal - -#endif // __TBB__flow_graph_node_impl_H diff --git a/src/tbb/include/tbb/internal/_flow_graph_tagged_buffer_impl.h b/src/tbb/include/tbb/internal/_flow_graph_tagged_buffer_impl.h deleted file mode 100644 index 8c13eb592..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_tagged_buffer_impl.h +++ /dev/null @@ -1,251 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// tagged buffer that can expand, and can support as many deletions as additions -// list-based, with elements of list held in array (for destruction management), -// multiplicative hashing (like ets). No synchronization built-in. -// - -#ifndef __TBB__flow_graph_tagged_buffer_impl_H -#define __TBB__flow_graph_tagged_buffer_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -// included in namespace tbb::flow::interface7::internal - -template -struct otherData { - T t; - U next; - otherData() : t(NoTagMark), next(NULL) {} -}; - -template -struct buffer_element_type { - // the second parameter below is void * because we can't forward-declare the type - // itself, so we just reinterpret_cast below. 
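The tagged_buffer this file goes on to define hashes a tag to a bucket by multiplying it with a golden-ratio constant and masking to the power-of-two table size — the "multiplicative hashing (like ets)" the file comment mentions; the constant reappears below in select_size_t_constant<0x9E3779B9, 0x9E3779B97F4A7C15ULL>. A standalone illustration of the 64-bit case:

    #include <cstddef>
    #include <cstdint>

    // Multiplying by an odd constant near 2^64/phi is a bijection mod 2^64,
    // so nearby tags scatter across buckets and a plain mask over a
    // power-of-two table suffices.
    inline std::size_t tag_bucket(std::uint64_t tag, std::size_t table_size /* power of two */) {
        return static_cast<std::size_t>(tag * 0x9E3779B97F4A7C15ULL) & (table_size - 1);
    }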
- typedef typename aligned_pair >::type type; -}; - -template - < - typename TagType, - typename ValueType, - size_t NoTagMark = 0, - typename Allocator=tbb::cache_aligned_allocator< typename buffer_element_type::type > - > -class tagged_buffer { -public: - static const size_t INITIAL_SIZE = 8; // initial size of the hash pointer table - static const TagType NO_TAG = TagType(NoTagMark); - typedef ValueType value_type; - typedef typename buffer_element_type::type element_type; - typedef value_type *pointer_type; - typedef element_type *list_array_type; // array we manage manually - typedef list_array_type *pointer_array_type; - typedef typename Allocator::template rebind::other pointer_array_allocator_type; - typedef typename Allocator::template rebind::other elements_array_allocator; -private: - size_t my_size; - size_t nelements; - pointer_array_type pointer_array; // pointer_array[my_size] - list_array_type elements_array; // elements_array[my_size / 2] - element_type* free_list; - - size_t mask() { return my_size - 1; } - - static size_t hash(TagType t) { - return uintptr_t(t)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value; - } - - void set_up_free_list( element_type **p_free_list, list_array_type la, size_t sz) { - for(size_t i=0; i < sz - 1; ++i ) { // construct free list - la[i].second.next = &(la[i+1]); - la[i].second.t = NO_TAG; - } - la[sz-1].second.next = NULL; - *p_free_list = &(la[0]); - } - - // cleanup for exceptions - struct DoCleanup { - pointer_array_type *my_pa; - list_array_type *my_elements; - size_t my_size; - - DoCleanup(pointer_array_type &pa, list_array_type &my_els, size_t sz) : - my_pa(&pa), my_elements(&my_els), my_size(sz) { } - ~DoCleanup() { - if(my_pa) { - size_t dont_care = 0; - internal_free_buffer(*my_pa, *my_elements, my_size, dont_care); - } - } - }; - - // exception-safety requires we do all the potentially-throwing operations first - void grow_array() { - size_t new_size = my_size*2; - size_t new_nelements = nelements; // internal_free_buffer zeroes this - list_array_type new_elements_array = NULL; - pointer_array_type new_pointer_array = NULL; - list_array_type new_free_list = NULL; - { - DoCleanup my_cleanup(new_pointer_array, new_elements_array, new_size); - new_elements_array = elements_array_allocator().allocate(my_size); - new_pointer_array = pointer_array_allocator_type().allocate(new_size); - for(size_t i=0; i < new_size; ++i) new_pointer_array[i] = NULL; - set_up_free_list(&new_free_list, new_elements_array, my_size ); - - for(size_t i=0; i < my_size; ++i) { - for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->second.next)) { - value_type *ov = reinterpret_cast(&(op->first)); - // could have std::move semantics - internal_tagged_insert(new_pointer_array, new_size, new_free_list, op->second.t, *ov); - } - } - my_cleanup.my_pa = NULL; - my_cleanup.my_elements = NULL; - } - - internal_free_buffer(pointer_array, elements_array, my_size, nelements); - free_list = new_free_list; - pointer_array = new_pointer_array; - elements_array = new_elements_array; - my_size = new_size; - nelements = new_nelements; - } - - // v should have perfect forwarding if std::move implemented. 
- // we use this method to move elements in grow_array, so can't use class fields - void internal_tagged_insert( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list, - const TagType t, const value_type &v) { - size_t l_mask = p_sz-1; - size_t h = hash(t) & l_mask; - __TBB_ASSERT(p_free_list, "Error: free list not set up."); - element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->second.next); - my_elem->second.t = t; - (void) new(&(my_elem->first)) value_type(v); - my_elem->second.next = p_pointer_array[h]; - p_pointer_array[h] = my_elem; - } - - void internal_initialize_buffer() { - pointer_array = pointer_array_allocator_type().allocate(my_size); - for(size_t i = 0; i < my_size; ++i) pointer_array[i] = NULL; - elements_array = elements_array_allocator().allocate(my_size / 2); - set_up_free_list(&free_list, elements_array, my_size / 2); - } - - // made static so an enclosed class can use to properly dispose of the internals - static void internal_free_buffer( pointer_array_type &pa, list_array_type &el, size_t &sz, size_t &ne ) { - if(pa) { - for(size_t i = 0; i < sz; ++i ) { - element_type *p_next; - for( element_type *p = pa[i]; p; p = p_next) { - p_next = (element_type *)p->second.next; - value_type *vp = reinterpret_cast(&(p->first)); - vp->~value_type(); - } - } - pointer_array_allocator_type().deallocate(pa, sz); - pa = NULL; - } - // Separate test (if allocation of pa throws, el may be allocated. - // but no elements will be constructed.) - if(el) { - elements_array_allocator().deallocate(el, sz / 2); - el = NULL; - } - sz = INITIAL_SIZE; - ne = 0; - } - -public: - tagged_buffer() : my_size(INITIAL_SIZE), nelements(0) { - internal_initialize_buffer(); - } - - ~tagged_buffer() { - internal_free_buffer(pointer_array, elements_array, my_size, nelements); - } - - void reset() { - internal_free_buffer(pointer_array, elements_array, my_size, nelements); - internal_initialize_buffer(); - } - - bool tagged_insert(const TagType t, const value_type &v) { - pointer_type p; - if(tagged_find_ref(t, p)) { - p->~value_type(); - (void) new(p) value_type(v); // copy-construct into the space - return false; - } - ++nelements; - if(nelements*2 > my_size) grow_array(); - internal_tagged_insert(pointer_array, my_size, free_list, t, v); - return true; - } - - // returns reference to array element.v - bool tagged_find_ref(const TagType t, pointer_type &v) { - size_t i = hash(t) & mask(); - for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->second.next)) { - if(p->second.t == t) { - v = reinterpret_cast(&(p->first)); - return true; - } - } - return false; - } - - bool tagged_find( const TagType t, value_type &v) { - value_type *p; - if(tagged_find_ref(t, p)) { - v = *p; - return true; - } - else - return false; - } - - void tagged_delete(const TagType t) { - size_t h = hash(t) & mask(); - element_type* prev = NULL; - for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->second.next)) { - if(p->second.t == t) { - value_type *vp = reinterpret_cast(&(p->first)); - vp->~value_type(); - p->second.t = NO_TAG; - if(prev) prev->second.next = p->second.next; - else pointer_array[h] = (element_type *)(p->second.next); - p->second.next = free_list; - free_list = p; - --nelements; - return; - } - } - __TBB_ASSERT(false, "tag not found for delete"); - } -}; -#endif // __TBB__flow_graph_tagged_buffer_impl_H diff --git a/src/tbb/include/tbb/internal/_flow_graph_trace_impl.h b/src/tbb/include/tbb/internal/_flow_graph_trace_impl.h 
deleted file mode 100644 index 43efc7c8e..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_trace_impl.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _FGT_GRAPH_TRACE_IMPL_H -#define _FGT_GRAPH_TRACE_IMPL_H - -#include "../tbb_profiling.h" - -namespace tbb { - namespace internal { - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - -static inline void fgt_internal_create_input_port( void *node, void *p, string_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); -} - -static inline void fgt_internal_create_output_port( void *node, void *p, string_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index ); -} - -template < typename TypesTuple, typename PortsTuple, int N > -struct fgt_internal_input_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element::type > * >(&(tbb::flow::get(ports))), - static_cast(FLOW_INPUT_PORT_0 + N - 1) ); - fgt_internal_input_helper::register_port( node, ports ); - } -}; - -template < typename TypesTuple, typename PortsTuple > -struct fgt_internal_input_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))), - FLOW_INPUT_PORT_0 ); - } -}; - -template < typename TypesTuple, typename PortsTuple, int N > -struct fgt_internal_output_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< typename tbb::flow::tuple_element::type > * >(&(tbb::flow::get(ports))), - static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); - fgt_internal_output_helper::register_port( node, ports ); - } -}; - -template < typename TypesTuple, typename PortsTuple > -struct fgt_internal_output_helper { - static void register_port( void *node, PortsTuple &ports ) { - fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< 
typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))), - FLOW_OUTPUT_PORT_0 ); - } -}; - -template< typename NodeType > -void fgt_multioutput_node_desc( const NodeType *node, const char *desc ) { - void *addr = (void *)( static_cast< tbb::flow::interface7::receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) ); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); -} - -template< typename NodeType > -static inline void fgt_node_desc( const NodeType *node, const char *desc ) { - void *addr = (void *)( static_cast< tbb::flow::interface7::sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) ); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); -} - -static inline void fgt_graph_desc( void *g, const char *desc ) { - itt_metadata_str_add( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, FLOW_OBJECT_NAME, desc ); -} - -static inline void fgt_body( void *node, void *body ) { - itt_relation_add( ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE ); -} - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node( string_index t, void *g, void *input_port, PortsTuple &ports ) { - itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); - fgt_internal_output_helper::register_port( input_port, ports ); -} - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node_with_body( string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) { - itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); - fgt_internal_output_helper::register_port( input_port, ports ); - fgt_body( input_port, body ); -} - - -template< typename InputTuple, int N, typename PortsTuple > -static inline void fgt_multiinput_node( string_index t, void *g, PortsTuple &ports, void *output_port) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); - fgt_internal_input_helper::register_port( output_port, ports ); -} - -static inline void fgt_node( string_index t, void *g, void *output_port ) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); -} - -static inline void fgt_node_with_body( string_index t, void *g, void *output_port, void *body ) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); - fgt_body( output_port, body ); -} - - -static inline void fgt_node( string_index t, void *g, void *input_port, void *output_port ) { - fgt_node( t, g, output_port ); - fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); -} - -static inline void fgt_node_with_body( string_index t, void *g, void *input_port, void *output_port, void *body ) { - fgt_node_with_body( t, g, output_port, body ); - fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); -} - - -static inline void fgt_node( string_index t, void *g, void *input_port, void *decrement_port, void *output_port ) { - fgt_node( t, g, input_port, 
output_port ); - fgt_internal_create_input_port( output_port, decrement_port, FLOW_INPUT_PORT_1 ); -} - -static inline void fgt_make_edge( void *output_port, void *input_port ) { - itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT); -} - -static inline void fgt_remove_edge( void *output_port, void *input_port ) { - itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT); -} - -static inline void fgt_graph( void *g ) { - itt_make_task_group( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, NULL, FLOW_NULL, FLOW_GRAPH ); -} - -static inline void fgt_begin_body( void *body ) { - itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, NULL, FLOW_NULL, FLOW_NULL ); -} - -static inline void fgt_end_body( void * ) { - itt_task_end( ITT_DOMAIN_FLOW ); -} - -#else // TBB_PREVIEW_FLOW_GRAPH_TRACE - -static inline void fgt_graph( void * /*g*/ ) { } - -template< typename NodeType > -static inline void fgt_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } - -template< typename NodeType > -static inline void fgt_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } - -static inline void fgt_graph_desc( void * /*g*/, const char * /*desc*/ ) { } - -static inline void fgt_body( void * /*node*/, void * /*body*/ ) { } - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { } - -template< typename OutputTuple, int N, typename PortsTuple > -static inline void fgt_multioutput_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { } - -template< typename InputTuple, int N, typename PortsTuple > -static inline void fgt_multiinput_node( string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { } - -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { } - -static inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { } -static inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { } - -static inline void fgt_make_edge( void * /*output_port*/, void * /*input_port*/ ) { } -static inline void fgt_remove_edge( void * /*output_port*/, void * /*input_port*/ ) { } - -static inline void fgt_begin_body( void * /*body*/ ) { } -static inline void fgt_end_body( void * /*body*/) { } - -#endif // TBB_PREVIEW_FLOW_GRAPH_TRACE - - } // namespace internal -} // namespace tbb - -#endif diff --git a/src/tbb/include/tbb/internal/_flow_graph_types_impl.h b/src/tbb/include/tbb/internal/_flow_graph_types_impl.h deleted file mode 100644 index 28a525a4d..000000000 --- a/src/tbb/include/tbb/internal/_flow_graph_types_impl.h +++ /dev/null @@ -1,497 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
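The _flow_graph_trace_impl.h header removed above illustrates a common instrumentation pattern: every fgt_* hook either forwards to the ITT profiling calls (when TBB_PREVIEW_FLOW_GRAPH_TRACE is set) or collapses to an empty inline stub the compiler removes. A minimal standalone sketch of that on/off hook shape; MY_TRACE and trace_node() are illustrative names, not TBB API:

#include <cstdio>

#if MY_TRACE
// Stand-in for the ITT calls (itt_make_task_group etc.) the real hooks make.
static inline void trace_node(const char* kind, const void* node) {
    std::printf("flow node %p registered as %s\n", node, kind);
}
#else
// Tracing disabled: an empty inline stub that optimizes away entirely.
static inline void trace_node(const char*, const void*) {}
#endif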
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__flow_graph_types_impl_H -#define __TBB__flow_graph_types_impl_H - -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -// included in namespace tbb::flow::interface7 - -namespace internal { -// wrap each element of a tuple in a template, and make a tuple of the result. - - template class PT, typename TypeTuple> - struct wrap_tuple_elements; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<1, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<2, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<3, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<4, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; - - template class PT, typename TypeTuple> - struct wrap_tuple_elements<5, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; - -#if __TBB_VARIADIC_MAX >= 6 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<6, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 7 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<7, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 8 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<8, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 9 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<9, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - 
PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -#if __TBB_VARIADIC_MAX >= 10 - template class PT, typename TypeTuple> - struct wrap_tuple_elements<10, PT, TypeTuple> { - typedef typename tbb::flow::tuple< - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type>, - PT::type> > - type; - }; -#endif - -//! type mimicking std::pair but with trailing fill to ensure each element of an array -//* will have the correct alignment - template - struct type_plus_align { - char first[sizeof(T1)]; - T2 second; - char fill1[REM]; - }; - - template - struct type_plus_align { - char first[sizeof(T1)]; - T2 second; - }; - - template struct alignment_of { - typedef struct { char t; U padded; } test_alignment; - static const size_t value = sizeof(test_alignment) - sizeof(U); - }; - - // T1, T2 are actual types stored. The space defined for T1 in the type returned - // is a char array of the correct size. Type T2 should be trivially-constructible, - // T1 must be explicitly managed. - template - struct aligned_pair { - static const size_t t1_align = alignment_of::value; - static const size_t t2_align = alignment_of::value; - typedef type_plus_align just_pair; - static const size_t max_align = t1_align < t2_align ? t2_align : t1_align; - static const size_t extra_bytes = sizeof(just_pair) % max_align; - static const size_t remainder = extra_bytes ? max_align - extra_bytes : 0; - public: - typedef type_plus_align type; - }; // aligned_pair - -// support for variant type -// type we use when we're not storing a value -struct default_constructed { }; - -// type which contains another type, tests for what type is contained, and references to it. -// internal::Wrapper -// void CopyTo( void *newSpace) : builds a Wrapper copy of itself in newSpace - -// struct to allow us to copy and test the type of objects -struct WrapperBase { - virtual ~WrapperBase() {} - virtual void CopyTo(void* /*newSpace*/) const { } -}; - -// Wrapper contains a T, with the ability to test what T is. The Wrapper can be -// constructed from a T, can be copy-constructed from another Wrapper, and can be -// examined via value(), but not modified. -template -struct Wrapper: public WrapperBase { - typedef T value_type; - typedef T* pointer_type; -private: - T value_space; -public: - const value_type &value() const { return value_space; } - -private: - Wrapper(); - - // on exception will ensure the Wrapper will contain only a trivially-constructed object - struct _unwind_space { - pointer_type space; - _unwind_space(pointer_type p) : space(p) {} - ~_unwind_space() { - if(space) (void) new (space) Wrapper(default_constructed()); - } - }; -public: - explicit Wrapper( const T& other ) : value_space(other) { } - explicit Wrapper(const Wrapper& other) : value_space(other.value_space) { } - - /*override*/void CopyTo(void* newSpace) const { - _unwind_space guard((pointer_type)newSpace); - (void) new(newSpace) Wrapper(value_space); - guard.space = NULL; - } - /*override*/~Wrapper() { } -}; - -// specialization for array objects -template -struct Wrapper : public WrapperBase { - typedef T value_type; - typedef T* pointer_type; - // space must be untyped. - typedef T ArrayType[N]; -private: - // The space is not of type T[N] because when copy-constructing, it would be - // default-initialized and then copied to in some fashion, resulting in two - // constructions and one destruction per element. 
If the type is char[ ], we - // placement new into each element, resulting in one construction per element. - static const size_t space_size = sizeof(ArrayType) / sizeof(char); - char value_space[space_size]; - - - // on exception will ensure the already-built objects will be destructed - // (the value_space is a char array, so it is already trivially-destructible.) - struct _unwind_class { - pointer_type space; - int already_built; - _unwind_class(pointer_type p) : space(p), already_built(0) {} - ~_unwind_class() { - if(space) { - for(size_t i = already_built; i > 0 ; --i ) space[i-1].~value_type(); - (void) new(space) Wrapper(default_constructed()); - } - } - }; -public: - const ArrayType &value() const { - char *vp = const_cast(value_space); - return reinterpret_cast(*vp); - } - -private: - Wrapper(); -public: - // have to explicitly construct because other decays to a const value_type* - explicit Wrapper(const ArrayType& other) { - _unwind_class guard((pointer_type)value_space); - pointer_type vp = reinterpret_cast(&value_space); - for(size_t i = 0; i < N; ++i ) { - (void) new(vp++) value_type(other[i]); - ++(guard.already_built); - } - guard.space = NULL; - } - explicit Wrapper(const Wrapper& other) : WrapperBase() { - // we have to do the heavy lifting to copy contents - _unwind_class guard((pointer_type)value_space); - pointer_type dp = reinterpret_cast(value_space); - pointer_type sp = reinterpret_cast(const_cast(other.value_space)); - for(size_t i = 0; i < N; ++i, ++dp, ++sp) { - (void) new(dp) value_type(*sp); - ++(guard.already_built); - } - guard.space = NULL; - } - - /*override*/void CopyTo(void* newSpace) const { - (void) new(newSpace) Wrapper(*this); // exceptions handled in copy constructor - } - - /*override*/~Wrapper() { - // have to destroy explicitly in reverse order - pointer_type vp = reinterpret_cast(&value_space); - for(size_t i = N; i > 0 ; --i ) vp[i-1].~value_type(); - } -}; - -// given a tuple, return the type of the element that has the maximum alignment requirement. -// Given a tuple and that type, return the number of elements of the object with the max -// alignment requirement that is at least as big as the largest object in the tuple. - -template struct pick_one; -template struct pick_one { typedef T1 type; }; -template struct pick_one { typedef T2 type; }; - -template< template class Selector, typename T1, typename T2 > -struct pick_max { - typedef typename pick_one< (Selector::value > Selector::value), T1, T2 >::type type; -}; - -template struct size_of { static const int value = sizeof(T); }; - -template< size_t N, class Tuple, template class Selector > struct pick_tuple_max { - typedef typename pick_tuple_max::type LeftMaxType; - typedef typename tbb::flow::tuple_element::type ThisType; - typedef typename pick_max::type type; -}; - -template< class Tuple, template class Selector > struct pick_tuple_max<0, Tuple, Selector> { - typedef typename tbb::flow::tuple_element<0, Tuple>::type type; -}; - -// is the specified type included in a tuple? - -template struct is_same_type { static const bool value = false; }; -template struct is_same_type { static const bool value = true; }; - -template -struct is_element_of { - typedef typename tbb::flow::tuple_element::type T_i; - static const bool value = is_same_type::value || is_element_of::value; -}; - -template -struct is_element_of { - typedef typename tbb::flow::tuple_element<0, Tuple>::type T_i; - static const bool value = is_same_type::value; -}; - -// allow the construction of types that are listed tuple. 
If a disallowed type -// construction is written, a method involving this type is created. The -// type has no definition, so a syntax error is generated. -template struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple; - -template struct do_if; -template -struct do_if { - static void construct(void *mySpace, const T& x) { - (void) new(mySpace) Wrapper(x); - } -}; -template -struct do_if { - static void construct(void * /*mySpace*/, const T& x) { - // This method is instantiated when the type T does not match any of the - // element types in the Tuple in variant. - ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple::bad_type(x); - } -}; - -// Tuple tells us the allowed types that variant can hold. It determines the alignment of the space in -// Wrapper, and how big Wrapper is. -// -// the object can only be tested for type, and a read-only reference can be fetched by cast_to(). - -using tbb::internal::punned_cast; -struct tagged_null_type {}; -template -class tagged_msg { - typedef tbb::flow::tuple= 6 - , T5 - #endif - #if __TBB_VARIADIC_MAX >= 7 - , T6 - #endif - #if __TBB_VARIADIC_MAX >= 8 - , T7 - #endif - #if __TBB_VARIADIC_MAX >= 9 - , T8 - #endif - #if __TBB_VARIADIC_MAX >= 10 - , T9 - #endif - > Tuple; - -private: - class variant { - static const size_t N = tbb::flow::tuple_size::value; - typedef typename pick_tuple_max::type AlignType; - typedef typename pick_tuple_max::type MaxSizeType; - static const size_t MaxNBytes = (sizeof(Wrapper)+sizeof(AlignType)-1); - static const size_t MaxNElements = MaxNBytes/sizeof(AlignType); - typedef typename tbb::aligned_space SpaceType; - SpaceType my_space; - static const size_t MaxSize = sizeof(SpaceType); - - public: - variant() { (void) new(&my_space) Wrapper(default_constructed()); } - - template - variant( const T& x ) { - do_if::value>::construct(&my_space,x); - } - - variant(const variant& other) { - const WrapperBase * h = punned_cast(&(other.my_space)); - h->CopyTo(&my_space); - } - - // assignment must destroy and re-create the Wrapper type, as there is no way - // to create a Wrapper-to-Wrapper assign even if we find they agree in type. 
- void operator=( const variant& rhs ) { - if(&rhs != this) { - WrapperBase *h = punned_cast(&my_space); - h->~WrapperBase(); - const WrapperBase *ch = punned_cast(&(rhs.my_space)); - ch->CopyTo(&my_space); - } - } - - template - const U& variant_cast_to() const { - const Wrapper *h = dynamic_cast*>(punned_cast(&my_space)); - if(!h) { - tbb::internal::throw_exception(tbb::internal::eid_bad_tagged_msg_cast); - } - return h->value(); - } - template - bool variant_is_a() const { return dynamic_cast*>(punned_cast(&my_space)) != NULL; } - - bool variant_is_default_constructed() const {return variant_is_a();} - - ~variant() { - WrapperBase *h = punned_cast(&my_space); - h->~WrapperBase(); - } - }; //class variant - - TagType my_tag; - variant my_msg; - -public: - tagged_msg(): my_tag(TagType(~0)), my_msg(){} - - template - tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {} - - #if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN - template - tagged_msg(T const &index, R (&value)[N]) : my_tag(index), my_msg(value) {} - #endif - - void set_tag(TagType const &index) {my_tag = index;} - TagType tag() const {return my_tag;} - - template - const V& cast_to() const {return my_msg.template variant_cast_to();} - - template - bool is_a() const {return my_msg.template variant_is_a();} - - bool is_default_constructed() const {return my_msg.variant_is_default_constructed();} -}; //class tagged_msg - -// template to simplify cast and test for tagged_msg in template contexts -template -const T& cast_to(V const &v) { return v.template cast_to(); } - -template -bool is_a(V const &v) { return v.template is_a(); } - -} // namespace internal - -#endif /* __TBB__flow_graph_types_impl_H */ diff --git a/src/tbb/include/tbb/internal/_mutex_padding.h b/src/tbb/include/tbb/internal/_mutex_padding.h deleted file mode 100644 index ae07599c0..000000000 --- a/src/tbb/include/tbb/internal/_mutex_padding.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_mutex_padding_H -#define __TBB_mutex_padding_H - -// wrapper for padding mutexes to be alone on a cache line, without requiring they be allocated -// from a pool. Because we allow them to be defined anywhere they must be two cache lines in size. 
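The comment above captures the whole trick behind padded_mutex: reserve two cache lines of raw storage and round this up to the next line boundary, so the wrapped mutex never shares a cache line no matter where the object was allocated. A minimal sketch of that rounding, assuming a 64-byte line and sizeof(T) <= 64; padded is an illustrative name, not TBB API:

#include <cstddef>
#include <cstdint>
#include <new>

static const std::size_t cache_line = 64;    // assumed line size, as above

template <typename T>                        // assumes sizeof(T) <= cache_line
class padded {
    char storage[2 * cache_line];            // room to slide T to a boundary
    T* impl() {
        // Round the embedded address up to the next cache-line boundary,
        // the same (uintptr_t(this)|(cache_line_size-1))+1 trick used below.
        std::uintptr_t p = reinterpret_cast<std::uintptr_t>(storage);
        return reinterpret_cast<T*>((p | (cache_line - 1)) + 1);
    }
public:
    padded()  { new (impl()) T(); }          // placement-construct at boundary
    ~padded() { impl()->~T(); }
    T& get()  { return *impl(); }
};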
- - -namespace tbb { -namespace interface7 { -namespace internal { - -static const size_t cache_line_size = 64; - -// Pad a mutex to occupy a number of full cache lines sufficient to avoid false sharing -// with other data; space overhead is up to 2*cache_line_size-1. -template class padded_mutex; - -template -class padded_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { - typedef long pad_type; - pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)]; - - Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);} - -public: - static const bool is_rw_mutex = Mutex::is_rw_mutex; - static const bool is_recursive_mutex = Mutex::is_recursive_mutex; - static const bool is_fair_mutex = Mutex::is_fair_mutex; - - padded_mutex() { new(impl()) Mutex(); } - ~padded_mutex() { impl()->~Mutex(); } - - //! Represents acquisition of a mutex. - class scoped_lock : tbb::internal::no_copy { - typename Mutex::scoped_lock my_scoped_lock; - public: - scoped_lock() : my_scoped_lock() {} - scoped_lock( padded_mutex& m ) : my_scoped_lock(*m.impl()) { } - ~scoped_lock() { } - - void acquire( padded_mutex& m ) { my_scoped_lock.acquire(*m.impl()); } - bool try_acquire( padded_mutex& m ) { return my_scoped_lock.try_acquire(*m.impl()); } - void release() { my_scoped_lock.release(); } - }; -}; - -template -class padded_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { - typedef long pad_type; - pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)]; - - Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);} - -public: - static const bool is_rw_mutex = Mutex::is_rw_mutex; - static const bool is_recursive_mutex = Mutex::is_recursive_mutex; - static const bool is_fair_mutex = Mutex::is_fair_mutex; - - padded_mutex() { new(impl()) Mutex(); } - ~padded_mutex() { impl()->~Mutex(); } - - //! Represents acquisition of a mutex. - class scoped_lock : tbb::internal::no_copy { - typename Mutex::scoped_lock my_scoped_lock; - public: - scoped_lock() : my_scoped_lock() {} - scoped_lock( padded_mutex& m, bool write = true ) : my_scoped_lock(*m.impl(),write) { } - ~scoped_lock() { } - - void acquire( padded_mutex& m, bool write = true ) { my_scoped_lock.acquire(*m.impl(),write); } - bool try_acquire( padded_mutex& m, bool write = true ) { return my_scoped_lock.try_acquire(*m.impl(),write); } - bool upgrade_to_writer() { return my_scoped_lock.upgrade_to_writer(); } - bool downgrade_to_reader() { return my_scoped_lock.downgrade_to_reader(); } - void release() { my_scoped_lock.release(); } - }; -}; - -} // namespace internal -} // namespace interface7 -} // namespace tbb - -#endif /* __TBB_mutex_padding_H */ diff --git a/src/tbb/include/tbb/internal/_range_iterator.h b/src/tbb/include/tbb/internal/_range_iterator.h deleted file mode 100644 index 0622c4ffb..000000000 --- a/src/tbb/include/tbb/internal/_range_iterator.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_range_iterator_H -#define __TBB_range_iterator_H - -#include "../tbb_stddef.h" - -#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT - #include -#endif - -namespace tbb { - // iterators to first and last elements of container - namespace internal { - -#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT - using std::begin; - using std::end; - template - auto first(Container& c)-> decltype(begin(c)) {return begin(c);} - - template - auto first(const Container& c)-> decltype(begin(c)) {return begin(c);} - - template - auto last(Container& c)-> decltype(begin(c)) {return end(c);} - - template - auto last(const Container& c)-> decltype(begin(c)) {return end(c);} -#else - template - typename Container::iterator first(Container& c) {return c.begin();} - - template - typename Container::const_iterator first(const Container& c) {return c.begin();} - - template - typename Container::iterator last(Container& c) {return c.end();} - - template - typename Container::const_iterator last(const Container& c) {return c.end();} -#endif - - template - T* first(T (&arr) [size]) {return arr;} - - template - T* last(T (&arr) [size]) {return arr + size;} - } //namespace internal -} //namespace tbb - -#endif // __TBB_range_iterator_H diff --git a/src/tbb/include/tbb/internal/_tbb_windef.h b/src/tbb/include/tbb/internal/_tbb_windef.h deleted file mode 100644 index 551dc2b0b..000000000 --- a/src/tbb/include/tbb/internal/_tbb_windef.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
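The first()/last() helpers deleted just above exist so the same template code can walk either a container or a built-in array; on C++11 they resolve through std::begin/std::end. A small usage sketch of that idea (sum() and the variables are illustrative, not from TBB):

#include <iterator>
#include <vector>

// One spelling that works for containers and built-in arrays alike; this is
// what the deleted first()/last() helpers buy on the C++11 branch.
template <typename Range>
long sum(Range& r) {
    long total = 0;
    for (auto it = std::begin(r); it != std::end(r); ++it)
        total += *it;
    return total;
}
// int arr[3] = {1, 2, 3};  std::vector<int> v{4, 5, 6};
// sum(arr) == 6 and sum(v) == 15 go through the same template.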
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_windef_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif /* __TBB_tbb_windef_H */ - -// Check that the target Windows version has all API calls required for TBB. -// Do not increase the version in condition beyond 0x0500 without prior discussion! -#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501 -#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater. -#endif - -#if !defined(_MT) -#error TBB requires linkage with multithreaded C/C++ runtime library. \ - Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch. -#endif - -// Workaround for the problem with MSVC headers failing to define namespace std -namespace std { - using ::size_t; using ::ptrdiff_t; -} - -#define __TBB_STRING_AUX(x) #x -#define __TBB_STRING(x) __TBB_STRING_AUX(x) - -// Default setting of TBB_USE_DEBUG -#ifdef TBB_USE_DEBUG -# if TBB_USE_DEBUG -# if !defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0") -# endif -# else -# if defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0") -# endif -# endif -#endif - -#if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#endif - -#if _MSC_VER - #if !__TBB_NO_IMPLICIT_LINKAGE - #ifdef __TBB_LIB_NAME - #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) - #else - #ifdef _DEBUG - #pragma comment(lib, "tbb_debug.lib") - #else - #pragma comment(lib, "tbb.lib") - #endif - #endif - #endif -#endif diff --git a/src/tbb/include/tbb/internal/_x86_eliding_mutex_impl.h b/src/tbb/include/tbb/internal/_x86_eliding_mutex_impl.h deleted file mode 100644 index d73877aa8..000000000 --- a/src/tbb/include/tbb/internal/_x86_eliding_mutex_impl.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction.
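_tbb_windef.h above defines __TBB_STRING in two layers because a one-step stringizing macro would capture the argument's spelling before macro expansion. The same mechanics in isolation, with illustrative names:

#define STR_AUX(x) #x          // stringizes its argument as spelled
#define STR(x) STR_AUX(x)      // extra layer: x is macro-expanded first

#define VERSION 42
// STR_AUX(VERSION) -> "VERSION"   (no expansion before #)
// STR(VERSION)     -> "42"        (expansion, then stringize)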
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__x86_eliding_mutex_impl_H -#define __TBB__x86_eliding_mutex_impl_H - -#ifndef __TBB_spin_mutex_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if ( __TBB_x86_32 || __TBB_x86_64 ) - -namespace tbb { -namespace interface7 { -namespace internal { - -template -class padded_mutex; - -//! An eliding lock that occupies a single byte. -/** A x86_eliding_mutex is an HLE-enabled spin mutex. It is recommended to - put the mutex on a cache line that is not shared by the data it protects. - It should be used for locking short critical sections where the lock is - contended but the data it protects are not. If zero-initialized, the - mutex is considered unheld. - @ingroup synchronization */ -class x86_eliding_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { - //! 0 if lock is released, 1 if lock is acquired. - __TBB_atomic_flag flag; - - friend class padded_mutex; - -public: - //! Construct unacquired lock. - /** Equivalent to zero-initialization of *this. */ - x86_eliding_mutex() : flag(0) {} - -// bug in gcc 3.x.x causes syntax error in spite of the friend declaration above. -// Make the scoped_lock public in that case. -#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 -#else - // by default we will not provide the scoped_lock interface. The user - // should use the padded version of the mutex. scoped_lock is used in - // padded_mutex template. -private: -#endif - // scoped_lock in padded_mutex<> is the interface to use. - //! Represents acquisition of a mutex. - class scoped_lock : tbb::internal::no_copy { - private: - //! Points to currently held mutex, or NULL if no lock is held. - x86_eliding_mutex* my_mutex; - - public: - //! Construct without acquiring a mutex. - scoped_lock() : my_mutex(NULL) {} - - //! Construct and acquire lock on a mutex. - scoped_lock( x86_eliding_mutex& m ) : my_mutex(NULL) { acquire(m); } - - //! Acquire lock. - void acquire( x86_eliding_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock" ); - - my_mutex=&m; - my_mutex->lock(); - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_acquire( x86_eliding_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock" ); - - bool result = m.try_lock(); - if( result ) { - my_mutex = &m; - } - return result; - } - - //! Release lock - void release() { - __TBB_ASSERT( my_mutex, "release on scoped_lock that is not holding a lock" ); - - my_mutex->unlock(); - my_mutex = NULL; - } - - //! Destroy lock. If holding a lock, releases the lock first. - ~scoped_lock() { - if( my_mutex ) { - release(); - } - } - }; -#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 -#else -public: -#endif /* __TBB_USE_X86_ELIDING_MUTEX */ - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { - __TBB_LockByteElided(flag); - } - - //! 
Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { - return __TBB_TryLockByteElided(flag); - } - - //! Release lock - void unlock() { - __TBB_UnlockByteElided( flag ); - } -}; // end of x86_eliding_mutex - -} // namespace internal -} // namespace interface7 -} // namespace tbb - -#endif /* ( __TBB_x86_32 || __TBB_x86_64 ) */ - -#endif /* __TBB__x86_eliding_mutex_impl_H */ diff --git a/src/tbb/include/tbb/internal/_x86_rtm_rw_mutex_impl.h b/src/tbb/include/tbb/internal/_x86_rtm_rw_mutex_impl.h deleted file mode 100644 index 9fb8c82f3..000000000 --- a/src/tbb/include/tbb/internal/_x86_rtm_rw_mutex_impl.h +++ /dev/null @@ -1,225 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB__x86_rtm_rw_mutex_impl_H -#define __TBB__x86_rtm_rw_mutex_impl_H - -#ifndef __TBB_spin_rw_mutex_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if __TBB_TSX_AVAILABLE - -#include "../tbb_stddef.h" -#include "../tbb_machine.h" -#include "../tbb_profiling.h" -#include "../spin_rw_mutex.h" - -namespace tbb { -namespace interface8 { -namespace internal { - -enum RTM_type { - RTM_not_in_mutex, - RTM_transacting_reader, - RTM_transacting_writer, - RTM_real_reader, - RTM_real_writer -}; - -static const unsigned long speculation_granularity = 64; - -//! Fast, unfair, spinning speculation-enabled reader-writer lock with backoff and -// writer-preference -/** @ingroup synchronization */ -class x86_rtm_rw_mutex: private spin_rw_mutex { -#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000 -// bug in gcc 3.x.x causes syntax error in spite of the friend declaration below. -// Make the scoped_lock public in that case. -public: -#else -private: -#endif - friend class interface7::internal::padded_mutex; - class scoped_lock; // should be private - friend class scoped_lock; -private: - //! @cond INTERNAL - - //! Internal construct unacquired mutex. - void __TBB_EXPORTED_METHOD internal_construct(); - - //! Internal acquire write lock. - // only_speculate == true if we're doing a try_lock, else false. - void __TBB_EXPORTED_METHOD internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false); - - //! 
Internal acquire read lock. - // only_speculate == true if we're doing a try_lock, else false. - void __TBB_EXPORTED_METHOD internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false); - - //! Internal upgrade reader to become a writer. - bool __TBB_EXPORTED_METHOD internal_upgrade( x86_rtm_rw_mutex::scoped_lock& ); - - //! Out of line code for downgrading a writer to a reader. - bool __TBB_EXPORTED_METHOD internal_downgrade( x86_rtm_rw_mutex::scoped_lock& ); - - //! Internal try_acquire write lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_writer( x86_rtm_rw_mutex::scoped_lock& ); - - //! Internal release lock. - void __TBB_EXPORTED_METHOD internal_release( x86_rtm_rw_mutex::scoped_lock& ); - - static x86_rtm_rw_mutex* internal_get_mutex( const spin_rw_mutex::scoped_lock& lock ) - { - return static_cast( lock.internal_get_mutex() ); - } - static void internal_set_mutex( spin_rw_mutex::scoped_lock& lock, spin_rw_mutex* mtx ) - { - lock.internal_set_mutex( mtx ); - } - //! @endcond -public: - //! Construct unacquired mutex. - x86_rtm_rw_mutex() { - w_flag = false; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - -#if TBB_USE_ASSERT - //! Empty destructor. - ~x86_rtm_rw_mutex() {} -#endif /* TBB_USE_ASSERT */ - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - -#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000 -#else - // by default we will not provide the scoped_lock interface. The user - // should use the padded version of the mutex. scoped_lock is used in - // padded_mutex template. -private: -#endif - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - // Speculation-enabled scoped lock for spin_rw_mutex - // The idea is to be able to reuse the acquire/release methods of spin_rw_mutex - // and its scoped lock wherever possible. The only way to use a speculative lock is to use - // a scoped_lock. (because transaction_state must be local) - - class scoped_lock : tbb::internal::no_copy { - friend class x86_rtm_rw_mutex; - spin_rw_mutex::scoped_lock my_scoped_lock; - - RTM_type transaction_state; - - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : my_scoped_lock(), transaction_state(RTM_not_in_mutex) { - } - - //! Acquire lock on given mutex. - scoped_lock( x86_rtm_rw_mutex& m, bool write = true ) : my_scoped_lock(), - transaction_state(RTM_not_in_mutex) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if(transaction_state != RTM_not_in_mutex) release(); - } - - //! Acquire lock on given mutex. - void acquire( x86_rtm_rw_mutex& m, bool write = true ) { - if( write ) m.internal_acquire_writer(*this); - else m.internal_acquire_reader(*this); - } - - //! Release lock - void release() { - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state!=RTM_not_in_mutex, "lock is not acquired" ); - return mutex->internal_release(*this); - } - - //! Upgrade reader to become a writer. 
- /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer() { - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state==RTM_transacting_reader || transaction_state==RTM_real_reader, "Invalid state for upgrade" ); - return mutex->internal_upgrade(*this); - } - - //! Downgrade writer to become a reader. - /** Returns whether the downgrade happened without releasing and re-acquiring the lock */ - bool downgrade_to_reader() { - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state==RTM_transacting_writer || transaction_state==RTM_real_writer, "Invalid state for downgrade" ); - return mutex->internal_downgrade(*this); - } - - //! Attempt to acquire mutex. - /** returns true if successful. */ - bool try_acquire( x86_rtm_rw_mutex& m, bool write = true ) { -#if TBB_USE_ASSERT - x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); - __TBB_ASSERT( !mutex, "lock is already acquired" ); -#endif - // have to assign m to our mutex. - // cannot set the mutex, because try_acquire in spin_rw_mutex depends on it being NULL. - if(write) return m.internal_try_acquire_writer(*this); - // speculatively acquire the lock. If this fails, do try_acquire on the spin_rw_mutex. - m.internal_acquire_reader(*this, /*only_speculate=*/true); - if(transaction_state == RTM_transacting_reader) return true; - if( my_scoped_lock.try_acquire(m, false)) { - transaction_state = RTM_real_reader; - return true; - } - return false; - } - - }; // class x86_rtm_rw_mutex::scoped_lock - - // ISO C++0x compatibility methods not provided because we cannot maintain - // state about whether a thread is in a transaction. - -private: - char pad[speculation_granularity-sizeof(spin_rw_mutex)]; // padding - - // If true, writer holds the spin_rw_mutex. - tbb::atomic w_flag; // want this on a separate cache line - -}; // x86_rtm_rw_mutex - -} // namespace internal -} // namespace interface8 -} // namespace tbb - -#endif /* __TBB_TSX_AVAILABLE */ -#endif /* __TBB__x86_rtm_rw_mutex_impl_H */ diff --git a/src/tbb/include/tbb/machine/gcc_armv7.h b/src/tbb/include/tbb/machine/gcc_armv7.h deleted file mode 100644 index 83f5c55e6..000000000 --- a/src/tbb/include/tbb/machine/gcc_armv7.h +++ /dev/null @@ -1,217 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
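x86_rtm_rw_mutex, removed above, implements transactional lock elision: first attempt the critical section as an RTM transaction, aborting if the real lock is observed held, and fall back to actually taking the lock when speculation fails. A minimal sketch of that control flow using the _xbegin/_xend/_xabort intrinsics from immintrin.h; it assumes TSX-capable hardware and -mrtm, and the spinlock here stands in for TBB's spin_rw_mutex:

#include <immintrin.h>   // _xbegin/_xend/_xabort; TSX hardware and -mrtm assumed
#include <atomic>

static std::atomic<bool> locked(false);   // fallback spinlock, not TBB's mutex

template <typename F>
void run_elided(F critical_section) {
    if (_xbegin() == _XBEGIN_STARTED) {
        // Speculating: reading the lock word adds it to our read set, so a
        // real locker will conflict with (and abort) this transaction.
        if (locked.load(std::memory_order_relaxed))
            _xabort(0xff);                    // lock really held: give up
        critical_section();
        _xend();                              // commit
        return;
    }
    // Aborted (or never started): take the real lock and run non-speculatively.
    while (locked.exchange(true, std::memory_order_acquire)) { /* spin */ }
    critical_section();
    locked.store(false, std::memory_order_release);
}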
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* - Platform isolation layer for the ARMv7-a architecture. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -//TODO: is ARMv7 is the only version ever to support? -#if !(__ARM_ARCH_7A__) -#error compilation requires an ARMv7-a architecture. -#endif - -#include -#include - -#define __TBB_WORDSIZE 4 - -// Traditionally ARM is little-endian. -// Note that, since only the layout of aligned 32-bit words is of interest, -// any apparent PDP-endianness of 32-bit words at half-word alignment or -// any little-endian ordering of big-endian 32-bit words in 64-bit quantities -// may be disregarded for this setting. -#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#elif defined(__BYTE_ORDER__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT -#endif - - -#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__("dmb ish": : :"memory") -#define __TBB_control_consistency_helper() __TBB_full_memory_fence() -#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence() -#define __TBB_release_consistency_helper() __TBB_full_memory_fence() - -//-------------------------------------------------- -// Compare and swap -//-------------------------------------------------- - -/** - * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - * @return value originally in memory at ptr, regardless of success -*/ -static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ) -{ - int32_t oldval, res; - - __TBB_full_memory_fence(); - - do { - __asm__ __volatile__( - "ldrex %1, [%3]\n" - "mov %0, #0\n" - "cmp %1, %4\n" - "it eq\n" - "strexeq %0, %5, [%3]\n" - : "=&r" (res), "=&r" (oldval), "+Qo" (*(volatile int32_t*)ptr) - : "r" ((int32_t *)ptr), "Ir" (comparand), "r" (value) - : "cc"); - } while (res); - - __TBB_full_memory_fence(); - - return oldval; -} - -/** - * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - * @return value originally in memory at ptr, regardless of success - */ -static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t oldval; - int32_t res; - - __TBB_full_memory_fence(); - - do { - __asm__ __volatile__( - "mov %0, #0\n" - "ldrexd %1, %H1, [%3]\n" - "cmp %1, %4\n" - "it eq\n" - "cmpeq %H1, %H4\n" - "it eq\n" - 
"strexdeq %0, %5, %H5, [%3]" - : "=&r" (res), "=&r" (oldval), "+Qo" (*(volatile int64_t*)ptr) - : "r" ((int64_t *)ptr), "r" (comparand), "r" (value) - : "cc"); - } while (res); - - __TBB_full_memory_fence(); - - return oldval; -} - -static inline int32_t __TBB_machine_fetchadd4(volatile void* ptr, int32_t addend) -{ - unsigned long tmp; - int32_t result, tmp2; - - __TBB_full_memory_fence(); - - __asm__ __volatile__( -"1: ldrex %0, [%4]\n" -" add %3, %0, %5\n" -" strex %1, %3, [%4]\n" -" cmp %1, #0\n" -" bne 1b\n" - : "=&r" (result), "=&r" (tmp), "+Qo" (*(volatile int32_t*)ptr), "=&r"(tmp2) - : "r" ((int32_t *)ptr), "Ir" (addend) - : "cc"); - - __TBB_full_memory_fence(); - - return result; -} - -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend) -{ - unsigned long tmp; - int64_t result, tmp2; - - __TBB_full_memory_fence(); - - __asm__ __volatile__( -"1: ldrexd %0, %H0, [%4]\n" -" adds %3, %0, %5\n" -" adc %H3, %H0, %H5\n" -" strexd %1, %3, %H3, [%4]\n" -" cmp %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (*(volatile int64_t*)ptr), "=&r"(tmp2) - : "r" ((int64_t *)ptr), "r" (addend) - : "cc"); - - - __TBB_full_memory_fence(); - - return result; -} - -inline void __TBB_machine_pause (int32_t delay ) -{ - while(delay>0) - { - __TBB_compiler_fence(); - delay--; - } -} - -namespace tbb { -namespace internal { - template - struct machine_load_store_relaxed { - static inline T load ( const volatile T& location ) { - const T value = location; - - /* - * An extra memory barrier is required for errata #761319 - * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a - */ - __TBB_acquire_consistency_helper(); - return value; - } - - static inline void store ( volatile T& location, T value ) { - location = value; - } - }; -}} // namespaces internal, tbb - -// Machine specific atomic operations - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_Pause(V) __TBB_machine_pause(V) - -// Use generics for some things -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 diff --git a/src/tbb/include/tbb/machine/gcc_generic.h b/src/tbb/include/tbb/machine/gcc_generic.h deleted file mode 100644 index be80ed47f..000000000 --- a/src/tbb/include/tbb/machine/gcc_generic.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_gcc_generic_H - -#include -#include - -#define __TBB_WORDSIZE __SIZEOF_POINTER__ - -#if __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN - #define __TBB_64BIT_ATOMICS 0 -#endif - -/** FPU control setting not available for non-Intel architectures on Android **/ -#if __ANDROID__ && __TBB_generic_arch - #define __TBB_CPU_CTL_ENV_PRESENT 0 -#endif - -// __BYTE_ORDER__ is used in accordance with http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html, -// but __BIG_ENDIAN__ or __LITTLE_ENDIAN__ may be more commonly found instead. -#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#elif defined(__BYTE_ORDER__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT -#endif - -/** As this generic implementation has absolutely no information about underlying - hardware, its performance most likely will be sub-optimal because of full memory - fence usages where a more lightweight synchronization means (or none at all) - could suffice. Thus if you use this header to enable TBB on a new platform, - consider forking it and relaxing below helpers as appropriate. 
**/ -#define __TBB_acquire_consistency_helper() __sync_synchronize() -#define __TBB_release_consistency_helper() __sync_synchronize() -#define __TBB_full_memory_fence() __sync_synchronize() -#define __TBB_control_consistency_helper() __sync_synchronize() - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T) \ -inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ - return __sync_val_compare_and_swap(reinterpret_cast(ptr),comparand,value); \ -} \ - \ -inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ - return __sync_fetch_and_add(reinterpret_cast(ptr),value); \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t) -__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t) -__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t) -__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t) - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -namespace tbb{ namespace internal { namespace gcc_builtins { - inline int clz(unsigned int x){ return __builtin_clz(x);}; - inline int clz(unsigned long int x){ return __builtin_clzl(x);}; - inline int clz(unsigned long long int x){ return __builtin_clzll(x);}; -}}} -//gcc __builtin_clz builtin count _number_ of leading zeroes -static inline intptr_t __TBB_machine_lg( uintptr_t x ) { - return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) -1 ; -} - -static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) { - __sync_fetch_and_or(reinterpret_cast(ptr),addend); -} - -static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) { - __sync_fetch_and_and(reinterpret_cast(ptr),addend); -} - - -typedef unsigned char __TBB_Flag; - -typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; - -inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) { - return __sync_lock_test_and_set(&flag,1)==0; -} - -inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) { - __sync_lock_release(&flag); -} - -// Machine specific atomic operations -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -#define __TBB_TryLockByte __TBB_machine_try_lock_byte -#define __TBB_UnlockByte __TBB_machine_unlock_byte - -// Definition of other functions -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#if __TBB_WORDSIZE==4 - #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#endif - -#if __TBB_x86_32 || __TBB_x86_64 -#include "gcc_itsx.h" -#endif diff --git a/src/tbb/include/tbb/machine/gcc_ia32_common.h b/src/tbb/include/tbb/machine/gcc_ia32_common.h deleted file mode 100644 index db276310a..000000000 --- a/src/tbb/include/tbb/machine/gcc_ia32_common.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
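gcc_generic.h above builds its one-byte lock from two GCC builtins: __sync_lock_test_and_set returns the previous flag value (0 means the lock was free and is now ours) and __sync_lock_release stores 0 with release semantics. A tiny sketch of how those primitives compose into a spinlock; names are illustrative:

typedef unsigned char flag_t;

// Acquire: loop until __sync_lock_test_and_set returns 0, i.e. until we are
// the thread that flipped the flag from free to held.
static inline void lock_byte(volatile flag_t* f) {
    while (__sync_lock_test_and_set(f, 1)) { /* spin */ }
}

// Release: stores 0 with release semantics, matching the builtin's contract.
static inline void unlock_byte(volatile flag_t* f) {
    __sync_lock_release(f);
}

static flag_t guard = 0;
// lock_byte(&guard); ...critical section...; unlock_byte(&guard);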
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_machine_gcc_ia32_common_H -#define __TBB_machine_gcc_ia32_common_H - -//TODO: Add a higher-level function, e.g. tbb::interal::log2(), into tbb_stddef.h, which -//uses __TBB_Log2 and contains the assert and remove the assert from here and all other -//platform-specific headers. -//TODO: Check if use of gcc intrinsic gives a better chance for cross call optimizations -template -static inline intptr_t __TBB_machine_lg( T x ) { - __TBB_ASSERT(x>0, "The logarithm of a non-positive value is undefined."); - uintptr_t j; - __asm__("bsr %1,%0" : "=r"(j) : "r"((uintptr_t)x)); - return j; -} -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#ifndef __TBB_Pause -//TODO: check if raising a ratio of pause instructions to loop control instructions -//(via e.g. loop unrolling) gives any benefit for HT. E.g, the current implementation -//does about 2 CPU-consuming instructions for every pause instruction. Perhaps for -//high pause counts it should use an unrolled loop to raise the ratio, and thus free -//up more integer cycles for the other hyperthread. On the other hand, if the loop is -//unrolled too far, it won't fit in the core's loop cache, and thus take away -//instruction decode slots from the other hyperthread. 
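The bsr instruction used by __TBB_machine_lg above returns the index of the highest set bit, i.e. floor(log2(x)). gcc_generic.h computes the same value from __builtin_clz; in isolation the relationship looks like this (undefined for x == 0, just like bsr):

#include <stdint.h>

// floor(log2(x)) as (bit width - 1) - clz(x); assumes uintptr_t is unsigned
// long here, matching the __builtin_clzl overload gcc_generic.h selects.
static inline intptr_t lg(uintptr_t x) {
    return (intptr_t)(sizeof(x) * 8 - 1) - __builtin_clzl(x);
}
// lg(1) == 0, lg(2) == 1, lg(3) == 1, lg(64) == 6; undefined for x == 0.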
- -//TODO: check if use of gcc __builtin_ia32_pause intrinsic gives a "some how" better performing code -static inline void __TBB_machine_pause( int32_t delay ) { - for (int32_t i = 0; i < delay; i++) { - __asm__ __volatile__("pause;"); - } - return; -} -#define __TBB_Pause(V) __TBB_machine_pause(V) -#endif /* !__TBB_Pause */ - -// API to retrieve/update FPU control setting -#ifndef __TBB_CPU_CTL_ENV_PRESENT -#define __TBB_CPU_CTL_ENV_PRESENT 1 -namespace tbb { -namespace internal { -class cpu_ctl_env { -private: - int mxcsr; - short x87cw; - static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */ -public: - bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; } - void get_env() { - #if __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN - cpu_ctl_env loc_ctl; - __asm__ __volatile__ ( - "stmxcsr %0\n\t" - "fstcw %1" - : "=m"(loc_ctl.mxcsr), "=m"(loc_ctl.x87cw) - ); - *this = loc_ctl; - #else - __asm__ __volatile__ ( - "stmxcsr %0\n\t" - "fstcw %1" - : "=m"(mxcsr), "=m"(x87cw) - ); - #endif - mxcsr &= MXCSR_CONTROL_MASK; - } - void set_env() const { - __asm__ __volatile__ ( - "ldmxcsr %0\n\t" - "fldcw %1" - : : "m"(mxcsr), "m"(x87cw) - ); - } -}; -} // namespace internal -} // namespace tbb -#endif /* !__TBB_CPU_CTL_ENV_PRESENT */ - -#include "gcc_itsx.h" - -#endif /* __TBB_machine_gcc_ia32_common_H */ diff --git a/src/tbb/include/tbb/machine/gcc_itsx.h b/src/tbb/include/tbb/machine/gcc_itsx.h deleted file mode 100644 index 87971659a..000000000 --- a/src/tbb/include/tbb/machine/gcc_itsx.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_itsx_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
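[Editor's note] The `__TBB_machine_pause()` loop above issues the x86 `pause` instruction so a spinning hyperthread yields pipeline resources to its sibling. A hedged sketch of a typical spin-wait with exponential backoff built on the same instruction via `_mm_pause()` (the backoff policy and names are mine, not TBB's):

```cpp
#include <immintrin.h>   // _mm_pause
#include <atomic>

// Spin until 'flag' becomes true, doubling the number of pauses per probe
// so a long wait burns progressively fewer CPU-consuming instructions.
inline void spin_wait(const std::atomic<bool>& flag) {
    int pauses = 1;
    while (!flag.load(std::memory_order_acquire)) {
        for (int i = 0; i < pauses; ++i)
            _mm_pause();
        if (pauses < 64)
            pauses *= 2;   // back off, capped
    }
}
```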
-#endif - -#define __TBB_machine_gcc_itsx_H - -#define __TBB_OP_XACQUIRE 0xF2 -#define __TBB_OP_XRELEASE 0xF3 -#define __TBB_OP_LOCK 0xF0 - -#define __TBB_STRINGIZE_INTERNAL(arg) #arg -#define __TBB_STRINGIZE(arg) __TBB_STRINGIZE_INTERNAL(arg) - -#ifdef __TBB_x86_64 -#define __TBB_r_out "=r" -#else -#define __TBB_r_out "=q" -#endif - -inline static uint8_t __TBB_machine_try_lock_elided( volatile uint8_t* lk ) -{ - uint8_t value = 1; - __asm__ volatile (".byte " __TBB_STRINGIZE(__TBB_OP_XACQUIRE)"; lock; xchgb %0, %1;" - : __TBB_r_out(value), "=m"(*lk) : "0"(value), "m"(*lk) : "memory" ); - return uint8_t(value^1); -} - -inline static void __TBB_machine_try_lock_elided_cancel() -{ - // 'pause' instruction aborts HLE/RTM transactions - __asm__ volatile ("pause\n" : : : "memory" ); -} - -inline static void __TBB_machine_unlock_elided( volatile uint8_t* lk ) -{ - __asm__ volatile (".byte " __TBB_STRINGIZE(__TBB_OP_XRELEASE)"; movb $0, %0" - : "=m"(*lk) : "m"(*lk) : "memory" ); -} - -#if __TBB_TSX_INTRINSICS_PRESENT -#include - -#define __TBB_machine_is_in_transaction _xtest -#define __TBB_machine_begin_transaction _xbegin -#define __TBB_machine_end_transaction _xend -#define __TBB_machine_transaction_conflict_abort() _xabort(0xff) - -#else - -/*! - * Check if the instruction is executed in a transaction or not - */ -inline static bool __TBB_machine_is_in_transaction() -{ - int8_t res = 0; -#if __TBB_x86_32 - __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD6;\n" - "setz %0" : "=q"(res) : : "memory" ); -#else - __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD6;\n" - "setz %0" : "=r"(res) : : "memory" ); -#endif - return res==0; -} - -/*! - * Enter speculative execution mode. - * @return -1 on success - * abort cause ( or 0 ) on abort - */ -inline static uint32_t __TBB_machine_begin_transaction() -{ - uint32_t res = ~uint32_t(0); // success value - __asm__ volatile ("1: .byte 0xC7; .byte 0xF8;\n" // XBEGIN - " .long 2f-1b-6\n" // 2f-1b == difference in addresses of start - // of XBEGIN and the MOVL - // 2f - 1b - 6 == that difference minus the size of the - // XBEGIN instruction. This is the abort offset to - // 2: below. - " jmp 3f\n" // success (leave -1 in res) - "2: movl %%eax,%0\n" // store failure code in res - "3:" - :"=r"(res):"0"(res):"memory","%eax"); - return res; -} - -/*! - * Attempt to commit/end transaction - */ -inline static void __TBB_machine_end_transaction() -{ - __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD5" :::"memory"); // XEND -} - -/* - * aborts with code 0xFF (lock already held) - */ -inline static void __TBB_machine_transaction_conflict_abort() -{ - __asm__ volatile (".byte 0xC6; .byte 0xF8; .byte 0xFF" :::"memory"); -} - -#endif /* __TBB_TSX_INTRINSICS_PRESENT */ diff --git a/src/tbb/include/tbb/machine/ibm_aix51.h b/src/tbb/include/tbb/machine/ibm_aix51.h deleted file mode 100644 index 57dfeb336..000000000 --- a/src/tbb/include/tbb/machine/ibm_aix51.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
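[Editor's note] gcc_itsx.h above hand-encodes the XACQUIRE/XRELEASE and XBEGIN/XEND byte sequences for toolchains without TSX support. Where the compiler does provide the intrinsics, the same lock-elision pattern is a few lines; a minimal sketch assuming `-mrtm` and `<immintrin.h>` (the names `with_elision` and `g_lock` are mine; real code must also verify RTM support at runtime):

```cpp
#include <immintrin.h>   // _xbegin, _xend, _xabort, _mm_pause; needs -mrtm
#include <atomic>

std::atomic<int> g_lock{0};   // 0 = free, 1 = held (fallback path)

// Run 'critical' transactionally; fall back to the real lock on abort
// (conflict, capacity overflow, unsupported CPU, ...).
template <typename F>
void with_elision(F critical) {
    unsigned status = _xbegin();
    if (status == _XBEGIN_STARTED) {
        if (g_lock.load(std::memory_order_relaxed) != 0)
            _xabort(0xff);          // lock already held, as in the port above
        critical();
        _xend();
        return;
    }
    while (g_lock.exchange(1, std::memory_order_acquire) != 0)
        _mm_pause();                // fallback: take the lock for real
    critical();
    g_lock.store(0, std::memory_order_release);
}
```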
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// TODO: revise by comparing with mac_ppc.h - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_ibm_aix51_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_ibm_aix51_H - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG // assumption based on operating system - -#include -#include -#include - -extern "C" { -int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand); -int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand); -void __TBB_machine_flush (); -void __TBB_machine_lwsync (); -void __TBB_machine_isync (); -} - -// Mapping of old entry point names retained for the sake of backward binary compatibility -#define __TBB_machine_cmpswp4 __TBB_machine_cas_32 -#define __TBB_machine_cmpswp8 __TBB_machine_cas_64 - -#define __TBB_Yield() sched_yield() - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#if __GNUC__ - #define __TBB_control_consistency_helper() __asm__ __volatile__( "isync": : :"memory") - #define __TBB_acquire_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") - #define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") - #define __TBB_full_memory_fence() __asm__ __volatile__( "sync": : :"memory") -#else - // IBM C++ Compiler does not support inline assembly - // TODO: Since XL 9.0 or earlier GCC syntax is supported. Replace with more - // lightweight implementation (like in mac_ppc.h) - #define __TBB_control_consistency_helper() __TBB_machine_isync () - #define __TBB_acquire_consistency_helper() __TBB_machine_lwsync () - #define __TBB_release_consistency_helper() __TBB_machine_lwsync () - #define __TBB_full_memory_fence() __TBB_machine_flush () -#endif diff --git a/src/tbb/include/tbb/machine/icc_generic.h b/src/tbb/include/tbb/machine/icc_generic.h deleted file mode 100644 index c31a5a3d5..000000000 --- a/src/tbb/include/tbb/machine/icc_generic.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT - #error "Intel C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port" -#endif - -#define __TBB_machine_icc_generic_H - -//ICC mimics the "native" target compiler -#if _MSC_VER - #include "msvc_ia32_common.h" -#else - #include "gcc_ia32_common.h" -#endif - -//TODO: Make __TBB_WORDSIZE macro optional for ICC intrinsics port. -//As compiler intrinsics are used for all the operations it is possible to do. - -#if __TBB_x86_32 - #define __TBB_WORDSIZE 4 -#else - #define __TBB_WORDSIZE 8 -#endif -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -//__TBB_compiler_fence() defined just in case, as it seems not to be used on its own anywhere else -#if _MSC_VER - //TODO: any way to use same intrinsics on windows and linux? - #pragma intrinsic(_ReadWriteBarrier) - #define __TBB_compiler_fence() _ReadWriteBarrier() -#else - #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#endif - -#ifndef __TBB_full_memory_fence -#if _MSC_VER - //TODO: any way to use same intrinsics on windows and linux? - #pragma intrinsic(_mm_mfence) - #define __TBB_full_memory_fence() _mm_mfence() -#else - #define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") -#endif -#endif - -#define __TBB_control_consistency_helper() __TBB_compiler_fence() - -namespace tbb { namespace internal { -//TODO: is there any way to reuse definition of memory_order enum from ICC instead of copy paste. -//however it seems unlikely that ICC will silently change exact enum values, as they are defined -//in the ISO exactly like this. -//TODO: add test that exact values of the enum are same as in the ISO C++11 -typedef enum memory_order { - memory_order_relaxed, memory_order_consume, memory_order_acquire, - memory_order_release, memory_order_acq_rel, memory_order_seq_cst -} memory_order; - -namespace icc_intrinsics_port { - template - T convert_argument(T value){ - return value; - } - //The overload below is needed to have explicit conversion of pointer to void* in argument list. - //compiler bug? 
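[Editor's note] The ICC port below re-declares the C++11 `memory_order` enum and maps acquire/release loads and stores onto `__atomic_*_explicit` intrinsics. On any C++11 compiler the same semantics are spelled with `std::atomic`; a minimal sketch of the release/acquire pairing (my example, not package code):

```cpp
#include <atomic>

std::atomic<int> ready{0};
int payload = 0;

void producer() {
    payload = 42;                                  // plain store
    ready.store(1, std::memory_order_release);     // "store_with_release"
}

int consumer() {
    while (ready.load(std::memory_order_acquire) == 0)  // "load_with_acquire"
        ;                                          // spin
    return payload;  // guaranteed to observe 42 via the release/acquire pair
}
```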
- //TODO: add according broken macro and recheck with ICC 13.0 if the overload is still needed - template - void* convert_argument(T* value){ - return (void*)value; - } -} -//TODO: code below is a bit repetitive, consider simplifying it -template -struct machine_load_store { - static T load_with_acquire ( const volatile T& location ) { - return __atomic_load_explicit(&location, memory_order_acquire); - } - static void store_with_release ( volatile T &location, T value ) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release); - } -}; - -template -struct machine_load_store_relaxed { - static inline T load ( const T& location ) { - return __atomic_load_explicit(&location, memory_order_relaxed); - } - static inline void store ( T& location, T value ) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed); - } -}; - -template -struct machine_load_store_seq_cst { - static T load ( const volatile T& location ) { - return __atomic_load_explicit(&location, memory_order_seq_cst); - } - - static void store ( volatile T &location, T value ) { - __atomic_store_explicit(&location, value, memory_order_seq_cst); - } -}; - -}} // namespace tbb::internal - -namespace tbb{ namespace internal { namespace icc_intrinsics_port{ - typedef enum memory_order_map { - relaxed = memory_order_relaxed, - acquire = memory_order_acquire, - release = memory_order_release, - full_fence= memory_order_seq_cst - } memory_order_map; -}}}// namespace tbb::internal - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,M) \ -inline T __TBB_machine_cmpswp##S##M( volatile void *ptr, T value, T comparand ) { \ - __atomic_compare_exchange_strong_explicit( \ - (T*)ptr \ - ,&comparand \ - ,value \ - , tbb::internal::icc_intrinsics_port::M \ - , tbb::internal::icc_intrinsics_port::M); \ - return comparand; \ -} \ - \ -inline T __TBB_machine_fetchstore##S##M(volatile void *ptr, T value) { \ - return __atomic_exchange_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ -} \ - \ -inline T __TBB_machine_fetchadd##S##M(volatile void *ptr, T value) { \ - return __atomic_fetch_add_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, relaxed) - -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, relaxed) - -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, relaxed) - -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, full_fence) -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, acquire) -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, release) -__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, relaxed) - - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -#define __TBB_USE_FENCED_ATOMICS 1 - -namespace tbb { namespace internal { -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence) 
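[Editor's note] The fence-qualified CAS macros above compile down to the primitives GCC and Clang expose as `__atomic_*` builtins. A hedged sketch of the `full_fence` variant with the same return convention (the name `cmpswp4_seq_cst` is mine):

```cpp
#include <cstdint>

// Same shape as the deleted __TBB_machine_cmpswp4full_fence: returns the
// value observed at *ptr, which equals 'comparand' iff the swap happened.
inline int32_t cmpswp4_seq_cst(volatile void* ptr, int32_t value, int32_t comparand) {
    int32_t expected = comparand;
    __atomic_compare_exchange_n((int32_t*)ptr, &expected, value,
                                /*weak=*/false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;   // updated to the observed value on failure
}
```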
-__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence) - -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(acquire) -__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(release) - -__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(relaxed) -__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(relaxed) - -template -struct machine_load_store { - static T load_with_acquire ( const volatile T& location ) { - if( tbb::internal::is_aligned(&location,8)) { - return __atomic_load_explicit(&location, memory_order_acquire); - } else { - return __TBB_machine_generic_load8acquire(&location); - } - } - static void store_with_release ( volatile T &location, T value ) { - if( tbb::internal::is_aligned(&location,8)) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release); - } else { - return __TBB_machine_generic_store8release(&location,value); - } - } -}; - -template -struct machine_load_store_relaxed { - static T load( const volatile T& location ) { - if( tbb::internal::is_aligned(&location,8)) { - return __atomic_load_explicit(&location, memory_order_relaxed); - } else { - return __TBB_machine_generic_load8relaxed(&location); - } - } - static void store( volatile T &location, T value ) { - if( tbb::internal::is_aligned(&location,8)) { - __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed); - } else { - return __TBB_machine_generic_store8relaxed(&location,value); - } - } -}; - -template -struct machine_load_store_seq_cst { - static T load ( const volatile T& location ) { - if( tbb::internal::is_aligned(&location,8)) { - return __atomic_load_explicit(&location, memory_order_seq_cst); - } else { - return __TBB_machine_generic_load8full_fence(&location); - } - - } - - static void store ( volatile T &location, T value ) { - if( tbb::internal::is_aligned(&location,8)) { - __atomic_store_explicit(&location, value, memory_order_seq_cst); - } else { - return __TBB_machine_generic_store8full_fence(&location,value); - } - - } -}; - -#endif -}} // namespace tbb::internal -template -inline void __TBB_machine_OR( T *operand, T addend ) { - __atomic_fetch_or_explicit(operand, addend, tbb::internal::memory_order_seq_cst); -} - -template -inline void __TBB_machine_AND( T *operand, T addend ) { - __atomic_fetch_and_explicit(operand, addend, tbb::internal::memory_order_seq_cst); -} - diff --git a/src/tbb/include/tbb/machine/linux_common.h b/src/tbb/include/tbb/machine/linux_common.h deleted file mode 100644 index 53eeeafc3..000000000 --- a/src/tbb/include/tbb/machine/linux_common.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_machine_H
-#error Do not #include this internal file directly; use public TBB headers instead.
-#endif
-
-#include <sched.h>
-#define __TBB_Yield() sched_yield()
-
-#include <unistd.h>
-/* Futex definitions */
-#include <sys/syscall.h>
-
-#if defined(SYS_futex)
-
-#define __TBB_USE_FUTEX 1
-#include <limits.h>
-#include <errno.h>
-// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE.
-
-#ifdef FUTEX_WAIT
-#define __TBB_FUTEX_WAIT FUTEX_WAIT
-#else
-#define __TBB_FUTEX_WAIT 0
-#endif
-
-#ifdef FUTEX_WAKE
-#define __TBB_FUTEX_WAKE FUTEX_WAKE
-#else
-#define __TBB_FUTEX_WAKE 1
-#endif
-
-#ifndef __TBB_ASSERT
-#error machine specific headers must be included after tbb_stddef.h
-#endif
-
-namespace tbb {
-
-namespace internal {
-
-inline int futex_wait( void *futex, int comparand ) {
-    int r = syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 );
-#if TBB_USE_ASSERT
-    int e = errno;
-    __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), "futex_wait failed." );
-#endif /* TBB_USE_ASSERT */
-    return r;
-}
-
-inline int futex_wakeup_one( void *futex ) {
-    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 );
-    __TBB_ASSERT( r==0||r==1, "futex_wakeup_one: more than one thread woken up?" );
-    return r;
-}
-
-inline int futex_wakeup_all( void *futex ) {
-    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 );
-    __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" );
-    return r;
-}
-
-} /* namespace internal */
-
-} /* namespace tbb */
-
-#endif /* SYS_futex */
diff --git a/src/tbb/include/tbb/machine/linux_ia32.h b/src/tbb/include/tbb/machine/linux_ia32.h
deleted file mode 100644
index 27def2ff5..000000000
--- a/src/tbb/include/tbb/machine/linux_ia32.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
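[Editor's note] The futex wrappers deleted above are the Linux kernel primitive beneath TBB's sleep/wake paths: a waiter sleeps only while a word in user memory still holds an expected value, so wake-ups cannot be lost. A hedged sketch of a one-shot event built on the same calls (names are mine, not package code):

```cpp
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <climits>
#include <atomic>

std::atomic<int> state{0};   // 0 = not signaled, 1 = signaled

void wait_for_event() {
    while (state.load(std::memory_order_acquire) == 0) {
        // Sleeps only if 'state' is still 0 when the kernel re-checks it,
        // closing the race between the load and the syscall.
        syscall(SYS_futex, &state, FUTEX_WAIT, 0, nullptr, nullptr, 0);
    }
}

void signal_event() {
    state.store(1, std::memory_order_release);
    syscall(SYS_futex, &state, FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0);  // wake all
}
```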
-*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_linux_ia32_H - -#include -#include "gcc_ia32_common.h" - -#define __TBB_WORDSIZE 4 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() -#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") - -#if __TBB_ICC_ASM_VOLATILE_BROKEN -#define __TBB_VOLATILE -#else -#define __TBB_VOLATILE volatile -#endif - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X,R) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"","=q") -__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"","=r") -__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"l","=r") - -#if __INTEL_COMPILER -#pragma warning( push ) -// reference to EBX in a function requiring stack alignment -#pragma warning( disable: 998 ) -#endif - -#if __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN -#define __TBB_IA32_CAS8_NOINLINE __attribute__ ((noinline)) -#else -#define __TBB_IA32_CAS8_NOINLINE -#endif - -static inline __TBB_IA32_CAS8_NOINLINE int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) { -//TODO: remove the extra part of condition once __TBB_GCC_BUILTIN_ATOMICS_PRESENT is lowered to gcc version 4.1.2 -#if (__TBB_GCC_BUILTIN_ATOMICS_PRESENT || (__TBB_GCC_VERSION >= 40102)) && !__TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN - return __sync_val_compare_and_swap( reinterpret_cast(ptr), comparand, value ); -#else /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */ - //TODO: look like ICC 13.0 has some issues with this code, investigate it more deeply - int64_t result; - union { - int64_t i64; - int32_t i32[2]; - }; - i64 = value; -#if __PIC__ - /* compiling position-independent code */ - // EBX register preserved for compliance with position-independent code rules on IA32 - int32_t tmp; - __asm__ __volatile__ ( - "movl %%ebx,%2\n\t" - "movl %5,%%ebx\n\t" -#if __GNUC__==3 - "lock\n\t cmpxchg8b %1\n\t" -#else - "lock\n\t cmpxchg8b (%3)\n\t" -#endif - "movl %2,%%ebx" - : "=A"(result) - , "=m"(*(__TBB_VOLATILE int64_t *)ptr) - , "=m"(tmp) -#if __GNUC__==3 - : "m"(*(__TBB_VOLATILE int64_t *)ptr) -#else - : "SD"(ptr) -#endif - , "0"(comparand) - , "m"(i32[0]), "c"(i32[1]) - : "memory" -#if __INTEL_COMPILER - ,"ebx" -#endif - ); -#else /* !__PIC__ */ - __asm__ __volatile__ ( - "lock\n\t cmpxchg8b %1\n\t" - : "=A"(result), "=m"(*(__TBB_VOLATILE 
int64_t *)ptr) - : "m"(*(__TBB_VOLATILE int64_t *)ptr) - , "0"(comparand) - , "b"(i32[0]), "c"(i32[1]) - : "memory" - ); -#endif /* __PIC__ */ - return result; -#endif /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */ -} - -#undef __TBB_IA32_CAS8_NOINLINE - -#if __INTEL_COMPILER -#pragma warning( pop ) -#endif // warning 998 is back - -static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\norl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\nandl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -} - -//TODO: Check if it possible and profitable for IA-32 architecture on (Linux* and Windows*) -//to use of 64-bit load/store via floating point registers together with full fence -//for sequentially consistent load/store, instead of CAS. - -#if __clang__ -#define __TBB_fildq "fildll" -#define __TBB_fistpq "fistpll" -#else -#define __TBB_fildq "fildq" -#define __TBB_fistpq "fistpq" -#endif - -static inline int64_t __TBB_machine_aligned_load8 (const volatile void *ptr) { - __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),"__TBB_machine_aligned_load8 should be used with 8 byte aligned locations only \n"); - int64_t result; - __asm__ __volatile__ ( __TBB_fildq " %1\n\t" - __TBB_fistpq " %0" : "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" ); - return result; -} - -static inline void __TBB_machine_aligned_store8 (volatile void *ptr, int64_t value ) { - __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),"__TBB_machine_aligned_store8 should be used with 8 byte aligned locations only \n"); - // Aligned store - __asm__ __volatile__ ( __TBB_fildq " %1\n\t" - __TBB_fistpq " %0" : "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" ); -} - -static inline int64_t __TBB_machine_load8 (const volatile void *ptr) { -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - if( tbb::internal::is_aligned(ptr,8)) { -#endif - return __TBB_machine_aligned_load8(ptr); -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - } else { - // Unaligned load - return __TBB_machine_cmpswp8(const_cast(ptr),0,0); - } -#endif -} - -//! 
Handles misaligned 8-byte store -/** Defined in tbb_misc.cpp */ -extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ); -extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ); - -static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) { -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - if( tbb::internal::is_aligned(ptr,8)) { -#endif - __TBB_machine_aligned_store8(ptr,value); -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - } else { - // Unaligned store -#if TBB_USE_PERFORMANCE_WARNINGS - __TBB_machine_store8_slow_perf_warning(ptr); -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - __TBB_machine_store8_slow(ptr,value); - } -#endif -} - -// Machine specific atomic operations -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -#define __TBB_USE_GENERIC_DWORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_DWORD_FETCH_STORE 1 -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - diff --git a/src/tbb/include/tbb/machine/linux_ia64.h b/src/tbb/include/tbb/machine/linux_ia64.h deleted file mode 100644 index a9f386acc..000000000 --- a/src/tbb/include/tbb/machine/linux_ia64.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia64_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_linux_ia64_H - -#include -#include - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#if __INTEL_COMPILER - #define __TBB_compiler_fence() - #define __TBB_control_consistency_helper() __TBB_compiler_fence() - #define __TBB_acquire_consistency_helper() - #define __TBB_release_consistency_helper() - #define __TBB_full_memory_fence() __mf() -#else - #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") - #define __TBB_control_consistency_helper() __TBB_compiler_fence() - // Even though GCC imbues volatile loads with acquire semantics, it sometimes moves - // loads over the acquire fence. 
The following helpers stop such incorrect code motion. - #define __TBB_acquire_consistency_helper() __TBB_compiler_fence() - #define __TBB_release_consistency_helper() __TBB_compiler_fence() - #define __TBB_full_memory_fence() __asm__ __volatile__("mf": : :"memory") -#endif /* !__INTEL_COMPILER */ - -// Most of the functions will be in a .s file -// TODO: revise dynamic_link, memory pools and etc. if the library dependency is removed. - -extern "C" { - int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend); - - int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend); - - int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend); - int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend); - - int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend); - int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend); - - int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value); - int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value); - int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value); - - int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value); - int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value); - int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value); - - int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value); - - int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value); - - int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); - - int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); - - int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); - - int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_cmpswp8release(volatile void *ptr, 
int64_t value, int64_t comparand); - - int64_t __TBB_machine_lg(uint64_t value); - void __TBB_machine_pause(int32_t delay); - bool __TBB_machine_trylockbyte( volatile unsigned char &ptr ); - int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr ); - - //! Retrieves the current RSE backing store pointer. IA64 specific. - void* __TBB_get_bsp(); - - int32_t __TBB_machine_load1_relaxed(const void *ptr); - int32_t __TBB_machine_load2_relaxed(const void *ptr); - int32_t __TBB_machine_load4_relaxed(const void *ptr); - int64_t __TBB_machine_load8_relaxed(const void *ptr); - - void __TBB_machine_store1_relaxed(void *ptr, int32_t value); - void __TBB_machine_store2_relaxed(void *ptr, int32_t value); - void __TBB_machine_store4_relaxed(void *ptr, int32_t value); - void __TBB_machine_store8_relaxed(void *ptr, int64_t value); -} // extern "C" - -// Mapping old entry points to the names corresponding to the new full_fence identifier. -#define __TBB_machine_fetchadd1full_fence __TBB_machine_fetchadd1__TBB_full_fence -#define __TBB_machine_fetchadd2full_fence __TBB_machine_fetchadd2__TBB_full_fence -#define __TBB_machine_fetchadd4full_fence __TBB_machine_fetchadd4__TBB_full_fence -#define __TBB_machine_fetchadd8full_fence __TBB_machine_fetchadd8__TBB_full_fence -#define __TBB_machine_fetchstore1full_fence __TBB_machine_fetchstore1__TBB_full_fence -#define __TBB_machine_fetchstore2full_fence __TBB_machine_fetchstore2__TBB_full_fence -#define __TBB_machine_fetchstore4full_fence __TBB_machine_fetchstore4__TBB_full_fence -#define __TBB_machine_fetchstore8full_fence __TBB_machine_fetchstore8__TBB_full_fence -#define __TBB_machine_cmpswp1full_fence __TBB_machine_cmpswp1__TBB_full_fence -#define __TBB_machine_cmpswp2full_fence __TBB_machine_cmpswp2__TBB_full_fence -#define __TBB_machine_cmpswp4full_fence __TBB_machine_cmpswp4__TBB_full_fence -#define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8__TBB_full_fence - -// Mapping relaxed operations to the entry points implementing them. -/** On IA64 RMW operations implicitly have acquire semantics. Thus one cannot - actually have completely relaxed RMW operation here. 
**/ -#define __TBB_machine_fetchadd1relaxed __TBB_machine_fetchadd1acquire -#define __TBB_machine_fetchadd2relaxed __TBB_machine_fetchadd2acquire -#define __TBB_machine_fetchadd4relaxed __TBB_machine_fetchadd4acquire -#define __TBB_machine_fetchadd8relaxed __TBB_machine_fetchadd8acquire -#define __TBB_machine_fetchstore1relaxed __TBB_machine_fetchstore1acquire -#define __TBB_machine_fetchstore2relaxed __TBB_machine_fetchstore2acquire -#define __TBB_machine_fetchstore4relaxed __TBB_machine_fetchstore4acquire -#define __TBB_machine_fetchstore8relaxed __TBB_machine_fetchstore8acquire -#define __TBB_machine_cmpswp1relaxed __TBB_machine_cmpswp1acquire -#define __TBB_machine_cmpswp2relaxed __TBB_machine_cmpswp2acquire -#define __TBB_machine_cmpswp4relaxed __TBB_machine_cmpswp4acquire -#define __TBB_machine_cmpswp8relaxed __TBB_machine_cmpswp8acquire - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,V) \ - template \ - struct machine_load_store_relaxed { \ - static inline T load ( const T& location ) { \ - return (T)__TBB_machine_load##S##_relaxed(&location); \ - } \ - static inline void store ( T& location, T value ) { \ - __TBB_machine_store##S##_relaxed(&location, (V)value); \ - } \ - } - -namespace tbb { -namespace internal { - __TBB_MACHINE_DEFINE_ATOMICS(1,int8_t); - __TBB_MACHINE_DEFINE_ATOMICS(2,int16_t); - __TBB_MACHINE_DEFINE_ATOMICS(4,int32_t); - __TBB_MACHINE_DEFINE_ATOMICS(8,int64_t); -}} // namespaces internal, tbb - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -#define __TBB_USE_FENCED_ATOMICS 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -// Definition of Lock functions -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) -#define __TBB_LockByte(P) __TBB_machine_lockbyte(P) - -// Definition of other utility functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) diff --git a/src/tbb/include/tbb/machine/linux_intel64.h b/src/tbb/include/tbb/machine/linux_intel64.h deleted file mode 100644 index 6fe018b83..000000000 --- a/src/tbb/include/tbb/machine/linux_intel64.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
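[Editor's note] The IA-64 port above names the memory ordering in the symbol itself (`__TBB_machine_fetchadd4acquire`, `...release`, `...__TBB_full_fence`), and maps "relaxed" onto the acquire variants because IA-64 RMW instructions are implicitly at least acquire. With the GCC `__atomic` builtins the ordering is an argument rather than a name suffix; a small sketch of the correspondence (function names are mine):

```cpp
#include <cstdint>

inline int32_t fetchadd4_acquire(volatile void* ptr, int32_t addend) {
    return __atomic_fetch_add((int32_t*)ptr, addend, __ATOMIC_ACQUIRE);
}
inline int32_t fetchadd4_release(volatile void* ptr, int32_t addend) {
    return __atomic_fetch_add((int32_t*)ptr, addend, __ATOMIC_RELEASE);
}
inline int32_t fetchadd4_full_fence(volatile void* ptr, int32_t addend) {
    return __atomic_fetch_add((int32_t*)ptr, addend, __ATOMIC_SEQ_CST);
}
```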
-*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_linux_intel64_H - -#include -#include "gcc_ia32_common.h" - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() - -#ifndef __TBB_full_memory_fence -#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") -#endif - -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(volatile T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(addend), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(value), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"") -__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"") -__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"") -__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t,"q") - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - diff --git a/src/tbb/include/tbb/machine/mac_ppc.h b/src/tbb/include/tbb/machine/mac_ppc.h deleted file mode 100644 index 2f12c9817..000000000 --- a/src/tbb/include/tbb/machine/mac_ppc.h +++ /dev/null @@ -1,313 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_power_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_gcc_power_H - -#include -#include - -// TODO: rename to gcc_power.h? -// This file is for Power Architecture with compilers supporting GNU inline-assembler syntax (currently GNU g++ and IBM XL). -// Note that XL V9.0 (sometimes?) has trouble dealing with empty input and/or clobber lists, so they should be avoided. - -#if __powerpc64__ || __ppc64__ - // IBM XL documents __powerpc64__ (and __PPC64__). - // Apple documents __ppc64__ (with __ppc__ only on 32-bit). - #define __TBB_WORDSIZE 8 -#else - #define __TBB_WORDSIZE 4 -#endif - -// Traditionally Power Architecture is big-endian. -// Little-endian could be just an address manipulation (compatibility with TBB not verified), -// or normal little-endian (on more recent systems). Embedded PowerPC systems may support -// page-specific endianness, but then one endianness must be hidden from TBB so that it still sees only one. -#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG -#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE -#elif defined(__BYTE_ORDER__) - #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED -#else - #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT -#endif - -// On Power Architecture, (lock-free) 64-bit atomics require 64-bit hardware: -#if __TBB_WORDSIZE==8 - // Do not change the following definition, because TBB itself will use 64-bit atomics in 64-bit builds. - #define __TBB_64BIT_ATOMICS 1 -#elif __bgp__ - // Do not change the following definition, because this is known 32-bit hardware. - #define __TBB_64BIT_ATOMICS 0 -#else - // To enable 64-bit atomics in 32-bit builds, set the value below to 1 instead of 0. - // You must make certain that the program will only use them on actual 64-bit hardware - // (which typically means that the entire program is only executed on such hardware), - // because their implementation involves machine instructions that are illegal elsewhere. - // The setting can be chosen independently per compilation unit, - // which also means that TBB itself does not need to be rebuilt. - // Alternatively (but only for the current architecture and TBB version), - // override the default as a predefined macro when invoking the compiler. 
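[Editor's note] The `lwarx`/`stwcx.` and `ldarx`/`stdcx.` loops deleted below implement CAS via load-reserve/store-conditional: the store fails whenever the reservation is lost, and the `bne- 0b` branch retries. That spurious-failure behavior is exactly what C++11's `compare_exchange_weak` models, since on Power it compiles to a single reservation pair. A hedged sketch (my code, not the port's):

```cpp
#include <atomic>
#include <cstdint>

// Same retry structure as the "bne- 0b" loops below: keep retrying while
// failures are spurious (observed value unchanged), stop on a real mismatch.
inline int32_t cas_retry(std::atomic<int32_t>& a, int32_t desired, int32_t expected) {
    int32_t observed = expected;
    while (!a.compare_exchange_weak(observed, desired,
                                    std::memory_order_seq_cst)) {
        if (observed != expected)
            break;        // genuine mismatch, not a lost reservation
    }
    return observed;      // equals 'expected' iff the swap happened
}
```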
- #ifndef __TBB_64BIT_ATOMICS - #define __TBB_64BIT_ATOMICS 0 - #endif -#endif - -inline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand ) -{ - int32_t result; - - __asm__ __volatile__("sync\n" - "0:\n\t" - "lwarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ - "cmpw %[res],%[cmp]\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stwcx. %[val],0,%[ptr]\n\t" /* store new value */ - "bne- 0b\n" /* retry if reservation lost */ - "1:\n\t" /* the exit */ - "isync" - : [res]"=&r"(result) - , "+m"(* (int32_t*) ptr) /* redundant with "memory" */ - : [ptr]"r"(ptr) - , [val]"r"(value) - , [cmp]"r"(comparand) - : "memory" /* compiler full fence */ - , "cr0" /* clobbered by cmp and/or stwcx. */ - ); - return result; -} - -#if __TBB_WORDSIZE==8 - -inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; - __asm__ __volatile__("sync\n" - "0:\n\t" - "ldarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ - "cmpd %[res],%[cmp]\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stdcx. %[val],0,%[ptr]\n\t" /* store new value */ - "bne- 0b\n" /* retry if reservation lost */ - "1:\n\t" /* the exit */ - "isync" - : [res]"=&r"(result) - , "+m"(* (int64_t*) ptr) /* redundant with "memory" */ - : [ptr]"r"(ptr) - , [val]"r"(value) - , [cmp]"r"(comparand) - : "memory" /* compiler full fence */ - , "cr0" /* clobbered by cmp and/or stdcx. */ - ); - return result; -} - -#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */ - -inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; - int64_t value_register, comparand_register, result_register; // dummy variables to allocate registers - __asm__ __volatile__("sync\n\t" - "ld %[val],%[valm]\n\t" - "ld %[cmp],%[cmpm]\n" - "0:\n\t" - "ldarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ - "cmpd %[res],%[cmp]\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stdcx. %[val],0,%[ptr]\n\t" /* store new value */ - "bne- 0b\n" /* retry if reservation lost */ - "1:\n\t" /* the exit */ - "std %[res],%[resm]\n\t" - "isync" - : [resm]"=m"(result) - , [res] "=&r"( result_register) - , [val] "=&r"( value_register) - , [cmp] "=&r"(comparand_register) - , "+m"(* (int64_t*) ptr) /* redundant with "memory" */ - : [ptr] "r"(ptr) - , [valm]"m"(value) - , [cmpm]"m"(comparand) - : "memory" /* compiler full fence */ - , "cr0" /* clobbered by cmpd and/or stdcx. 
*/ - ); - return result; -} - -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ - -#define __TBB_MACHINE_DEFINE_LOAD_STORE(S,ldx,stx,cmpx) \ - template \ - struct machine_load_store { \ - static inline T load_with_acquire(const volatile T& location) { \ - T result; \ - __asm__ __volatile__(ldx " %[res],0(%[ptr])\n" \ - "0:\n\t" \ - cmpx " %[res],%[res]\n\t" \ - "bne- 0b\n\t" \ - "isync" \ - : [res]"=r"(result) \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , "m"(location) /* redundant with "memory" */ \ - : "memory" /* compiler acquire fence */ \ - , "cr0" /* clobbered by cmpw/cmpd */); \ - return result; \ - } \ - static inline void store_with_release(volatile T &location, T value) { \ - __asm__ __volatile__("lwsync\n\t" \ - stx " %[val],0(%[ptr])" \ - : "=m"(location) /* redundant with "memory" */ \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , [val]"r"(value) \ - : "memory"/*compiler release fence*/ /*(cr0 not affected)*/); \ - } \ - }; \ - \ - template \ - struct machine_load_store_relaxed { \ - static inline T load (const __TBB_atomic T& location) { \ - T result; \ - __asm__ __volatile__(ldx " %[res],0(%[ptr])" \ - : [res]"=r"(result) \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , "m"(location) \ - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ \ - return result; \ - } \ - static inline void store (__TBB_atomic T &location, T value) { \ - __asm__ __volatile__(stx " %[val],0(%[ptr])" \ - : "=m"(location) \ - : [ptr]"b"(&location) /* cannot use register 0 here */ \ - , [val]"r"(value) \ - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ \ - } \ - }; - -namespace tbb { -namespace internal { - __TBB_MACHINE_DEFINE_LOAD_STORE(1,"lbz","stb","cmpw") - __TBB_MACHINE_DEFINE_LOAD_STORE(2,"lhz","sth","cmpw") - __TBB_MACHINE_DEFINE_LOAD_STORE(4,"lwz","stw","cmpw") - -#if __TBB_WORDSIZE==8 - - __TBB_MACHINE_DEFINE_LOAD_STORE(8,"ld" ,"std","cmpd") - -#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */ - - template - struct machine_load_store { - static inline T load_with_acquire(const volatile T& location) { - T result; - T result_register; // dummy variable to allocate a register - __asm__ __volatile__("ld %[res],0(%[ptr])\n\t" - "std %[res],%[resm]\n" - "0:\n\t" - "cmpd %[res],%[res]\n\t" - "bne- 0b\n\t" - "isync" - : [resm]"=m"(result) - , [res]"=&r"(result_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , "m"(location) /* redundant with "memory" */ - : "memory" /* compiler acquire fence */ - , "cr0" /* clobbered by cmpd */); - return result; - } - - static inline void store_with_release(volatile T &location, T value) { - T value_register; // dummy variable to allocate a register - __asm__ __volatile__("lwsync\n\t" - "ld %[val],%[valm]\n\t" - "std %[val],0(%[ptr])" - : "=m"(location) /* redundant with "memory" */ - , [val]"=&r"(value_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , [valm]"m"(value) - : "memory"/*compiler release fence*/ /*(cr0 not affected)*/); - } - }; - - struct machine_load_store_relaxed { - static inline T load (const volatile T& location) { - T result; - T result_register; // dummy variable to allocate a register - __asm__ __volatile__("ld %[res],0(%[ptr])\n\t" - "std %[res],%[resm]" - : [resm]"=m"(result) - , [res]"=&r"(result_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , "m"(location) - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ - return result; - } - - static inline void store (volatile T &location, T value) { - T value_register; // 
dummy variable to allocate a register - __asm__ __volatile__("ld %[val],%[valm]\n\t" - "std %[val],0(%[ptr])" - : "=m"(location) - , [val]"=&r"(value_register) - : [ptr]"b"(&location) /* cannot use register 0 here */ - , [valm]"m"(value) - ); /*(no compiler fence)*/ /*(cr0 not affected)*/ - } - }; - #define __TBB_machine_load_store_relaxed_8 - -#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ - -}} // namespaces internal, tbb - -#undef __TBB_MACHINE_DEFINE_LOAD_STORE - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#define __TBB_control_consistency_helper() __asm__ __volatile__("isync": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__( "sync": : :"memory") - -static inline intptr_t __TBB_machine_lg( uintptr_t x ) { - __TBB_ASSERT(x, "__TBB_Log2(0) undefined"); - // cntlzd/cntlzw starts counting at 2^63/2^31 (ignoring any higher-order bits), and does not affect cr0 -#if __TBB_WORDSIZE==8 - __asm__ __volatile__ ("cntlzd %0,%0" : "+r"(x)); - return 63-static_cast(x); -#else - __asm__ __volatile__ ("cntlzw %0,%0" : "+r"(x)); - return 31-static_cast(x); -#endif -} -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Assumes implicit alignment for any 32-bit value -typedef uint32_t __TBB_Flag; -#define __TBB_Flag __TBB_Flag - -inline bool __TBB_machine_trylockbyte( __TBB_atomic __TBB_Flag &flag ) { - return __TBB_machine_cmpswp4(&flag,1,0)==0; -} -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/src/tbb/include/tbb/machine/macos_common.h b/src/tbb/include/tbb/machine/macos_common.h deleted file mode 100644 index dffcea5d7..000000000 --- a/src/tbb/include/tbb/machine/macos_common.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_macos_common_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
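[Editor's note] The macOS port below falls back on `OSAtomic*` barrier calls when no architecture header matched. Apple deprecated those functions in macOS 10.12 in favor of `<atomic>`/`<stdatomic.h>`; a hedged sketch of what the CAS-based fallback reduces to with `std::atomic` (names are mine, not package code):

```cpp
#include <atomic>
#include <cstdint>

// The port below builds a sequentially consistent 64-bit CAS out of
// OSAtomicCompareAndSwap64Barrier plus a snapshot-on-failure loop; with
// std::atomic the whole dance collapses to one call.
inline int64_t cmpswp8(std::atomic<int64_t>& a, int64_t value, int64_t comparand) {
    int64_t observed = comparand;
    a.compare_exchange_strong(observed, value, std::memory_order_seq_cst);
    return observed;   // matches the deleted function's return convention
}

inline int64_t load8_seq_cst(const std::atomic<int64_t>& a) {
    return a.load(std::memory_order_seq_cst);
}
```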
-#endif
-
-#define __TBB_machine_macos_common_H
-
-#include <sched.h>
-#define __TBB_Yield() sched_yield()
-
-// __TBB_HardwareConcurrency
-
-#include <sys/types.h>
-#include <sys/sysctl.h>
-
-static inline int __TBB_macos_available_cpu() {
-    int name[2] = {CTL_HW, HW_AVAILCPU};
-    int ncpu;
-    size_t size = sizeof(ncpu);
-    sysctl( name, 2, &ncpu, &size, NULL, 0 );
-    return ncpu;
-}
-
-#define __TBB_HardwareConcurrency() __TBB_macos_available_cpu()
-
-#ifndef __TBB_full_memory_fence
-    // TBB has not recognized the architecture (none of the architecture abstraction
-    // headers was included).
-    #define __TBB_UnknownArchitecture 1
-#endif
-
-#if __TBB_UnknownArchitecture
-// Implementation of atomic operations based on OS provided primitives
-#include <libkern/OSAtomic.h>
-
-static inline int64_t __TBB_machine_cmpswp8_OsX(volatile void *ptr, int64_t value, int64_t comparand)
-{
-    __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), "address not properly aligned for OS X* atomics");
-    int64_t* address = (int64_t*)ptr;
-    while( !OSAtomicCompareAndSwap64Barrier(comparand, value, address) ){
-#if __TBB_WORDSIZE==8
-        int64_t snapshot = *address;
-#else
-        int64_t snapshot = OSAtomicAdd64( 0, address );
-#endif
-        if( snapshot!=comparand ) return snapshot;
-    }
-    return comparand;
-}
-
-#define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8_OsX
-
-#endif /* __TBB_UnknownArchitecture */
-
-#if __TBB_UnknownArchitecture
-
-#ifndef __TBB_WORDSIZE
-#define __TBB_WORDSIZE 4
-#endif
-
-#ifdef __TBB_ENDIANNESS
-    // Already determined based on hardware architecture.
-#elif __BIG_ENDIAN__
-    #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG
-#elif __LITTLE_ENDIAN__
-    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
-#else
-    #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED
-#endif
-
-/** As this generic implementation has absolutely no information about underlying
-    hardware, its performance most likely will be sub-optimal because of full memory
-    fence usages where a more lightweight synchronization means (or none at all)
-    could suffice. Thus if you use this header to enable TBB on a new platform,
-    consider forking it and relaxing below helpers as appropriate.
**/ -#define __TBB_control_consistency_helper() OSMemoryBarrier() -#define __TBB_acquire_consistency_helper() OSMemoryBarrier() -#define __TBB_release_consistency_helper() OSMemoryBarrier() -#define __TBB_full_memory_fence() OSMemoryBarrier() - -static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), "address not properly aligned for OS X* atomics"); - int32_t* address = (int32_t*)ptr; - while( !OSAtomicCompareAndSwap32Barrier(comparand, value, address) ){ - int32_t snapshot = *address; - if( snapshot!=comparand ) return snapshot; - } - return comparand; -} - -static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), "address not properly aligned for OS X* atomics"); - return OSAtomicAdd32Barrier(addend, (int32_t*)ptr) - addend; -} - -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend) -{ - __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), "address not properly aligned for OS X* atomics"); - return OSAtomicAdd64Barrier(addend, (int64_t*)ptr) - addend; -} - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#if __TBB_WORDSIZE == 4 - #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#endif -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#endif /* __TBB_UnknownArchitecture */ diff --git a/src/tbb/include/tbb/machine/mic_common.h b/src/tbb/include/tbb/machine/mic_common.h deleted file mode 100644 index 1f522da1f..000000000 --- a/src/tbb/include/tbb/machine/mic_common.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_mic_common_H -#define __TBB_mic_common_H - -#ifndef __TBB_machine_H -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#if ! 
__TBB_DEFINE_MIC - #error mic_common.h should be included only when building for Intel(R) Many Integrated Core Architecture -#endif - -#ifndef __TBB_PREFETCHING -#define __TBB_PREFETCHING 1 -#endif -#if __TBB_PREFETCHING -#include -#define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1) -#define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1) -#endif - -/** Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/ -#define __TBB_full_memory_fence() __asm__ __volatile__("lock; addl $0,(%%rsp)":::"memory") -#define __TBB_Pause(x) _mm_delay_32(16*(x)) -#define __TBB_STEALING_PAUSE 1500/16 -#include -#define __TBB_Yield() sched_yield() - -// low-level timing intrinsic and its type -#define __TBB_machine_time_stamp() _rdtsc() -typedef uint64_t machine_tsc_t; - -/** Specifics **/ -#define __TBB_STEALING_ABORT_ON_CONTENTION 1 -#define __TBB_YIELD2P 1 -#define __TBB_HOARD_NONLOCAL_TASKS 1 - -#if ! ( __FreeBSD__ || __linux__ ) - #error Intel(R) Many Integrated Core Compiler does not define __FreeBSD__ or __linux__ anymore. Check for the __TBB_XXX_BROKEN defined under __FreeBSD__ or __linux__. -#endif /* ! ( __FreeBSD__ || __linux__ ) */ - -#endif /* __TBB_mic_common_H */ diff --git a/src/tbb/include/tbb/machine/msvc_armv7.h b/src/tbb/include/tbb/machine/msvc_armv7.h deleted file mode 100644 index b96511c75..000000000 --- a/src/tbb/include/tbb/machine/msvc_armv7.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_msvc_armv7_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
-#endif - -#define __TBB_msvc_armv7_H - -#include -#include - -#define __TBB_WORDSIZE 4 - -#define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED - -#if defined(TBB_WIN32_USE_CL_BUILTINS) -// We can test this on _M_IX86 -#pragma intrinsic(_ReadWriteBarrier) -#pragma intrinsic(_mm_mfence) -#define __TBB_compiler_fence() _ReadWriteBarrier() -#define __TBB_full_memory_fence() _mm_mfence() -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() -#else -//Now __dmb(_ARM_BARRIER_SY) is used for both compiler and memory fences -//This might be changed later after testing -#define __TBB_compiler_fence() __dmb(_ARM_BARRIER_SY) -#define __TBB_full_memory_fence() __dmb(_ARM_BARRIER_SY) -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence() -#define __TBB_release_consistency_helper() __TBB_full_memory_fence() -#endif - -//-------------------------------------------------- -// Compare and swap -//-------------------------------------------------- - -/** - * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - * @return value originally in memory at ptr, regardless of success -*/ - -#define __TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(S,T,F) \ -inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ - return _InterlockedCompareExchange##F(reinterpret_cast(ptr),value,comparand); \ -} \ - -#define __TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(S,T,F) \ -inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ - return _InterlockedExchangeAdd##F(reinterpret_cast(ptr),value); \ -} \ - -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(1,char,8) -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(2,short,16) -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(4,long,) -__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(8,__int64,64) -__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(4,long,) -#if defined(TBB_WIN32_USE_CL_BUILTINS) -// No _InterlockedExchangeAdd64 intrinsic on _M_IX86 -#define __TBB_64BIT_ATOMICS 0 -#else -__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(8,__int64,64) -#endif - -inline void __TBB_machine_pause (int32_t delay ) -{ - while(delay>0) - { - __TBB_compiler_fence(); - delay--; - } -} - -// API to retrieve/update FPU control setting -#define __TBB_CPU_CTL_ENV_PRESENT 1 - -namespace tbb { -namespace internal { - -template -struct machine_load_store_relaxed { - static inline T load ( const volatile T& location ) { - const T value = location; - - /* - * An extra memory barrier is required for errata #761319 - * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a - */ - __TBB_acquire_consistency_helper(); - return value; - } - - static inline void store ( volatile T& location, T value ) { - location = value; - } -}; - -class cpu_ctl_env { -private: - unsigned int my_ctl; -public: - bool operator!=( const cpu_ctl_env& ctl ) const { return my_ctl != ctl.my_ctl; } - void get_env() { my_ctl = _control87(0, 0); } - void set_env() const { _control87( my_ctl, ~0U ); } -}; - -} // namespace internal -} // namespaces tbb - -// Machine specific atomic operations -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define 
__TBB_Pause(V) __TBB_machine_pause(V) - -// Use generics for some things -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#if defined(TBB_WIN32_USE_CL_BUILTINS) -#if !__TBB_WIN8UI_SUPPORT -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#else -#include -#define __TBB_Yield() std::this_thread::yield() -#endif -#else -#define __TBB_Yield() __yield() -#endif - -// Machine specific atomic operations -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -template -inline void __TBB_machine_OR( T1 *operand, T2 addend ) { - _InterlockedOr((long volatile *)operand, (long)addend); -} - -template -inline void __TBB_machine_AND( T1 *operand, T2 addend ) { - _InterlockedAnd((long volatile *)operand, (long)addend); -} - diff --git a/src/tbb/include/tbb/machine/msvc_ia32_common.h b/src/tbb/include/tbb/machine/msvc_ia32_common.h deleted file mode 100644 index 184c3dac3..000000000 --- a/src/tbb/include/tbb/machine/msvc_ia32_common.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_msvc_ia32_common_H -#define __TBB_machine_msvc_ia32_common_H - -#include - -//TODO: consider moving this macro to tbb_config.h and used there MSVC asm is used -#if !_M_X64 || __INTEL_COMPILER - #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 1 - - #if _M_X64 - #define __TBB_r(reg_name) r##reg_name - #else - #define __TBB_r(reg_name) e##reg_name - #endif -#else - //MSVC in x64 mode does not accept inline assembler - #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 0 -#endif - -#define __TBB_NO_X86_MSVC_INLINE_ASM_MSG "The compiler being used is not supported (outdated?)" - -#if (_MSC_VER >= 1300) || (__INTEL_COMPILER) //Use compiler intrinsic when available - #define __TBB_PAUSE_USE_INTRINSIC 1 - #pragma intrinsic(_mm_pause) - namespace tbb { namespace internal { namespace intrinsics { namespace msvc { - static inline void __TBB_machine_pause (uintptr_t delay ) { - for (;delay>0; --delay ) - _mm_pause(); - } - }}}} -#else - #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE - #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG - #endif - - namespace tbb { namespace internal { namespace inline_asm { namespace msvc { - static inline void __TBB_machine_pause (uintptr_t delay ) { - _asm - { - mov __TBB_r(ax), delay - __TBB_L1: - pause - add __TBB_r(ax), -1 - jne __TBB_L1 - } - return; - } - }}}} -#endif - -static inline void __TBB_machine_pause (uintptr_t delay ){ - #if __TBB_PAUSE_USE_INTRINSIC - tbb::internal::intrinsics::msvc::__TBB_machine_pause(delay); - #else - tbb::internal::inline_asm::msvc::__TBB_machine_pause(delay); - #endif -} - -//TODO: move this function to windows_api.h or to place where it is used -#if (_MSC_VER<1400) && (!_WIN64) && (__TBB_X86_MSVC_INLINE_ASM_AVAILABLE) - static inline void* __TBB_machine_get_current_teb () { - void* pteb; - __asm mov eax, fs:[0x18] - __asm mov pteb, eax - return pteb; - } -#endif - -#if ( _MSC_VER>=1400 && !defined(__INTEL_COMPILER) ) || (__INTEL_COMPILER>=1200) -// MSVC did not have this intrinsic prior to VC8. -// ICL 11.1 fails to compile a TBB example if __TBB_Log2 uses the intrinsic. 
- #define __TBB_LOG2_USE_BSR_INTRINSIC 1 - #if _M_X64 - #define __TBB_BSR_INTRINSIC _BitScanReverse64 - #else - #define __TBB_BSR_INTRINSIC _BitScanReverse - #endif - #pragma intrinsic(__TBB_BSR_INTRINSIC) - - namespace tbb { namespace internal { namespace intrinsics { namespace msvc { - inline uintptr_t __TBB_machine_lg( uintptr_t i ){ - unsigned long j; - __TBB_BSR_INTRINSIC( &j, i ); - return j; - } - }}}} -#else - #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE - #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG - #endif - - namespace tbb { namespace internal { namespace inline_asm { namespace msvc { - inline uintptr_t __TBB_machine_lg( uintptr_t i ){ - uintptr_t j; - __asm - { - bsr __TBB_r(ax), i - mov j, __TBB_r(ax) - } - return j; - } - }}}} -#endif - -static inline intptr_t __TBB_machine_lg( uintptr_t i ) { -#if __TBB_LOG2_USE_BSR_INTRINSIC - return tbb::internal::intrinsics::msvc::__TBB_machine_lg(i); -#else - return tbb::internal::inline_asm::msvc::__TBB_machine_lg(i); -#endif -} - -// API to retrieve/update FPU control setting -#define __TBB_CPU_CTL_ENV_PRESENT 1 - -namespace tbb { namespace internal { class cpu_ctl_env; } } -#if __TBB_X86_MSVC_INLINE_ASM_AVAILABLE - inline void __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ctl ) { - __asm { - __asm mov __TBB_r(ax), ctl - __asm stmxcsr [__TBB_r(ax)] - __asm fstcw [__TBB_r(ax)+4] - } - } - inline void __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ctl ) { - __asm { - __asm mov __TBB_r(ax), ctl - __asm ldmxcsr [__TBB_r(ax)] - __asm fldcw [__TBB_r(ax)+4] - } - } -#else - extern "C" { - void __TBB_EXPORTED_FUNC __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ); - void __TBB_EXPORTED_FUNC __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ); - } -#endif - -namespace tbb { -namespace internal { -class cpu_ctl_env { -private: - int mxcsr; - short x87cw; - static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */ -public: - bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; } - void get_env() { - __TBB_get_cpu_ctl_env( this ); - mxcsr &= MXCSR_CONTROL_MASK; - } - void set_env() const { __TBB_set_cpu_ctl_env( this ); } -}; -} // namespace internal -} // namespace tbb - -#if !__TBB_WIN8UI_SUPPORT -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#else -#include -#define __TBB_Yield() std::this_thread::yield() -#endif - -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#undef __TBB_r - -extern "C" { - __int8 __TBB_EXPORTED_FUNC __TBB_machine_try_lock_elided (volatile void* ptr); - void __TBB_EXPORTED_FUNC __TBB_machine_unlock_elided (volatile void* ptr); - - // 'pause' instruction aborts HLE/RTM transactions -#if __TBB_PAUSE_USE_INTRINSIC - inline static void __TBB_machine_try_lock_elided_cancel() { _mm_pause(); } -#else - inline static void __TBB_machine_try_lock_elided_cancel() { _asm pause; } -#endif - -#if __TBB_TSX_INTRINSICS_PRESENT - #define __TBB_machine_is_in_transaction _xtest - #define __TBB_machine_begin_transaction _xbegin - #define __TBB_machine_end_transaction _xend - // The value (0xFF) below comes from the - // Intel(R) 64 and IA-32 Architectures Optimization Reference Manual 12.4.5 lock not free - #define __TBB_machine_transaction_conflict_abort() _xabort(0xFF) -#else - __int8 __TBB_EXPORTED_FUNC __TBB_machine_is_in_transaction(); - unsigned __int32 __TBB_EXPORTED_FUNC __TBB_machine_begin_transaction(); - void __TBB_EXPORTED_FUNC 
__TBB_machine_end_transaction(); - void __TBB_EXPORTED_FUNC __TBB_machine_transaction_conflict_abort(); -#endif /* __TBB_TSX_INTRINSICS_PRESENT */ -} - -#endif /* __TBB_machine_msvc_ia32_common_H */ diff --git a/src/tbb/include/tbb/machine/sunos_sparc.h b/src/tbb/include/tbb/machine/sunos_sparc.h deleted file mode 100644 index 1d4fbf744..000000000 --- a/src/tbb/include/tbb/machine/sunos_sparc.h +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_sunos_sparc_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_sunos_sparc_H - -#include -#include - -#define __TBB_WORDSIZE 8 -// Big endian is assumed for SPARC. -// While hardware may support page-specific bi-endianness, only big endian pages may be exposed to TBB -#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG - -/** To those working on SPARC hardware. Consider relaxing acquire and release - consistency helpers to no-op (as this port covers TSO mode only). 
**/
-#define __TBB_compiler_fence() __asm__ __volatile__ ("": : :"memory")
-#define __TBB_control_consistency_helper() __TBB_compiler_fence()
-#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
-#define __TBB_release_consistency_helper() __TBB_compiler_fence()
-#define __TBB_full_memory_fence() __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad": : : "memory")
-
-//--------------------------------------------------
-// Compare and swap
-//--------------------------------------------------
-
-/**
- * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr
- * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand
- * @param value value to assign *ptr to if *ptr==comparand
- * @param comparand value to compare with *ptr
- * @return value originally in memory at ptr, regardless of success
-*/
-static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){
-    int32_t result;
-    __asm__ __volatile__(
-             "cas\t[%5],%4,%1"
-             : "=m"(*(int32_t *)ptr), "=r"(result)
-             : "m"(*(int32_t *)ptr), "1"(value), "r"(comparand), "r"(ptr)
-             : "memory");
-    return result;
-}
-
-/**
- * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr
- * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand
- * @param value value to assign *ptr to if *ptr==comparand
- * @param comparand value to compare with *ptr
- * @return value originally in memory at ptr, regardless of success
- */
-static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){
-    int64_t result;
-    __asm__ __volatile__(
-             "casx\t[%5],%4,%1"
-             : "=m"(*(int64_t *)ptr), "=r"(result)
-             : "m"(*(int64_t *)ptr), "1"(value), "r"(comparand), "r"(ptr)
-             : "memory");
-    return result;
-}
-
-//---------------------------------------------------
-// Fetch and add
-//---------------------------------------------------
-
-/**
- * Atomic fetch and add for 32 bit values, in this case implemented by continuously checking success of atomicity
- * @param ptr pointer to value to add addend to
- * @param addend value to add to *ptr
- * @return value at ptr before addend was added
- */
-static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){
-    int32_t result;
-    __asm__ __volatile__ (
-             "0:\t add\t %3, %4, %0\n" // do addition
-             "\t cas\t [%2], %3, %0\n" // cas to store result in memory
-             "\t cmp\t %3, %0\n" // check if value from memory is original
-             "\t bne,a,pn\t %%icc, 0b\n" // if not try again
-             "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added
-             : "=&r"(result), "=m"(*(int32_t *)ptr)
-             : "r"(ptr), "r"(*(int32_t *)ptr), "r"(addend), "m"(*(int32_t *)ptr)
-             : "ccr", "memory");
-    return result;
-}
-
-/**
- * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity
- * @param ptr pointer to value to add addend to
- * @param addend value to add to *ptr
- * @return value at ptr before addend was added
- */
-static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){
-    int64_t result;
-    __asm__ __volatile__ (
-             "0:\t add\t %3, %4, %0\n" // do addition
-             "\t casx\t [%2], %3, %0\n" // cas to store result in memory
-             "\t cmp\t %3, %0\n" // check if value from memory is original
-             "\t bne,a,pn\t %%xcc, 0b\n" // if not try again
-             "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added
-             : "=&r"(result),
"=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "memory"); - return result; -} - -//-------------------------------------------------------- -// Logarithm (base two, integer) -//-------------------------------------------------------- - -static inline int64_t __TBB_machine_lg( uint64_t x ) { - __TBB_ASSERT(x, "__TBB_Log2(0) undefined"); - uint64_t count; - // one hot encode - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); - x |= (x >> 32); - // count 1's - __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) ); - return count-1; -} - -//-------------------------------------------------------- - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__ ( - "0:\t or\t %2, %3, %%g1\n" // do operation - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(value), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) { - __asm__ __volatile__ ( - "0:\t and\t %2, %3, %%g1\n" // do operation - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(value), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - - -static inline void __TBB_machine_pause( int32_t delay ) { - // do nothing, inlined, doesn't matter -} - -// put 0xff in memory location, return memory value, -// generic trylockbyte puts 0x01, however this is fine -// because all that matters is that 0 is unlocked -static inline bool __TBB_machine_trylockbyte(unsigned char &flag){ - unsigned char result; - __asm__ __volatile__ ( - "ldstub\t [%2], %0\n" - : "=r"(result), "=m"(flag) - : "r"(&flag), "m"(flag) - : "memory"); - return result == 0; -} - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -// Definition of other functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/src/tbb/include/tbb/machine/windows_api.h b/src/tbb/include/tbb/machine/windows_api.h deleted file mode 100644 index c0ccc24c5..000000000 --- a/src/tbb/include/tbb/machine/windows_api.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_machine_windows_api_H -#define __TBB_machine_windows_api_H - -#if _WIN32 || _WIN64 - -#if _XBOX - -#define NONET -#define NOD3D -#include - -#else // Assume "usual" Windows - -#include - -#endif // _XBOX - -#if _WIN32_WINNT < 0x0600 -// The following Windows API function is declared explicitly; -// otherwise it fails to compile by VS2005. -#if !defined(WINBASEAPI) || (_WIN32_WINNT < 0x0501 && _MSC_VER == 1400) -#define __TBB_WINBASEAPI extern "C" -#else -#define __TBB_WINBASEAPI WINBASEAPI -#endif -__TBB_WINBASEAPI BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION ); -__TBB_WINBASEAPI BOOL WINAPI InitializeCriticalSectionAndSpinCount( LPCRITICAL_SECTION, DWORD ); -// Overloading WINBASEAPI macro and using local functions missing in Windows XP/2003 -#define InitializeCriticalSectionEx inlineInitializeCriticalSectionEx -#define CreateSemaphoreEx inlineCreateSemaphoreEx -#define CreateEventEx inlineCreateEventEx -inline BOOL WINAPI inlineInitializeCriticalSectionEx( LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD ) -{ - return InitializeCriticalSectionAndSpinCount( lpCriticalSection, dwSpinCount ); -} -inline HANDLE WINAPI inlineCreateSemaphoreEx( LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, LONG lInitialCount, LONG lMaximumCount, LPCTSTR lpName, DWORD, DWORD ) -{ - return CreateSemaphore( lpSemaphoreAttributes, lInitialCount, lMaximumCount, lpName ); -} -inline HANDLE WINAPI inlineCreateEventEx( LPSECURITY_ATTRIBUTES lpEventAttributes, LPCTSTR lpName, DWORD dwFlags, DWORD ) -{ - BOOL manual_reset = dwFlags&0x00000001 ? TRUE : FALSE; // CREATE_EVENT_MANUAL_RESET - BOOL initial_set = dwFlags&0x00000002 ? TRUE : FALSE; // CREATE_EVENT_INITIAL_SET - return CreateEvent( lpEventAttributes, manual_reset, initial_set, lpName ); -} -#endif - -#if defined(RTL_SRWLOCK_INIT) -#ifndef __TBB_USE_SRWLOCK -// TODO: turn it on when bug 1952 will be fixed -#define __TBB_USE_SRWLOCK 0 -#endif -#endif - -#else -#error tbb/machine/windows_api.h should only be used for Windows based platforms -#endif // _WIN32 || _WIN64 - -#endif // __TBB_machine_windows_api_H diff --git a/src/tbb/include/tbb/machine/windows_ia32.h b/src/tbb/include/tbb/machine/windows_ia32.h deleted file mode 100644 index 3a38634c9..000000000 --- a/src/tbb/include/tbb/machine/windows_ia32.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_ia32_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_windows_ia32_H - -#include "msvc_ia32_common.h" - -#define __TBB_WORDSIZE 4 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100) - #define __TBB_compiler_fence() __asm { __asm nop } - #define __TBB_full_memory_fence() __asm { __asm mfence } -#elif _MSC_VER >= 1300 || __INTEL_COMPILER - #pragma intrinsic(_ReadWriteBarrier) - #pragma intrinsic(_mm_mfence) - #define __TBB_compiler_fence() _ReadWriteBarrier() - #define __TBB_full_memory_fence() _mm_mfence() -#else - #error Unsupported compiler - need to define __TBB_{control,acquire,release}_consistency_helper to support it -#endif - -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4244 4267) -#endif - -extern "C" { - __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ); - void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr); -} - -//TODO: use _InterlockedXXX intrinsics as they available since VC 2005 -#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,U,A,C) \ -static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov C , value \ - __asm mov A , comparand \ - __asm lock cmpxchg [edx], C \ - __asm mov result, A \ - } \ - return result; \ -} \ -\ -static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, addend \ - __asm lock xadd [edx], 
A \ - __asm mov result, A \ - } \ - return result; \ -}\ -\ -static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, value \ - __asm lock xchg [edx], A \ - __asm mov result, A \ - } \ - return result; \ -} - - -__TBB_MACHINE_DEFINE_ATOMICS(1, __int8, __int8, al, cl) -__TBB_MACHINE_DEFINE_ATOMICS(2, __int16, __int16, ax, cx) -__TBB_MACHINE_DEFINE_ATOMICS(4, ptrdiff_t, ptrdiff_t, eax, ecx) - -#undef __TBB_MACHINE_DEFINE_ATOMICS - -static inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock or [edx], eax - } -} - -static inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock and [edx], eax - } -} - -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -//TODO: Check if it possible and profitable for IA-32 architecture on (Linux and Windows) -//to use of 64-bit load/store via floating point registers together with full fence -//for sequentially consistent load/store, instead of CAS. -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - diff --git a/src/tbb/include/tbb/machine/windows_intel64.h b/src/tbb/include/tbb/machine/windows_intel64.h deleted file mode 100644 index 03795efd9..000000000 --- a/src/tbb/include/tbb/machine/windows_intel64.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_intel64_H) -#error Do not #include this internal file directly; use public TBB headers instead. 
-#endif - -#define __TBB_machine_windows_intel64_H - -#define __TBB_WORDSIZE 8 -#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE - -#include -#include "msvc_ia32_common.h" - -//TODO: Use _InterlockedXXX16 intrinsics for 2 byte operations -#if !__INTEL_COMPILER - #pragma intrinsic(_InterlockedOr64) - #pragma intrinsic(_InterlockedAnd64) - #pragma intrinsic(_InterlockedCompareExchange) - #pragma intrinsic(_InterlockedCompareExchange64) - #pragma intrinsic(_InterlockedExchangeAdd) - #pragma intrinsic(_InterlockedExchangeAdd64) - #pragma intrinsic(_InterlockedExchange) - #pragma intrinsic(_InterlockedExchange64) -#endif /* !(__INTEL_COMPILER) */ - -#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100) - #define __TBB_compiler_fence() __asm { __asm nop } - #define __TBB_full_memory_fence() __asm { __asm mfence } -#elif _MSC_VER >= 1300 || __INTEL_COMPILER - #pragma intrinsic(_ReadWriteBarrier) - #pragma intrinsic(_mm_mfence) - #define __TBB_compiler_fence() _ReadWriteBarrier() - #define __TBB_full_memory_fence() _mm_mfence() -#endif - -#define __TBB_control_consistency_helper() __TBB_compiler_fence() -#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() -#define __TBB_release_consistency_helper() __TBB_compiler_fence() - -// ATTENTION: if you ever change argument types in machine-specific primitives, -// please take care of atomic_word<> specializations in tbb/atomic.h -extern "C" { - __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand ); - __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend ); - __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value ); -} - -inline long __TBB_machine_cmpswp4 (volatile void *ptr, __int32 value, __int32 comparand ) { - return _InterlockedCompareExchange( (long*)ptr, value, comparand ); -} -inline long __TBB_machine_fetchadd4 (volatile void *ptr, __int32 addend ) { - return _InterlockedExchangeAdd( (long*)ptr, addend ); -} -inline long __TBB_machine_fetchstore4 (volatile void *ptr, __int32 value ) { - return _InterlockedExchange( (long*)ptr, value ); -} - -inline __int64 __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ) { - return _InterlockedCompareExchange64( (__int64*)ptr, value, comparand ); -} -inline __int64 __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ) { - return _InterlockedExchangeAdd64( (__int64*)ptr, addend ); -} -inline __int64 __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ) { - return _InterlockedExchange64( (__int64*)ptr, value ); -} - -#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -inline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) { - _InterlockedOr64((__int64*)operand, addend); -} - -inline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) { - _InterlockedAnd64((__int64*)operand, addend); -} - -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - diff --git a/src/tbb/include/tbb/machine/xbox360_ppc.h 
b/src/tbb/include/tbb/machine/xbox360_ppc.h
deleted file mode 100644
index 148e5b1d0..000000000
--- a/src/tbb/include/tbb/machine/xbox360_ppc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-// TODO: revise by comparing with mac_ppc.h
-
-#if !defined(__TBB_machine_H) || defined(__TBB_machine_xbox360_ppc_H)
-#error Do not #include this internal file directly; use public TBB headers instead.
-#endif
-
-#define __TBB_machine_xbox360_ppc_H
-
-#define NONET
-#define NOD3D
-#include "xtl.h"
-#include "ppcintrinsics.h"
-
-#if _MSC_VER >= 1300
-extern "C" void _MemoryBarrier();
-#pragma intrinsic(_MemoryBarrier)
-#define __TBB_control_consistency_helper() __isync()
-#define __TBB_acquire_consistency_helper() _MemoryBarrier()
-#define __TBB_release_consistency_helper() _MemoryBarrier()
-#endif
-
-#define __TBB_full_memory_fence() __sync()
-
-#define __TBB_WORDSIZE 4
-#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG
-
-//todo: define __TBB_USE_FENCED_ATOMICS and define acquire/release primitives to maximize performance
-
-inline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand ) {
-    __sync();
-    __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand);
-    __isync();
-    return result;
-}
-
-inline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand )
-{
-    __sync();
-    __int64 result = InterlockedCompareExchange64((volatile LONG64*)ptr, value, comparand);
-    __isync();
-    return result;
-}
-
-#define __TBB_USE_GENERIC_PART_WORD_CAS 1
-#define __TBB_USE_GENERIC_FETCH_ADD 1
-#define __TBB_USE_GENERIC_FETCH_STORE 1
-#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
-#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1
-#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1
-#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1
-
-#pragma optimize( "", off )
-inline void __TBB_machine_pause (__int32 delay )
-{
-    for (__int32 i=0; i<delay; i++) {;};
-}
-#pragma optimize( "", on )
-
-static inline int __TBB_XBOX360_DetectNumberOfWorkers()
-{
-    return ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 0) & 1) +
-           ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 1) & 1) +
-           ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) +
-           ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) +
-           ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) + 
-           ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1; // +1 accommodates for the master thread
-}
-
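[Editor's note on the deleted helper above: it is a hand-unrolled population count over a compile-time mask of hardware threads available to workers, plus one for the master thread. A minimal portable C++ sketch of the same bit-counting idiom follows; the mask value and names here are illustrative, not the ones the Xbox header actually used. The next deleted helper, __TBB_XBOX360_GetHardwareThreadIndex, then maps a worker index onto the n-th set bit of that same mask.]

#include <cstdint>

// Illustrative mask: hardware threads 1-5 available to workers (bit 0 = master).
constexpr std::uint32_t kHardwareThreadMask = 0x3E;

// Sum the low six bits of the mask, plus one for the master thread --
// the same computation the deleted detection helper performed.
constexpr int detectNumberOfWorkers(std::uint32_t mask) {
    int workers = 0;
    for (int bit = 0; bit < 6; ++bit)
        workers += (mask >> bit) & 1;
    return workers + 1; // +1 accommodates the master thread
}

static_assert(detectNumberOfWorkers(kHardwareThreadMask) == 6,
              "five worker hardware threads plus the master");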
-static inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex) -{ - workerThreadIndex %= __TBB_XBOX360_DetectNumberOfWorkers()-1; - int m = __TBB_XBOX360_HARDWARE_THREAD_MASK; - int index = 0; - int skipcount = workerThreadIndex; - while (true) - { - if ((m & 1)!=0) - { - if (skipcount==0) break; - skipcount--; - } - m >>= 1; - index++; - } - return index; -} - -#define __TBB_HardwareConcurrency() __TBB_XBOX360_DetectNumberOfWorkers() diff --git a/src/tbb/include/tbb/memory_pool.h b/src/tbb/include/tbb/memory_pool.h index 47b8e1b2c..cefe96e36 100644 --- a/src/tbb/include/tbb/memory_pool.h +++ b/src/tbb/include/tbb/memory_pool.h @@ -1,269 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_memory_pool_H -#define __TBB_memory_pool_H - -#if !TBB_PREVIEW_MEMORY_POOL -#error Set TBB_PREVIEW_MEMORY_POOL to include memory_pool.h -#endif -/** @file */ - -#include "scalable_allocator.h" -#include // std::bad_alloc -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#include // std::forward -#endif - -#if __TBB_EXTRA_DEBUG -#define __TBBMALLOC_ASSERT ASSERT -#else -#define __TBBMALLOC_ASSERT(a,b) ((void)0) -#endif - -namespace tbb { -namespace interface6 { -//! @cond INTERNAL -namespace internal { - -//! Base of thread-safe pool allocator for variable-size requests -class pool_base : tbb::internal::no_copy { - // Pool interface is separate from standard allocator classes because it has - // to maintain internal state, no copy or assignment. Move and swap are possible. -public: - //! Reset pool to reuse its memory (free all objects at once) - void recycle() { rml::pool_reset(my_pool); } - - //! The "malloc" analogue to allocate block of memory of size bytes - void *malloc(size_t size) { return rml::pool_malloc(my_pool, size); } - - //! The "free" analogue to discard a previously allocated piece of memory. - void free(void* ptr) { rml::pool_free(my_pool, ptr); } - - //! The "realloc" analogue complementing pool_malloc. 
- // Enables some low-level optimization possibilities - void *realloc(void* ptr, size_t size) { - return rml::pool_realloc(my_pool, ptr, size); - } - -protected: - //! destroy pool - must be called in a child class - void destroy() { rml::pool_destroy(my_pool); } - - rml::MemoryPool *my_pool; -}; - -} // namespace internal -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** @ingroup memory_allocation */ -template -class memory_pool_allocator { -protected: - typedef P pool_type; - pool_type *my_pool; - template - friend class memory_pool_allocator; - template - friend bool operator==( const memory_pool_allocator& a, const memory_pool_allocator& b); - template - friend bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b); -public: - typedef typename tbb::internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef memory_pool_allocator other; - }; - - memory_pool_allocator(pool_type &pool) throw() : my_pool(&pool) {} - memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} - template - memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} - - pointer address(reference x) const { return &x; } - const_pointer address(const_reference x) const { return &x; } - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ = 0) { - return static_cast( my_pool->malloc( n*sizeof(value_type) ) ); - } - //! Free previously allocated block of memory. - void deallocate( pointer p, size_type ) { - my_pool->free(p); - } - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - size_type max = static_cast(-1) / sizeof (value_type); - return (max > 0 ? max : 1); - } - //! Copy-construct value at location pointed to by p. -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - template - void construct(U *p, Args&&... args) - { ::new((void *)p) U(std::forward(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} -#endif - void construct( pointer p, const value_type& value ) { ::new((void*)(p)) value_type(value); } -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) { p->~value_type(); } + http://www.apache.org/licenses/LICENSE-2.0 -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! 
Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template -class memory_pool_allocator { -public: - typedef P pool_type; - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef memory_pool_allocator other; - }; - - memory_pool_allocator( pool_type &pool) throw() : my_pool(&pool) {} - memory_pool_allocator( const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} - template - memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} - -protected: - pool_type *my_pool; - template - friend class memory_pool_allocator; - template - friend bool operator==( const memory_pool_allocator& a, const memory_pool_allocator& b); - template - friend bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b); -}; - -template -inline bool operator==( const memory_pool_allocator& a, const memory_pool_allocator& b) {return a.my_pool==b.my_pool;} - -template -inline bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b) {return a.my_pool!=b.my_pool;} - - -//! Thread-safe growable pool allocator for variable-size requests -template -class memory_pool : public internal::pool_base { - Alloc my_alloc; // TODO: base-class optimization - static void *allocate_request(intptr_t pool_id, size_t & bytes); - static int deallocate_request(intptr_t pool_id, void*, size_t raw_bytes); - -public: - //! construct pool with underlying allocator - memory_pool(const Alloc &src = Alloc()); - - //! destroy pool - ~memory_pool() { destroy(); } // call the callbacks first and destroy my_alloc latter - -}; - -class fixed_pool : public internal::pool_base { - void *my_buffer; - size_t my_size; - inline static void *allocate_request(intptr_t pool_id, size_t & bytes); - -public: - //! construct pool with underlying allocator - inline fixed_pool(void *buf, size_t size); - //! destroy pool - ~fixed_pool() { destroy(); } -}; - -//////////////// Implementation /////////////// - -template -memory_pool::memory_pool(const Alloc &src) : my_alloc(src) { - rml::MemPoolPolicy args(allocate_request, deallocate_request, - sizeof(typename Alloc::value_type)); - rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool); - if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); -} -template -void *memory_pool::allocate_request(intptr_t pool_id, size_t & bytes) { - memory_pool &self = *reinterpret_cast*>(pool_id); - const size_t unit_size = sizeof(typename Alloc::value_type); - __TBBMALLOC_ASSERT( 0 == bytes%unit_size, NULL); - void *ptr; - __TBB_TRY { ptr = self.my_alloc.allocate( bytes/unit_size ); } - __TBB_CATCH(...) { return 0; } - return ptr; -} -#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - // Workaround for erroneous "unreachable code" warning in the template below. 
- // Specific for VC++ 17-18 compiler - #pragma warning (push) - #pragma warning (disable: 4702) -#endif -template -int memory_pool::deallocate_request(intptr_t pool_id, void* raw_ptr, size_t raw_bytes) { - memory_pool &self = *reinterpret_cast*>(pool_id); - const size_t unit_size = sizeof(typename Alloc::value_type); - __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, NULL); - self.my_alloc.deallocate( static_cast(raw_ptr), raw_bytes/unit_size ); - return 0; -} -#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - #pragma warning (pop) -#endif -inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) { - if( !buf || !size ) __TBB_THROW(std::bad_alloc()); - rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true); - rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool); - if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc()); -} -inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) { - fixed_pool &self = *reinterpret_cast(pool_id); - __TBBMALLOC_ASSERT(0 != self.my_size, "The buffer must not be used twice."); - bytes = self.my_size; - self.my_size = 0; // remember that buffer has been used - return self.my_buffer; -} - -} //namespace interface6 -using interface6::memory_pool_allocator; -using interface6::memory_pool; -using interface6::fixed_pool; -} //namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#undef __TBBMALLOC_ASSERT -#endif// __TBB_memory_pool_H +#include "../oneapi/tbb/memory_pool.h" diff --git a/src/tbb/include/tbb/mutex.h b/src/tbb/include/tbb/mutex.h index 32340f8b1..91dbee0fa 100644 --- a/src/tbb/include/tbb/mutex.h +++ b/src/tbb/include/tbb/mutex.h @@ -1,234 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2023 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_mutex_H -#define __TBB_mutex_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif /* _WIN32||_WIN64 */ - -#include -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Wrapper around the platform's native reader-writer lock. -/** For testing purposes only. - @ingroup synchronization */ -class mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired mutex. - mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSectionEx(&impl, 4000, 0); - #else - int error_code = pthread_mutex_init(&impl,NULL); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( mutex& mutex ) { - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire(mutex); -#else - mutex.lock(); - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given mutex. - bool try_acquire( mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire (mutex); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release (); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current mutex to work - mutex* my_mutex; + http://www.apache.org/licenses/LICENSE-2.0 - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m ); - - //! All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - int error_code = pthread_mutex_lock(&impl); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_lock failed"); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. 
*/ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = NULL; - return s.internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - HELD=0x56CD - }; -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); - -#if _WIN32||_WIN64 -public: - //! Set the internal state - void set_state( state_t to ) { state = to; } -#endif -}; - -__TBB_DEFINE_PROFILING_SET_NAME(mutex) - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_mutex_H */ +#include "../oneapi/tbb/mutex.h" diff --git a/src/tbb/include/tbb/null_mutex.h b/src/tbb/include/tbb/null_mutex.h index 240e9bdcc..63218bf06 100644 --- a/src/tbb/include/tbb/null_mutex.h +++ b/src/tbb/include/tbb/null_mutex.h @@ -1,54 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
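As an aside, a minimal sketch of the scoped-lock pattern that the deleted mutex.h implemented; the shared counter is a hypothetical example, not code from this diff. The RAII lock releases automatically when it leaves scope, even if the protected code throws:

#include "tbb/mutex.h"

static tbb::mutex counter_mutex;
static long counter = 0;

void increment() {
    tbb::mutex::scoped_lock lock(counter_mutex);  // acquires counter_mutex
    ++counter;
}                                                 // lock released here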
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_null_mutex_H -#define __TBB_null_mutex_H - -#include "tbb_stddef.h" + http://www.apache.org/licenses/LICENSE-2.0 -namespace tbb { - -//! A mutex which does nothing -/** A null_mutex does no operation and simulates success. - @ingroup synchronization */ -class null_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Represents acquisition of a mutex. - class scoped_lock : internal::no_copy { - public: - scoped_lock() {} - scoped_lock( null_mutex& ) {} - ~scoped_lock() {} - void acquire( null_mutex& ) {} - bool try_acquire( null_mutex& ) { return true; } - void release() {} - }; - - null_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_null_mutex_H */ +#include "../oneapi/tbb/null_mutex.h" diff --git a/src/tbb/include/tbb/null_rw_mutex.h b/src/tbb/include/tbb/null_rw_mutex.h index 813f79f39..71c42fe26 100644 --- a/src/tbb/include/tbb/null_rw_mutex.h +++ b/src/tbb/include/tbb/null_rw_mutex.h @@ -1,56 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_null_rw_mutex_H -#define __TBB_null_rw_mutex_H - -#include "tbb_stddef.h" + http://www.apache.org/licenses/LICENSE-2.0 -namespace tbb { - -//! A rw mutex which does nothing -/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation. - @ingroup synchronization */ -class null_rw_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Represents acquisition of a mutex. 
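To illustrate why a do-nothing mutex is useful: generic code can take its mutex type as a template parameter, and null_mutex compiles the locking away entirely. The Counter type below is a hypothetical example, not part of TBB:

#include "tbb/null_mutex.h"
#include "tbb/spin_mutex.h"

template <typename Mutex>
class Counter {
    Mutex mutex_;
    long value_;
public:
    Counter() : value_(0) {}
    long next() {
        typename Mutex::scoped_lock lock(mutex_);  // no-op for null_mutex
        return ++value_;
    }
};

Counter<tbb::spin_mutex> shared_counter;  // thread-safe instantiation
Counter<tbb::null_mutex> local_counter;   // single-threaded, zero overhead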
- class scoped_lock : internal::no_copy { - public: - scoped_lock() {} - scoped_lock( null_rw_mutex& , bool = true ) {} - ~scoped_lock() {} - void acquire( null_rw_mutex& , bool = true ) {} - bool upgrade_to_writer() { return true; } - bool downgrade_to_reader() { return true; } - bool try_acquire( null_rw_mutex& , bool = true ) { return true; } - void release() {} - }; - - null_rw_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_null_rw_mutex_H */ +#include "../oneapi/tbb/null_rw_mutex.h" diff --git a/src/tbb/include/tbb/parallel_do.h b/src/tbb/include/tbb/parallel_do.h deleted file mode 100644 index 8173a9715..000000000 --- a/src/tbb/include/tbb/parallel_do.h +++ /dev/null @@ -1,522 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_do_H -#define __TBB_parallel_do_H - -#include "internal/_range_iterator.h" -#include "task.h" -#include "aligned_space.h" -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - template class parallel_do_feeder_impl; - template class do_group_task; - - //! Strips its template type argument from 'cv' and '&' qualifiers - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - // Most of the compilers remove cv-qualifiers from non-reference function argument types. - // But unfortunately there are those that don't. - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; -} // namespace internal -//! @endcond - -//! 
Class the user supplied algorithm body uses to add new tasks -/** \param Item Work item type **/ -template -class parallel_do_feeder: internal::no_copy -{ - parallel_do_feeder() {} - virtual ~parallel_do_feeder () {} - virtual void internal_add( const Item& item ) = 0; - template friend class internal::parallel_do_feeder_impl; -public: - //! Add a work item to a running parallel_do. - void add( const Item& item ) {internal_add(item);} -}; - -//! @cond INTERNAL -namespace internal { - //! For internal use only. - /** Selects one of the two possible forms of function call member operator. - @ingroup algorithms **/ - template - class parallel_do_operator_selector - { - typedef parallel_do_feeder Feeder; - template - static void internal_call( const Body& obj, A1& arg1, A2&, void (Body::*)(CvItem) const ) { - obj(arg1); - } - template - static void internal_call( const Body& obj, A1& arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder&) const ) { - obj(arg1, arg2); - } - - public: - template - static void call( const Body& obj, A1& arg1, A2& arg2 ) - { - internal_call( obj, arg1, arg2, &Body::operator() ); - } - }; - - //! For internal use only. - /** Executes one iteration of a do. - @ingroup algorithms */ - template - class do_iteration_task: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Item my_value; - feeder_type& my_feeder; - - do_iteration_task( const Item& value, feeder_type& feeder ) : - my_value(value), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, my_value, my_feeder); - return NULL; - } - - template friend class parallel_do_feeder_impl; - }; // class do_iteration_task - - template - class do_iteration_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Iterator my_iter; - feeder_type& my_feeder; - - do_iteration_task_iter( const Iterator& iter, feeder_type& feeder ) : - my_iter(iter), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, *my_iter, my_feeder); - return NULL; - } - - template friend class do_group_task_forward; - template friend class do_group_task_input; - template friend class do_task_iter; - }; // class do_iteration_task_iter - - //! For internal use only. - /** Implements new task adding procedure. - @ingroup algorithms **/ - template - class parallel_do_feeder_impl : public parallel_do_feeder - { - /*override*/ - void internal_add( const Item& item ) - { - typedef do_iteration_task iteration_type; - - iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); - - t.spawn( t ); - } - public: - const Body* my_body; - empty_task* my_barrier; - - parallel_do_feeder_impl() - { - my_barrier = new( task::allocate_root() ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } - -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl(tbb::task_group_context &context) - { - my_barrier = new( task::allocate_root(context) ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } -#endif - - ~parallel_do_feeder_impl() - { - my_barrier->destroy(*my_barrier); - } - }; // class parallel_do_feeder_impl - - - //! For internal use only - /** Unpacks a block of iterations. 
- @ingroup algorithms */ - - template - class do_group_task_forward: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - Iterator my_first; - size_t my_size; - - do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) - : my_feeder(feeder), my_first(first), my_size(size) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_first, my_feeder ); - ++my_first; - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - template friend class do_task_iter; - }; // class do_group_task_forward - - template - class do_group_task_input: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - size_t my_size; - aligned_space my_arg; - - do_group_task_input( feeder_type& feeder ) - : my_feeder(feeder), my_size(0) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder ); - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - ~do_group_task_input(){ - for( size_t k=0; k~Item(); - } - - template friend class do_task_iter; - }; // class do_group_task_input - - //! For internal use only. - /** Gets block of iterations and packages them into a do_group_task. - @ingroup algorithms */ - template - class do_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - public: - do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : - my_first(first), my_last(last), my_feeder(feeder) - {} - - private: - Iterator my_first; - Iterator my_last; - feeder_type& my_feeder; - - /* Do not merge run(xxx) and run_xxx() methods. They are separated in order - to make sure that compilers will eliminate unused argument of type xxx - (that is will not put it on stack). The sole purpose of this argument - is overload resolution. - - An alternative could be using template functions, but explicit specialization - of member function templates is not supported for non specialized class - templates. Besides template functions would always fall back to the least - efficient variant (the one for input iterators) in case of iterators having - custom tags derived from basic ones. */ - /*override*/ task* execute() - { - typedef typename std::iterator_traits::iterator_category iterator_tag; - return run( (iterator_tag*)NULL ); - } - - /** This is the most restricted variant that operates on input iterators or - iterators with unknown tags (tags not derived from the standard ones). 
**/ - inline task* run( void* ) { return run_for_input_iterator(); } - - task* run_for_input_iterator() { - typedef do_group_task_input block_type; - - block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder); - size_t k=0; - while( !(my_first == my_last) ) { - new (t.my_arg.begin() + k) Item(*my_first); - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first == my_last) ) - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.my_size = k; - return &t; - } - } - - inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); } - - task* run_for_forward_iterator() { - typedef do_group_task_forward block_type; - - Iterator first = my_first; - size_t k=0; - while( !(my_first==my_last) ) { - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first==my_last) ) - recycle_to_reexecute(); - break; - } - } - return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder); - } - - inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); } - - task* run_for_random_access_iterator() { - typedef do_group_task_forward block_type; - typedef do_iteration_task_iter iteration_type; - - size_t k = static_cast(my_last-my_first); - if( k > block_type::max_arg_size ) { - Iterator middle = my_first + k/2; - - empty_task& c = *new( allocate_continuation() ) empty_task; - do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder); - recycle_as_child_of(c); - - my_last = middle; - c.set_ref_count(2); - c.spawn(b); - return this; - }else if( k != 0 ) { - task_list list; - task* t; - size_t k1=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_first, my_feeder); - ++my_first; - if( ++k1==k ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - } - return NULL; - } - }; // class do_task_iter - - //! For internal use only. - /** Implements parallel iteration over a range. - @ingroup algorithms */ - template - void run_parallel_do( Iterator first, Iterator last, const Body& body -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif - ) - { - typedef do_task_iter root_iteration_task; -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl feeder(context); -#else - parallel_do_feeder_impl feeder; -#endif - feeder.my_body = &body; - - root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder); - - feeder.my_barrier->set_ref_count(2); - feeder.my_barrier->spawn_and_wait_for_all(t); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. - @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. 
- @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - -} // namespace internal -//! @endcond - - -/** \page parallel_do_body_req Requirements on parallel_do body - Class \c Body implementing the concept of parallel_do body must define: - - \code - B::operator()( - cv_item_type item, - parallel_do_feeder& feeder - ) const - - OR - - B::operator()( cv_item_type& item ) const - \endcode Process item. - May be invoked concurrently for the same \c this but different \c item. - - - \code item_type( const item_type& ) \endcode - Copy a work item. - - \code ~item_type() \endcode Destroy a work item -**/ - -/** \name parallel_do - See also requirements on \ref parallel_do_body_req "parallel_do Body". **/ -//@{ -//! Parallel iteration over a range, with optional addition of more work. -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body ) -{ - if ( first == last ) - return; -#if __TBB_TASK_GROUP_CONTEXT - task_group_context context; -#endif // __TBB_TASK_GROUP_CONTEXT - internal::select_parallel_do( first, last, body, &Body::operator() -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); -} - -template -void parallel_do(Range& rng, const Body& body) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body); -} - -template -void parallel_do(const Range& rng, const Body& body) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over a range, with optional addition of more work and user-supplied context -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context ) -{ - if ( first == last ) - return; - internal::select_parallel_do( first, last, body, &Body::operator(), context ); -} - -template -void parallel_do(Range& rng, const Body& body, task_group_context& context) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context); -} - -template -void parallel_do(const Range& rng, const Body& body, task_group_context& context) { - parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context); -} - -#endif // __TBB_TASK_GROUP_CONTEXT - -//@} - -} // namespace - -#endif /* __TBB_parallel_do_H */ diff --git a/src/tbb/include/tbb/parallel_for.h b/src/tbb/include/tbb/parallel_for.h index 4dc499cf0..fea1d1b9f 100644 --- a/src/tbb/include/tbb/parallel_for.h +++ b/src/tbb/include/tbb/parallel_for.h @@ -1,373 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
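The body requirements above are easiest to see in use. A minimal sketch of the feeder pattern that the deleted parallel_do.h implemented; Node and the traversal are hypothetical, and oneTBB offers the same pattern through its parallel_for_each feeder overloads:

#include "tbb/parallel_do.h"
#include <list>

struct Node { int value; Node* left; Node* right; };

struct VisitBody {
    void operator()(Node* n, tbb::parallel_do_feeder<Node*>& feeder) const {
        // ... process n->value ...
        if (n->left)  feeder.add(n->left);   // dynamically add more work
        if (n->right) feeder.add(n->right);
    }
};

void traverse(std::list<Node*>& roots) {
    tbb::parallel_do(roots.begin(), roots.end(), VisitBody());
}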
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_H -#define __TBB_parallel_for_H - -#include -#include "task.h" -#include "partitioner.h" -#include "blocked_range.h" -#include "tbb_exception.h" - -namespace tbb { - -namespace interface7 { -//! @cond INTERNAL -namespace internal { - - //! allocate right task with new parent - void* allocate_sibling(task* start_for_task, size_t bytes); - - //! Task type used in parallel_for - /** @ingroup algorithms */ - template - class start_for: public task { - Range my_range; - const Body my_body; - typename Partitioner::task_partition_type my_partition; - /*override*/ task* execute(); - - //! Update affinity info, if any. - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - - public: - //! Constructor for root task. - start_for( const Range& range, const Body& body, Partitioner& partitioner ) : - my_range(range), - my_body(body), - my_partition(partitioner) - { - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_for( start_for& parent_, typename Partitioner::split_type& split_obj) : - my_range(parent_.my_range, split_obj), - my_body(parent_.my_body), - my_partition(parent_.my_partition, split_obj) - { - my_partition.set_affinity(*this); - } - //! Construct right child from the given range as response to the demand. - /** parent_ remains left child. Newly constructed object is right child. */ - start_for( start_for& parent_, const Range& r, depth_t d ) : - my_range(r), - my_body(parent_.my_body), - my_partition(parent_.my_partition, split()) - { - my_partition.set_affinity(*this); - my_partition.align_depth( d ); - } - static void run( const Range& range, const Body& body, Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - start_for& a = *new(task::allocate_root()) start_for(range,body,partitioner); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. 
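A sketch of what the bound-context comment above enables: an exception thrown inside the body is confined to this parallel_for, so the caller can handle it by wrapping the call in a try-block. The data and validation rule here are illustrative:

#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include <cstddef>
#include <stdexcept>

void process(int* data, std::size_t n) {
    try {
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n),
            [=](const tbb::blocked_range<std::size_t>& r) {
                for (std::size_t i = r.begin(); i != r.end(); ++i) {
                    if (data[i] < 0)
                        throw std::invalid_argument("negative input");
                    // ... process data[i] ...
                }
            });
    } catch (const std::exception&) {
        // Remaining loop tasks are cancelled; handle the failure here.
    }
}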
- task_group_context context; - start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - task::spawn_root_and_wait(a); - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, const Body& body, Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) { - start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); - task::spawn_root_and_wait(a); - } - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - //! Run body for range, serves as callback for partitioner - void run_body( Range &r ) { my_body( r ); } - - //! spawn right task, serves as callback for partitioner - void offer_work(typename Partitioner::split_type& split_obj) { - spawn( *new( allocate_sibling(static_cast(this), sizeof(start_for)) ) start_for(*this, split_obj) ); - } - //! spawn right task, serves as callback for partitioner - void offer_work(const Range& r, depth_t d = 0) { - spawn( *new( allocate_sibling(static_cast(this), sizeof(start_for)) ) start_for(*this, r, d) ); - } - }; - - //! allocate right task with new parent - // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined - inline void* allocate_sibling(task* start_for_task, size_t bytes) { - task* parent_ptr = new( start_for_task->allocate_continuation() ) flag_task(); - start_for_task->set_parent(parent_ptr); - parent_ptr->set_ref_count(2); - return &parent_ptr->allocate_child().allocate(bytes); - } - - //! execute task for parallel_for - template - task* start_for::execute() { - my_partition.check_being_stolen( *this ); - my_partition.execute(*this, my_range); - return NULL; - } -} // namespace internal -//! @endcond -} // namespace interfaceX - -//! @cond INTERNAL -namespace internal { - using interface7::internal::start_for; - - //! Calls the function with values from range [begin, end) with a step provided - template - class parallel_for_body : internal::no_assign { - const Function &my_func; - const Index my_begin; - const Index my_step; - public: - parallel_for_body( const Function& _func, Index& _begin, Index& _step ) - : my_func(_func), my_begin(_begin), my_step(_step) {} - - void operator()( const tbb::blocked_range& r ) const { - // A set of local variables to help the compiler with vectorization of the following loop. - Index b = r.begin(); - Index e = r.end(); - Index ms = my_step; - Index k = my_begin + b*ms; - -#if __INTEL_COMPILER -#pragma ivdep -#if __TBB_ASSERT_ON_VECTORIZATION_FAILURE -#pragma vector always assert -#endif -#endif - for ( Index i = b; i < e; ++i, k += ms ) { - my_func( k ); - } - } - }; -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_for_body_req Requirements on parallel_for body - Class \c Body implementing the concept of parallel_for body must define: - - \code Body::Body( const Body& ); \endcode Copy constructor - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ) const; \endcode Function call operator applying the body to range \c r. -**/ - -/** \name parallel_for - See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/ -//@{ - -//! Parallel iteration over range with default partitioner. 
-/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body ) { - internal::start_for::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} + http://www.apache.org/licenses/LICENSE-2.0 -//! Parallel iteration over range with simple partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -//! Parallel iteration over range with auto_partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -//! Parallel iteration over range with affinity_partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over range with default partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, task_group_context& context ) { - internal::start_for::run(range, body, __TBB_DEFAULT_PARTITIONER(), context); -} - -//! Parallel iteration over range with simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range, body, partitioner, context); -} - -//! Parallel iteration over range with auto_partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range, body, partitioner, context); -} - -//! Parallel iteration over range with affinity_partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range,body,partitioner, context); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -namespace strict_ppl { - -//@{ -//! Implementation of parallel iteration over stepped range of integers with explicit step and partitioner -template -void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner) { - if (step <= 0 ) - internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument - else if (last > first) { - // Above "else" avoids "potential divide by zero" warning on some platforms - Index end = (last - first - Index(1)) / step + Index(1); - tbb::blocked_range range(static_cast(0), end); - internal::parallel_for_body body(f, first, step); - tbb::parallel_for(range, body, partitioner); - } -} - -//! Parallel iteration over a range of integers with a step provided and default partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f) { - parallel_for_impl(first, last, step, f, auto_partitioner()); -} -//! Parallel iteration over a range of integers with a step provided and simple partitioner -template -void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner) { - parallel_for_impl(first, last, step, f, partitioner); -} -//! 
Parallel iteration over a range of integers with a step provided and auto partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, partitioner);
-}
-//! Parallel iteration over a range of integers with a step provided and affinity partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner) {
-    parallel_for_impl<Index,Function,affinity_partitioner>(first, last, step, f, partitioner);
-}
-
-//! Parallel iteration over a range of integers with a default step value and default partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, auto_partitioner());
-}
-//! Parallel iteration over a range of integers with a default step value and simple partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner) {
-    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, static_cast<Index>(1), f, partitioner);
-}
-//! Parallel iteration over a range of integers with a default step value and auto partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, partitioner);
-}
-//! Parallel iteration over a range of integers with a default step value and affinity partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner) {
-    parallel_for_impl<Index,Function,affinity_partitioner>(first, last, static_cast<Index>(1), f, partitioner);
-}
-
-#if __TBB_TASK_GROUP_CONTEXT
-//! Implementation of parallel iteration over stepped range of integers with explicit step, task group context, and partitioner
-template <typename Index, typename Function, typename Partitioner>
-void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner, tbb::task_group_context &context) {
-    if (step <= 0 )
-        internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument
-    else if (last > first) {
-        // Above "else" avoids "potential divide by zero" warning on some platforms
-        Index end = (last - first - Index(1)) / step + Index(1);
-        tbb::blocked_range<Index> range(static_cast<Index>(0), end);
-        internal::parallel_for_body<Function, Index> body(f, first, step);
-        tbb::parallel_for(range, body, partitioner, context);
-    }
-}
-
-//! Parallel iteration over a range of integers with explicit step, task group context, and default partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, auto_partitioner(), context);
-}
-//! Parallel iteration over a range of integers with explicit step, task group context, and simple partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) {
-    parallel_for_impl<Index,Function,const simple_partitioner>(first, last, step, f, partitioner, context);
-}
-//! Parallel iteration over a range of integers with explicit step, task group context, and auto partitioner
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) {
-    parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, partitioner, context);
-}
-//!
Parallel iteration over a range of integers with explicit step, task group context, and affinity partitioner - template -void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, step, f, partitioner, context); -} - - -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and default partitioner -template -void parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, auto_partitioner(), context); -} -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and simple partitioner - template -void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, partitioner, context); -} -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and auto partitioner - template -void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, partitioner, context); -} -//! Parallel iteration over a range of integers with a default step value, explicit task group context, and affinity_partitioner - template -void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) { - parallel_for_impl(first, last, static_cast(1), f, partitioner, context); -} - -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -} // namespace strict_ppl - -using strict_ppl::parallel_for; - -} // namespace tbb - -#if TBB_PREVIEW_SERIAL_SUBSET -#define __TBB_NORMAL_EXECUTION -#include "../serial/tbb/parallel_for.h" -#undef __TBB_NORMAL_EXECUTION -#endif + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_parallel_for_H */ +#include "../oneapi/tbb/parallel_for.h" diff --git a/src/tbb/include/tbb/parallel_for_each.h b/src/tbb/include/tbb/parallel_for_each.h index c7dc39f4d..27c2ab172 100644 --- a/src/tbb/include/tbb/parallel_for_each.h +++ b/src/tbb/include/tbb/parallel_for_each.h @@ -1,95 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
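Taken together, the integer overloads above reduce to one idea, visible in parallel_for_impl: a stepped index range is mapped onto a blocked_range and dispatched to the range-based parallel_for. A minimal sketch of the call site, with an illustrative workload:

#include "tbb/parallel_for.h"

void halve_even_indices(double* a, int n) {
    // Visits i = 0, 2, 4, ... while i < n, in parallel.
    tbb::parallel_for(0, n, 2, [=](int i) {
        a[i] *= 0.5;
    });
}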
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_each_H -#define __TBB_parallel_for_each_H - -#include "parallel_do.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - // The class calls user function in operator() - template - class parallel_for_each_body : internal::no_assign { - const Function &my_func; - public: - parallel_for_each_body(const Function &_func) : my_func(_func) {} - parallel_for_each_body(const parallel_for_each_body &_caller) : my_func(_caller.my_func) {} - - void operator() ( typename std::iterator_traits::reference value ) const { - my_func(value); - } - }; -} // namespace internal -//! @endcond - -/** \name parallel_for_each - **/ -//@{ -//! Calls function f for all items from [first, last) interval using user-supplied context -/** @ingroup algorithms */ -#if __TBB_TASK_GROUP_CONTEXT -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f, task_group_context &context) { - internal::parallel_for_each_body body(f); - tbb::parallel_do (first, last, body, context); -} + http://www.apache.org/licenses/LICENSE-2.0 -//! Calls function f for all items from rng using user-supplied context -/** @ingroup algorithms */ -template -void parallel_for_each(Range& rng, const Function& f, task_group_context& context) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context); -} - -//! Calls function f for all items from const rng user-supplied context -/** @ingroup algorithms */ -template -void parallel_for_each(const Range& rng, const Function& f, task_group_context& context) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Uses default context -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f) { - internal::parallel_for_each_body body(f); - tbb::parallel_do (first, last, body); -} - -//! Uses default context -template -void parallel_for_each(Range& rng, const Function& f) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f); -} - -//! Uses default context -template -void parallel_for_each(const Range& rng, const Function& f) { - parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f); -} - -//@} - -} // namespace + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
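For reference, the one-line usage that the deleted parallel_for_each.h supported by delegating to parallel_do; the tanh transform is illustrative:

#include "tbb/parallel_for_each.h"
#include <cmath>
#include <vector>

void squash(std::vector<double>& v) {
    tbb::parallel_for_each(v.begin(), v.end(), [](double& x) {
        x = std::tanh(x);  // applied once per element, in no fixed order
    });
}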
+*/ -#endif /* __TBB_parallel_for_each_H */ +#include "../oneapi/tbb/parallel_for_each.h" diff --git a/src/tbb/include/tbb/parallel_invoke.h b/src/tbb/include/tbb/parallel_invoke.h index b0d79da2c..6c21100e7 100644 --- a/src/tbb/include/tbb/parallel_invoke.h +++ b/src/tbb/include/tbb/parallel_invoke.h @@ -1,456 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_invoke_H -#define __TBB_parallel_invoke_H - -#include "task.h" - -#if __TBB_VARIADIC_PARALLEL_INVOKE - #include -#endif - -namespace tbb { - -#if !__TBB_TASK_GROUP_CONTEXT - /** Dummy to avoid cluttering the bulk of the header with enormous amount of ifdefs. **/ - struct task_group_context {}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! 
@cond INTERNAL -namespace internal { - // Simple task object, executing user method - template - class function_invoker : public task{ - public: - function_invoker(const function& _function) : my_function(_function) {} - private: - const function &my_function; - /*override*/ - task* execute() - { - my_function(); - return NULL; - } - }; - - // The class spawns two or three child tasks - template - class spawner : public task { - private: - const function1& my_func1; - const function2& my_func2; - const function3& my_func3; - bool is_recycled; - - task* execute (){ - if(is_recycled){ - return NULL; - }else{ - __TBB_ASSERT(N==2 || N==3, "Number of arguments passed to spawner is wrong"); - set_ref_count(N); - recycle_as_safe_continuation(); - internal::function_invoker* invoker2 = new (allocate_child()) internal::function_invoker(my_func2); - __TBB_ASSERT(invoker2, "Child task allocation failed"); - spawn(*invoker2); - size_t n = N; // To prevent compiler warnings - if (n>2) { - internal::function_invoker* invoker3 = new (allocate_child()) internal::function_invoker(my_func3); - __TBB_ASSERT(invoker3, "Child task allocation failed"); - spawn(*invoker3); - } - my_func1(); - is_recycled = true; - return NULL; - } - } // execute - - public: - spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {} - }; - - // Creates and spawns child tasks - class parallel_invoke_helper : public empty_task { - public: - // Dummy functor class - class parallel_invoke_noop { - public: - void operator() () const {} - }; - // Creates a helper object with user-defined number of children expected - parallel_invoke_helper(int number_of_children) - { - set_ref_count(number_of_children + 1); - } - -#if __TBB_VARIADIC_PARALLEL_INVOKE - void add_children() {} - void add_children(tbb::task_group_context&) {} - - template - void add_children(function&& _func) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(std::forward(_func)); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn(*invoker); - } - - template - void add_children(function&& _func, tbb::task_group_context&) - { - add_children(std::forward(_func)); - } - - // Adds child(ren) task(s) and spawns them - template - void add_children(function1&& _func1, function2&& _func2, function&&... _func) - { - // The third argument is dummy, it is ignored actually. - parallel_invoke_noop noop; - typedef internal::spawner<2, function1, function2, parallel_invoke_noop> spawner_type; - spawner_type & sub_root = *new(allocate_child()) spawner_type(std::forward(_func1), std::forward(_func2), noop); - spawn(sub_root); - add_children(std::forward(_func)...); - } -#else - // Adds child task and spawns it - template - void add_children (const function &_func) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(_func); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn(*invoker); - } - - // Adds a task with multiple child tasks and spawns it - // two arguments - template - void add_children (const function1& _func1, const function2& _func2) - { - // The third argument is dummy, it is ignored actually. 
- parallel_invoke_noop noop; - internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop); - spawn(sub_root); - } - // three arguments - template - void add_children (const function1& _func1, const function2& _func2, const function3& _func3) - { - internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3); - spawn(sub_root); - } -#endif // __TBB_VARIADIC_PARALLEL_INVOKE - - // Waits for all child tasks - template - void run_and_finish(const F0& f0) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(f0); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn_and_wait_for_all(*invoker); - } - }; - // The class destroys root if exception occurred as well as in normal case - class parallel_invoke_cleaner: internal::no_copy { - public: -#if __TBB_TASK_GROUP_CONTEXT - parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context) - : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children)) -#else - parallel_invoke_cleaner(int number_of_children, tbb::task_group_context&) - : root(*new(task::allocate_root()) internal::parallel_invoke_helper(number_of_children)) -#endif /* !__TBB_TASK_GROUP_CONTEXT */ - {} - - ~parallel_invoke_cleaner(){ - root.destroy(root); - } - internal::parallel_invoke_helper& root; - }; - -#if __TBB_VARIADIC_PARALLEL_INVOKE -// Determine whether the last parameter in a pack is task_group_context - template struct impl_selector; // to workaround a GCC bug - - template struct impl_selector { - typedef typename impl_selector::type type; - }; - - template struct impl_selector { - typedef false_type type; - }; - template<> struct impl_selector { - typedef true_type type; - }; - - // Select task_group_context parameter from the back of a pack - task_group_context& get_context( task_group_context& tgc ) { return tgc; } - - template - task_group_context& get_context( T1&& /*ignored*/, T&&... t ) - { return get_context( std::forward(t)... ); } - - // task_group_context is known to be at the back of the parameter pack - template - void parallel_invoke_impl(true_type, F0&& f0, F1&& f1, F&&... f) { - __TBB_STATIC_ASSERT(sizeof...(F)>0, "Variadic parallel_invoke implementation broken?"); - // # of child tasks: f0, f1, and a task for each two elements of the pack except the last - const size_t number_of_children = 2 + sizeof...(F)/2; - parallel_invoke_cleaner cleaner(number_of_children, get_context(std::forward(f)...)); - parallel_invoke_helper& root = cleaner.root; - - root.add_children(std::forward(f)...); - root.add_children(std::forward(f1)); - root.run_and_finish(std::forward(f0)); - } - - // task_group_context is not in the pack, needs to be added - template - void parallel_invoke_impl(false_type, F0&& f0, F1&& f1, F&&... f) { - tbb::task_group_context context; - // Add context to the arguments, and redirect to the other overload - parallel_invoke_impl(true_type(), std::forward(f0), std::forward(f1), std::forward(f)..., context); - } -#endif -} // namespace internal -//! @endcond - -/** \name parallel_invoke - **/ -//@{ -//! Executes a list of tasks in parallel and waits for all tasks to complete. 
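A minimal sketch of the call pattern this machinery serves: parallel_invoke runs independent callables as sibling tasks and returns only once all of them have finished. Sorting two halves of an array is an illustrative workload, not code from this diff:

#include "tbb/parallel_invoke.h"
#include <algorithm>
#include <cstddef>

void sort_both_halves(int* a, std::size_t n) {
    tbb::parallel_invoke(
        [=] { std::sort(a, a + n / 2); },
        [=] { std::sort(a + n / 2, a + n); });
}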
-/** @ingroup algorithms */ - -#if __TBB_VARIADIC_PARALLEL_INVOKE - -// parallel_invoke for two or more arguments via variadic templates -// presence of task_group_context is defined automatically -template -void parallel_invoke(F0&& f0, F1&& f1, F&&... f) { - typedef typename internal::impl_selector::type selector_type; - internal::parallel_invoke_impl(selector_type(), std::forward(f0), std::forward(f1), std::forward(f)...); -} - -#else + http://www.apache.org/licenses/LICENSE-2.0 -// parallel_invoke with user-defined context -// two arguments -template -void parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) { - internal::parallel_invoke_cleaner cleaner(2, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f1); - - root.run_and_finish(f0); -} - -// three arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) { - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f2); - root.add_children(f1); - - root.run_and_finish(f0); -} - -// four arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f3); - root.add_children(f2); - root.add_children(f1); - - root.run_and_finish(f0); -} - -// five arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// six arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f5, f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// seven arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f6, f5, f4); - root.add_children(f3, f2, f1); - - root.run_and_finish(f0); -} - -// eight arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f7, f6, f5); - root.add_children(f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// nine arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f8, f7, f6); - root.add_children(f5, f4, f3); - root.add_children(f2, 
f1); - - root.run_and_finish(f0); -} - -// ten arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f9, f8, f7); - root.add_children(f6, f5, f4); - root.add_children(f3, f2, f1); - - root.run_and_finish(f0); -} - -// two arguments -template -void parallel_invoke(const F0& f0, const F1& f1) { - task_group_context context; - parallel_invoke(f0, f1, context); -} -// three arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) { - task_group_context context; - parallel_invoke(f0, f1, f2, context); -} -// four arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, context); -} -// five arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, context); -} -// six arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, context); -} -// seven arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, context); -} -// eight arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, context); -} -// nine arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, context); -} -// ten arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context); -} -#endif // __TBB_VARIADIC_PARALLEL_INVOKE -//@} - -} // namespace + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_parallel_invoke_H */ +#include "../oneapi/tbb/parallel_invoke.h" diff --git a/src/tbb/include/tbb/parallel_pipeline.h b/src/tbb/include/tbb/parallel_pipeline.h new file mode 100644 index 000000000..aceee49f8 --- /dev/null +++ b/src/tbb/include/tbb/parallel_pipeline.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
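For orientation while reading this hunk: the fixed-arity overload ladder deleted above and the variadic template before it expose the same call shape, and the oneTBB stub keeps that surface, so user code is unaffected. A minimal usage sketch (illustrative only, not part of the diff):

    #include <tbb/parallel_invoke.h>

    #include <algorithm>
    #include <vector>

    // Runs three independent chores concurrently; parallel_invoke
    // returns only once all of them have completed.
    void sort_all(std::vector<int>& a, std::vector<int>& b, std::vector<int>& c) {
        tbb::parallel_invoke(
            [&] { std::sort(a.begin(), a.end()); },
            [&] { std::sort(b.begin(), b.end()); },
            [&] { std::sort(c.begin(), c.end()); });
    }
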
diff --git a/src/tbb/include/tbb/parallel_pipeline.h b/src/tbb/include/tbb/parallel_pipeline.h
new file mode 100644
index 000000000..aceee49f8
--- /dev/null
+++ b/src/tbb/include/tbb/parallel_pipeline.h
@@ -0,0 +1,17 @@
+/*
+    Copyright (c) 2005-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "../oneapi/tbb/parallel_pipeline.h"
diff --git a/src/tbb/include/tbb/parallel_reduce.h b/src/tbb/include/tbb/parallel_reduce.h
index 17fba2019..83658755a 100644
--- a/src/tbb/include/tbb/parallel_reduce.h
+++ b/src/tbb/include/tbb/parallel_reduce.h
@@ -1,533 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
 
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
 
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_parallel_reduce_H
-#define __TBB_parallel_reduce_H
-
-#include <new>
-#include "task.h"
-#include "aligned_space.h"
-#include "partitioner.h"
-#include "tbb_profiling.h"
-
-namespace tbb {
-
-namespace interface7 {
-//! @cond INTERNAL
-namespace internal {
-
-    using namespace tbb::internal;
-
-    /** Values for reduction_context. */
-    enum {
-        root_task, left_child, right_child
-    };
-
-    /** Represented as a char, not enum, for compactness. */
-    typedef char reduction_context;
-
-    //! Task type used to combine the partial results of parallel_reduce.
-    /** @ingroup algorithms */
-    template<typename Body>
-    class finish_reduce: public flag_task {
-        //! Pointer to body, or NULL if the left child has not yet finished.
-        bool has_right_zombie;
-        const reduction_context my_context;
-        Body* my_body;
-        aligned_space<Body> zombie_space;
-        finish_reduce( reduction_context context_ ) :
-            has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?
-            my_context(context_),
-            my_body(NULL)
-        {
-        }
-        ~finish_reduce() {
-            if( has_right_zombie )
-                zombie_space.begin()->~Body();
-        }
-        task* execute() {
-            if( has_right_zombie ) {
-                // Right child was stolen.
-                Body* s = zombie_space.begin();
-                my_body->join( *s );
-                // Body::join() won't be called if canceled.
Defer destruction to destructor - } - if( my_context==left_child ) - itt_store_word_with_release( static_cast(parent())->my_body, my_body ); - return NULL; - } - template - friend class start_reduce; - }; - - //! allocate right task with new parent - void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes); - - //! Task type used to split the work of parallel_reduce. - /** @ingroup algorithms */ - template - class start_reduce: public task { - typedef finish_reduce finish_type; - Body* my_body; - Range my_range; - typename Partitioner::task_partition_type my_partition; - reduction_context my_context; - /*override*/ task* execute(); - //! Update affinity info, if any - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - template - friend class finish_reduce; - -public: - //! Constructor used for root task - start_reduce( const Range& range, Body* body, Partitioner& partitioner ) : - my_body(body), - my_range(range), - my_partition(partitioner), - my_context(root_task) - { - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj ) : - my_body(parent_.my_body), - my_range(parent_.my_range, split_obj), - my_partition(parent_.my_partition, split_obj), - my_context(right_child) - { - my_partition.set_affinity(*this); - parent_.my_context = left_child; - } - //! Construct right child from the given range as response to the demand. - /** parent_ remains left child. Newly constructed object is right child. */ - start_reduce( start_reduce& parent_, const Range& r, depth_t d ) : - my_body(parent_.my_body), - my_range(r), - my_partition(parent_.my_partition, split()), - my_context(right_child) - { - my_partition.set_affinity(*this); - my_partition.align_depth( d ); // TODO: move into constructor of partitioner - parent_.my_context = left_child; - } - static void run( const Range& range, Body& body, Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) ); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - //! Run body for range - void run_body( Range &r ) { (*my_body)( r ); } - - //! spawn right task, serves as callback for partitioner - // TODO: remove code duplication from 'offer_work' methods - void offer_work(typename Partitioner::split_type& split_obj) { - task *tasks[2]; - allocate_sibling(static_cast(this), tasks, sizeof(start_reduce), sizeof(finish_type)); - new((void*)tasks[0]) finish_type(my_context); - new((void*)tasks[1]) start_reduce(*this, split_obj); - spawn(*tasks[1]); - } - //! 
spawn right task, serves as callback for partitioner - void offer_work(const Range& r, depth_t d = 0) { - task *tasks[2]; - allocate_sibling(static_cast(this), tasks, sizeof(start_reduce), sizeof(finish_type)); - new((void*)tasks[0]) finish_type(my_context); - new((void*)tasks[1]) start_reduce(*this, r, d); - spawn(*tasks[1]); - } - }; - - //! allocate right task with new parent - // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined - inline void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes) { - tasks[0] = &start_reduce_task->allocate_continuation().allocate(finish_bytes); - start_reduce_task->set_parent(tasks[0]); - tasks[0]->set_ref_count(2); - tasks[1] = &tasks[0]->allocate_child().allocate(start_bytes); - } - - template - task* start_reduce::execute() { - my_partition.check_being_stolen( *this ); - if( my_context==right_child ) { - finish_type* parent_ptr = static_cast(parent()); - if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2??? - my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split()); - parent_ptr->has_right_zombie = true; - } - } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling - my_partition.execute(*this, my_range); - if( my_context==left_child ) { - finish_type* parent_ptr = static_cast(parent()); - __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL); - itt_store_word_with_release(parent_ptr->my_body, my_body ); - } - return NULL; - } - - //! Task type used to combine the partial results of parallel_deterministic_reduce. - /** @ingroup algorithms */ - template - class finish_deterministic_reduce: public task { - Body &my_left_body; - Body my_right_body; - - finish_deterministic_reduce( Body &body ) : - my_left_body( body ), - my_right_body( body, split() ) - { - } - task* execute() { - my_left_body.join( my_right_body ); - return NULL; - } - template - friend class start_deterministic_reduce; - }; - - //! Task type used to split the work of parallel_deterministic_reduce. - /** @ingroup algorithms */ - template - class start_deterministic_reduce: public task { - typedef finish_deterministic_reduce finish_type; - Body &my_body; - Range my_range; - /*override*/ task* execute(); - - //! Constructor used for root task - start_deterministic_reduce( const Range& range, Body& body ) : - my_body( body ), - my_range( range ) - { - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c ) : - my_body( c.my_right_body ), - my_range( parent_.my_range, split() ) - { - } - -public: - static void run( const Range& range, Body& body ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body) ); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. 
- task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) ); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, task_group_context& context ) { - if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - - template - task* start_deterministic_reduce::execute() { - if( !my_range.is_divisible() ) { - my_body( my_range ); - return NULL; - } else { - finish_type& c = *new( allocate_continuation() ) finish_type( my_body ); - recycle_as_child_of(c); - c.set_ref_count(2); - start_deterministic_reduce& b = *new( c.allocate_child() ) start_deterministic_reduce( *this, c ); - task::spawn(b); - return this; - } - } -} // namespace internal -//! @endcond -} //namespace interfaceX - -//! @cond INTERNAL -namespace internal { - using interface7::internal::start_reduce; - using interface7::internal::start_deterministic_reduce; - //! Auxiliary class for parallel_reduce; for internal use only. - /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body" - using given \ref parallel_reduce_lambda_req "anonymous function objects". - **/ - /** @ingroup algorithms */ - template - class lambda_reduce_body { - -//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced -// (might require some performance measurements) - - const Value& identity_element; - const RealBody& my_real_body; - const Reduction& my_reduction; - Value my_value; - lambda_reduce_body& operator= ( const lambda_reduce_body& other ); - public: - lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction ) - : identity_element(identity) - , my_real_body(body) - , my_reduction(reduction) - , my_value(identity) - { } - lambda_reduce_body( const lambda_reduce_body& other ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.my_value) - { } - lambda_reduce_body( lambda_reduce_body& other, tbb::split ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.identity_element) - { } - void operator()(Range& range) { - my_value = my_real_body(range, const_cast(my_value)); - } - void join( lambda_reduce_body& rhs ) { - my_value = my_reduction(const_cast(my_value), const_cast(rhs.my_value)); - } - Value result() const { - return my_value; - } - }; - -} // namespace internal -//! @endcond + http://www.apache.org/licenses/LICENSE-2.0 -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_reduce_body_req Requirements on parallel_reduce body - Class \c Body implementing the concept of parallel_reduce body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Must be able to run concurrently with operator() and method \c join - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r - and accumulating the result - - \code void Body::join( Body& b ); \endcode Join results. 
- The result in \c b should be merged into the result of \c this -**/ - -/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions) - TO BE DOCUMENTED -**/ - -/** \name parallel_reduce - See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/ -//@{ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body ) { - internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER() ); -} - -//! Parallel iteration with reduction and simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/** parallel_reduce overloads that work with anonymous function objects - (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> - ::run(range, body, __TBB_DEFAULT_PARTITIONER() ); - return body.result(); -} - -//! Parallel iteration with reduction and simple_partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run(range, body, partitioner ); - return body.result(); -} - -//! 
Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Parallel iteration with deterministic reduction and default partitioner. -/** @ingroup algorithms **/ -template -void parallel_deterministic_reduce( const Range& range, Body& body ) { - internal::start_deterministic_reduce::run( range, body ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) { - internal::start_deterministic_reduce::run( range, body, context ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/** parallel_reduce overloads that work with anonymous function objects - (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ - -//! Parallel iteration with deterministic reduction and default partitioner. 
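The Body requirements documented above (splitting constructor, subrange operator(), join) are easiest to see in a concrete reducer; the lambda_reduce_body adaptor deleted here exists precisely to wrap the identity/real_body/reduction triple into that shape. A minimal conforming Body, as a sketch with illustrative names:

    #include <tbb/blocked_range.h>
    #include <tbb/parallel_reduce.h>

    // Splitting constructor, accumulation over a subrange, and join(),
    // exactly as the requirements page above prescribes.
    struct SumBody {
        const float* data;
        float sum;
        explicit SumBody(const float* d) : data(d), sum(0.0f) {}
        SumBody(SumBody& other, tbb::split) : data(other.data), sum(0.0f) {}
        void operator()(const tbb::blocked_range<size_t>& r) {
            for (size_t i = r.begin(); i != r.end(); ++i)
                sum += data[i];
        }
        void join(SumBody& rhs) { sum += rhs.sum; }
    };

    float parallel_sum(const float* data, size_t n) {
        SumBody body(data);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, n), body);
        return body.sum;
    }

The functional overloads above compute the same thing from a (range, identity, accumulate-lambda, reduction) call, with the identity element standing in for the default-constructed state.
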
-/** @ingroup algorithms **/ -template -Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_deterministic_reduce > - ::run(range, body); - return body.result(); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_deterministic_reduce > - ::run( range, body, context ); - return body.result(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_parallel_reduce_H */ +#include "../oneapi/tbb/parallel_reduce.h" diff --git a/src/tbb/include/tbb/parallel_scan.h b/src/tbb/include/tbb/parallel_scan.h index e9d8c692e..682032a56 100644 --- a/src/tbb/include/tbb/parallel_scan.h +++ b/src/tbb/include/tbb/parallel_scan.h @@ -1,346 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_scan_H -#define __TBB_parallel_scan_H - -#include "task.h" -#include "aligned_space.h" -#include -#include "partitioner.h" - -namespace tbb { - -//! Used to indicate that the initial scan is being performed. -/** @ingroup algorithms */ -struct pre_scan_tag { - static bool is_final_scan() {return false;} -}; - -//! Used to indicate that the final scan is being performed. 
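The pre_scan_tag/final_scan_tag pair introduced here is how a scan Body learns which pass it is running: the pre-scan only accumulates, while the final scan also writes output. A running-sum sketch against the Body contract documented later in this file (names illustrative):

    #include <tbb/blocked_range.h>
    #include <tbb/parallel_scan.h>

    class RunningSum {
        const float* in;
        float* out;
    public:
        float sum;
        RunningSum(const float* in_, float* out_) : in(in_), out(out_), sum(0.0f) {}
        RunningSum(RunningSum& b, tbb::split) : in(b.in), out(b.out), sum(0.0f) {}
        // One operator() serves both passes; output is written only
        // when Tag::is_final_scan() is true.
        template <typename Tag>
        void operator()(const tbb::blocked_range<size_t>& r, Tag) {
            float tmp = sum;
            for (size_t i = r.begin(); i != r.end(); ++i) {
                tmp += in[i];
                if (Tag::is_final_scan())
                    out[i] = tmp;  // inclusive prefix sum
            }
            sum = tmp;
        }
        void reverse_join(RunningSum& a) { sum = a.sum + sum; }
        void assign(RunningSum& b) { sum = b.sum; }
    };

Invoked as RunningSum body(in, out); tbb::parallel_scan(tbb::blocked_range<size_t>(0, n), body); the reverse_join/assign members correspond to the requirements page further down.
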
-/** @ingroup algorithms */ -struct final_scan_tag { - static bool is_final_scan() {return true;} -}; - -//! @cond INTERNAL -namespace internal { - - //! Performs final scan for a leaf - /** @ingroup algorithms */ - template - class final_sum: public task { - public: - Body my_body; - private: - aligned_space my_range; - //! Where to put result of last subrange, or NULL if not last subrange. - Body* my_stuff_last; - public: - final_sum( Body& body_ ) : - my_body(body_,split()) - { - poison_pointer(my_stuff_last); - } - ~final_sum() { - my_range.begin()->~Range(); - } - void finish_construction( const Range& range_, Body* stuff_last_ ) { - new( my_range.begin() ) Range(range_); - my_stuff_last = stuff_last_; - } - private: - /*override*/ task* execute() { - my_body( *my_range.begin(), final_scan_tag() ); - if( my_stuff_last ) - my_stuff_last->assign(my_body); - return NULL; - } - }; - - //! Split work to be done in the scan. - /** @ingroup algorithms */ - template - class sum_node: public task { - typedef final_sum final_sum_type; - public: - final_sum_type *my_incoming; - final_sum_type *my_body; - Body *my_stuff_last; - private: - final_sum_type *my_left_sum; - sum_node *my_left; - sum_node *my_right; - bool my_left_is_final; - Range my_range; - sum_node( const Range range_, bool left_is_final_ ) : - my_left_sum(NULL), - my_left(NULL), - my_right(NULL), - my_left_is_final(left_is_final_), - my_range(range_) - { - // Poison fields that will be set by second pass. - poison_pointer(my_body); - poison_pointer(my_incoming); - } - task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) { - if( !n ) { - f.recycle_as_child_of( *this ); - f.finish_construction( range_, stuff_last_ ); - return &f; - } else { - n->my_body = &f; - n->my_incoming = incoming_; - n->my_stuff_last = stuff_last_; - return n; - } - } - /*override*/ task* execute() { - if( my_body ) { - if( my_incoming ) - my_left_sum->my_body.reverse_join( my_incoming->my_body ); - recycle_as_continuation(); - sum_node& c = *this; - task* b = c.create_child(Range(my_range,split()),*my_left_sum,my_right,my_left_sum,my_stuff_last); - task* a = my_left_is_final ? NULL : c.create_child(my_range,*my_body,my_left,my_incoming,NULL); - set_ref_count( (a!=NULL)+(b!=NULL) ); - my_body = NULL; - if( a ) spawn(*b); - else a = b; - return a; - } else { - return NULL; - } - } - template - friend class start_scan; - - template - friend class finish_scan; - }; - - //! 
Combine partial results - /** @ingroup algorithms */ - template - class finish_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type** const my_sum; - sum_node_type*& my_return_slot; - public: - final_sum_type* my_right_zombie; - sum_node_type& my_result; - - /*override*/ task* execute() { - __TBB_ASSERT( my_result.ref_count()==(my_result.my_left!=NULL)+(my_result.my_right!=NULL), NULL ); - if( my_result.my_left ) - my_result.my_left_is_final = false; - if( my_right_zombie && my_sum ) - ((*my_sum)->my_body).reverse_join(my_result.my_left_sum->my_body); - __TBB_ASSERT( !my_return_slot, NULL ); - if( my_right_zombie || my_result.my_right ) { - my_return_slot = &my_result; - } else { - destroy( my_result ); - } - if( my_right_zombie && !my_sum && !my_result.my_right ) { - destroy(*my_right_zombie); - my_right_zombie = NULL; - } - return NULL; - } - - finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : - my_sum(sum_), - my_return_slot(return_slot_), - my_right_zombie(NULL), - my_result(result_) - { - __TBB_ASSERT( !my_return_slot, NULL ); - } - }; + http://www.apache.org/licenses/LICENSE-2.0 - //! Initial task to split the work - /** @ingroup algorithms */ - template - class start_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type* my_body; - /** Non-null if caller is requesting total. */ - final_sum_type** my_sum; - sum_node_type** my_return_slot; - /** Null if computing root. */ - sum_node_type* my_parent_sum; - bool my_is_final; - bool my_is_right_child; - Range my_range; - typename Partitioner::partition_type my_partition; - /*override*/ task* execute(); - public: - start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) : - my_body(parent_.my_body), - my_sum(parent_.my_sum), - my_return_slot(&return_slot_), - my_parent_sum(parent_sum_), - my_is_final(parent_.my_is_final), - my_is_right_child(false), - my_range(parent_.my_range,split()), - my_partition(parent_.my_partition,split()) - { - __TBB_ASSERT( !*my_return_slot, NULL ); - } - - start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) : - my_body(&body_), - my_sum(NULL), - my_return_slot(&return_slot_), - my_parent_sum(NULL), - my_is_final(true), - my_is_right_child(false), - my_range(range_), - my_partition(partitioner_) - { - __TBB_ASSERT( !*my_return_slot, NULL ); - } - - static void run( const Range& range_, Body& body_, const Partitioner& partitioner_ ) { - if( !range_.empty() ) { - typedef internal::start_scan start_pass1_type; - internal::sum_node* root = NULL; - typedef internal::final_sum final_sum_type; - final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body_ ); - start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type( - /*my_return_slot=*/root, - range_, - *temp_body, - partitioner_ ); - task::spawn_root_and_wait( pass1 ); - if( root ) { - root->my_body = temp_body; - root->my_incoming = NULL; - root->my_stuff_last = &body_; - task::spawn_root_and_wait( *root ); - } else { - body_.assign(temp_body->my_body); - temp_body->finish_construction( range_, NULL ); - temp_body->destroy(*temp_body); - } - } - } - }; - - template - task* start_scan::execute() { - typedef internal::finish_scan finish_pass1_type; - finish_pass1_type* p = my_parent_sum ? 
static_cast( parent() ) : NULL; - // Inspecting p->result.left_sum would ordinarily be a race condition. - // But we inspect it only if we are not a stolen task, in which case we - // know that task assigning to p->result.left_sum has completed. - bool treat_as_stolen = my_is_right_child && (is_stolen_task() || my_body!=p->my_result.my_left_sum); - if( treat_as_stolen ) { - // Invocation is for right child that has been really stolen or needs to be virtually stolen - p->my_right_zombie = my_body = new( allocate_root() ) final_sum_type(my_body->my_body); - my_is_final = false; - } - task* next_task = NULL; - if( (my_is_right_child && !treat_as_stolen) || !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { - if( my_is_final ) - (my_body->my_body)( my_range, final_scan_tag() ); - else if( my_sum ) - (my_body->my_body)( my_range, pre_scan_tag() ); - if( my_sum ) - *my_sum = my_body; - __TBB_ASSERT( !*my_return_slot, NULL ); - } else { - sum_node_type* result; - if( my_parent_sum ) - result = new(allocate_additional_child_of(*my_parent_sum)) sum_node_type(my_range,/*my_left_is_final=*/my_is_final); - else - result = new(task::allocate_root()) sum_node_type(my_range,/*my_left_is_final=*/my_is_final); - finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*my_return_slot,my_sum,*result); - // Split off right child - start_scan& b = *new( c.allocate_child() ) start_scan( /*my_return_slot=*/result->my_right, *this, result ); - b.my_is_right_child = true; - // Left child is recycling of *this. Must recycle this before spawning b, - // otherwise b might complete and decrement c.ref_count() to zero, which - // would cause c.execute() to run prematurely. - recycle_as_child_of(c); - c.set_ref_count(2); - c.spawn(b); - my_sum = &result->my_left_sum; - my_return_slot = &result->my_left; - my_is_right_child = false; - next_task = this; - my_parent_sum = result; - __TBB_ASSERT( !*my_return_slot, NULL ); - } - return next_task; - } -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_scan_body_req Requirements on parallel_scan body - Class \c Body implementing the concept of parallel_scan body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Split \c b so that \c this and \c b can accumulate separately - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode - Preprocess iterations for range \c r - - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode - Do final processing for iterations of range \c r - - \code void Body::reverse_join( Body& a ); \endcode - Merge preprocessing state of \c a into \c this, where \c a was - created earlier from \c b by b's splitting constructor -**/ - -/** \name parallel_scan - See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". **/ -//@{ - -//! Parallel prefix with default partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body ) { - internal::start_scan::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} - -//! Parallel prefix with simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} - -//! 
Parallel prefix with auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_scan_H */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +#include "../oneapi/tbb/parallel_scan.h" diff --git a/src/tbb/include/tbb/parallel_sort.h b/src/tbb/include/tbb/parallel_sort.h index 1d33c1f70..b238e6caa 100644 --- a/src/tbb/include/tbb/parallel_sort.h +++ b/src/tbb/include/tbb/parallel_sort.h @@ -1,253 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_sort_H -#define __TBB_parallel_sort_H - -#include "parallel_for.h" -#include "blocked_range.h" -#include "internal/_range_iterator.h" -#include -#include -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - -//! Range used in quicksort to split elements into subranges based on a value. -/** The split operation selects a splitter and places all elements less than or equal - to the value in the first range and the remaining elements in the second range. - @ingroup algorithms */ -template -class quick_sort_range: private no_assign { - - inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const { - return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) - : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? 
r : l ) ); - } - - inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const { - size_t offset = range.size/8u; - return median_of_three(array, - median_of_three(array, 0, offset, offset*2), - median_of_three(array, offset*3, offset*4, offset*5), - median_of_three(array, offset*6, offset*7, range.size - 1) ); - - } - -public: - - static const size_t grainsize = 500; - const Compare ∁ - RandomAccessIterator begin; - size_t size; - - quick_sort_range( RandomAccessIterator begin_, size_t size_, const Compare &comp_ ) : - comp(comp_), begin(begin_), size(size_) {} - - bool empty() const {return size==0;} - bool is_divisible() const {return size>=grainsize;} - - quick_sort_range( quick_sort_range& range, split ) : comp(range.comp) { - using std::swap; - RandomAccessIterator array = range.begin; - RandomAccessIterator key0 = range.begin; - size_t m = pseudo_median_of_nine(array, range); - if (m) swap ( array[0], array[m] ); - - size_t i=0; - size_t j=range.size; - // Partition interval [i+1,j-1] with key *key0. - for(;;) { - __TBB_ASSERT( i -class quick_sort_pretest_body : internal::no_assign { - const Compare ∁ - -public: - quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {} - - void operator()( const blocked_range& range ) const { - task &my_task = task::self(); - RandomAccessIterator my_end = range.end(); - - int i = 0; - for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) { - if ( i%64 == 0 && my_task.is_cancelled() ) break; - - // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1 - if ( comp( *(k), *(k-1) ) ) { - my_task.cancel_group_execution(); - break; - } - } - } + http://www.apache.org/licenses/LICENSE-2.0 -}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Body class used to sort elements in a range that is smaller than the grainsize. -/** @ingroup algorithms */ -template -struct quick_sort_body { - void operator()( const quick_sort_range& range ) const { - //SerialQuickSort( range.begin, range.size, range.comp ); - std::sort( range.begin, range.begin + range.size, range.comp ); - } -}; - -//! Wrapper method to initiate the sort by calling parallel_for. -/** @ingroup algorithms */ -template -void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { -#if __TBB_TASK_GROUP_CONTEXT - task_group_context my_context; - const int serial_cutoff = 9; - - __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" ); - RandomAccessIterator k; - for ( k = begin ; k != begin + serial_cutoff; ++k ) { - if ( comp( *(k+1), *k ) ) { - goto do_parallel_quick_sort; - } - } - - parallel_for( blocked_range(k+1, end), - quick_sort_pretest_body(comp), - auto_partitioner(), - my_context); - - if (my_context.is_group_execution_cancelled()) -do_parallel_quick_sort: -#endif /* __TBB_TASK_GROUP_CONTEXT */ - parallel_for( quick_sort_range(begin, end-begin, comp ), - quick_sort_body(), - auto_partitioner() ); -} - -} // namespace internal -//! @endcond - -/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort - Requirements on value type \c T of \c RandomAccessIterator for \c parallel_sort: - - \code void swap( T& x, T& y ) \endcode Swaps \c x and \c y - - \code bool Compare::operator()( const T& x, const T& y ) \endcode - True if x comes before y; -**/ - -/** \name parallel_sort - See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/ -//@{ - -//! 
Sorts the data in [begin,end) using the given comparator -/** The compare function object is used for all comparisons between elements during sorting. - The compare object must define a bool operator() function. - @ingroup algorithms **/ -template -void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { - const int min_parallel_size = 500; - if( end > begin ) { - if (end - begin < min_parallel_size) { - std::sort(begin, end, comp); - } else { - internal::parallel_quick_sort(begin, end, comp); - } - } -} - -//! Sorts the data in [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { - parallel_sort( begin, end, std::less< typename std::iterator_traits::value_type >() ); -} - -//! Sorts the data in rng using the given comparator -/** @ingroup algorithms **/ -template -void parallel_sort(Range& rng, const Compare& comp) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); -} - -//! Sorts the data in const rng using the given comparator -/** @ingroup algorithms **/ -template -void parallel_sort(const Range& rng, const Compare& comp) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); -} - -//! Sorts the data in rng with a default comparator \c std::less -/** @ingroup algorithms **/ -template -void parallel_sort(Range& rng) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); -} - -//! Sorts the data in const rng with a default comparator \c std::less -/** @ingroup algorithms **/ -template -void parallel_sort(const Range& rng) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); -} - -//! Sorts the data in the range \c [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( T * begin, T * end ) { - parallel_sort( begin, end, std::less< T >() ); -} -//@} - - -} // namespace tbb - -#endif + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +#include "../oneapi/tbb/parallel_sort.h" diff --git a/src/tbb/include/tbb/parallel_while.h b/src/tbb/include/tbb/parallel_while.h deleted file mode 100644 index a1db83413..000000000 --- a/src/tbb/include/tbb/parallel_while.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
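As a usage sketch of the parallel_sort overload set above (the call shape is unchanged under the oneTBB stub): pass any strict-weak-ordering comparator, per the iterator-requirements page. Record is an illustrative type:

    #include <tbb/parallel_sort.h>

    #include <vector>

    struct Record { int key; };

    void sort_records(std::vector<Record>& v) {
        // Comparator returns true when x should precede y.
        tbb::parallel_sort(v.begin(), v.end(),
                           [](const Record& x, const Record& y) { return x.key < y.key; });
    }

Note that below min_parallel_size (500 elements) the implementation above simply falls back to std::sort.
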
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_parallel_while -#define __TBB_parallel_while - -#include "task.h" -#include - -namespace tbb { - -template -class parallel_while; - -//! @cond INTERNAL -namespace internal { - - template class while_task; - - //! For internal use only. - /** Executes one iteration of a while. - @ingroup algorithms */ - template - class while_iteration_task: public task { - const Body& my_body; - typename Body::argument_type my_value; - /*override*/ task* execute() { - my_body(my_value); - return NULL; - } - while_iteration_task( const typename Body::argument_type& value, const Body& body ) : - my_body(body), my_value(value) - {} - template friend class while_group_task; - friend class tbb::parallel_while; - }; - - //! For internal use only - /** Unpacks a block of iterations. - @ingroup algorithms */ - template - class while_group_task: public task { - static const size_t max_arg_size = 4; - const Body& my_body; - size_t size; - typename Body::argument_type my_arg[max_arg_size]; - while_group_task( const Body& body ) : my_body(body), size(0) {} - /*override*/ task* execute() { - typedef while_iteration_task iteration_type; - __TBB_ASSERT( size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_arg[k],my_body); - if( ++k==size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - template friend class while_task; - }; - - //! For internal use only. - /** Gets block of iterations from a stream and packages them into a while_group_task. - @ingroup algorithms */ - template - class while_task: public task { - Stream& my_stream; - const Body& my_body; - empty_task& my_barrier; - /*override*/ task* execute() { - typedef while_group_task block_type; - block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body); - size_t k=0; - while( my_stream.pop_if_present(t.my_arg[k]) ) { - if( ++k==block_type::max_arg_size ) { - // There might be more iterations. - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.size = k; - return &t; - } - } - while_task( Stream& stream, const Body& body, empty_task& barrier ) : - my_stream(stream), - my_body(body), - my_barrier(barrier) - {} - friend class tbb::parallel_while; - }; - -} // namespace internal -//! @endcond - -//! Parallel iteration over a stream, with optional addition of more work. -/** The Body b has the requirement: \n - "b(v)" \n - "b.argument_type" \n - where v is an argument_type - @ingroup algorithms */ -template -class parallel_while: internal::no_copy { -public: - //! Construct empty non-running parallel while. - parallel_while() : my_body(NULL), my_barrier(NULL) {} - - //! Destructor cleans up data members before returning. - ~parallel_while() { - if( my_barrier ) { - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - } - } - - //! Type of items - typedef typename Body::argument_type value_type; - - //! Apply body.apply to each item in the stream. 
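parallel_while is removed outright rather than stubbed: it had long been deprecated, and oneTBB's parallel_for_each with a feeder covers the same "consume a stream, optionally adding work while running" pattern. For reference, the legacy API deleted here was driven roughly like this, a sketch against the Stream and Body contracts documented in these comments (Node and the helper names are illustrative):

    #include <tbb/parallel_while.h>  // the header deleted in this hunk

    struct Node { int value; Node* next; };

    // Stream: pop_if_present() yields items until the list is drained.
    class NodeStream {
        Node* my_head;
    public:
        explicit NodeStream(Node* head) : my_head(head) {}
        bool pop_if_present(Node*& item) {
            if (!my_head) return false;
            item = my_head;
            my_head = my_head->next;
            return true;
        }
    };

    // Body: exposes argument_type and a const call operator.
    struct DoubleValue {
        typedef Node* argument_type;
        void operator()(Node* n) const { n->value *= 2; }
    };

    void double_all(Node* list) {
        NodeStream stream(list);
        tbb::parallel_while<DoubleValue> w;
        w.run(stream, DoubleValue());  // returns once the stream is exhausted
    }
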
- /** A Stream s has the requirements \n - "S::value_type" \n - "s.pop_if_present(value) is convertible to bool */ - template - void run( Stream& stream, const Body& body ); - - //! Add a work item while running. - /** Should be executed only by body.apply or a thread spawned therefrom. */ - void add( const value_type& item ); - -private: - const Body* my_body; - empty_task* my_barrier; -}; - -template -template -void parallel_while::run( Stream& stream, const Body& body ) { - using namespace internal; - empty_task& barrier = *new( task::allocate_root() ) empty_task(); - my_body = &body; - my_barrier = &barrier; - my_barrier->set_ref_count(2); - while_task& w = *new( my_barrier->allocate_child() ) while_task( stream, body, barrier ); - my_barrier->spawn_and_wait_for_all(w); - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - my_body = NULL; -} - -template -void parallel_while::add( const value_type& item ) { - __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not running"); - typedef internal::while_iteration_task iteration_type; - iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body); - task::self().spawn( i ); -} - -} // namespace - -#endif /* __TBB_parallel_while */ diff --git a/src/tbb/include/tbb/partitioner.h b/src/tbb/include/tbb/partitioner.h index c008e8ce8..b959e35a2 100644 --- a/src/tbb/include/tbb/partitioner.h +++ b/src/tbb/include/tbb/partitioner.h @@ -1,630 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_partitioner_H -#define __TBB_partitioner_H - -#ifndef __TBB_INITIAL_CHUNKS -// initial task divisions per thread -#define __TBB_INITIAL_CHUNKS 2 -#endif -#ifndef __TBB_RANGE_POOL_CAPACITY -// maximum number of elements in range pool -#define __TBB_RANGE_POOL_CAPACITY 8 -#endif -#ifndef __TBB_INIT_DEPTH -// initial value for depth of range pool -#define __TBB_INIT_DEPTH 5 -#endif -#ifndef __TBB_DEMAND_DEPTH_ADD -// when imbalance is found range splits this value times more -#define __TBB_DEMAND_DEPTH_ADD 2 -#endif -#ifndef __TBB_STATIC_THRESHOLD -// necessary number of clocks for the work to be distributed among all tasks -#define __TBB_STATIC_THRESHOLD 40000 -#endif -#if __TBB_DEFINE_MIC -#define __TBB_NONUNIFORM_TASK_CREATION 1 -#ifdef __TBB_machine_time_stamp -#define __TBB_USE_MACHINE_TIME_STAMPS 1 -#define __TBB_task_duration() __TBB_STATIC_THRESHOLD -#endif // __TBB_machine_time_stamp -#endif // __TBB_DEFINE_MIC - -#include "task.h" -#include "aligned_space.h" -#include "atomic.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244) -#endif - -namespace tbb { - -class auto_partitioner; -class simple_partitioner; -class affinity_partitioner; -namespace interface7 { - namespace internal { - class affinity_partition_type; - } -} - -namespace internal { //< @cond INTERNAL -size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); - -//! Defines entry point for affinity partitioner into tbb run-time library. -class affinity_partitioner_base_v3: no_copy { - friend class tbb::affinity_partitioner; - friend class tbb::interface7::internal::affinity_partition_type; - //! Array that remembers affinities of tree positions to affinity_id. - /** NULL if my_size==0. */ - affinity_id* my_array; - //! Number of elements in my_array. - size_t my_size; - //! Zeros the fields. - affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} - //! Deallocates my_array. - ~affinity_partitioner_base_v3() {resize(0);} - //! Resize my_array. - /** Retains values if resulting size is the same. */ - void __TBB_EXPORTED_METHOD resize( unsigned factor ); -}; - -//! Provides backward-compatible methods for partition objects without affinity. -class partition_type_base { -public: - void set_affinity( task & ) {} - void note_affinity( task::affinity_id ) {} - task* continue_after_execute_range() {return NULL;} - bool decide_whether_to_delay() {return false;} - void spawn_or_delay( bool, task& b ) { - task::spawn(b); - } -}; - -template class start_scan; - -} //< namespace internal @endcond - -namespace serial { -namespace interface7 { -template class start_for; -} -} - -namespace interface7 { -//! @cond INTERNAL -namespace internal { -using namespace tbb::internal; -template class start_for; -template class start_reduce; - -//! Join task node that contains shared flag for stealing feedback -class flag_task: public task { -public: - tbb::atomic my_child_stolen; - flag_task() { my_child_stolen = false; } - task* execute() { return NULL; } - static void mark_task_stolen(task &t) { - tbb::atomic &flag = static_cast(t.parent())->my_child_stolen; -#if TBB_USE_THREADING_TOOLS - // Threading tools respect lock prefix but report false-positive data-race via plain store - flag.fetch_and_store(true); -#else - flag = true; -#endif //TBB_USE_THREADING_TOOLS - } - static bool is_peer_stolen(task &t) { - return static_cast(t.parent())->my_child_stolen; - } -}; - -//! 
Depth is a relative depth of recursive division inside a range pool. Relative depth allows -//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented -//! by a number that cannot fit into machine word. -typedef unsigned char depth_t; - -//! Range pool stores ranges of type T in a circular buffer with MaxCapacity -template -class range_vector { - depth_t my_head; - depth_t my_tail; - depth_t my_size; - depth_t my_depth[MaxCapacity]; // relative depths of stored ranges - tbb::aligned_space my_pool; - -public: - //! initialize via first range in pool - range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) { - my_depth[0] = 0; - new( static_cast(my_pool.begin()) ) T(elem);//TODO: std::move? - } - ~range_vector() { - while( !empty() ) pop_back(); - } - bool empty() const { return my_size == 0; } - depth_t size() const { return my_size; } - //! Populates range pool via ranges up to max depth or while divisible - //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool up to two 1/4 pieces - void split_to_fill(depth_t max_depth) { - while( my_size < MaxCapacity && is_divisible(max_depth) ) { - depth_t prev = my_head; - my_head = (my_head + 1) % MaxCapacity; - new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move? - my_pool.begin()[prev].~T(); // instead of assignment - new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split - my_depth[my_head] = ++my_depth[prev]; - my_size++; - } - } - void pop_back() { - __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size"); - my_pool.begin()[my_head].~T(); - my_size--; - my_head = (my_head + MaxCapacity - 1) % MaxCapacity; - } - void pop_front() { - __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size"); - my_pool.begin()[my_tail].~T(); - my_size--; - my_tail = (my_tail + 1) % MaxCapacity; - } - T& back() { - __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size"); - return my_pool.begin()[my_head]; - } - T& front() { - __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size"); - return my_pool.begin()[my_tail]; - } - //! similarly to front(), returns depth of the first range in the pool - depth_t front_depth() { - __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size"); - return my_depth[my_tail]; - } - depth_t back_depth() { - __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size"); - return my_depth[my_head]; - } - bool is_divisible(depth_t max_depth) { - return back_depth() < max_depth && back().is_divisible(); - } -}; - -//! Provides default methods for partition objects and common algorithm blocks. -template -struct partition_type_base { - typedef split split_type; - // decision makers - void set_affinity( task & ) {} - void note_affinity( task::affinity_id ) {} - bool check_being_stolen(task &) { return false; } // part of old should_execute_range() - bool check_for_demand(task &) { return false; } - bool is_divisible() { return true; } // part of old should_execute_range() - depth_t max_depth() { return 0; } - void align_depth(depth_t) { } - template split_type get_split() { return split(); } - - // common function blocks - Partition& self() { return *static_cast(this); } // CRTP helper - template - void execute(StartType &start, Range &range) { - // The algorithm in a few words ([]-denotes calls to decision methods of partitioner): - // [If this task is stolen, adjust depth and divisions if necessary, set flag]. 
- // If range is divisible { - // Spread the work while [initial divisions left]; - // Create trap task [if necessary]; - // } - // If not divisible or [max depth is reached], execute, else do the range pool part - if ( range.is_divisible() ) { - if ( self().is_divisible() ) { - do { // split until is divisible - typename Partition::split_type split_obj = self().template get_split(); - start.offer_work( split_obj ); - } while ( range.is_divisible() && self().is_divisible() ); - } - } - if( !range.is_divisible() || !self().max_depth() ) - start.run_body( range ); // simple partitioner goes always here - else { // do range pool - internal::range_vector range_pool(range); - do { - range_pool.split_to_fill(self().max_depth()); // fill range pool - if( self().check_for_demand( start ) ) { - if( range_pool.size() > 1 ) { - start.offer_work( range_pool.front(), range_pool.front_depth() ); - range_pool.pop_front(); - continue; - } - if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task - continue; // note: next split_to_fill() should split range at least once - } - start.run_body( range_pool.back() ); - range_pool.pop_back(); - } while( !range_pool.empty() && !start.is_cancelled() ); - } - } -}; - -//! Provides default methods for auto (adaptive) partition objects. -template -struct adaptive_partition_type_base : partition_type_base { - size_t my_divisor; - depth_t my_max_depth; - adaptive_partition_type_base() : my_max_depth(__TBB_INIT_DEPTH) { - my_divisor = tbb::internal::get_initial_auto_partitioner_divisor() / 4; - __TBB_ASSERT(my_divisor, "initial value of get_initial_auto_partitioner_divisor() is not valid"); - } - adaptive_partition_type_base(adaptive_partition_type_base &src, split) { - my_max_depth = src.my_max_depth; -#if TBB_USE_ASSERT - size_t old_divisor = src.my_divisor; -#endif - -#if __TBB_INITIAL_TASK_IMBALANCE - if( src.my_divisor <= 1 ) my_divisor = 0; - else my_divisor = src.my_divisor = (src.my_divisor + 1u) / 2u; -#else - my_divisor = src.my_divisor / 2u; - src.my_divisor = src.my_divisor - my_divisor; // TODO: check the effect separately - if (my_divisor) src.my_max_depth += static_cast(__TBB_Log2(src.my_divisor / my_divisor)); -#endif - // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves. - // A task which has only one index must produce the right split without reserved index in order to avoid - // it to be overwritten in note_affinity() of the created (right) task. - // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order) - __TBB_ASSERT( (old_divisor <= 1 && my_divisor == 0) || - (old_divisor > 1 && my_divisor != 0), NULL); - } - adaptive_partition_type_base(adaptive_partition_type_base &src, const proportional_split& split_obj) { - my_max_depth = src.my_max_depth; - my_divisor = size_t(float(src.my_divisor) * float(split_obj.right()) - / float(split_obj.left() + split_obj.right())); - src.my_divisor -= my_divisor; - } - bool check_being_stolen( task &t) { // part of old should_execute_range() - if( !my_divisor ) { // if not from the top P tasks of binary tree - my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)? 
-        if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task
-#if TBB_USE_EXCEPTIONS
-            // RTTI is available, check whether the cast is valid
-            __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0);
-            // correctness of the cast relies on avoiding the root task for which:
-            // - initial value of my_divisor != 0 (protected by separate assertion)
-            // - is_stolen_task() always returns false for the root task.
-#endif
-            flag_task::mark_task_stolen(t);
-            if( !my_max_depth ) my_max_depth++;
-            my_max_depth += __TBB_DEMAND_DEPTH_ADD;
-            return true;
-        }
-        return false;
-    }
-    void align_depth(depth_t base) {
-        __TBB_ASSERT(base <= my_max_depth, 0);
-        my_max_depth -= base;
-    }
-    depth_t max_depth() { return my_max_depth; }
-};
+        http://www.apache.org/licenses/LICENSE-2.0
 
-//! Helper that enables one or the other code branches (see example in is_range_divisible_in_proportion)
-template<bool C, typename T = void> struct enable_if { typedef T type; };
-template<typename T> struct enable_if<false, T> { };
-
-//! Class determines whether template parameter has static boolean
-//! constant 'is_divisible_in_proportion' initialized with value of
-//! 'true' or not.
-/** If template parameter has such field that has been initialized
-  * with non-zero value then class field will be set to 'true',
-  * otherwise - 'false'
-  */
-template <typename Range>
-class is_range_divisible_in_proportion {
-private:
-    typedef char yes[1];
-    typedef char no [2];
-
-    template <typename range_type> static yes& decide(typename enable_if<range_type::is_divisible_in_proportion>::type *);
-    template <typename range_type> static no& decide(...);
-public:
-    // equals to 'true' if and only if static const variable 'is_divisible_in_proportion' of template parameter
-    // initialized with the value of 'true'
-    static const bool value = (sizeof(decide<Range>(0)) == sizeof(yes));
-};
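// An illustrative sketch of how the detector above is meant to be used
// ('MyRange' is a hypothetical user-defined range, not part of the header).
// Ranges opt in to proportional splitting by defining the static constant;
// the SFINAE overload of decide() then picks the 'yes' branch at compile time:
//
//     struct MyRange {
//         static const bool is_divisible_in_proportion = true;
//         bool is_divisible() const;
//         // ... usual Range requirements: copy ctor, splitting ctor, etc.
//     };
//
//     // true for MyRange; false for a range that omits the constant:
//     const bool proportional = is_range_divisible_in_proportion<MyRange>::value;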
-
-//! Provides default methods for affinity (adaptive) partition objects.
-class affinity_partition_type : public adaptive_partition_type_base<affinity_partition_type> {
-    static const unsigned factor_power = 4;
-    static const unsigned factor = 1<<factor_power; // number of affinity array slots per task
-    enum { start, run, pass } my_delay;
-#ifdef __TBB_USE_MACHINE_TIME_STAMPS
-    machine_tsc_t my_dst_tsc;
-#endif
-    affinity_id* my_array;
-    size_t my_begin;
-public:
-    affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& ap )
-        : adaptive_partition_type_base<affinity_partition_type>(),
-        my_delay(start)
-#ifdef __TBB_USE_MACHINE_TIME_STAMPS
-        , my_dst_tsc(0)
-#endif
-    {
-        __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" );
-        my_divisor *= factor;
-        ap.resize(factor);
-        my_array = ap.my_array;
-        my_begin = 0;
-        my_max_depth = factor_power + 1; // the first factor_power ranges will be spawned, and >=1 ranges should be left
-        __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 );
-    }
-    affinity_partition_type(affinity_partition_type& p, split)
-        : adaptive_partition_type_base<affinity_partition_type>(p, split()),
-        my_delay(pass),
-#ifdef __TBB_USE_MACHINE_TIME_STAMPS
-        my_dst_tsc(0),
-#endif
-        my_array(p.my_array) {
-        // the sum of the divisors represents original value of p.my_divisor before split
-        __TBB_ASSERT(my_divisor + p.my_divisor <= factor, NULL);
-        my_begin = p.my_begin + p.my_divisor;
-    }
-    affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj)
-        : adaptive_partition_type_base<affinity_partition_type>(p, split_obj),
-        my_delay(start),
-#ifdef __TBB_USE_MACHINE_TIME_STAMPS
-        my_dst_tsc(0),
-#endif
-        my_array(p.my_array) {
-        size_t total_divisor = my_divisor + p.my_divisor;
-        __TBB_ASSERT(total_divisor % factor == 0, NULL);
-        my_divisor = (my_divisor + factor/2) & (0u - factor);
-        if (!my_divisor)
-            my_divisor = factor;
-        else if (my_divisor == total_divisor)
-            my_divisor = total_divisor - factor;
-        p.my_divisor = total_divisor - my_divisor;
-        __TBB_ASSERT(my_divisor && p.my_divisor, NULL);
-        my_begin = p.my_begin + p.my_divisor;
-    }
-    void set_affinity( task &t ) {
-        if( my_divisor ) {
-            if( !my_array[my_begin] ) {
-                // TODO: consider code reuse for static_paritioner
-                my_array[my_begin] = affinity_id(my_begin / factor + 1);
-            }
-            t.set_affinity( my_array[my_begin] );
-        }
-    }
-    void note_affinity( task::affinity_id id ) {
-        if( my_divisor )
-            my_array[my_begin] = id;
-    }
-    bool check_for_demand( task &t ) {
-        if( pass == my_delay ) {
-            if( my_divisor > 1 ) // produce affinitized tasks while they have slot in array
-                return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more
-            else if( my_divisor && my_max_depth ) { // make balancing task
-                my_divisor = 0; // once for each task; depth will be decreased in align_depth()
-                return true;
-            }
-            else if( flag_task::is_peer_stolen(t) ) {
-                my_max_depth += __TBB_DEMAND_DEPTH_ADD;
-                return true;
-            }
-        } else if( start == my_delay ) {
-#ifndef __TBB_USE_MACHINE_TIME_STAMPS
-            my_delay = pass;
-#else
-            my_dst_tsc = __TBB_machine_time_stamp() + __TBB_task_duration();
-            my_delay = run;
-        } else if( run == my_delay ) {
-            if( __TBB_machine_time_stamp() < my_dst_tsc ) {
-                __TBB_ASSERT(my_max_depth > 0, NULL);
-                return false;
-            }
-            my_delay = pass;
-            return true;
-#endif // __TBB_USE_MACHINE_TIME_STAMPS
-        }
-        return false;
-    }
-    bool is_divisible() { // part of old should_execute_range()
-        return my_divisor > factor;
-    }
-
-#if _MSC_VER && !defined(__INTEL_COMPILER)
-    // Suppress "conditional expression is constant" warning.
- #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - template - split_type get_split() { - if (is_range_divisible_in_proportion::value) { - size_t size = my_divisor / factor; -#if __TBB_NONUNIFORM_TASK_CREATION - size_t right = (size + 2) / 3; -#else - size_t right = size / 2; -#endif - size_t left = size - right; - return split_type(left, right); - } else { - return split_type(1, 1); - } - } -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back - - static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; -}; - -class auto_partition_type: public adaptive_partition_type_base { -public: - auto_partition_type( const auto_partitioner& ) { - my_divisor *= __TBB_INITIAL_CHUNKS; - } - auto_partition_type( auto_partition_type& src, split) - : adaptive_partition_type_base(src, split()) {} - - bool is_divisible() { // part of old should_execute_range() - if( my_divisor > 1 ) return true; - if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead - // keep same fragmentation while splitting for the local task pool - my_max_depth--; - my_divisor = 0; // decrease max_depth once per task - return true; - } else return false; - } - bool check_for_demand(task &t) { - if( flag_task::is_peer_stolen(t) ) { - my_max_depth += __TBB_DEMAND_DEPTH_ADD; - return true; - } else return false; - } - - static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; -}; - -class simple_partition_type: public partition_type_base { -public: - simple_partition_type( const simple_partitioner& ) {} - simple_partition_type( const simple_partition_type&, split ) {} - //! simplified algorithm - template - void execute(StartType &start, Range &range) { - split_type split_obj = split(); // start.offer_work accepts split_type as reference - while( range.is_divisible() ) - start.offer_work( split_obj ); - start.run_body( range ); - } - //static const unsigned range_pool_size = 1; - not necessary because execute() is overridden -}; - -//! Backward-compatible partition for auto and affinity partition objects. -class old_auto_partition_type: public tbb::internal::partition_type_base { - size_t num_chunks; - static const size_t VICTIM_CHUNKS = 4; -public: - bool should_execute_range(const task &t) { - if( num_chunks friend class serial::interface7::start_for; - template friend class interface7::internal::start_for; - template friend class interface7::internal::start_reduce; - template friend class internal::start_scan; - // backward compatibility - class partition_type: public internal::partition_type_base { - public: - bool should_execute_range(const task& ) {return false;} - partition_type( const simple_partitioner& ) {} - partition_type( const partition_type&, split ) {} - }; - // new implementation just extends existing interface - typedef interface7::internal::simple_partition_type task_partition_type; - - // TODO: consider to make split_type public - typedef interface7::internal::simple_partition_type::split_type split_type; -}; - -//! An auto partitioner -/** The range is initial divided into several large chunks. - Chunks are further subdivided into smaller pieces if demand detected and they are divisible. 
- @ingroup algorithms */ -class auto_partitioner { -public: - auto_partitioner() {} - -private: - template friend class serial::interface7::start_for; - template friend class interface7::internal::start_for; - template friend class interface7::internal::start_reduce; - template friend class internal::start_scan; - // backward compatibility - typedef interface7::internal::old_auto_partition_type partition_type; - // new implementation just extends existing interface - typedef interface7::internal::auto_partition_type task_partition_type; - - // TODO: consider to make split_type public - typedef interface7::internal::auto_partition_type::split_type split_type; -}; - -//! An affinity partitioner -class affinity_partitioner: internal::affinity_partitioner_base_v3 { -public: - affinity_partitioner() {} - -private: - template friend class serial::interface7::start_for; - template friend class interface7::internal::start_for; - template friend class interface7::internal::start_reduce; - template friend class internal::start_scan; - // backward compatibility - for parallel_scan only - typedef interface7::internal::old_auto_partition_type partition_type; - // new implementation just extends existing interface - typedef interface7::internal::affinity_partition_type task_partition_type; - - // TODO: consider to make split_type public - typedef interface7::internal::affinity_partition_type::split_type split_type; -}; - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4244 is back -#undef __TBB_INITIAL_CHUNKS -#undef __TBB_RANGE_POOL_CAPACITY -#undef __TBB_INIT_DEPTH -#endif /* __TBB_partitioner_H */ +#include "../oneapi/tbb/partitioner.h" diff --git a/src/tbb/include/tbb/pipeline.h b/src/tbb/include/tbb/pipeline.h deleted file mode 100644 index 3a1d3d899..000000000 --- a/src/tbb/include/tbb/pipeline.h +++ /dev/null @@ -1,664 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_pipeline_H -#define __TBB_pipeline_H - -#include "atomic.h" -#include "task.h" -#include "tbb_allocator.h" -#include - -#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT || __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT -#include -#endif - -namespace tbb { - -class pipeline; -class filter; - -//! @cond INTERNAL -namespace internal { - -// The argument for PIPELINE_VERSION should be an integer between 2 and 9 -#define __TBB_PIPELINE_VERSION(x) ((unsigned char)(x-2)<<1) - -typedef unsigned long Token; -typedef long tokendiff_t; -class stage_task; -class input_buffer; -class pipeline_root_task; -class pipeline_cleaner; - -} // namespace internal - -namespace interface6 { - template class filter_t; - - namespace internal { - class pipeline_proxy; - } -} - -//! @endcond - -//! A stage in a pipeline. -/** @ingroup algorithms */ -class filter: internal::no_copy { -private: - //! Value used to mark "not in pipeline" - static filter* not_in_pipeline() {return reinterpret_cast(intptr_t(-1));} -protected: - //! The lowest bit 0 is for parallel vs. serial - static const unsigned char filter_is_serial = 0x1; - - //! 4th bit distinguishes ordered vs unordered filters. - /** The bit was not set for parallel filters in TBB 2.1 and earlier, - but is_ordered() function always treats parallel filters as out of order. */ - static const unsigned char filter_is_out_of_order = 0x1<<4; - - //! 5th bit distinguishes thread-bound and regular filters. - static const unsigned char filter_is_bound = 0x1<<5; - - //! 6th bit marks input filters emitting small objects - static const unsigned char filter_may_emit_null = 0x1<<6; - - //! 7th bit defines exception propagation mode expected by the application. - static const unsigned char exact_exception_propagation = -#if TBB_USE_CAPTURED_EXCEPTION - 0x0; -#else - 0x1<<7; -#endif /* TBB_USE_CAPTURED_EXCEPTION */ - - static const unsigned char current_version = __TBB_PIPELINE_VERSION(5); - static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version -public: - enum mode { - //! processes multiple items in parallel and in no particular order - parallel = current_version | filter_is_out_of_order, - //! processes items one at a time; all such filters process items in the same order - serial_in_order = current_version | filter_is_serial, - //! processes items one at a time and in no particular order - serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order, - //! @deprecated use serial_in_order instead - serial = serial_in_order - }; -protected: - filter( bool is_serial_ ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast((is_serial_ ? serial : parallel) | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - - filter( mode filter_mode ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast(filter_mode | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - - // signal end-of-input for concrete_filters - void __TBB_EXPORTED_METHOD set_end_of_input(); - -public: - //! True if filter is serial. - bool is_serial() const { - return bool( my_filter_mode & filter_is_serial ); - } - - //! True if filter must receive stream in order. 
- bool is_ordered() const { - return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial; - } - - //! True if filter is thread-bound. - bool is_bound() const { - return ( my_filter_mode & filter_is_bound )==filter_is_bound; - } - - //! true if an input filter can emit null - bool object_may_be_null() { - return ( my_filter_mode & filter_may_emit_null ) == filter_may_emit_null; - } - - //! Operate on an item from the input stream, and return item for output stream. - /** Returns NULL if filter is a sink. */ - virtual void* operator()( void* item ) = 0; - - //! Destroy filter. - /** If the filter was added to a pipeline, the pipeline must be destroyed first. */ - virtual __TBB_EXPORTED_METHOD ~filter(); - -#if __TBB_TASK_GROUP_CONTEXT - //! Destroys item if pipeline was cancelled. - /** Required to prevent memory leaks. - Note it can be called concurrently even for serial filters.*/ - virtual void finalize( void* /*item*/ ) {}; -#endif - -private: - //! Pointer to next filter in the pipeline. - filter* next_filter_in_pipeline; - - //! has the filter not yet processed all the tokens it will ever see? - // (pipeline has not yet reached end_of_input or this filter has not yet - // seen the last token produced by input_filter) - bool has_more_work(); - - //! Buffer for incoming tokens, or NULL if not required. - /** The buffer is required if the filter is serial or follows a thread-bound one. */ - internal::input_buffer* my_input_buffer; - - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class pipeline; - friend class thread_bound_filter; - - //! Storage for filter mode and dynamically checked implementation version. - const unsigned char my_filter_mode; - - //! Pointer to previous filter in the pipeline. - filter* prev_filter_in_pipeline; - - //! Pointer to the pipeline. - pipeline* my_pipeline; - - //! Pointer to the next "segment" of filters, or NULL if not required. - /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */ - filter* next_segment; -}; - -//! A stage in a pipeline served by a user thread. -/** @ingroup algorithms */ -class thread_bound_filter: public filter { -public: - enum result_type { - // item was processed - success, - // item is currently not available - item_not_available, - // there are no more items to process - end_of_stream - }; -protected: - thread_bound_filter(mode filter_mode): - filter(static_cast(filter_mode | filter::filter_is_bound)) - { - __TBB_ASSERT(filter_mode & filter::filter_is_serial, "thread-bound filters must be serial"); - } -public: - //! If a data item is available, invoke operator() on that item. - /** This interface is non-blocking. - Returns 'success' if an item was processed. - Returns 'item_not_available' if no item can be processed now - but more may arrive in the future, or if token limit is reached. - Returns 'end_of_stream' if there are no more items to process. */ - result_type __TBB_EXPORTED_METHOD try_process_item(); - - //! Wait until a data item becomes available, and invoke operator() on that item. - /** This interface is blocking. - Returns 'success' if an item was processed. - Returns 'end_of_stream' if there are no more items to process. - Never returns 'item_not_available', as it blocks until another return condition applies. */ - result_type __TBB_EXPORTED_METHOD process_item(); - -private: - //! Internal routine for item processing - result_type internal_process_item(bool is_blocking); -}; - -//! 
A processing pipeline that applies filters to items. -/** @ingroup algorithms */ -class pipeline { -public: - //! Construct empty pipeline. - __TBB_EXPORTED_METHOD pipeline(); - - /** Though the current implementation declares the destructor virtual, do not rely on this - detail. The virtualness is deprecated and may disappear in future versions of TBB. */ - virtual __TBB_EXPORTED_METHOD ~pipeline(); - - //! Add filter to end of pipeline. - void __TBB_EXPORTED_METHOD add_filter( filter& filter_ ); - - //! Run the pipeline to completion. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Run the pipeline to completion with user-supplied context. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context ); -#endif - - //! Remove all filters from the pipeline. - void __TBB_EXPORTED_METHOD clear(); - -private: - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class filter; - friend class thread_bound_filter; - friend class internal::pipeline_cleaner; - friend class tbb::interface6::internal::pipeline_proxy; - - //! Pointer to first filter in the pipeline. - filter* filter_list; - - //! Pointer to location where address of next filter to be added should be stored. - filter* filter_end; - - //! task who's reference count is used to determine when all stages are done. - task* end_counter; - - //! Number of idle tokens waiting for input stage. - atomic input_tokens; - - //! Global counter of tokens - atomic token_counter; - - //! False until fetch_input returns NULL. - bool end_of_input; - - //! True if the pipeline contains a thread-bound filter; false otherwise. - bool has_thread_bound_filters; - - //! Remove filter from pipeline. - void remove_filter( filter& filter_ ); - - //! Not used, but retained to satisfy old export files. - void __TBB_EXPORTED_METHOD inject_token( task& self ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Does clean up if pipeline is cancelled or exception occurred - void clear_filters(); -#endif -}; - -//------------------------------------------------------------------------ -// Support for lambda-friendly parallel_pipeline interface -//------------------------------------------------------------------------ - -namespace interface6 { - -namespace internal { - template class concrete_filter; -} - -//! input_filter control to signal end-of-input for parallel_pipeline -class flow_control { - bool is_pipeline_stopped; - flow_control() { is_pipeline_stopped = false; } - template friend class internal::concrete_filter; -public: - void stop() { is_pipeline_stopped = true; } -}; - -//! @cond INTERNAL -namespace internal { - -template struct tbb_large_object {enum { value = sizeof(T) > sizeof(void *) }; }; - -// Obtain type properties in one or another way -#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT -template struct tbb_trivially_copyable { enum { value = std::is_trivially_copyable::value }; }; -#elif __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT -template struct tbb_trivially_copyable { enum { value = std::has_trivial_copy_constructor::value }; }; -#else -// Explicitly list the types we wish to be placed as-is in the pipeline input_buffers. 
-template<typename T> struct tbb_trivially_copyable { enum { value = false }; };
-template<typename T> struct tbb_trivially_copyable <T*> { enum { value = true }; };
-template<> struct tbb_trivially_copyable <short> { enum { value = true }; };
-template<> struct tbb_trivially_copyable <unsigned short> { enum { value = true }; };
-template<> struct tbb_trivially_copyable <int> { enum { value = !tbb_large_object<int>::value }; };
-template<> struct tbb_trivially_copyable <unsigned int> { enum { value = !tbb_large_object<unsigned int>::value }; };
-template<> struct tbb_trivially_copyable <long> { enum { value = !tbb_large_object<long>::value }; };
-template<> struct tbb_trivially_copyable <unsigned long> { enum { value = !tbb_large_object<unsigned long>::value }; };
-template<> struct tbb_trivially_copyable <float> { enum { value = !tbb_large_object<float>::value }; };
-template<> struct tbb_trivially_copyable <double> { enum { value = !tbb_large_object<double>::value }; };
-#endif // Obtaining type properties
-
-template<typename T> struct is_large_object {enum { value = tbb_large_object<T>::value || !tbb_trivially_copyable<T>::value }; };
-
-template<typename T, bool> class token_helper;
-
-// large object helper (uses tbb_allocator)
-template<typename T>
-class token_helper<T, true> {
-  public:
-    typedef typename tbb::tbb_allocator<T> allocator;
-    typedef T* pointer;
-    typedef T value_type;
-    static pointer create_token(const value_type & source) {
-        pointer output_t = allocator().allocate(1);
-        return new (output_t) T(source);
-    }
-    static value_type & token(pointer & t) { return *t;}
-    static void * cast_to_void_ptr(pointer ref) { return (void *) ref; }
-    static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; }
-    static void destroy_token(pointer token) {
-        allocator().destroy(token);
-        allocator().deallocate(token,1);
-    }
-};
-
-// pointer specialization
-template<typename T>
-class token_helper<T*, false> {
-  public:
-    typedef T* pointer;
-    typedef T* value_type;
-    static pointer create_token(const value_type & source) { return source; }
-    static value_type & token(pointer & t) { return t;}
-    static void * cast_to_void_ptr(pointer ref) { return (void *)ref; }
-    static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; }
-    static void destroy_token( pointer /*token*/) {}
-};
-
-// small object specialization (converts void* to the correct type, passes objects directly.)
-template<typename T>
-class token_helper<T, false> {
-    typedef union {
-        T actual_value;
-        void * void_overlay;
-    } type_to_void_ptr_map;
-  public:
-    typedef T pointer;  // not really a pointer in this case.
-    typedef T value_type;
-    static pointer create_token(const value_type & source) {
-        return source; }
-    static value_type & token(pointer & t) { return t;}
-    static void * cast_to_void_ptr(pointer ref) {
-        type_to_void_ptr_map mymap;
-        mymap.void_overlay = NULL;
-        mymap.actual_value = ref;
-        return mymap.void_overlay;
-    }
-    static pointer cast_from_void_ptr(void * ref) {
-        type_to_void_ptr_map mymap;
-        mymap.void_overlay = ref;
-        return mymap.actual_value;
-    }
-    static void destroy_token( pointer /*token*/) {}
-};
-
-template<typename T, typename U, typename Body>
-class concrete_filter: public tbb::filter {
-    const Body& my_body;
-    typedef token_helper<T, is_large_object<T>::value > t_helper;
-    typedef typename t_helper::pointer t_pointer;
-    typedef token_helper<U, is_large_object<U>::value > u_helper;
-    typedef typename u_helper::pointer u_pointer;
-
-    /*override*/ void* operator()(void* input) {
-        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
-        u_pointer output_u = u_helper::create_token(my_body(t_helper::token(temp_input)));
-        t_helper::destroy_token(temp_input);
-        return u_helper::cast_to_void_ptr(output_u);
-    }
-
-    /*override*/ void finalize(void * input) {
-        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
-        t_helper::destroy_token(temp_input);
-    }
-
-public:
-    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
-};
-
-// input
-template<typename U, typename Body>
-class concrete_filter<void,U,Body>: public filter {
-    const Body& my_body;
-    typedef token_helper<U, is_large_object<U>::value > u_helper;
-    typedef typename u_helper::pointer u_pointer;
-
-    /*override*/void* operator()(void*) {
-        flow_control control;
-        u_pointer output_u = u_helper::create_token(my_body(control));
-        if(control.is_pipeline_stopped) {
-            u_helper::destroy_token(output_u);
-            set_end_of_input();
-            return NULL;
-        }
-        return u_helper::cast_to_void_ptr(output_u);
-    }
-
-public:
-    concrete_filter(tbb::filter::mode filter_mode, const Body& body) :
-        filter(static_cast<tbb::filter::mode>(filter_mode | filter_may_emit_null)),
-        my_body(body)
-    {}
-};
-
-template<typename T, typename Body>
-class concrete_filter<T,void,Body>: public filter {
-    const Body& my_body;
-    typedef token_helper<T, is_large_object<T>::value > t_helper;
-    typedef typename t_helper::pointer t_pointer;
-
-    /*override*/ void* operator()(void* input) {
-        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
-        my_body(t_helper::token(temp_input));
-        t_helper::destroy_token(temp_input);
-        return NULL;
-    }
-    /*override*/ void finalize(void* input) {
-        t_pointer temp_input = t_helper::cast_from_void_ptr(input);
-        t_helper::destroy_token(temp_input);
-    }
-
-public:
-    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
-};
-
-template<typename Body>
-class concrete_filter<void,void,Body>: public filter {
-    const Body& my_body;
-
-    /** Override privately because it is always called virtually */
-    /*override*/ void* operator()(void*) {
-        flow_control control;
-        my_body(control);
-        void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1;
-        return output;
-    }
-public:
-    concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
-};
-
-//! The class that represents an object of the pipeline for parallel_pipeline().
-/** It primarily serves as RAII class that deletes heap-allocated filter instances. */
-class pipeline_proxy {
-    tbb::pipeline my_pipe;
-public:
-    pipeline_proxy( const filter_t<void,void>& filter_chain );
-    ~pipeline_proxy() {
-        while( filter* f = my_pipe.filter_list )
-            delete f; // filter destructor removes it from the pipeline
-    }
-    tbb::pipeline* operator->() { return &my_pipe; }
-};
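// A minimal usage sketch for the classic (non-lambda) API that this deleted
// header provided (illustrative only; 'MyInputFilter' and 'MyOutputFilter'
// are hypothetical subclasses of tbb::filter that implement operator()):
//
//     tbb::pipeline p;
//     MyInputFilter  in_f;   // e.g. constructed as filter::serial_in_order
//     MyOutputFilter out_f;
//     p.add_filter( in_f );
//     p.add_filter( out_f );
//     p.run( /*max_number_of_live_tokens=*/ 8 );
//     p.clear();             // filters must outlive the pipeline stages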
-//! Abstract base class that represents a node in a parse tree underlying a filter_t.
-/** These nodes are always heap-allocated and can be shared by filter_t objects. */
-class filter_node: tbb::internal::no_copy {
-    /** Count must be atomic because it is hidden state for user, but might be shared by threads. */
-    tbb::atomic<intptr_t> ref_count;
-protected:
-    filter_node() {
-        ref_count = 0;
-#ifdef __TBB_TEST_FILTER_NODE_COUNT
-        ++(__TBB_TEST_FILTER_NODE_COUNT);
-#endif
-    }
-public:
-    //! Add concrete_filter to pipeline
-    virtual void add_to( pipeline& ) = 0;
-    //! Increment reference count
-    void add_ref() {++ref_count;}
-    //! Decrement reference count and delete if it becomes zero.
-    void remove_ref() {
-        __TBB_ASSERT(ref_count>0,"ref_count underflow");
-        if( --ref_count==0 )
-            delete this;
-    }
-    virtual ~filter_node() {
-#ifdef __TBB_TEST_FILTER_NODE_COUNT
-        --(__TBB_TEST_FILTER_NODE_COUNT);
-#endif
-    }
-};
-
-//! Node in parse tree representing result of make_filter.
-template<typename T, typename U, typename Body>
-class filter_node_leaf: public filter_node {
-    const tbb::filter::mode mode;
-    const Body body;
-    /*override*/void add_to( pipeline& p ) {
-        concrete_filter<T,U,Body>* f = new concrete_filter<T,U,Body>(mode,body);
-        p.add_filter( *f );
-    }
-public:
-    filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {}
-};
-
-//! Node in parse tree representing join of two filters.
-class filter_node_join: public filter_node {
-    friend class filter_node; // to suppress GCC 3.2 warnings
-    filter_node& left;
-    filter_node& right;
-    /*override*/~filter_node_join() {
-        left.remove_ref();
-        right.remove_ref();
-    }
-    /*override*/void add_to( pipeline& p ) {
-        left.add_to(p);
-        right.add_to(p);
-    }
-public:
-    filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) {
-        left.add_ref();
-        right.add_ref();
-    }
-};
-
-} // namespace internal
-//! @endcond
-
-//! Create a filter to participate in parallel_pipeline
-template<typename T, typename U, typename Body>
-filter_t<T,U> make_filter(tbb::filter::mode mode, const Body& body) {
-    return new internal::filter_node_leaf<T,U,Body>(mode, body);
-}
-
-template<typename T, typename V, typename U>
-filter_t<T,U> operator& (const filter_t<T,V>& left, const filter_t<V,U>& right) {
-    __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'");
-    __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'");
-    return new internal::filter_node_join(*left.root,*right.root);
-}
-
-//! Class representing a chain of type-safe pipeline filters
-template<typename T, typename U>
-class filter_t {
-    typedef internal::filter_node filter_node;
-    filter_node* root;
-    filter_t( filter_node* root_ ) : root(root_) {
-        root->add_ref();
-    }
-    friend class internal::pipeline_proxy;
-    template<typename T_, typename U_, typename Body>
-    friend filter_t<T_,U_> make_filter(tbb::filter::mode, const Body& );
-    template<typename T_, typename V_, typename U_>
-    friend filter_t<T_,U_> operator& (const filter_t<T_,V_>& , const filter_t<V_,U_>& );
-public:
-    filter_t() : root(NULL) {}
-    filter_t( const filter_t<T,U>& rhs ) : root(rhs.root) {
-        if( root ) root->add_ref();
-    }
-    template<typename Body>
-    filter_t( tbb::filter::mode mode, const Body& body ) :
-        root( new internal::filter_node_leaf<T,U,Body>(mode, body) ) {
-        root->add_ref();
-    }
-
-    void operator=( const filter_t<T,U>& rhs ) {
-        // Order of operations below carefully chosen so that reference counts remain correct
-        // in unlikely event that remove_ref throws exception.
-        filter_node* old = root;
-        root = rhs.root;
-        if( root ) root->add_ref();
-        if( old ) old->remove_ref();
-    }
-    ~filter_t() {
-        if( root ) root->remove_ref();
-    }
-    void clear() {
-        // Like operator= with filter_t() on right side.
-        if( root ) {
-            filter_node* old = root;
-            root = NULL;
-            old->remove_ref();
-        }
-    }
-};
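// A minimal usage sketch for the lambda-friendly API above (illustrative
// only; 'in' and the numeric payload are hypothetical, and error handling
// is omitted). Stages are composed with operator& and run with a bound on
// in-flight tokens; the input stage calls flow_control::stop() to end the run:
//
//     float sum_of_squares( std::istream& in ) {
//         float sum = 0;
//         tbb::parallel_pipeline( /*max_number_of_live_tokens=*/ 8,
//             tbb::make_filter<void,float>(
//                 tbb::filter::serial_in_order,
//                 [&]( tbb::flow_control& fc ) -> float {
//                     float x;
//                     if( in >> x ) return x;
//                     fc.stop();     // no more input: shut the pipeline down
//                     return 0.0f;
//                 } )
//           & tbb::make_filter<float,float>(
//                 tbb::filter::parallel,
//                 []( float x ) { return x*x; } )
//           & tbb::make_filter<float,void>(
//                 tbb::filter::serial_in_order,
//                 [&]( float x2 ) { sum += x2; } ) );
//         return sum;
//     }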
-
-inline internal::pipeline_proxy::pipeline_proxy( const filter_t<void,void>& filter_chain ) : my_pipe() {
-    __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t"  );
-    filter_chain.root->add_to(my_pipe);
-}
-
-inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain
-#if __TBB_TASK_GROUP_CONTEXT
-    , tbb::task_group_context& context
-#endif
-    ) {
-    internal::pipeline_proxy pipe(filter_chain);
-    // tbb::pipeline::run() is called via the proxy
-    pipe->run(max_number_of_live_tokens
-#if __TBB_TASK_GROUP_CONTEXT
-              , context
-#endif
-    );
-}
-
-#if __TBB_TASK_GROUP_CONTEXT
-inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain) {
-    tbb::task_group_context context;
-    parallel_pipeline(max_number_of_live_tokens, filter_chain, context);
-}
-#endif // __TBB_TASK_GROUP_CONTEXT
-
-} // interface6
-
-using interface6::flow_control;
-using interface6::filter_t;
-using interface6::make_filter;
-using interface6::parallel_pipeline;
-
-} // tbb
-
-#endif /* __TBB_pipeline_H */
diff --git a/src/tbb/include/tbb/profiling.h b/src/tbb/include/tbb/profiling.h
new file mode 100644
index 000000000..c7cea9c59
--- /dev/null
+++ b/src/tbb/include/tbb/profiling.h
@@ -0,0 +1,17 @@
+/*
+    Copyright (c) 2005-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "../oneapi/tbb/profiling.h"
diff --git a/src/tbb/include/tbb/queuing_mutex.h b/src/tbb/include/tbb/queuing_mutex.h
index 7986b0b45..ad031e4eb 100644
--- a/src/tbb/include/tbb/queuing_mutex.h
+++ b/src/tbb/include/tbb/queuing_mutex.h
@@ -1,123 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
 
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.  You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
 
-    As a special exception, you may use this file as part of a free software library without
-    restriction.
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_queuing_mutex_H -#define __TBB_queuing_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Queuing mutex with local-only spinning. -/** @ingroup synchronization */ -class queuing_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired mutex. - queuing_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields to mean "no lock held". - void initialize() { - mutex = NULL; -#if TBB_USE_ASSERT - internal::poison_pointer(next); -#endif /* TBB_USE_ASSERT */ - } - - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_mutex& m ) { - initialize(); - acquire(m); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } + http://www.apache.org/licenses/LICENSE-2.0 - //! Acquire lock on given mutex. - void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m ); - - //! Acquire lock on given mutex if free (i.e. non-blocking) - bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m ); - - //! Release lock. - void __TBB_EXPORTED_METHOD release(); - - private: - //! The pointer to the mutex owned, or NULL if not holding a mutex. - queuing_mutex* mutex; - - //! The pointer to the next competitor for a mutex - scoped_lock *next; - - //! The local spin-wait variable - /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of - zero-initialization. Defining it as an entire word instead of - a byte seems to help performance slightly. */ - uintptr_t going; - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -#endif /* __TBB_queuing_mutex_H */ +#include "../oneapi/tbb/queuing_mutex.h" diff --git a/src/tbb/include/tbb/queuing_rw_mutex.h b/src/tbb/include/tbb/queuing_rw_mutex.h index 76df16290..203727ccc 100644 --- a/src/tbb/include/tbb/queuing_rw_mutex.h +++ b/src/tbb/include/tbb/queuing_rw_mutex.h @@ -1,163 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_queuing_rw_mutex_H -#define __TBB_queuing_rw_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Queuing reader-writer mutex with local-only spinning. -/** Adapted from Krieger, Stumm, et al. pseudocode at - http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93 - @ingroup synchronization */ -class queuing_rw_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired mutex. - queuing_rw_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL - ~queuing_rw_mutex() { -#if TBB_USE_ASSERT - __TBB_ASSERT( !q_tail, "destruction of an acquired mutex"); -#endif - } - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields to mean "no lock held". - void initialize() { - my_mutex = NULL; -#if TBB_USE_ASSERT - my_state = 0xFF; // Set to invalid state - internal::poison_pointer(my_next); - internal::poison_pointer(my_prev); -#endif /* TBB_USE_ASSERT */ - } - - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. 
*/ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_rw_mutex& m, bool write=true ) { - initialize(); - acquire(m,write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( queuing_rw_mutex& m, bool write=true ); - - //! Acquire lock on given mutex if free (i.e. non-blocking) - bool try_acquire( queuing_rw_mutex& m, bool write=true ); - - //! Release lock. - void release(); - - //! Upgrade reader to become a writer. - /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer(); + http://www.apache.org/licenses/LICENSE-2.0 - //! Downgrade writer to become a reader. - bool downgrade_to_reader(); - - private: - //! The pointer to the mutex owned, or NULL if not holding a mutex. - queuing_rw_mutex* my_mutex; - - //! The pointer to the previous and next competitors for a mutex - scoped_lock *__TBB_atomic my_prev, *__TBB_atomic my_next; - - typedef unsigned char state_t; - - //! State of the request: reader, writer, active reader, other service states - atomic my_state; - - //! The local spin-wait variable - /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */ - unsigned char __TBB_atomic my_going; - - //! A tiny internal lock - unsigned char my_internal_lock; - - //! Acquire the internal lock - void acquire_internal_lock(); - - //! Try to acquire the internal lock - /** Returns true if lock was successfully acquired. */ - bool try_acquire_internal_lock(); - - //! Release the internal lock - void release_internal_lock(); - - //! Wait for internal lock to be released - void wait_for_release_of_internal_lock(); - - //! A helper function - void unblock_or_wait_on_internal_lock( uintptr_t ); - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex) - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_queuing_rw_mutex_H */ +#include "../oneapi/tbb/queuing_rw_mutex.h" diff --git a/src/tbb/include/tbb/reader_writer_lock.h b/src/tbb/include/tbb/reader_writer_lock.h deleted file mode 100644 index 60d24f3dc..000000000 --- a/src/tbb/include/tbb/reader_writer_lock.h +++ /dev/null @@ -1,232 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_reader_writer_lock_H -#define __TBB_reader_writer_lock_H - -#include "tbb_thread.h" -#include "tbb_allocator.h" -#include "atomic.h" - -namespace tbb { -namespace interface5 { -//! Writer-preference reader-writer lock with local-only spinning on readers. -/** Loosely adapted from Mellor-Crummey and Scott pseudocode at - http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp - @ingroup synchronization */ - class reader_writer_lock : tbb::internal::no_copy { - public: - friend class scoped_lock; - friend class scoped_lock_read; - //! Status type for nodes associated with lock instances - /** waiting_nonblocking: the wait state for nonblocking lock - instances; for writes, these transition straight to active - states; for reads, these are unused. - - waiting: the start and spin state for all lock instances; these will - transition to active state when appropriate. Non-blocking write locks - transition from this state to waiting_nonblocking immediately. - - active: the active state means that the lock instance holds - the lock; it will transition to invalid state during node deletion - - invalid: the end state for all nodes; this is set in the - destructor so if we encounter this state, we are looking at - memory that has already been freed - - The state diagrams below describe the status transitions. - Single arrows indicate that the thread that owns the node is - responsible for the transition; double arrows indicate that - any thread could make the transition. - - State diagram for scoped_lock status: - - waiting ----------> waiting_nonblocking - | _____________/ | - V V V - active -----------------> invalid - - State diagram for scoped_lock_read status: - - waiting - | - V - active ----------------->invalid - - */ - enum status_t { waiting_nonblocking, waiting, active, invalid }; - - //! Constructs a new reader_writer_lock - reader_writer_lock() { - internal_construct(); - } - - //! Destructs a reader_writer_lock object - ~reader_writer_lock() { - internal_destroy(); - } - - //! The scoped lock pattern for write locks - /** Scoped locks help avoid the common problem of forgetting to release the lock. - This type also serves as the node for queuing locks. */ - class scoped_lock : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire write lock on the passed-in lock - scoped_lock(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the write lock - ~scoped_lock() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! 
The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock* next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock that is not holding lock - scoped_lock(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! The scoped lock pattern for read locks - class scoped_lock_read : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire read lock on the passed-in lock - scoped_lock_read(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the read lock - ~scoped_lock_read() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock_read *next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock_read that is not holding lock - scoped_lock_read(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! Acquires the reader_writer_lock for write. - /** If the lock is currently held in write mode by another - context, the writer will block by spinning on a local - variable. Exceptions thrown: improper_lock The context tries - to acquire a reader_writer_lock that it already has write - ownership of.*/ - void __TBB_EXPORTED_METHOD lock(); - - //! Tries to acquire the reader_writer_lock for write. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. If the lock - is already held by this acquiring context, try_lock() returns - false. */ - bool __TBB_EXPORTED_METHOD try_lock(); - - //! Acquires the reader_writer_lock for read. - /** If the lock is currently held by a writer, this reader will - block and wait until the writers are done. Exceptions thrown: - improper_lock The context tries to acquire a - reader_writer_lock that it already has write ownership of. */ - void __TBB_EXPORTED_METHOD lock_read(); - - //! Tries to acquire the reader_writer_lock for read. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. */ - bool __TBB_EXPORTED_METHOD try_lock_read(); - - //! Releases the reader_writer_lock - void __TBB_EXPORTED_METHOD unlock(); - - private: - void __TBB_EXPORTED_METHOD internal_construct(); - void __TBB_EXPORTED_METHOD internal_destroy(); - - //! Attempts to acquire write lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - bool start_write(scoped_lock *); - //! Sets writer_head to w and attempts to unblock - void set_next_writer(scoped_lock *w); - //! Relinquishes write lock to next waiting writer or group of readers - void end_write(scoped_lock *); - //! Checks if current thread holds write lock - bool is_current_writer(); - - //! Attempts to acquire read lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - void start_read(scoped_lock_read *); - //! Unblocks pending readers - void unblock_readers(); - //! 
Relinquishes read lock by decrementing counter; last reader wakes pending writer - void end_read(); - - //! The list of pending readers - atomic<scoped_lock_read*> reader_head; - //! The list of pending writers - atomic<scoped_lock*> writer_head; - //! The last node in the list of pending writers - atomic<scoped_lock*> writer_tail; - //! Writer that owns the mutex; tbb_thread::id() otherwise. - tbb_thread::id my_current_writer; - //! Status of mutex - atomic<uintptr_t> rdr_count_and_flags; // used with __TBB_AtomicOR, which assumes uintptr_t -}; - -} // namespace interface5 - -using interface5::reader_writer_lock; - -} // namespace tbb - -#endif /* __TBB_reader_writer_lock_H */ diff --git a/src/tbb/include/tbb/recursive_mutex.h b/src/tbb/include/tbb/recursive_mutex.h deleted file mode 100644 index 1e41d90a5..000000000 --- a/src/tbb/include/tbb/recursive_mutex.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_recursive_mutex_H -#define __TBB_recursive_mutex_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include <pthread.h> -#endif /* _WIN32||_WIN64 */ - -#include <new> -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { -//! Mutex that allows recursive mutex acquisition. -/** Mutex that allows recursive mutex acquisition. - @ingroup synchronization */ -class recursive_mutex : internal::mutex_copy_deprecated_and_disabled { -public: - //! Construct unacquired recursive_mutex.
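As an aside for readers of this removal: below is a minimal, hedged sketch of how the `tbb::reader_writer_lock` deleted above was typically used. The lock object, the counter, and both functions are illustrative and not part of this diff.

```cpp
// Illustrative only: guarding a shared value with the removed
// tbb::reader_writer_lock. 'counter' and both functions are hypothetical.
#include "tbb/reader_writer_lock.h"

static tbb::reader_writer_lock rw_lock;
static int counter = 0;

void increment() {
    // scoped_lock blocks until the write lock is held; released on scope exit
    tbb::reader_writer_lock::scoped_lock writer(rw_lock);
    ++counter;
}

int current() {
    // scoped_lock_read takes the read lock; multiple readers may proceed
    tbb::reader_writer_lock::scoped_lock_read reader(rw_lock);
    return counter;
}
```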
- recursive_mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSectionEx(&impl, 4000, 0); - #else - pthread_mutexattr_t mtx_attr; - int error_code = pthread_mutexattr_init( &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); - - pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); - error_code = pthread_mutex_init( &impl, &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); - - pthread_mutexattr_destroy( &mtx_attr ); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~recursive_mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - public: - //! Construct lock that has not acquired a recursive_mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire( mutex ); -#else - my_mutex = &mutex; - mutex.lock(); -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given recursive_mutex. - bool try_acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire( mutex ); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release(); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current recursive_mutex to work - recursive_mutex* my_mutex; - - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m ); - - //! All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class recursive_mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = false; - - // C++0x compatibility interface - - //! Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - int error_code = pthread_mutex_lock(&impl); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_lock failed"); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise.
*/ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - } state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) - -} // namespace tbb - -#endif /* __TBB_recursive_mutex_H */ diff --git a/src/tbb/include/tbb/runtime_loader.h b/src/tbb/include/tbb/runtime_loader.h deleted file mode 100644 index f5cd412ec..000000000 --- a/src/tbb/include/tbb/runtime_loader.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_runtime_loader_H -#define __TBB_runtime_loader_H - -#if ! TBB_PREVIEW_RUNTIME_LOADER - #error Set TBB_PREVIEW_RUNTIME_LOADER to include runtime_loader.h -#endif - -#include "tbb_stddef.h" -#include <climits> - -#if _MSC_VER - #if !
__TBB_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment( linker, "/nodefaultlib:tbb_debug.lib" ) - #pragma comment( linker, "/defaultlib:tbbproxy_debug.lib" ) - #else - #pragma comment( linker, "/nodefaultlib:tbb.lib" ) - #pragma comment( linker, "/defaultlib:tbbproxy.lib" ) - #endif - #endif -#endif - -namespace tbb { - -namespace interface6 { - -//! Load TBB at runtime. -/*! - -\b Usage: - -In source code: - -\code -#include "tbb/runtime_loader.h" - -char const * path[] = { "/lib/ia32", NULL }; -tbb::runtime_loader loader( path ); - -// Now use TBB. -\endcode - -Link with \c tbbproxy.lib (or \c libtbbproxy.a) instead of \c tbb.lib (\c libtbb.dylib, -\c libtbb.so). - -TBB library will be loaded at runtime from \c /lib/ia32 directory. - -\b Attention: - -All \c runtime_loader objects (in the same module, i.e. exe or dll) share some global state. -The most noticeable piece of global state is loaded TBB library. -There are some implications: - - - Only one TBB library can be loaded per module. - - - If one object has already loaded TBB library, another object will not load TBB. - If the loaded TBB library is suitable for the second object, both will use TBB - cooperatively, otherwise the second object will report an error. - - - \c runtime_loader objects will not work (correctly) in parallel due to absence of - synchronization. - -*/ - -class runtime_loader : tbb::internal::no_copy { - - public: - - //! Error mode constants. - enum error_mode { - em_status, //!< Save status of operation and continue. - em_throw, //!< Throw an exception of tbb::runtime_loader::error_code type. - em_abort //!< Print message to \c stderr and call \c abort(). - }; // error_mode - - //! Error codes. - enum error_code { - ec_ok, //!< No errors. - ec_bad_call, //!< Invalid function call (e. g. load() called when TBB is already loaded). - ec_bad_arg, //!< Invalid argument passed. - ec_bad_lib, //!< Invalid library found (e. g. \c TBB_runtime_version symbol not found). - ec_bad_ver, //!< TBB found but version is not suitable. - ec_no_lib //!< No suitable TBB library found. - }; // error_code - - //! Initialize object but do not load TBB. - runtime_loader( error_mode mode = em_abort ); - - //! Initialize object and load TBB. - /*! - See load() for details. - - If error mode is \c em_status, call status() to check whether TBB was loaded or not. - */ - runtime_loader( - char const * path[], //!< List of directories to search TBB in. - int min_ver = TBB_INTERFACE_VERSION, //!< Minimal suitable version of TBB. - int max_ver = INT_MAX, //!< Maximal suitable version of TBB. - error_mode mode = em_abort //!< Error mode for this object. - ); - - //! Destroy object. - ~runtime_loader(); - - //! Load TBB. - /*! - The method searches the directories specified in \c path[] array for the TBB library. - When the library is found, it is loaded and its version is checked. If the version is - not suitable, the library is unloaded, and the search continues. - - \b Note: - - For security reasons, avoid using relative directory names. For example, never load - TBB from current (\c "."), parent (\c "..") or any other relative directory (like - \c "lib" ). Use only absolute directory names (e. g. "/usr/local/lib"). - - For the same security reasons, avoid using system default directories (\c "") on - Windows. (See http://www.microsoft.com/technet/security/advisory/2269637.mspx for - details.) - - Neglecting these rules may cause your program to execute 3-rd party malicious code. 
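Before the error codes are enumerated below, a hedged sketch of driving this preview loader in the em_status mode described above, using only the constructor and load() declarations shown here; the search directory is a placeholder.

```cpp
// Illustrative only: load TBB at runtime and check the result instead of
// aborting. The directory below is a placeholder, not a recommendation.
#define TBB_PREVIEW_RUNTIME_LOADER 1
#include "tbb/runtime_loader.h"
#include <cstdio>

int main() {
    char const* path[] = { "/usr/local/lib", NULL };   // absolute paths only
    // em_status: record errors for later inspection via status()
    tbb::runtime_loader loader(tbb::runtime_loader::em_status);
    if (loader.load(path) != tbb::runtime_loader::ec_ok)
        std::fprintf(stderr, "no suitable TBB library found\n");
    return 0;
}
```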
- - \b Errors: - - \c ec_bad_call - TBB already loaded by this object. - - \c ec_bad_arg - \p min_ver and/or \p max_ver negative or zero, - or \p min_ver > \p max_ver. - - \c ec_bad_ver - TBB of unsuitable version already loaded by another object. - - \c ec_no_lib - No suitable library found. - */ - error_code - load( - char const * path[], //!< List of directories to search TBB in. - int min_ver = TBB_INTERFACE_VERSION, //!< Minimal suitable version of TBB. - int max_ver = INT_MAX //!< Maximal suitable version of TBB. - - ); - - - //! Report status. - /*! - If error mode is \c em_status, the function returns status of the last operation. - */ - error_code status(); - - private: - - error_mode const my_mode; - error_code my_status; - bool my_loaded; - -}; // class runtime_loader - -} // namespace interface6 - -using interface6::runtime_loader; - -} // namespace tbb - -#endif /* __TBB_runtime_loader_H */ - diff --git a/src/tbb/include/tbb/rw_mutex.h b/src/tbb/include/tbb/rw_mutex.h new file mode 100644 index 000000000..f2499ebac --- /dev/null +++ b/src/tbb/include/tbb/rw_mutex.h @@ -0,0 +1,17 @@ +/* + Copyright (c) 2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../oneapi/tbb/rw_mutex.h" diff --git a/src/tbb/include/tbb/scalable_allocator.h b/src/tbb/include/tbb/scalable_allocator.h index 3683aa27f..5c654ebd6 100644 --- a/src/tbb/include/tbb/scalable_allocator.h +++ b/src/tbb/include/tbb/scalable_allocator.h @@ -1,319 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_scalable_allocator_H -#define __TBB_scalable_allocator_H -/** @file */ - -#include <stddef.h> /* Need ptrdiff_t and size_t from here. */ -#if !_MSC_VER -#include <stdint.h> /* Need intptr_t from here. */ -#endif - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (push) - #pragma warning (disable: 991) -#endif - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#if _MSC_VER >= 1400 -#define __TBB_EXPORTED_FUNC __cdecl -#else -#define __TBB_EXPORTED_FUNC -#endif - -/** The "malloc" analogue to allocate block of memory of size bytes. - * @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_malloc (size_t size); - -/** The "free" analogue to discard a previously allocated piece of memory. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_free (void* ptr); - -/** The "realloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size); - -/** The "calloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size); - -/** The "posix_memalign" analogue. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size); - -/** The "_aligned_malloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment); - -/** The "_aligned_realloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment); - -/** The "_aligned_free" analogue. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); + http://www.apache.org/licenses/LICENSE-2.0 -/** The analogue of _msize/malloc_size/malloc_usable_size. - Returns the usable size of a memory block previously allocated by scalable_*, - or 0 (zero) if ptr does not point to such a block. - @ingroup memory_allocation */ -size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); - -/* Results for scalable_allocation_* functions */ -typedef enum { - TBBMALLOC_OK, - TBBMALLOC_INVALID_PARAM, - TBBMALLOC_UNSUPPORTED, - TBBMALLOC_NO_MEMORY, - TBBMALLOC_NO_EFFECT -} ScalableAllocationResult; - -/* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages. - scalable_allocation_mode call has priority over environment variable. */ -typedef enum { - TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */ - /* deprecated, kept for backward compatibility only */ - USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES, - /* try to limit memory consumption value Bytes, clean internal buffers - if limit is exceeded, but not prevents from requesting memory from OS */ - TBBMALLOC_SET_SOFT_HEAP_LIMIT -} AllocationModeParam; - -/** Set TBB allocator-specific allocation modes. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value); - -typedef enum { - /* Clean internal allocator buffers for all threads. - Returns TBBMALLOC_NO_EFFECT if no buffers cleaned, - TBBMALLOC_OK if some memory released from buffers. */ - TBBMALLOC_CLEAN_ALL_BUFFERS, - /* Clean internal allocator buffer for current thread only. - Return values same as for TBBMALLOC_CLEAN_ALL_BUFFERS. */ - TBBMALLOC_CLEAN_THREAD_BUFFERS -} ScalableAllocationCmd; - -/** Call TBB allocator-specific commands.
- @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_allocation_command(int cmd, void *param); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - -#ifdef __cplusplus - -//! The namespace rml contains components of low-level memory pool interface. -namespace rml { -class MemoryPool; - -typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes); -typedef int (*rawFreeType)(intptr_t pool_id, void* raw_ptr, size_t raw_bytes); - -/* -MemPoolPolicy extension must be compatible with such structure fields layout - -struct MemPoolPolicy { - rawAllocType pAlloc; - rawFreeType pFree; - size_t granularity; // granularity of pAlloc allocations -}; + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -struct MemPoolPolicy { - enum { - TBBMALLOC_POOL_VERSION = 1 - }; - - rawAllocType pAlloc; - rawFreeType pFree; - // granularity of pAlloc allocations. 0 means default used. - size_t granularity; - int version; - // all memory consumed at 1st pAlloc call and never returned, - // no more pAlloc calls after 1st - unsigned fixedPool : 1, - // memory consumed but returned only at pool termination - keepAllMemory : 1, - reserved : 30; - - MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_, - size_t granularity_ = 0, bool fixedPool_ = false, - bool keepAllMemory_ = false) : - pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION), - fixedPool(fixedPool_), keepAllMemory(keepAllMemory_), - reserved(0) {} -}; - -// enums have same values as appropriate enums from ScalableAllocationResult -// TODO: use ScalableAllocationResult in pool_create directly -enum MemPoolError { - // pool created successfully - POOL_OK = TBBMALLOC_OK, - // invalid policy parameters found - INVALID_POLICY = TBBMALLOC_INVALID_PARAM, - // requested pool policy is not supported by allocator library - UNSUPPORTED_POLICY = TBBMALLOC_UNSUPPORTED, - // lack of memory during pool creation - NO_MEMORY = TBBMALLOC_NO_MEMORY, - // action takes no effect - NO_EFFECT = TBBMALLOC_NO_EFFECT -}; - -MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy, - rml::MemoryPool **pool); - -bool pool_destroy(MemoryPool* memPool); -void *pool_malloc(MemoryPool* memPool, size_t size); -void *pool_realloc(MemoryPool* memPool, void *object, size_t size); -void *pool_aligned_malloc(MemoryPool* mPool, size_t size, size_t alignment); -void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_t alignment); -bool pool_reset(MemoryPool* memPool); -bool pool_free(MemoryPool *memPool, void *object); -} - -#include <new> /* To use new with the placement argument */ - -/* Ensure that including this header does not cause implicit linkage with TBB */ -#ifndef __TBB_NO_IMPLICIT_LINKAGE - #define __TBB_NO_IMPLICIT_LINKAGE 1 - #include "tbb_stddef.h" - #undef __TBB_NO_IMPLICIT_LINKAGE -#else - #include "tbb_stddef.h" -#endif - -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include <utility> // std::forward -#endif - -namespace tbb { - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//!
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template<typename T> -class scalable_allocator { -public: - typedef typename internal::allocator_type<T>::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template<class U> struct rebind { - typedef scalable_allocator<U> other; - }; - - scalable_allocator() throw() {} - scalable_allocator( const scalable_allocator& ) throw() {} - template<typename U> scalable_allocator(const scalable_allocator<U>&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ =0 ) { - return static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) ); - } - - //! Free previously allocated block of memory - void deallocate( pointer p, size_type ) { - scalable_free( p ); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_type); - return (absolutemax > 0 ? absolutemax : 1); - } -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - template<typename U, typename... Args> - void construct(U *p, Args&&... args) - { ::new((void *)p) U(std::forward<Args>(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) { ::new((void*)(p)) value_type( std::move( value ) ); } -#endif - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class scalable_allocator<void> { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template<class U> struct rebind { - typedef scalable_allocator<U> other; - }; -}; - -template<typename T1, typename T2> -inline bool operator==( const scalable_allocator<T1>&, const scalable_allocator<T2>& ) {return true;} - -template<typename T1, typename T2> -inline bool operator!=( const scalable_allocator<T1>&, const scalable_allocator<T2>& ) {return false;} - -} // namespace tbb - -#if _MSC_VER - #if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE) - #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 - #endif - - #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment(lib, "tbbmalloc_debug.lib") - #else - #pragma comment(lib, "tbbmalloc.lib") - #endif - #endif - - -#endif - -#endif /* __cplusplus */ - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (pop) -#endif // ICC 11.0 warning 991 is back - -#endif /* __TBB_scalable_allocator_H */ +#include "../oneapi/tbb/scalable_allocator.h" diff --git a/src/tbb/include/tbb/spin_mutex.h b/src/tbb/include/tbb/spin_mutex.h index a7ed31be4..1a6f7f077 100644 --- a/src/tbb/include/tbb/spin_mutex.h +++ b/src/tbb/include/tbb/spin_mutex.h @@ -1,212 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks.
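For context on the allocator interface removed just above: a hedged sketch of `scalable_allocator` used as a drop-in replacement for `std::allocator`; the container and workload are illustrative, not part of this diff.

```cpp
// Illustrative only: std::vector backed by the removed tbb::scalable_allocator,
// which services allocations from per-thread pools.
#include "tbb/scalable_allocator.h"
#include <vector>

int main() {
    std::vector<int, tbb::scalable_allocator<int> > values;
    for (int i = 0; i < 1000; ++i)
        values.push_back(i);
    return 0;
}
```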
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_spin_mutex_H -#define __TBB_spin_mutex_H - -#include <cstddef> -#include <new> -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" -#include "internal/_mutex_padding.h" - -namespace tbb { - -//! A lock that occupies a single byte. -/** A spin_mutex is a spin mutex that fits in a single byte. - It should be used only for locking short critical sections - (typically less than 20 instructions) when fairness is not an issue. - If zero-initialized, the mutex is considered unheld. - @ingroup synchronization */ -class spin_mutex : internal::mutex_copy_deprecated_and_disabled { - //! 0 if lock is released, 1 if lock is acquired. - __TBB_atomic_flag flag; - -public: - //! Construct unacquired lock. - /** Equivalent to zero-initialization of *this. */ - spin_mutex() : flag(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Represents acquisition of a mutex. - class scoped_lock : internal::no_copy { - private: - //! Points to currently held mutex, or NULL if no lock is held. - spin_mutex* my_mutex; - - //! Value to store into spin_mutex::flag to unlock the mutex. - /** This variable is no longer used. Instead, 0 and 1 are used to - represent that the lock is free and acquired, respectively. - We keep the member variable here to ensure backward compatibility */ - __TBB_Flag my_unlock_value; - - //! Like acquire, but with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m ); - - //! Like try_acquire, but with ITT instrumentation. - bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m ); - - //! Like release, but with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_release(); - - friend class spin_mutex; - - public: - //! Construct without acquiring a mutex. - scoped_lock() : my_mutex(NULL), my_unlock_value(0) {} - - //! Construct and acquire lock on a mutex.
- scoped_lock( spin_mutex& m ) : my_unlock_value(0) { - internal::suppress_unused_warning(my_unlock_value); -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - my_mutex=NULL; - internal_acquire(m); -#else - my_mutex=&m; - __TBB_LockByte(m.flag); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Acquire lock. - void acquire( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_acquire(m); -#else - my_mutex = &m; - __TBB_LockByte(m.flag); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_acquire( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return internal_try_acquire(m); -#else - bool result = __TBB_TryLockByte(m.flag); - if( result ) - my_mutex = &m; - return result; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } + http://www.apache.org/licenses/LICENSE-2.0 - //! Release lock - void release() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release(); -#else - __TBB_UnlockByte(my_mutex->flag); - my_mutex = NULL; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Destroy lock. If holding a lock, releases the lock first. - ~scoped_lock() { - if( my_mutex ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release(); -#else - __TBB_UnlockByte(my_mutex->flag); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - } - }; - - //! Internal constructor with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { -#if TBB_USE_THREADING_TOOLS - aligned_space<scoped_lock,1> tmp; - new(tmp.begin()) scoped_lock(*this); -#else - __TBB_LockByte(flag); -#endif /* TBB_USE_THREADING_TOOLS*/ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_THREADING_TOOLS - aligned_space<scoped_lock,1> tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - return __TBB_TryLockByte(flag); -#endif /* TBB_USE_THREADING_TOOLS*/ - } - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS - aligned_space<scoped_lock,1> tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - __TBB_store_with_release(flag, 0); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - friend class scoped_lock; -}; // end of spin_mutex - -__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex) - -} // namespace tbb - -#if ( __TBB_x86_32 || __TBB_x86_64 ) -#include "internal/_x86_eliding_mutex_impl.h" -#endif - -namespace tbb { -//! A cross-platform spin mutex with speculative lock acquisition. -/** On platforms with proper HW support, this lock may speculatively execute - its critical sections, using HW mechanisms to detect real data races and - ensure atomicity of the critical sections. In particular, it uses - Intel(R) Transactional Synchronization Extensions (Intel(R) TSX). - Without such HW support, it behaves like a spin_mutex. - It should be used for locking short critical sections where the lock is - contended but the data it protects are not. If zero-initialized, the - mutex is considered unheld.
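A hedged usage sketch for the spin_mutex scoped_lock pattern declared above; the shared accumulator and function are hypothetical.

```cpp
// Illustrative only: a short critical section under the removed tbb::spin_mutex.
#include "tbb/spin_mutex.h"

static tbb::spin_mutex mutex;
static long total = 0;

void add(long x) {
    tbb::spin_mutex::scoped_lock lock(mutex);  // spins until acquired
    total += x;                                // keep this section short
}
```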
- @ingroup synchronization */ - -#if ( __TBB_x86_32 || __TBB_x86_64 ) -typedef interface7::internal::padded_mutex<internal::x86_eliding_mutex,false> speculative_spin_mutex; -#else -typedef interface7::internal::padded_mutex<spin_mutex,false> speculative_spin_mutex; -#endif -__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex) - -} // namespace tbb + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_spin_mutex_H */ +#include "../oneapi/tbb/spin_mutex.h" diff --git a/src/tbb/include/tbb/spin_rw_mutex.h b/src/tbb/include/tbb/spin_rw_mutex.h index 61151c179..d36282b48 100644 --- a/src/tbb/include/tbb/spin_rw_mutex.h +++ b/src/tbb/include/tbb/spin_rw_mutex.h @@ -1,259 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_spin_rw_mutex_H -#define __TBB_spin_rw_mutex_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" -#include "internal/_mutex_padding.h" - -namespace tbb { - -#if __TBB_TSX_AVAILABLE -namespace interface8 { namespace internal { - class x86_rtm_rw_mutex; -}} -#endif - -class spin_rw_mutex_v3; -typedef spin_rw_mutex_v3 spin_rw_mutex; - -//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference -/** @ingroup synchronization */ -class spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled { - //! @cond INTERNAL - - //! Internal acquire write lock. - bool __TBB_EXPORTED_METHOD internal_acquire_writer(); - - //! Out of line code for releasing a write lock. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_release_writer(); - - //! Internal acquire read lock. - void __TBB_EXPORTED_METHOD internal_acquire_reader(); - - //! Internal upgrade reader to become a writer.
- bool __TBB_EXPORTED_METHOD internal_upgrade(); - - //! Out of line code for downgrading a writer to a reader. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_downgrade(); - - //! Internal release read lock. - void __TBB_EXPORTED_METHOD internal_release_reader(); - - //! Internal try_acquire write lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_writer(); - - //! Internal try_acquire read lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_reader(); - - //! @endcond -public: - //! Construct unacquired mutex. - spin_rw_mutex_v3() : state(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - -#if TBB_USE_ASSERT - //! Destructor asserts if the mutex is acquired, i.e. state is zero. - ~spin_rw_mutex_v3() { - __TBB_ASSERT( !state, "destruction of an acquired mutex"); - }; -#endif /* TBB_USE_ASSERT */ - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { -#if __TBB_TSX_AVAILABLE - friend class tbb::interface8::internal::x86_rtm_rw_mutex; - // helper methods for x86_rtm_rw_mutex - spin_rw_mutex *internal_get_mutex() const { return mutex; } - void internal_set_mutex(spin_rw_mutex* m) { mutex = m; } -#endif - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : mutex(NULL), is_writer(false) {} - - //! Acquire lock on given mutex. - scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - is_writer = write; - mutex = &m; - if( write ) mutex->internal_acquire_writer(); - else mutex->internal_acquire_reader(); - } + http://www.apache.org/licenses/LICENSE-2.0 - //! Upgrade reader to become a writer. - /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); - is_writer = true; - return mutex->internal_upgrade(); - } - - //! Release lock. - void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - spin_rw_mutex *m = mutex; - mutex = NULL; -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( is_writer ) m->internal_release_writer(); - else m->internal_release_reader(); -#else - if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); - else __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Downgrade writer to become a reader. - bool downgrade_to_reader() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - mutex->internal_downgrade(); -#else - __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER)); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - is_writer = false; - return true; - } - - //! Try acquire lock on given mutex. - bool try_acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - bool result; - is_writer = write; - result = write? 
m.internal_try_acquire_writer() - : m.internal_try_acquire_reader(); - if( result ) - mutex = &m; - return result; - } - - protected: - - //! The pointer to the current mutex that is held, or NULL if no mutex is held. - spin_rw_mutex* mutex; - - //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock. - /** Not defined if not holding a lock. */ - bool is_writer; - }; - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire writer lock - void lock() {internal_acquire_writer();} - - //! Try acquiring writer lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() {return internal_try_acquire_writer();} - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( state&WRITER ) internal_release_writer(); - else internal_release_reader(); -#else - if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); - else __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - // Methods for reader locks that resemble ISO C++0x compatibility methods. - - //! Acquire reader lock - void lock_read() {internal_acquire_reader();} - - //! Try acquiring reader lock (non-blocking) - /** Return true if reader lock acquired; false otherwise. */ - bool try_lock_read() {return internal_try_acquire_reader();} - -protected: - typedef intptr_t state_t; - static const state_t WRITER = 1; - static const state_t WRITER_PENDING = 2; - static const state_t READERS = ~(WRITER | WRITER_PENDING); - static const state_t ONE_READER = 4; - static const state_t BUSY = WRITER | READERS; - //! State of lock - /** Bit 0 = writer is holding lock - Bit 1 = request by a writer to acquire lock (hint to readers to wait) - Bit 2..N = number of readers holding lock */ - state_t state; - -private: - void __TBB_EXPORTED_METHOD internal_construct(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex) - -} // namespace tbb - -#if __TBB_TSX_AVAILABLE -#include "internal/_x86_rtm_rw_mutex_impl.h" -#endif - -namespace tbb { -namespace interface8 { -//! A cross-platform spin reader/writer mutex with speculative lock acquisition. -/** On platforms with proper HW support, this lock may speculatively execute - its critical sections, using HW mechanisms to detect real data races and - ensure atomicity of the critical sections. In particular, it uses - Intel(R) Transactional Synchronization Extensions (Intel(R) TSX). - Without such HW support, it behaves like a spin_rw_mutex. - It should be used for locking short critical sections where the lock is - contended but the data it protects are not. - @ingroup synchronization */ -#if __TBB_TSX_AVAILABLE -typedef interface7::internal::padded_mutex speculative_spin_rw_mutex; -#else -typedef interface7::internal::padded_mutex speculative_spin_rw_mutex; -#endif -} // namespace interface8 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -using interface8::speculative_spin_rw_mutex; -__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_rw_mutex) -} // namespace tbb -#endif /* __TBB_spin_rw_mutex_H */ +#include "../oneapi/tbb/spin_rw_mutex.h" diff --git a/src/tbb/include/tbb/task.h b/src/tbb/include/tbb/task.h index 5dabcd97a..9be95b0d6 100644 --- a/src/tbb/include/tbb/task.h +++ b/src/tbb/include/tbb/task.h @@ -1,1007 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_task_H -#define __TBB_task_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include <climits> - -typedef struct ___itt_caller *__itt_caller; - -namespace tbb { - -class task; -class task_list; -class task_group_context; - -// MSVC does not allow taking the address of a member that was defined -// privately in task_base and made public in class task via a using declaration. -#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) -#define __TBB_TASK_BASE_ACCESS public -#else -#define __TBB_TASK_BASE_ACCESS private -#endif - -namespace internal { //< @cond INTERNAL - - class allocate_additional_child_of_proxy: no_assign { - //! No longer used, but retained for binary layout compatibility. Always NULL. - task* self; - task& parent; - public: - explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; }; -} //< namespace internal @endcond - -namespace interface5 { - namespace internal { - //! Base class for methods that became static in TBB 3.0. - /** TBB's evolution caused the "this" argument for several methods to become obsolete. - However, for backwards binary compatibility, the new methods need distinct names, - otherwise the One Definition Rule would be broken. Hence the new methods are - defined in this private base class, and then exposed in class task via - using declarations.
*/ - class task_base: tbb::internal::no_copy { - __TBB_TASK_BASE_ACCESS: - friend class tbb::task; - - //! Schedule task for execution when a worker becomes available. - static void spawn( task& t ); - - //! Spawn multiple tasks and clear list. - static void spawn( task_list& list ); - - //! Like allocate_child, except that task's parent becomes "t", not this. - /** Typically used in conjunction with schedule_to_reexecute to implement while loops. - Atomically increments the reference count of t.parent() */ - static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) { - return tbb::internal::allocate_additional_child_of_proxy(t); - } - - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - static void __TBB_EXPORTED_FUNC destroy( task& victim ); - }; - } // internal -} // interface5 - -//! @cond INTERNAL -namespace internal { - - class scheduler: no_copy { - public: - //! For internal use only - virtual void spawn( task& first, task*& next ) = 0; - - //! For internal use only - virtual void wait_for_all( task& parent, task* child ) = 0; - - //! For internal use only - virtual void spawn_root_and_wait( task& first, task*& next ) = 0; - - //! Pure virtual destructor; - // Have to have it just to shut up overzealous compilation warnings - virtual ~scheduler() = 0; - - //! For internal use only - virtual void enqueue( task& t, void* reserved ) = 0; - }; - - //! A reference count - /** Should always be non-negative. A signed type is used so that underflow can be detected. */ - typedef intptr_t reference_count; - - //! An id as used for specifying affinity. - typedef unsigned short affinity_id; - -#if __TBB_TASK_GROUP_CONTEXT - class generic_scheduler; - - struct context_list_node_t { - context_list_node_t *my_prev, - *my_next; - }; - - class allocate_root_with_context_proxy: no_assign { - task_group_context& my_context; - public: - allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - class allocate_root_proxy: no_assign { - public: - static task& __TBB_EXPORTED_FUNC allocate( size_t size ); - static void __TBB_EXPORTED_FUNC free( task& ); - }; - - class allocate_continuation_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - class allocate_child_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - //! Memory prefix to a task object. - /** This class is internal to the library. - Do not reference it directly, except within the library itself. - Fields are ordered in way that preserves backwards compatibility and yields - good packing on typical 32-bit and 64-bit platforms. - - In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64 - architectures correspondingly, consider dynamic setting of task_alignment - and task_prefix_reservation_size based on the maximal operand size supported - by the current CPU. 
- - @ingroup task_scheduling */ - class task_prefix { - private: - friend class tbb::task; - friend class tbb::interface5::internal::task_base; - friend class tbb::task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_additional_child_of_proxy; - -#if __TBB_TASK_GROUP_CONTEXT - //! Shared context that is used to communicate asynchronous state changes - /** Currently it is used to broadcast cancellation requests generated both - by users and as the result of unhandled exceptions in the task::execute() - methods. */ - task_group_context *context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! The scheduler that allocated the task, or NULL if the task is big. - /** Small tasks are pooled by the scheduler that allocated the task. - If a scheduler needs to free a small task allocated by another scheduler, - it returns the task to that other scheduler. This policy avoids - memory space blowup issues for memory allocators that allocate from - thread-specific pools. */ - scheduler* origin; - -#if __TBB_TASK_PRIORITY - union { -#endif /* __TBB_TASK_PRIORITY */ - //! Obsolete. The scheduler that owns the task. - /** Retained only for the sake of backward binary compatibility. - Still used by inline methods in the task.h header. **/ - scheduler* owner; - -#if __TBB_TASK_PRIORITY - //! Pointer to the next offloaded lower priority task. - /** Used to maintain a list of offloaded tasks inside the scheduler. **/ - task* next_offloaded; - }; -#endif /* __TBB_TASK_PRIORITY */ - - //! The task whose reference count includes me. - /** In the "blocking style" of programming, this field points to the parent task. - In the "continuation-passing style" of programming, this field points to the - continuation of the parent. */ - tbb::task* parent; - - //! Reference count used for synchronization. - /** In the "continuation-passing style" of programming, this field is - the difference of the number of allocated children minus the - number of children that have completed. - In the "blocking style" of programming, this field is one more than the difference. */ - __TBB_atomic reference_count ref_count; - - //! Obsolete. Used to be scheduling depth before TBB 2.2 - /** Retained only for the sake of backward binary compatibility. - Not used by TBB anymore. **/ - int depth; - - //! A task::state_type, stored as a byte for compactness. - /** This state is exposed to users via method task::state(). */ - unsigned char state; - - //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness. - /** 0x0 -> version 1.0 task - 0x1 -> version >=2.1 task - 0x10 -> task was enqueued - 0x20 -> task_proxy - 0x40 -> task has live ref_count - 0x80 -> a stolen task */ - unsigned char extra_state; - - affinity_id affinity; - - //! "next" field for list of task - tbb::task* next; - - //! The task corresponding to this task_prefix. - tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);} - }; - -} // namespace internal -//!
@endcond - -#if __TBB_TASK_GROUP_CONTEXT - -#if __TBB_TASK_PRIORITY -namespace internal { - static const int priority_stride_v4 = INT_MAX / 4; -} - -enum priority_t { - priority_normal = internal::priority_stride_v4 * 2, - priority_low = priority_normal - internal::priority_stride_v4, - priority_high = priority_normal + internal::priority_stride_v4 -}; - -#endif /* __TBB_TASK_PRIORITY */ - -#if TBB_USE_CAPTURED_EXCEPTION - class tbb_exception; -#else - namespace internal { - class tbb_exception_ptr; - } -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -class task_scheduler_init; -namespace interface7 { class task_arena; } - -//! Used to form groups of tasks -/** @ingroup task_scheduling - The context services explicit cancellation requests from user code, and unhandled - exceptions intercepted during tasks execution. Intercepting an exception results - in generating internal cancellation requests (which is processed in exactly the - same way as external ones). - - The context is associated with one or more root tasks and defines the cancellation - group that includes all the descendants of the corresponding root task(s). Association - is established when a context object is passed as an argument to the task::allocate_root() - method. See task_group_context::task_group_context for more details. - - The context can be bound to another one, and other contexts can be bound to it, - forming a tree-like structure: parent -> this -> children. Arrows here designate - cancellation propagation direction. If a task in a cancellation group is cancelled - all the other tasks in this group and groups bound to it (as children) get cancelled too. - - IMPLEMENTATION NOTE: - When adding new members to task_group_context or changing types of existing ones, - update the size of both padding buffers (_leading_padding and _trailing_padding) - appropriately. See also VERSIONING NOTE at the constructor definition below. **/ -class task_group_context : internal::no_copy { -private: - friend class internal::generic_scheduler; - friend class task_scheduler_init; - friend class interface7::task_arena; - -#if TBB_USE_CAPTURED_EXCEPTION - typedef tbb_exception exception_container_type; -#else - typedef internal::tbb_exception_ptr exception_container_type; -#endif - - enum version_traits_word_layout { - traits_offset = 16, - version_mask = 0xFFFF, - traits_mask = 0xFFFFul << traits_offset - }; - -public: - enum kind_type { - isolated, - bound - }; - - enum traits_type { - exact_exception = 0x0001ul << traits_offset, -#if __TBB_FP_CONTEXT - fp_settings = 0x0002ul << traits_offset, -#endif - concurrent_wait = 0x0004ul << traits_offset, -#if TBB_USE_CAPTURED_EXCEPTION - default_traits = 0 -#else - default_traits = exact_exception -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - }; - -private: - enum state { - may_have_children = 1, - // the following enumerations must be the last, new 2^x values must go above - next_state_value, low_unused_state_bit = (next_state_value-1)*2 - }; - - union { - //! Flavor of this context: bound or isolated. - // TODO: describe asynchronous use, and whether any memory semantics are needed - __TBB_atomic kind_type my_kind; - uintptr_t _my_kind_aligner; - }; - - //! Pointer to the context of the parent cancellation group. NULL for isolated contexts. - task_group_context *my_parent; - - //! Used to form the thread specific list of contexts without additional memory allocation. - /** A context is included into the list of the current thread when its binding to - its parent happens. 
Any context can be present in the list of one thread only. **/ - internal::context_list_node_t my_node; - - //! Used to set and maintain stack stitching point for Intel Performance Tools. - __itt_caller itt_caller; - - //! Leading padding protecting accesses to frequently used members from false sharing. - /** Read accesses to the field my_cancellation_requested are on the hot path inside - the scheduler. This padding ensures that this field never shares the same cache - line with a local variable that is frequently written to. **/ - char _leading_padding[internal::NFS_MaxLineSize - - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t) - - sizeof(__itt_caller) -#if __TBB_FP_CONTEXT - - sizeof(internal::cpu_ctl_env_space) -#endif - ]; - -#if __TBB_FP_CONTEXT - //! Space for platform-specific FPU settings. - /** Must only be accessed inside TBB binaries, and never directly in user - code or inline methods. */ - internal::cpu_ctl_env_space my_cpu_ctl_env; -#endif - - //! Specifies whether cancellation was requested for this task group. - uintptr_t my_cancellation_requested; - - //! Version for run-time checks and behavioral traits of the context. - /** Version occupies low 16 bits, and traits (zero or more ORed enumerators - from the traits_type enumerations) take the next 16 bits. - Original (zeroth) version of the context did not support any traits. **/ - uintptr_t my_version_and_traits; - - //! Pointer to the container storing exception being propagated across this task group. - exception_container_type *my_exception; - - //! Scheduler instance that registered this context in its thread specific list. - internal::generic_scheduler *my_owner; - - //! Internal state (combination of state flags, currently only may_have_children). - uintptr_t my_state; - -#if __TBB_TASK_PRIORITY - //! Priority level of the task group (in normalized representation) - intptr_t my_priority; -#endif /* __TBB_TASK_PRIORITY */ - - //! Trailing padding protecting accesses to frequently used members from false sharing - /** \sa _leading_padding **/ - char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*) -#if __TBB_TASK_PRIORITY - - sizeof(intptr_t) -#endif /* __TBB_TASK_PRIORITY */ - ]; - -public: - //! Default & binding constructor. - /** By default a bound context is created. That is this context will be bound - (as child) to the context of the task calling task::allocate_root(this_context) - method. Cancellation requests passed to the parent context are propagated - to all the contexts bound to it. Similarly priority change is propagated - from the parent context to its children. - - If task_group_context::isolated is used as the argument, then the tasks associated - with this context will never be affected by events in any other context. - - Creating isolated contexts involve much less overhead, but they have limited - utility. Normally when an exception occurs in an algorithm that has nested - ones running, it is desirably to have all the nested algorithms cancelled - as well. Such a behavior requires nested algorithms to use bound contexts. - - There is one good place where using isolated algorithms is beneficial. It is - a master thread. That is if a particular algorithm is invoked directly from - the master thread (not from a TBB task), supplying it with explicitly - created isolated context will result in a faster algorithm startup. 
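
The constructor comment above says isolated contexts start faster but opt out of any enclosing cancellation group. A hedged sketch of the legacy idiom (Body and run_isolated are placeholder names, not from this patch):

    #include <tbb/blocked_range.h>
    #include <tbb/parallel_for.h>
    #include <tbb/partitioner.h>
    #include <tbb/task.h>

    struct Body {
        void operator()(const tbb::blocked_range<int>&) const { /* work */ }
    };

    void run_isolated(int n) {
        // Isolated: never cancelled by events in any other context.
        tbb::task_group_context ctx(tbb::task_group_context::isolated);
        tbb::parallel_for(tbb::blocked_range<int>(0, n), Body(),
                          tbb::auto_partitioner(), ctx);
    }
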
- - VERSIONING NOTE: - Implementation(s) of task_group_context constructor(s) cannot be made - entirely out-of-line because the run-time version must be set by the user - code. This will become critically important for binary compatibility, if - we ever have to change the size of the context object. - - Boosting the runtime version will also be necessary if new data fields are - introduced in the currently unused padding areas and these fields are updated - by inline methods. **/ - task_group_context ( kind_type relation_with_parent = bound, - uintptr_t traits = default_traits ) - : my_kind(relation_with_parent) - , my_version_and_traits(2 | traits) - { - init(); - } - - // Do not introduce standalone unbind method since it will break state propagation assumptions - __TBB_EXPORTED_METHOD ~task_group_context (); - - //! Forcefully reinitializes the context after the task tree it was associated with is completed. - /** Because the method assumes that all the tasks that used to be associated with - this context have already finished, calling it while the context is still - in use somewhere in the task hierarchy leads to undefined behavior. - - IMPORTANT: This method is not thread safe! - - The method does not change the context's parent if it is set. **/ - void __TBB_EXPORTED_METHOD reset (); - - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. - - Note that canceling never fails. When false is returned, it just means that - another thread (or this one) has already sent cancellation request to this - context or to one of its ancestors (if this context is bound). It is guaranteed - that when this method is concurrently called on the same not yet cancelled - context, true will be returned by one and only one invocation. **/ - bool __TBB_EXPORTED_METHOD cancel_group_execution (); - - //! Returns true if the context received cancellation request. - bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const; - - //! Records the pending exception, and cancels the task group. - /** May be called only from inside a catch-block. If the context is already - cancelled, does nothing. - The method brings the task group associated with this context exactly into - the state it would be in, if one of its tasks threw the currently pending - exception during its execution. In other words, it emulates the actions - of the scheduler's dispatch loop exception handler. **/ - void __TBB_EXPORTED_METHOD register_pending_exception (); - -#if __TBB_FP_CONTEXT - //! Captures the current FPU control settings to the context. - /** Because the method assumes that all the tasks that used to be associated with - this context have already finished, calling it while the context is still - in use somewhere in the task hierarchy leads to undefined behavior. - - IMPORTANT: This method is not thread safe! - - The method does not change the FPU control settings of the context's parent. **/ - void __TBB_EXPORTED_METHOD capture_fp_settings (); -#endif - -#if __TBB_TASK_PRIORITY - //! Changes priority of the task group - void set_priority ( priority_t ); + http://www.apache.org/licenses/LICENSE-2.0 - //! Retrieves current priority of the current task group - priority_t priority () const; -#endif /* __TBB_TASK_PRIORITY */ - -protected: - //! Out-of-line part of the constructor. - /** Singled out to ensure backward binary compatibility of the future versions. 
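
Cooperative cancellation with the methods above, as an illustrative fragment (worker and stop are hypothetical names): every task can poll the flag, and exactly one concurrent caller of cancel_group_execution() observes true:

    void worker(tbb::task_group_context& ctx) {
        while (!ctx.is_group_execution_cancelled()) {
            // ... perform one unit of work ...
        }
    }

    void stop(tbb::task_group_context& ctx) {
        bool first = ctx.cancel_group_execution();
        (void)first;  // true for exactly one concurrent caller
    }
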
**/ - void __TBB_EXPORTED_METHOD init (); - -private: - friend class task; - friend class internal::allocate_root_with_context_proxy; - - static const kind_type binding_required = bound; - static const kind_type binding_completed = kind_type(bound+1); - static const kind_type detached = kind_type(binding_completed+1); - static const kind_type dying = kind_type(detached+1); - - //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line. - template - void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ); - - //! Registers this context with the local scheduler and binds it to its parent context - void bind_to ( internal::generic_scheduler *local_sched ); - - //! Registers this context with the local scheduler - void register_with ( internal::generic_scheduler *local_sched ); - -#if __TBB_FP_CONTEXT - //! Copies FPU control setting from another context - // TODO: Consider adding #else stub in order to omit #if sections in other code - void copy_fp_settings( const task_group_context &src ); -#endif /* __TBB_FP_CONTEXT */ -}; // class task_group_context - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Base class for user-defined tasks. -/** @ingroup task_scheduling */ -class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { - - //! Set reference count - void __TBB_EXPORTED_METHOD internal_set_ref_count( int count ); - - //! Decrement reference count and return its new value. - internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count(); - -protected: - //! Default constructor. - task() {prefix().extra_state=1;} - -public: - //! Destructor. - virtual ~task() {} - - //! Should be overridden by derived classes. - virtual task* execute() = 0; - - //! Enumeration of task states that the scheduler considers. - enum state_type { - //! task is running, and will be destroyed after method execute() completes. - executing, - //! task to be rescheduled. - reexecute, - //! task is in ready pool, or is going to be put there, or was just taken off. - ready, - //! task object is freshly allocated or recycled. - allocated, - //! task object is on free list, or is going to be put there, or was just taken off. - freed, - //! task to be recycled as continuation - recycle -#if __TBB_RECYCLE_TO_ENQUEUE - //! task to be scheduled for starvation-resistant execution - ,to_enqueue -#endif - }; - - //------------------------------------------------------------------------ - // Allocating tasks - //------------------------------------------------------------------------ - - //! Returns proxy for overloaded new that allocates a root task. - static internal::allocate_root_proxy allocate_root() { - return internal::allocate_root_proxy(); - } - -#if __TBB_TASK_GROUP_CONTEXT - //! Returns proxy for overloaded new that allocates a root task associated with user supplied context. - static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) { - return internal::allocate_root_with_context_proxy(ctx); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! Returns proxy for overloaded new that allocates a continuation task of *this. - /** The continuation's parent becomes the parent of *this. */ - internal::allocate_continuation_proxy& allocate_continuation() { - return *reinterpret_cast(this); - } - - //! Returns proxy for overloaded new that allocates a child task of *this. 
- internal::allocate_child_proxy& allocate_child() { - return *reinterpret_cast(this); - } - - //! Define recommended static form via import from base class. - using task_base::allocate_additional_child_of; - -#if __TBB_DEPRECATED_TASK_INTERFACE - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - void __TBB_EXPORTED_METHOD destroy( task& t ); -#else /* !__TBB_DEPRECATED_TASK_INTERFACE */ - //! Define recommended static form via import from base class. - using task_base::destroy; -#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */ - - //------------------------------------------------------------------------ - // Recycling of tasks - //------------------------------------------------------------------------ - - //! Change this to be a continuation of its former self. - /** The caller must guarantee that the task's refcount does not become zero until - after the method execute() returns. Typically, this is done by having - method execute() return a pointer to a child of the task. If the guarantee - cannot be made, use method recycle_as_safe_continuation instead. - - Because of the hazard, this method may be deprecated in the future. */ - void recycle_as_continuation() { - __TBB_ASSERT( prefix().state==executing, "execute not running?" ); - prefix().state = allocated; - } - - //! Recommended to use, safe variant of recycle_as_continuation - /** For safety, it requires additional increment of ref_count. - With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */ - void recycle_as_safe_continuation() { - __TBB_ASSERT( prefix().state==executing, "execute not running?" ); - prefix().state = recycle; - } - - //! Change this to be a child of new_parent. - void recycle_as_child_of( task& new_parent ) { - internal::task_prefix& p = prefix(); - __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" ); - __TBB_ASSERT( p.parent==NULL, "parent must be null" ); - __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" ); - __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" ); - p.state = allocated; - p.parent = &new_parent; -#if __TBB_TASK_GROUP_CONTEXT - p.context = new_parent.prefix().context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - - //! Schedule this for reexecution after current execute() returns. - /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */ - void recycle_to_reexecute() { - __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" ); - prefix().state = reexecute; - } - -#if __TBB_RECYCLE_TO_ENQUEUE - //! Schedule this to enqueue after descendant tasks complete. - /** Save enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. 
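
The allocation and recycling methods above enable the classic continuation-passing Fibonacci sketch from the TBB documentation; note the ref_count of exactly 2 (no "+1", since nothing blocks on the continuation), matching the ref_count comment earlier. FibTask and FibContinuation are illustrative names:

    class FibContinuation : public tbb::task {
    public:
        long* const sum;
        long x, y;
        explicit FibContinuation(long* sum_) : sum(sum_) {}
        tbb::task* execute() { *sum = x + y; return NULL; }
    };

    class FibTask : public tbb::task {
    public:
        long n;
        long* sum;
        FibTask(long n_, long* sum_) : n(n_), sum(sum_) {}
        tbb::task* execute() {
            if (n < 2) { *sum = n; return NULL; }
            FibContinuation& c =
                *new(allocate_continuation()) FibContinuation(sum);
            FibTask& a = *new(c.allocate_child()) FibTask(n - 2, &c.x);
            FibTask& b = *new(c.allocate_child()) FibTask(n - 1, &c.y);
            c.set_ref_count(2);  // two children; no +1, nothing waits on c
            spawn(b);
            return &a;           // scheduler bypass: run 'a' next
        }
    };
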
*/ - void recycle_to_enqueue() { - __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); - prefix().state = to_enqueue; - } -#endif /* __TBB_RECYCLE_TO_ENQUEUE */ - - //------------------------------------------------------------------------ - // Spawning and blocking - //------------------------------------------------------------------------ - - //! Set reference count - void set_ref_count( int count ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_set_ref_count(count); -#else - prefix().ref_count = count; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Atomically increment reference count and returns its old value. - /** Has acquire semantics */ - void increment_ref_count() { - __TBB_FetchAndIncrementWacquire( &prefix().ref_count ); - } - - //! Atomically decrement reference count and returns its new value. - /** Has release semantics. */ - int decrement_ref_count() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return int(internal_decrement_ref_count()); -#else - return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Define recommended static forms via import from base class. - using task_base::spawn; - - //! Similar to spawn followed by wait_for_all, but more efficient. - void spawn_and_wait_for_all( task& child ) { - prefix().owner->wait_for_all( *this, &child ); - } - - //! Similar to spawn followed by wait_for_all, but more efficient. - void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list ); - - //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. - static void spawn_root_and_wait( task& root ) { - root.prefix().owner->spawn_root_and_wait( root, root.prefix().next ); - } - - //! Spawn root tasks on list and wait for all of them to finish. - /** If there are more tasks than worker threads, the tasks are spawned in - order of front to back. */ - static void spawn_root_and_wait( task_list& root_list ); - - //! Wait for reference count to become one, and set reference count to zero. - /** Works on tasks while waiting. */ - void wait_for_all() { - prefix().owner->wait_for_all( *this, NULL ); - } - - //! Enqueue task for starvation-resistant execution. -#if __TBB_TASK_PRIORITY - /** The task will be enqueued on the normal priority level disregarding the - priority of its task group. - - The rationale of such semantics is that priority of an enqueued task is - statically fixed at the moment of its enqueuing, while task group priority - is dynamic. Thus automatic priority inheritance would be generally a subject - to the race, which may result in unexpected behavior. - - Use enqueue() overload with explicit priority value and task::group_priority() - method to implement such priority inheritance when it is really necessary. **/ -#endif /* __TBB_TASK_PRIORITY */ - static void enqueue( task& t ) { - t.prefix().owner->enqueue( t, NULL ); - } - -#if __TBB_TASK_PRIORITY - //! Enqueue task for starvation-resistant execution on the specified priority level. - static void enqueue( task& t, priority_t p ) { - __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" ); - t.prefix().owner->enqueue( t, (void*)p ); - } -#endif /* __TBB_TASK_PRIORITY */ - - //! The innermost task being executed or destroyed by the current thread at the moment. - static task& __TBB_EXPORTED_FUNC self(); - - //! task on whose behalf this task is working, or NULL if this is a root. 
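
Driving the FibTask sketch above from ordinary (non-task) code via the static helpers just shown; fib is a hypothetical wrapper:

    long fib(long n) {
        long result = 0;
        FibTask& root = *new(tbb::task::allocate_root()) FibTask(n, &result);
        tbb::task::spawn_root_and_wait(root);  // spawns, waits, frees root
        return result;
    }

    // Fire-and-forget, starvation-resistant submission looks like:
    //   tbb::task::enqueue(*new(tbb::task::allocate_root()) FibTask(30, &r));
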
- task* parent() const {return prefix().parent;} - - //! sets parent task pointer to specified value - void set_parent(task* p) { -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT(prefix().context == p->prefix().context, "The tasks must be in the same context"); -#endif - prefix().parent = p; - } - -#if __TBB_TASK_GROUP_CONTEXT - //! This method is deprecated and will be removed in the future. - /** Use method group() instead. **/ - task_group_context* context() {return prefix().context;} - - //! Pointer to the task group descriptor. - task_group_context* group () { return prefix().context; } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! True if task was stolen from the task pool of another thread. - bool is_stolen_task() const { - return (prefix().extra_state & 0x80)!=0; - } - - //------------------------------------------------------------------------ - // Debugging - //------------------------------------------------------------------------ - - //! Current execution state - state_type state() const {return state_type(prefix().state);} - - //! The internal reference count. - int ref_count() const { -#if TBB_USE_ASSERT - internal::reference_count ref_count_ = prefix().ref_count; - __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error"); -#endif - return int(prefix().ref_count); - } - - //! Obsolete, and only retained for the sake of backward compatibility. Always returns true. - bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const; - - //------------------------------------------------------------------------ - // Affinity - //------------------------------------------------------------------------ - - //! An id as used for specifying affinity. - /** Guaranteed to be integral type. Value of 0 means no affinity. */ - typedef internal::affinity_id affinity_id; - - //! Set affinity for this task. - void set_affinity( affinity_id id ) {prefix().affinity = id;} - - //! Current affinity of this task - affinity_id affinity() const {return prefix().affinity;} - - //! Invoked by scheduler to notify task that it ran on unexpected thread. - /** Invoked before method execute() runs, if task is stolen, or task has - affinity but will be executed on another thread. - - The default action does nothing. */ - virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Moves this task from its current group into another one. - /** Argument ctx specifies the new group. - - The primary purpose of this method is to associate unique task group context - with a task allocated for subsequent enqueuing. In contrast to spawned tasks - enqueued ones normally outlive the scope where they were created. This makes - traditional usage model where task group context are allocated locally on - the stack inapplicable. Dynamic allocation of context objects is performance - inefficient. Method change_group() allows to make task group context object - a member of the task class, and then associate it with its containing task - object in the latter's constructor. **/ - void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx ); - - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. **/ - bool cancel_group_execution () { return prefix().context->cancel_group_execution(); } - - //! Returns true if the context has received cancellation request. 
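
The affinity hooks above support the affinity-replay idiom: note_affinity() records where a task actually ran, and set_affinity() replays that hint the next time around. RememberingTask is an illustrative sketch, not part of the patch:

    class RememberingTask : public tbb::task {
        tbb::task::affinity_id& slot;  // persists across repeat runs
    public:
        explicit RememberingTask(tbb::task::affinity_id& s) : slot(s) {
            set_affinity(slot);        // 0 on the first pass: no hint yet
        }
        tbb::task* execute() { /* work */ return NULL; }
        void note_affinity(affinity_id id) { slot = id; }  // scheduler callback
    };
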
- bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); } -#else - bool is_cancelled () const { return false; } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if __TBB_TASK_PRIORITY - //! Changes priority of the task group this task belongs to. - void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); } - - //! Retrieves current priority of the task group this task belongs to. - priority_t group_priority () const { return prefix().context->priority(); } - -#endif /* __TBB_TASK_PRIORITY */ - -private: - friend class interface5::internal::task_base; - friend class task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; -#if __TBB_TASK_GROUP_CONTEXT - friend class internal::allocate_root_with_context_proxy; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_additional_child_of_proxy; - - //! Get reference to corresponding task_prefix. - /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/ - internal::task_prefix& prefix( internal::version_tag* = NULL ) const { - return reinterpret_cast(const_cast(this))[-1]; - } -}; // class task - -//! task that does nothing. Useful for synchronization. -/** @ingroup task_scheduling */ -class empty_task: public task { - /*override*/ task* execute() { - return NULL; - } -}; - -//! @cond INTERNAL -namespace internal { - template - class function_task : public task { - F my_func; - /*override*/ task* execute() { - my_func(); - return NULL; - } - public: - function_task( const F& f ) : my_func(f) {} - }; -} // namespace internal -//! @endcond - -//! A list of children. -/** Used for method task::spawn_children - @ingroup task_scheduling */ -class task_list: internal::no_copy { -private: - task* first; - task** next_ptr; - friend class task; - friend class interface5::internal::task_base; -public: - //! Construct empty list - task_list() : first(NULL), next_ptr(&first) {} - - //! Destroys the list, but does not destroy the task objects. - ~task_list() {} - - //! True if list if empty; false otherwise. - bool empty() const {return !first;} - - //! Push task onto back of list. - void push_back( task& task ) { - task.prefix().next = NULL; - *next_ptr = &task; - next_ptr = &task.prefix().next; - } - - //! Pop the front task from the list. - task& pop_front() { - __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" ); - task* result = first; - first = result->prefix().next; - if( !first ) next_ptr = &first; - return *result; - } - - //! 
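
task_list batches root tasks for a single spawn_root_and_wait() call, which also clears the list. A hedged sketch reusing the FibTask from earlier (run_batch and results are hypothetical):

    void run_batch() {
        static long results[4];
        tbb::task_list batch;
        for (int i = 0; i < 4; ++i)
            batch.push_back(*new(tbb::task::allocate_root())
                                FibTask(20 + i, &results[i]));
        tbb::task::spawn_root_and_wait(batch);  // runs all four, then clears
    }
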
Clear the list - void clear() { - first=NULL; - next_ptr=&first; - } -}; - -inline void interface5::internal::task_base::spawn( task& t ) { - t.prefix().owner->spawn( t, t.prefix().next ); -} - -inline void interface5::internal::task_base::spawn( task_list& list ) { - if( task* t = list.first ) { - t->prefix().owner->spawn( *t, *list.next_ptr ); - list.clear(); - } -} - -inline void task::spawn_root_and_wait( task_list& root_list ) { - if( task* t = root_list.first ) { - t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr ); - root_list.clear(); - } -} - -} // namespace tbb - -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) { - return &tbb::internal::allocate_root_proxy::allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) { - tbb::internal::allocate_root_proxy::free( *static_cast(task) ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) { - p.free( *static_cast(task) ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) { - p.free( *static_cast(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) { - p.free( *static_cast(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) { - p.free( *static_cast(task) ); -} + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_task_H */ +#include "../oneapi/tbb/task.h" diff --git a/src/tbb/include/tbb/task_arena.h b/src/tbb/include/tbb/task_arena.h index ee04c1248..f6e34b3e6 100644 --- a/src/tbb/include/tbb/task_arena.h +++ b/src/tbb/include/tbb/task_arena.h @@ -1,256 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_task_arena_H -#define __TBB_task_arena_H - -#include "task.h" -#include "tbb_exception.h" -#if TBB_USE_THREADING_TOOLS -#include "atomic.h" // for as_atomic -#endif - -#if __TBB_TASK_ARENA - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! Internal to library. Should not be used by clients. - /** @ingroup task_scheduling */ - class arena; - class task_scheduler_observer_v3; -} // namespace internal -//! @endcond - -namespace interface7 { -//! @cond INTERNAL -namespace internal { -using namespace tbb::internal; //e.g. function_task from task.h - -class delegate_base : no_assign { -public: - virtual void operator()() const = 0; - virtual ~delegate_base() {} -}; - -template -class delegated_function : public delegate_base { - F &my_func; - /*override*/ void operator()() const { - my_func(); - } -public: - delegated_function ( F& f ) : my_func(f) {} -}; - -class task_arena_base { -protected: - //! NULL if not currently initialized. - internal::arena* my_arena; - -#if __TBB_TASK_GROUP_CONTEXT - //! default context of the arena - task_group_context *my_context; -#endif - - //! Concurrency level for deferred initialization - int my_max_concurrency; - - //! Reserved master slots - unsigned my_master_slots; - - //! Special settings - intptr_t my_version_and_traits; - - enum { - default_flags = 0 -#if __TBB_TASK_GROUP_CONTEXT - | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16 - , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly -#endif - }; - - task_arena_base(int max_concurrency, unsigned reserved_for_masters) - : my_arena(0) -#if __TBB_TASK_GROUP_CONTEXT - , my_context(0) -#endif - , my_max_concurrency(max_concurrency) - , my_master_slots(reserved_for_masters) - , my_version_and_traits(default_flags) - {} - - void __TBB_EXPORTED_METHOD internal_initialize( ); - void __TBB_EXPORTED_METHOD internal_terminate( ); - void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const; - void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const; - void __TBB_EXPORTED_METHOD internal_wait() const; - static int __TBB_EXPORTED_FUNC internal_current_slot(); -public: - //! Typedef for number of threads that is automatic. - static const int automatic = -1; // any value < 1 means 'automatic' - -}; - -} // namespace internal -//! 
@endcond + http://www.apache.org/licenses/LICENSE-2.0 -/** 1-to-1 proxy representation class of scheduler's arena - * Constructors set up settings only, real construction is deferred till the first method invocation - * Destructor only removes one of the references to the inner arena representation. - * Final destruction happens when all the references (and the work) are gone. - */ -class task_arena : public internal::task_arena_base { - friend class tbb::internal::task_scheduler_observer_v3; - bool my_initialized; - -public: - //! Creates task_arena with certain concurrency limits - /** Sets up settings only, real construction is deferred till the first method invocation - * @arg max_concurrency specifies total number of slots in arena where threads work - * @arg reserved_for_masters specifies number of slots to be used by master threads only. - * Value of 1 is default and reflects behavior of implicit arenas. - **/ - task_arena(int max_concurrency = automatic, unsigned reserved_for_masters = 1) - : task_arena_base(max_concurrency, reserved_for_masters) - , my_initialized(false) - {} - - //! Copies settings from another task_arena - task_arena(const task_arena &s) // copy settings but not the reference or instance - : task_arena_base(s.my_max_concurrency, s.my_master_slots) - , my_initialized(false) - {} - - //! Forces allocation of the resources for the task_arena as specified in constructor arguments - inline void initialize() { - if( !my_initialized ) { - internal_initialize(); -#if TBB_USE_THREADING_TOOLS - // Threading tools respect lock prefix but report false-positive data-race via plain store - internal::as_atomic(my_initialized).fetch_and_store(true); -#else - my_initialized = true; -#endif //TBB_USE_THREADING_TOOLS - } - } - - //! Overrides concurrency level and forces initialization of internal representation - inline void initialize(int max_concurrency, unsigned reserved_for_masters = 1) { - __TBB_ASSERT( !my_arena, "Impossible to modify settings of an already initialized task_arena"); - if( !my_initialized ) { - my_max_concurrency = max_concurrency; - my_master_slots = reserved_for_masters; - initialize(); - } - } - - //! Removes the reference to the internal arena representation. - //! Not thread safe wrt concurrent invocations of other methods. - inline void terminate() { - if( my_initialized ) { - internal_terminate(); - my_initialized = false; - } - } - - //! Removes the reference to the internal arena representation, and destroys the external object. - //! Not thread safe wrt concurrent invocations of other methods. - ~task_arena() { - terminate(); - } - - //! Returns true if the arena is active (initialized); false otherwise. - //! The name was chosen to match a task_scheduler_init method with the same semantics. - bool is_active() const { return my_initialized; } - - //! Enqueues a task into the arena to process a functor, and immediately returns. - //! Does not require the calling thread to join the arena - template - void enqueue( const F& f ) { - initialize(); -#if __TBB_TASK_GROUP_CONTEXT - internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task(f), 0 ); -#else - internal_enqueue( *new( task::allocate_root() ) internal::function_task(f), 0 ); -#endif - } - -#if __TBB_TASK_PRIORITY - //! Enqueues a task with priority p into the arena to process a functor f, and immediately returns. - //! 
Does not require the calling thread to join the arena - template - void enqueue( const F& f, priority_t p ) { - __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" ); - initialize(); -#if __TBB_TASK_GROUP_CONTEXT - internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task(f), (intptr_t)p ); -#else - internal_enqueue( *new( task::allocate_root() ) internal::function_task(f), (intptr_t)p ); -#endif - } -#endif// __TBB_TASK_PRIORITY - - //! Joins the arena and executes a functor, then returns - //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion - //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread - template - void execute(F& f) { - initialize(); - internal::delegated_function d(f); - internal_execute( d ); - } - - //! Joins the arena and executes a functor, then returns - //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion - //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread - template - void execute(const F& f) { - initialize(); - internal::delegated_function d(f); - internal_execute( d ); - } - -#if __TBB_EXTRA_DEBUG - //! Wait for all work in the arena to be completed - //! Even submitted by other application threads - //! Joins arena if/when possible (in the same way as execute()) - void debug_wait_until_empty() { - initialize(); - internal_wait(); - } -#endif //__TBB_EXTRA_DEBUG - - //! Returns the index, aka slot number, of the calling thread in its current arena - inline static int current_thread_index() { - return internal_current_slot(); - } -}; - -} // namespace interfaceX - -using interface7::task_arena; - -} // namespace tbb - -#endif /* __TBB_TASK_ARENA */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_task_arena_H */ +#include "../oneapi/tbb/task_arena.h" diff --git a/src/tbb/include/tbb/task_group.h b/src/tbb/include/tbb/task_group.h index 4cbe7d55b..2f0250397 100644 --- a/src/tbb/include/tbb/task_group.h +++ b/src/tbb/include/tbb/task_group.h @@ -1,222 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
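
The task_arena interface deleted above survives essentially unchanged in the oneTBB header now included in its place, so a sketch like this should compile on both sides of the patch (arena_demo is a hypothetical name):

    #include <tbb/parallel_for.h>
    #include <tbb/task_arena.h>

    void arena_demo() {
        tbb::task_arena arena(4);  // four slots, one reserved for the caller
        arena.execute([] {         // join the arena, or enqueue-and-wait
            tbb::parallel_for(0, 100, [](int) { /* work */ });
        });
        arena.enqueue([] { /* fire-and-forget work inside the arena */ });
    }
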
+ You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_task_group_H -#define __TBB_task_group_H - -#include "task.h" -#include "tbb_exception.h" - -#if __TBB_TASK_GROUP_CONTEXT - -namespace tbb { - -namespace internal { - template class task_handle_task; -} - -class task_group; -class structured_task_group; - -template -class task_handle : internal::no_assign { - template friend class internal::task_handle_task; - friend class task_group; - friend class structured_task_group; - - static const intptr_t scheduled = 0x1; - - F my_func; - intptr_t my_state; - - void mark_scheduled () { - // The check here is intentionally lax to avoid the impact of interlocked operation - if ( my_state & scheduled ) - internal::throw_exception( internal::eid_invalid_multiple_scheduling ); - my_state |= scheduled; - } -public: - task_handle( const F& f ) : my_func(f), my_state(0) {} - - void operator() () const { my_func(); } -}; - -enum task_group_status { - not_complete, - complete, - canceled -}; - -namespace internal { - -template -class task_handle_task : public task { - task_handle& my_handle; - /*override*/ task* execute() { - my_handle(); - return NULL; - } -public: - task_handle_task( task_handle& h ) : my_handle(h) { h.mark_scheduled(); } -}; - -class task_group_base : internal::no_copy { -protected: - empty_task* my_root; - task_group_context my_context; - - task& owner () { return *my_root; } - - template - task_group_status internal_run_and_wait( F& f ) { - __TBB_TRY { - if ( !my_context.is_group_execution_cancelled() ) - f(); - } __TBB_CATCH( ... ) { - my_context.register_pending_exception(); - } - return wait(); - } - - template - void internal_run( F& f ) { - owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) ); - } + http://www.apache.org/licenses/LICENSE-2.0 -public: - task_group_base( uintptr_t traits = 0 ) - : my_context(task_group_context::bound, task_group_context::default_traits | traits) - { - my_root = new( task::allocate_root(my_context) ) empty_task; - my_root->set_ref_count(1); - } - - ~task_group_base() __TBB_NOEXCEPT(false) { - if( my_root->ref_count() > 1 ) { - bool stack_unwinding_in_progress = std::uncaught_exception(); - // Always attempt to do proper cleanup to avoid inevitable memory corruption - // in case of missing wait (for the sake of better testability & debuggability) - if ( !is_canceling() ) - cancel(); - __TBB_TRY { - my_root->wait_for_all(); - } __TBB_CATCH (...) { - task::destroy(*my_root); - __TBB_RETHROW(); - } - task::destroy(*my_root); - if ( !stack_unwinding_in_progress ) - internal::throw_exception( internal::eid_missing_wait ); - } - else { - task::destroy(*my_root); - } - } - - template - void run( task_handle& h ) { - internal_run< task_handle, internal::task_handle_task >( h ); - } - - task_group_status wait() { - __TBB_TRY { - my_root->wait_for_all(); - } __TBB_CATCH( ... 
) { - my_context.reset(); - __TBB_RETHROW(); - } - if ( my_context.is_group_execution_cancelled() ) { - my_context.reset(); - return canceled; - } - return complete; - } - - bool is_canceling() { - return my_context.is_group_execution_cancelled(); - } - - void cancel() { - my_context.cancel_group_execution(); - } -}; // class task_group_base - -} // namespace internal - -class task_group : public internal::task_group_base { -public: - task_group () : task_group_base( task_group_context::concurrent_wait ) {} - -#if __SUNPRO_CC - template - void run( task_handle& h ) { - internal_run< task_handle, internal::task_handle_task >( h ); - } -#else - using task_group_base::run; -#endif - - template - void run( const F& f ) { - internal_run< const F, internal::function_task >( f ); - } - - template - task_group_status run_and_wait( const F& f ) { - return internal_run_and_wait( f ); - } - - template - task_group_status run_and_wait( task_handle& h ) { - h.mark_scheduled(); - return internal_run_and_wait< task_handle >( h ); - } -}; // class task_group - -class structured_task_group : public internal::task_group_base { -public: - template - task_group_status run_and_wait ( task_handle& h ) { - h.mark_scheduled(); - return internal_run_and_wait< task_handle >( h ); - } - - task_group_status wait() { - task_group_status res = task_group_base::wait(); - my_root->set_ref_count(1); - return res; - } -}; // class structured_task_group - -inline -bool is_current_task_group_canceling() { - return task::self().is_cancelled(); -} - -template -task_handle make_task( const F& f ) { - return task_handle( f ); -} - -} // namespace tbb - -#endif /* __TBB_TASK_GROUP_CONTEXT */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#endif /* __TBB_task_group_H */ +#include "../oneapi/tbb/task_group.h" diff --git a/src/tbb/include/tbb/task_scheduler_init.h b/src/tbb/include/tbb/task_scheduler_init.h deleted file mode 100644 index b49bddb89..000000000 --- a/src/tbb/include/tbb/task_scheduler_init.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
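
The task_group interface deleted above also carries over to oneTBB largely intact. A minimal sketch (sum_pair and expensive are placeholders):

    #include <tbb/task_group.h>

    int expensive(int);  // placeholder for real work

    int sum_pair(int a, int b) {
        int ra = 0, rb = 0;
        tbb::task_group g;
        g.run([&] { ra = expensive(a); });
        g.run([&] { rb = expensive(b); });
        return g.wait() == tbb::canceled ? 0 : ra + rb;
    }
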
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_task_scheduler_init_H -#define __TBB_task_scheduler_init_H - -#include "tbb_stddef.h" -#include "limits.h" - -namespace tbb { - -typedef std::size_t stack_size_type; - -//! @cond INTERNAL -namespace internal { - //! Internal to library. Should not be used by clients. - /** @ingroup task_scheduling */ - class scheduler; -} // namespace internal -//! @endcond - -//! Class delimiting the scope of task scheduler activity. -/** A thread can construct a task_scheduler_init object and keep it alive - while it uses TBB's tasking subsystem (including parallel algorithms). - - This class allows to customize properties of the TBB task pool to some extent. - For example it can limit concurrency level of parallel work initiated by the - given thread. It also can be used to specify stack size of the TBB worker threads, - though this setting is not effective if the thread pool has already been created. - - If a parallel construct is used without task_scheduler_init object previously - created, the scheduler will be initialized automatically with default settings, - and will persist until this thread exits. Default concurrency level is defined - as described in task_scheduler_init::initialize(). - @ingroup task_scheduling */ -class task_scheduler_init: internal::no_copy { - enum ExceptionPropagationMode { - propagation_mode_exact = 1u, - propagation_mode_captured = 2u, - propagation_mode_mask = propagation_mode_exact | propagation_mode_captured - }; -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - enum { - wait_workers_in_terminate_flag = 128u - }; -#endif - - /** NULL if not currently initialized. */ - internal::scheduler* my_scheduler; -public: - - //! Typedef for number of threads that is automatic. - static const int automatic = -1; - - //! Argument to initialize() or constructor that causes initialization to be deferred. - static const int deferred = -2; - - //! Ensure that scheduler exists for this thread - /** A value of -1 lets TBB decide on the number of threads, which is usually - maximal hardware concurrency for this process, that is the number of logical - CPUs on the machine (possibly limited by the processor affinity mask of this - process (Windows) or of this thread (Linux, FreeBSD). It is preferable option - for production code because it helps to avoid nasty surprises when several - TBB based components run side-by-side or in a nested fashion inside the same - process. - - The number_of_threads is ignored if any other task_scheduler_inits - currently exist. A thread may construct multiple task_scheduler_inits. - Doing so does no harm because the underlying scheduler is reference counted. */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic ); - - //! The overloaded method with stack size parameter - /** Overloading is necessary to preserve ABI compatibility */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size ); - - //! Inverse of method initialize. - void __TBB_EXPORTED_METHOD terminate(); - - //! Shorthand for default constructor followed by call to initialize(number_of_threads). 
-#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0, bool wait_workers_in_terminate = false ) : my_scheduler(NULL) -#else - task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL) -#endif - { - // Two lowest order bits of the stack size argument may be taken to communicate - // default exception propagation mode of the client to be used when the - // client manually creates tasks in the master thread and does not use - // explicit task group context object. This is necessary because newer - // TBB binaries with exact propagation enabled by default may be used - // by older clients that expect tbb::captured_exception wrapper. - // All zeros mean old client - no preference. - __TBB_ASSERT( !(thread_stack_size & propagation_mode_mask), "Requested stack size is not aligned" ); -#if TBB_USE_EXCEPTIONS - thread_stack_size |= TBB_USE_CAPTURED_EXCEPTION ? propagation_mode_captured : propagation_mode_exact; -#endif /* TBB_USE_EXCEPTIONS */ -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - if (wait_workers_in_terminate) - my_scheduler = (internal::scheduler*)wait_workers_in_terminate_flag; -#endif - initialize( number_of_threads, thread_stack_size ); - } - - //! Destroy scheduler for this thread if thread has no other live task_scheduler_inits. - ~task_scheduler_init() { - if( my_scheduler ) - terminate(); - internal::poison_pointer( my_scheduler ); - } - //! Returns the number of threads TBB scheduler would create if initialized by default. - /** Result returned by this method does not depend on whether the scheduler - has already been initialized. - - Because tbb 2.0 does not support blocking tasks yet, you may use this method - to boost the number of threads in the tbb's internal pool, if your tasks are - doing I/O operations. The optimal number of additional threads depends on how - much time your tasks spend in the blocked state. - - Before TBB 3.0 U4 this method returned the number of logical CPU in the - system. Currently on Windows, Linux and FreeBSD it returns the number of - logical CPUs available to the current process in accordance with its affinity - mask. - - NOTE: The return value of this method never changes after its first invocation. - This means that changes in the process affinity mask that took place after - this method was first invoked will not affect the number of worker threads - in the TBB worker threads pool. */ - static int __TBB_EXPORTED_FUNC default_num_threads (); - - //! Returns true if scheduler is active (initialized); false otherwise - bool is_active() const { return my_scheduler != NULL; } -}; - -} // namespace tbb - -#endif /* __TBB_task_scheduler_init_H */ diff --git a/src/tbb/include/tbb/task_scheduler_observer.h b/src/tbb/include/tbb/task_scheduler_observer.h index 6c8ac4da0..9236f4cdf 100644 --- a/src/tbb/include/tbb/task_scheduler_observer.h +++ b/src/tbb/include/tbb/task_scheduler_observer.h @@ -1,167 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2021 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
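
Typical legacy usage of the class being deleted here, plus the rough oneTBB replacement this migration points at (shown as a comment, since task_scheduler_init has no direct successor):

    #include <tbb/task_scheduler_init.h>

    void legacy_limit_threads() {
        tbb::task_scheduler_init init(4);  // cap this thread's parallelism
        // ... run parallel algorithms here ...
    }  // destructor releases the scheduler reference

    // oneTBB removed task_scheduler_init; the closest equivalent is:
    //   #include <oneapi/tbb/global_control.h>
    //   tbb::global_control gc(tbb::global_control::max_allowed_parallelism, 4);
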
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_task_scheduler_observer_H -#define __TBB_task_scheduler_observer_H - -#include "atomic.h" -#if __TBB_TASK_ARENA -#include "task_arena.h" -#endif //__TBB_TASK_ARENA - -#if __TBB_SCHEDULER_OBSERVER - -namespace tbb { -namespace interface6 { -class task_scheduler_observer; -} -namespace internal { - -class observer_proxy; -class observer_list; - -class task_scheduler_observer_v3 { - friend class observer_proxy; - friend class observer_list; - friend class interface6::task_scheduler_observer; - - //! Pointer to the proxy holding this observer. - /** Observers are proxied by the scheduler to maintain persistent lists of them. **/ - observer_proxy* my_proxy; - - //! Counter preventing the observer from being destroyed while in use by the scheduler. - /** Valid only when observation is on. **/ - atomic my_busy_count; - -public: - //! Enable or disable observation - /** For local observers the method can be used only when the current thread - has the task scheduler initialized or is attached to an arena. - - Repeated calls with the same state are no-ops. **/ - void __TBB_EXPORTED_METHOD observe( bool state=true ); - - //! Returns true if observation is enabled, false otherwise. - bool is_observing() const {return my_proxy!=NULL;} - - //! Construct observer with observation disabled. - task_scheduler_observer_v3() : my_proxy(NULL) { my_busy_count.store(0); } - - //! Entry notification - /** Invoked from inside observe(true) call and whenever a worker enters the arena - this observer is associated with. If a thread is already in the arena when - the observer is activated, the entry notification is called before it - executes the first stolen task. - - Obsolete semantics. For global observers it is called by a thread before - the first steal since observation became enabled. **/ - virtual void on_scheduler_entry( bool /*is_worker*/ ) {} - - //! Exit notification - /** Invoked from inside observe(false) call and whenever a worker leaves the - arena this observer is associated with. + http://www.apache.org/licenses/LICENSE-2.0 - Obsolete semantics. For global observers it is called by a thread before - the first steal since observation became enabled. **/ - virtual void on_scheduler_exit( bool /*is_worker*/ ) {} - - //! Destructor automatically switches observation off if it is enabled. 
- virtual ~task_scheduler_observer_v3() { if(my_proxy) observe(false);} -}; - -} // namespace internal - -#if __TBB_ARENA_OBSERVER -namespace interface6 { -class task_scheduler_observer : public internal::task_scheduler_observer_v3 { - friend class internal::task_scheduler_observer_v3; - friend class internal::observer_proxy; - friend class internal::observer_list; - - /** Negative numbers with the largest absolute value to minimize probability - of coincidence in case of a bug in busy count usage. **/ - // TODO: take more high bits for version number - static const intptr_t v6_trait = (intptr_t)((~(uintptr_t)0 >> 1) + 1); - - //! contains task_arena pointer or tag indicating local or global semantics of the observer - intptr_t my_context_tag; - enum { global_tag = 0, implicit_tag = 1 }; - -public: - //! Construct local or global observer in inactive state (observation disabled). - /** For a local observer entry/exit notifications are invoked whenever a worker - thread joins/leaves the arena of the observer's owner thread. If a thread is - already in the arena when the observer is activated, the entry notification is - called before it executes the first stolen task. **/ - /** TODO: Obsolete. - Global observer semantics is obsolete as it violates master thread isolation - guarantees and is not composable. Thus the current default behavior of the - constructor is obsolete too and will be changed in one of the future versions - of the library. **/ - task_scheduler_observer( bool local = false ) { - my_context_tag = local? implicit_tag : global_tag; - } - -#if __TBB_TASK_ARENA - //! Construct local observer for a given arena in inactive state (observation disabled). - /** entry/exit notifications are invoked whenever a thread joins/leaves arena. - If a thread is already in the arena when the observer is activated, the entry notification - is called before it executes the first stolen task. **/ - task_scheduler_observer( task_arena & a) { - my_context_tag = (intptr_t)&a; - } -#endif //__TBB_TASK_ARENA - - /** Destructor protects instance of the observer from concurrent notification. - It is recommended to disable observation before destructor of a derived class starts, - otherwise it can lead to concurrent notification callback on partly destroyed object **/ - virtual ~task_scheduler_observer() { if(my_proxy) observe(false); } - - //! Enable or disable observation - /** Warning: concurrent invocations of this method are not safe. - Repeated calls with the same state are no-ops. **/ - void observe( bool state=true ) { - if( state && !my_proxy ) { - __TBB_ASSERT( !my_busy_count, "Inconsistent state of task_scheduler_observer instance"); - my_busy_count.store(v6_trait); - } - internal::task_scheduler_observer_v3::observe(state); - } - - //! Return commands for may_sleep() - enum { keep_awake = false, allow_sleep = true }; - - //! The callback can be invoked by a worker thread before it goes to sleep. - /** If it returns false ('keep_awake'), the thread will keep spinning and looking for work. - It will not be called for master threads. 
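
A sketch of deriving from the legacy observer above; per the destructor comment, observation is disabled before the derived object is torn down (LoggingObserver is illustrative):

    class LoggingObserver : public tbb::task_scheduler_observer {
    public:
        LoggingObserver()  { observe(true);  }  // start receiving callbacks
        ~LoggingObserver() { observe(false); }  // stop before destruction
        void on_scheduler_entry(bool is_worker) { (void)is_worker; }
        void on_scheduler_exit(bool is_worker)  { (void)is_worker; }
    };
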
-    virtual bool may_sleep() { return allow_sleep; }
-};
-
-} //namespace interface6
-using interface6::task_scheduler_observer;
-#else /*__TBB_ARENA_OBSERVER*/
-typedef tbb::internal::task_scheduler_observer_v3 task_scheduler_observer;
-#endif /*__TBB_ARENA_OBSERVER*/
-
-} // namespace tbb
-
-#endif /* __TBB_SCHEDULER_OBSERVER */
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-#endif /* __TBB_task_scheduler_observer_H */
+#include "../oneapi/tbb/task_scheduler_observer.h"
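For reference, code built against the interface deleted above subclassed it and toggled observe(). A minimal sketch of that classic TBB 4.x usage; the thread_logger name and the printf bodies are illustrative, not from this repository:

#include <cstdio>
#include "tbb/task_scheduler_observer.h"

// Sketch: log threads entering/leaving the scheduler.
class thread_logger : public tbb::task_scheduler_observer {
public:
    thread_logger() { observe(true); }    // enable observation
    ~thread_logger() { observe(false); }  // disable before destruction, per the doc comment above
    /*override*/ void on_scheduler_entry( bool is_worker ) {
        std::printf("scheduler entry (worker=%d)\n", (int)is_worker);
    }
    /*override*/ void on_scheduler_exit( bool is_worker ) {
        std::printf("scheduler exit (worker=%d)\n", (int)is_worker);
    }
};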
diff --git a/src/tbb/include/tbb/tbb.h b/src/tbb/include/tbb/tbb.h
index 8a8a8dfa8..e443b8f1c 100644
--- a/src/tbb/include/tbb/tbb.h
+++ b/src/tbb/include/tbb/tbb.h
@@ -1,82 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_tbb_H
-#define __TBB_tbb_H
-
-/**
-    This header bulk-includes declarations or definitions of all the functionality
-    provided by TBB (save for malloc dependent headers).
+    http://www.apache.org/licenses/LICENSE-2.0
-
-    If you use only a few TBB constructs, consider including specific headers only.
-    Any header listed below can be included independently of others.
-**/
-
-#if TBB_PREVIEW_AGGREGATOR
-#include "aggregator.h"
-#endif
-#include "aligned_space.h"
-#include "atomic.h"
-#include "blocked_range.h"
-#include "blocked_range2d.h"
-#include "blocked_range3d.h"
-#include "cache_aligned_allocator.h"
-#include "combinable.h"
-#include "concurrent_hash_map.h"
-#if TBB_PREVIEW_CONCURRENT_LRU_CACHE
-#include "concurrent_lru_cache.h"
-#endif
-#include "concurrent_priority_queue.h"
-#include "concurrent_queue.h"
-#include "concurrent_unordered_map.h"
-#include "concurrent_unordered_set.h"
-#include "concurrent_vector.h"
-#include "critical_section.h"
-#include "enumerable_thread_specific.h"
-#include "flow_graph.h"
-#include "mutex.h"
-#include "null_mutex.h"
-#include "null_rw_mutex.h"
-#include "parallel_do.h"
-#include "parallel_for.h"
-#include "parallel_for_each.h"
-#include "parallel_invoke.h"
-#include "parallel_reduce.h"
-#include "parallel_scan.h"
-#include "parallel_sort.h"
-#include "partitioner.h"
-#include "pipeline.h"
-#include "queuing_mutex.h"
-#include "queuing_rw_mutex.h"
-#include "reader_writer_lock.h"
-#include "recursive_mutex.h"
-#include "spin_mutex.h"
-#include "spin_rw_mutex.h"
-#include "task.h"
-#include "task_arena.h"
-#include "task_group.h"
-#include "task_scheduler_init.h"
-#include "task_scheduler_observer.h"
-#include "tbb_allocator.h"
-#include "tbb_exception.h"
-#include "tbb_thread.h"
-#include "tick_count.h"
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-#endif /* __TBB_tbb_H */
+#include "../oneapi/tbb.h"
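The deleted doc comment advises pulling in specific headers when only a few constructs are used. A hypothetical sketch of that advice (the scale function is illustrative):

#include <cstddef>
// Instead of the umbrella header:
//   #include "tbb/tbb.h"
// include only the constructs actually used:
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"

void scale(double* x, std::size_t n, double k) {
    tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n),
        [=](const tbb::blocked_range<std::size_t>& r) {
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                x[i] *= k;
        });
}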
diff --git a/src/tbb/include/tbb/tbb_allocator.h b/src/tbb/include/tbb/tbb_allocator.h
index d9480f2ad..81ab9d33b 100644
--- a/src/tbb/include/tbb/tbb_allocator.h
+++ b/src/tbb/include/tbb/tbb_allocator.h
@@ -1,218 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_tbb_allocator_H
-#define __TBB_tbb_allocator_H
-
-#include "tbb_stddef.h"
-#include <new>
-#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-    #include <utility> // std::forward
-#endif
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
-    #pragma warning (push)
-    #pragma warning (disable: 4530)
-#endif
-
-#include <cstring>
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    #pragma warning (pop)
-#endif
-
-namespace tbb {
-
-//! @cond INTERNAL
-namespace internal {
-
-    //! Deallocates memory using FreeHandler
-    /** The function uses scalable_free if scalable allocator is available and free if not*/
-    void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p );
-
-    //! Allocates memory using MallocHandler
-    /** The function uses scalable_malloc if scalable allocator is available and malloc if not*/
-    void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n );
-
-    //! Returns true if standard malloc/free are used to work with memory.
-    bool __TBB_EXPORTED_FUNC is_malloc_used_v3();
-}
-//! @endcond
-
-#if _MSC_VER && !defined(__INTEL_COMPILER)
-    // Workaround for erroneous "unreferenced parameter" warning in method destroy.
-    #pragma warning (push)
-    #pragma warning (disable: 4100)
-#endif
-
-//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
-/** The class selects the best memory allocation mechanism available
-    from scalable_malloc and standard malloc.
-    The members are ordered the same way they are in section 20.4.1
-    of the ISO C++ standard.
-    @ingroup memory_allocation */
-template<typename T>
-class tbb_allocator {
-public:
-    typedef typename internal::allocator_type<T>::value_type value_type;
-    typedef value_type* pointer;
-    typedef const value_type* const_pointer;
-    typedef value_type& reference;
-    typedef const value_type& const_reference;
-    typedef size_t size_type;
-    typedef ptrdiff_t difference_type;
-    template<typename U> struct rebind {
-        typedef tbb_allocator<U> other;
-    };
-
-    //! Specifies current allocator
-    enum malloc_type {
-        scalable,
-        standard
-    };
-
-    tbb_allocator() throw() {}
-    tbb_allocator( const tbb_allocator& ) throw() {}
-    template<typename U> tbb_allocator(const tbb_allocator<U>&) throw() {}
+       http://www.apache.org/licenses/LICENSE-2.0
-
-    pointer address(reference x) const {return &x;}
-    const_pointer address(const_reference x) const {return &x;}
-
-    //! Allocate space for n objects.
-    pointer allocate( size_type n, const void* /*hint*/ = 0) {
-        return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) ));
-    }
-
-    //! Free previously allocated block of memory.
-    void deallocate( pointer p, size_type ) {
-        internal::deallocate_via_handler_v3(p);
-    }
-
-    //! Largest value for which method allocate might succeed.
-    size_type max_size() const throw() {
-        size_type max = static_cast<size_type>(-1) / sizeof (value_type);
-        return (max > 0 ? max : 1);
-    }
-
-    //! Copy-construct value at location pointed to by p.
-#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-    template<typename U, typename... Args>
-    void construct(U *p, Args&&... args)
-        { ::new((void *)p) U(std::forward<Args>(args)...); }
-#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-    void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));}
-#endif
-    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}
-#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
-
-    //! Destroy value at location pointed to by p.
-    void destroy( pointer p ) {p->~value_type();}
-
-    //! Returns current allocator
-    static malloc_type allocator_type() {
-        return internal::is_malloc_used_v3() ? standard : scalable;
-    }
-};
-
-#if _MSC_VER && !defined(__INTEL_COMPILER)
-    #pragma warning (pop)
-#endif // warning 4100 is back
-
-//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
-/** @ingroup memory_allocation */
-template<>
-class tbb_allocator<void> {
-public:
-    typedef void* pointer;
-    typedef const void* const_pointer;
-    typedef void value_type;
-    template<typename U> struct rebind {
-        typedef tbb_allocator<U> other;
-    };
-};
-
-template<typename T, typename U>
-inline bool operator==( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return true;}
-
-template<typename T, typename U>
-inline bool operator!=( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return false;}
-
-//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
-/** The class is an adapter over an actual allocator that fills the allocation
-    using memset function with template argument C as the value.
-    The members are ordered the same way they are in section 20.4.1
-    of the ISO C++ standard.
-    @ingroup memory_allocation */
-template <typename T, template<typename X> class Allocator = tbb_allocator>
-class zero_allocator : public Allocator<T>
-{
-public:
-    typedef Allocator<T> base_allocator_type;
-    typedef typename base_allocator_type::value_type value_type;
-    typedef typename base_allocator_type::pointer pointer;
-    typedef typename base_allocator_type::const_pointer const_pointer;
-    typedef typename base_allocator_type::reference reference;
-    typedef typename base_allocator_type::const_reference const_reference;
-    typedef typename base_allocator_type::size_type size_type;
-    typedef typename base_allocator_type::difference_type difference_type;
-    template<typename U> struct rebind {
-        typedef zero_allocator<U, Allocator> other;
-    };
-
-    zero_allocator() throw() { }
-    zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { }
-    template<typename U>
-    zero_allocator(const zero_allocator<U> &a) throw() : base_allocator_type( Allocator<U>( a ) ) { }
-
-    pointer allocate(const size_type n, const void *hint = 0 ) {
-        pointer ptr = base_allocator_type::allocate( n, hint );
-        std::memset( ptr, 0, n * sizeof(value_type) );
-        return ptr;
-    }
-};
-
-//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
-/** @ingroup memory_allocation */
-template<template<typename T> class Allocator>
-class zero_allocator<void, Allocator> : public Allocator<void> {
-public:
-    typedef Allocator<void> base_allocator_type;
-    typedef typename base_allocator_type::value_type value_type;
-    typedef typename base_allocator_type::pointer pointer;
-    typedef typename base_allocator_type::const_pointer const_pointer;
-    template<typename U> struct rebind {
-        typedef zero_allocator<U, Allocator> other;
-    };
-};
-
-template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>
-inline bool operator==( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) {
-    return static_cast< B1<T1> >(a) == static_cast< B2<T2> >(b);
-}
-template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>
-inline bool operator!=( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) {
-    return static_cast< B1<T1> >(a) != static_cast< B2<T2> >(b);
-}
-
-} // namespace tbb
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-#endif /* __TBB_tbb_allocator_H */
+#include "../oneapi/tbb/tbb_allocator.h"
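Both class templates removed above meet the standard allocator requirements, so they drop straight into STL containers. An illustrative sketch, assuming only what the header itself declares (the typedef names are hypothetical):

#include <vector>
#include "tbb/tbb_allocator.h"

// tbb_allocator routes through scalable_malloc when the scalable allocator
// is available and falls back to plain malloc/free otherwise.
typedef std::vector<int, tbb::tbb_allocator<int> > fast_vector;

// zero_allocator wraps another allocator (tbb_allocator by default) and
// memsets every allocation to zero before it is used.
typedef std::vector<int, tbb::zero_allocator<int> > zeroed_vector;

fast_vector v(1024);
zeroed_vector z(1024);   // storage arrives zero-filled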
diff --git a/src/tbb/include/tbb/tbb_config.h b/src/tbb/include/tbb/tbb_config.h
deleted file mode 100644
index ed6d83c00..000000000
--- a/src/tbb/include/tbb/tbb_config.h
+++ /dev/null
@@ -1,639 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_tbb_config_H
-#define __TBB_tbb_config_H
-
-/** This header is supposed to contain macro definitions and C style comments only.
-    The macros defined here are intended to control such aspects of TBB build as
-    - presence of compiler features
-    - compilation modes
-    - feature sets
-    - known compiler/platform issues
-**/
-
-/*Check which standard library we use on OS X.*/
-/*__TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed.*/
-#if !defined(__TBB_SYMBOL) && __APPLE__
-    #include <cstddef>
-#endif
-
-// note that when ICC is in use __TBB_GCC_VERSION might not closely match GCC version on the machine
-#define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-
-#if __clang__
-    /**according to clang documentation version can be vendor specific **/
-    #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
-#endif
-
-/** Preprocessor symbols to determine HW architecture **/
-
-#if _WIN32||_WIN64
-#   if defined(_M_X64)||defined(__x86_64__)  // the latter for MinGW support
-#       define __TBB_x86_64 1
-#   elif defined(_M_IA64)
-#       define __TBB_ipf 1
-#   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support
-#       define __TBB_x86_32 1
-#   else
-#       define __TBB_generic_arch 1
-#   endif
-#else /* Assume generic Unix */
-#   if !__linux__ && !__APPLE__
-#       define __TBB_generic_os 1
-#   endif
-#   if __x86_64__
-#       define __TBB_x86_64 1
-#   elif __ia64__
-#       define __TBB_ipf 1
-#   elif __i386__||__i386  // __i386 is for Sun OS
-#       define __TBB_x86_32 1
-#   else
-#       define __TBB_generic_arch 1
-#   endif
-#endif
-
-#if __MIC__ || __MIC2__
-#define __TBB_DEFINE_MIC 1
-#endif
-
-#define __TBB_TSX_AVAILABLE (__TBB_x86_32 || __TBB_x86_64) && !__TBB_DEFINE_MIC
-
-/** Presence of compiler features **/
-
-#if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811
-/* Intel(R) Composer XE 2011 Update 6
incorrectly sets __INTEL_COMPILER. Fix it. */ - #undef __INTEL_COMPILER - #define __INTEL_COMPILER 1210 -#endif - -#if __TBB_GCC_VERSION >= 40400 && !defined(__INTEL_COMPILER) - /** warning suppression pragmas available in GCC since 4.4 **/ - #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 -#endif - -/* Select particular features of C++11 based on compiler version. - ICC 12.1 (Linux), GCC 4.3 and higher, clang 2.9 and higher - set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode. - - Compilers that mimics other compilers (ICC, clang) must be processed before - compilers they mimic (GCC, MSVC). - - TODO: The following conditions should be extended when new compilers/runtimes - support added. - */ - -#if __INTEL_COMPILER - /** C++11 mode detection macros for Intel C++ compiler (enabled by -std=c++0x option): - __INTEL_CXX11_MODE__ for version >=13.0 - __STDC_HOSTED__ for version >=12.0 on Windows, - __GXX_EXPERIMENTAL_CXX0X__ for version >=12.0 on Linux and OS X. **/ - // On Windows, C++11 features supported by Visual Studio 2010 and higher are enabled by default - #ifndef __INTEL_CXX11_MODE__ - #define __INTEL_CXX11_MODE__ ((_MSC_VER && __STDC_HOSTED__) || __GXX_EXPERIMENTAL_CXX0X__) - // TODO: check if more conditions can be simplified with the above macro - #endif - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__INTEL_CXX11_MODE__ && __VARIADIC_TEMPLATES) - // Both r-value reference support in compiler and std::move/std::forward - // presence in C++ standard library is checked. - #define __TBB_CPP11_RVALUE_REF_PRESENT ((__GXX_EXPERIMENTAL_CXX0X__ && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION) || _MSC_VER >= 1600) && __INTEL_COMPILER >= 1200) - #if _MSC_VER >= 1600 - #define __TBB_EXCEPTION_PTR_PRESENT ( __INTEL_COMPILER > 1300 \ - /*ICC 12.1 Upd 10 and 13 beta Upd 2 fixed exception_ptr linking issue*/ \ - || (__INTEL_COMPILER == 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ - || (__INTEL_COMPILER == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) ) - /** libstdc++ that comes with GCC 4.6 use C++11 features not supported by ICC 12.1. - * Because of that ICC 12.1 does not support C++11 mode with with gcc 4.6 (or higher), - * and therefore does not define __GXX_EXPERIMENTAL_CXX0X__ macro **/ - #elif __TBB_GCC_VERSION >= 40404 && __TBB_GCC_VERSION < 40600 - #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1200) - #elif __TBB_GCC_VERSION >= 40600 - #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1300) - #else - #define __TBB_EXCEPTION_PTR_PRESENT 0 - #endif - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)) - #define __TBB_STATIC_ASSERT_PRESENT (__INTEL_CXX11_MODE__ || _MSC_VER >= 1600) - #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600 || (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300)) - /**Intel C++ compiler 14.0 crashes on using __has_include. When it fixed, condition will need to be updated. 
**/
-    #if (__clang__ && __INTEL_COMPILER > 1400)
-        #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<initializer_list>))
-            #define __TBB_INITIALIZER_LISTS_PRESENT 1
-        #endif
-    #else
-        /** TODO: when MSVC2013 is supported by Intel C++ compiler, it will be enabled silently by compiler, so rule will need to be updated.**/
-        #define __TBB_INITIALIZER_LISTS_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GCC_VERSION >= 40400 || _LIBCPP_VERSION)
-    #endif
-
-    #define __TBB_CONSTEXPR_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400
-    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200
-    /** ICC seems to disable support of noexcept event in c++11 when compiling in compatibility mode for gcc <4.6 **/
-    #define __TBB_NOEXCEPT_PRESENT __INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1300 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION || _MSC_VER)
-    #define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1310 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION))
-    #define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210)
-    #define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210)
-#elif __clang__
-//TODO: these options need to be rechecked
-/** on OS X* the only way to get C++11 is to use clang. For library features (e.g. exception_ptr) libc++ is also
- *  required. So there is no need to check GCC version for clang**/
-    #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__has_feature(__cxx_variadic_templates__))
-    #define __TBB_CPP11_RVALUE_REF_PRESENT (__has_feature(__cxx_rvalue_references__) && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION))
-/** TODO: extend exception_ptr related conditions to cover libstdc++ **/
-    #define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION)
-    #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION)
-    #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_static_assert__)
-    /**Clang (preprocessor) has problems with dealing with expression having __has_include in #ifs
-     * used inside C++ code. (At least version that comes with OS X 10.8 : Apple LLVM version 4.2 (clang-425.0.28) (based on LLVM 3.2svn)) **/
-    #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include(<tuple>))
-        #define __TBB_CPP11_TUPLE_PRESENT 1
-    #endif
-    #if (__has_feature(__cxx_generalized_initializers__) && __has_include(<initializer_list>))
-        #define __TBB_INITIALIZER_LISTS_PRESENT 1
-    #endif
-    #define __TBB_CONSTEXPR_PRESENT __has_feature(__cxx_constexpr__)
-    #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_defaulted_functions__) && __has_feature(__cxx_deleted_functions__))
-    /**For some unknown reason __has_feature(__cxx_noexcept) does not yield true for all cases. Compiler bug ?
**/ - #define __TBB_NOEXCEPT_PRESENT (__cplusplus >= 201103L) - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (__has_feature(__cxx_range_for__) && _LIBCPP_VERSION) - #define __TBB_CPP11_AUTO_PRESENT __has_feature(__cxx_auto_type__) - #define __TBB_CPP11_DECLTYPE_PRESENT __has_feature(__cxx_decltype__) -#elif __GNUC__ - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X__ - #define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CXX0X__ - /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIBCXX_ATOMIC_BUILTINS_4, which is a prerequisite - for exception_ptr but cannot be used in this file because it is defined in a header, not by the compiler. - If the compiler has no atomic intrinsics, the C++ library should not expect those as well. **/ - #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) - #define __TBB_STATIC_ASSERT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) - #define __TBB_CPP11_TUPLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) - #define __TBB_INITIALIZER_LISTS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - /** gcc seems have to support constexpr from 4.4 but tests in (test_atomic) seeming reasonable fail to compile prior 4.6**/ - #define __TBB_CONSTEXPR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - #define __TBB_NOEXCEPT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) - #define __TBB_CPP11_AUTO_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) - #define __TBB_CPP11_DECLTYPE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) -#elif _MSC_VER - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800) - #define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1600) - #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) - #define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600) - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1700) - #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) - #define __TBB_INITIALIZER_LISTS_PRESENT (_MSC_VER >= 1800) - #define __TBB_CONSTEXPR_PRESENT 0 - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (_MSC_VER >= 1800) - #define __TBB_NOEXCEPT_PRESENT 0 /*for _MSC_VER == 1800*/ - #define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700) - #define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600) - #define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600) -#else - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 - #define __TBB_CPP11_RVALUE_REF_PRESENT 0 - #define __TBB_EXCEPTION_PTR_PRESENT 0 - #define __TBB_STATIC_ASSERT_PRESENT 0 - #define __TBB_MAKE_EXCEPTION_PTR_PRESENT 0 - #define __TBB_CPP11_TUPLE_PRESENT 0 - #define __TBB_INITIALIZER_LISTS_PRESENT 0 - #define __TBB_CONSTEXPR_PRESENT 0 - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 - #define __TBB_NOEXCEPT_PRESENT 0 - #define __TBB_CPP11_STD_BEGIN_END_PRESENT 0 - #define __TBB_CPP11_AUTO_PRESENT 0 - #define __TBB_CPP11_DECLTYPE_PRESENT 0 -#endif - -// C++11 standard library features - -#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700) -#define __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT 
(__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300 || _MSC_VER >= 1600) -//TODO: Probably more accurate way is to analyze version of stdlibc++ via__GLIBCXX__ instead of __TBB_GCC_VERSION -#define __TBB_ALLOCATOR_TRAITS_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION || _MSC_VER >= 1700 || \ - __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 && !(__TBB_GCC_VERSION == 40700 && __TBB_DEFINE_MIC) \ - ) - -//TODO: not clear how exactly this macro affects exception_ptr - investigate -// On linux ICC fails to find existing std::exception_ptr in libstdc++ without this define -#if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) - #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 -#endif - -// Work around a bug in MinGW32 -#if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_BUILTINS_4) - #define _GLIBCXX_ATOMIC_BUILTINS_4 -#endif - -#if __GNUC__ || __SUNPRO_CC || __IBMCPP__ - /* ICC defines __GNUC__ and so is covered */ - #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1 -#elif _MSC_VER && (_MSC_VER >= 1300 || __INTEL_COMPILER) - #define __TBB_DECLSPEC_ALIGN_PRESENT 1 -#endif - -/* Actually ICC supports gcc __sync_* intrinsics starting 11.1, - * but 64 bit support for 32 bit target comes in later ones*/ -/* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */ -#if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200 - /** built-in atomics available in GCC since 4.1.2 **/ - #define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1 -#endif - -#if __INTEL_COMPILER >= 1200 - /** built-in C++11 style atomics available in ICC since 12.0 **/ - #define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1 -#endif - -#define __TBB_TSX_INTRINSICS_PRESENT ((__RTM__ || _MSC_VER>=1700 || __INTEL_COMPILER>=1300) && !__TBB_DEFINE_MIC && !__ANDROID__) - -/** User controlled TBB features & modes **/ - -#ifndef TBB_USE_DEBUG -#ifdef _DEBUG -#define TBB_USE_DEBUG _DEBUG -#else -#define TBB_USE_DEBUG 0 -#endif -#endif /* TBB_USE_DEBUG */ - -#ifndef TBB_USE_ASSERT -#define TBB_USE_ASSERT TBB_USE_DEBUG -#endif /* TBB_USE_ASSERT */ - -#ifndef TBB_USE_THREADING_TOOLS -#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG -#endif /* TBB_USE_THREADING_TOOLS */ - -#ifndef TBB_USE_PERFORMANCE_WARNINGS -#ifdef TBB_PERFORMANCE_WARNINGS -#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS -#else -#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG -#endif /* TBB_PEFORMANCE_WARNINGS */ -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - -#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX) - #if TBB_USE_EXCEPTIONS - #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. - #elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 0 - #endif -#elif !defined(TBB_USE_EXCEPTIONS) - #if __TBB_DEFINE_MIC - #define TBB_USE_EXCEPTIONS 0 - #else - #define TBB_USE_EXCEPTIONS 1 - #endif -#elif TBB_USE_EXCEPTIONS && __TBB_DEFINE_MIC - #error Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. -#endif - -#ifndef TBB_IMPLEMENT_CPP0X - /** By default, use C++11 classes if available **/ - #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ - #define TBB_IMPLEMENT_CPP0X 0 - #elif __clang__ && __cplusplus >= 201103L - //TODO: consider introducing separate macros for each file? 
-        //prevent injection of corresponding tbb names into std:: namespace if native headers are present
-        #if __has_include(<thread>) || __has_include(<condition_variable>)
-            #define TBB_IMPLEMENT_CPP0X 0
-        #else
-            #define TBB_IMPLEMENT_CPP0X 1
-        #endif
-    #elif _MSC_VER>=1700
-        #define TBB_IMPLEMENT_CPP0X 0
-    #elif __STDCPP_THREADS__
-        #define TBB_IMPLEMENT_CPP0X 0
-    #else
-        #define TBB_IMPLEMENT_CPP0X 1
-    #endif
-#endif /* TBB_IMPLEMENT_CPP0X */
-
-/* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as it is used as C++ const */
-#ifndef TBB_USE_CAPTURED_EXCEPTION
-    /** IA-64 architecture pre-built TBB binaries do not support exception_ptr. **/
-    #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__ia64__)
-        #define TBB_USE_CAPTURED_EXCEPTION 0
-    #else
-        #define TBB_USE_CAPTURED_EXCEPTION 1
-    #endif
-#else /* defined TBB_USE_CAPTURED_EXCEPTION */
-    #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT
-        #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception.
-    #endif
-#endif /* defined TBB_USE_CAPTURED_EXCEPTION */
-
-/** Check whether the request to use GCC atomics can be satisfied **/
-#if TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT
-    #error "GCC atomic built-ins are not supported."
-#endif
-
-/** Internal TBB features & modes **/
-
-/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/
-#ifndef __TBB_WEAK_SYMBOLS_PRESENT
-#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) )
-#endif
-
-/** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load shared libraries at run time **/
-#ifndef __TBB_DYNAMIC_LOAD_ENABLED
-    #define __TBB_DYNAMIC_LOAD_ENABLED 1
-#endif
-
-/** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when
-    it's necessary to test internal functions not exported from TBB DLLs
-**/
-#if (_WIN32||_WIN64) && (__TBB_SOURCE_DIRECTLY_INCLUDED || TBB_USE_PREVIEW_BINARY)
-    #define __TBB_NO_IMPLICIT_LINKAGE 1
-    #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1
-#endif
-
-#ifndef __TBB_COUNT_TASK_NODES
-    #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT
-#endif
-
-#ifndef __TBB_TASK_GROUP_CONTEXT
-    #define __TBB_TASK_GROUP_CONTEXT 1
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-#ifndef __TBB_SCHEDULER_OBSERVER
-    #define __TBB_SCHEDULER_OBSERVER 1
-#endif /* __TBB_SCHEDULER_OBSERVER */
-
-#ifndef __TBB_FP_CONTEXT
-    #define __TBB_FP_CONTEXT __TBB_TASK_GROUP_CONTEXT
-#endif /* __TBB_FP_CONTEXT */
-
-#if __TBB_FP_CONTEXT && !__TBB_TASK_GROUP_CONTEXT
-    #error __TBB_FP_CONTEXT requires __TBB_TASK_GROUP_CONTEXT to be enabled
-#endif
-
-#ifndef __TBB_TASK_ARENA
-    #define __TBB_TASK_ARENA 1
-#endif /* __TBB_TASK_ARENA */
-#if __TBB_TASK_ARENA
-    #define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official
-    #if !__TBB_SCHEDULER_OBSERVER
-        #error __TBB_TASK_ARENA requires __TBB_SCHEDULER_OBSERVER to be enabled
-    #endif
-#endif /* __TBB_TASK_ARENA */
-
-#ifndef __TBB_ARENA_OBSERVER
-    #define __TBB_ARENA_OBSERVER ((__TBB_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER)
-#endif /* __TBB_ARENA_OBSERVER */
-
-#ifndef __TBB_SLEEP_PERMISSION
-    #define __TBB_SLEEP_PERMISSION ((__TBB_CPF_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER)
-#endif /* __TBB_SLEEP_PERMISSION */
-
-#if TBB_PREVIEW_FLOW_GRAPH_TRACE
-#define __TBB_NO_IMPLICIT_LINKAGE 1
-#endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */
-
-#ifndef __TBB_ITT_STRUCTURE_API
-#define
__TBB_ITT_STRUCTURE_API ( !__TBB_DEFINE_MIC && (__TBB_CPF_BUILD || TBB_PREVIEW_FLOW_GRAPH_TRACE) ) -#endif - -#if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT - #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabled -#endif - -#ifndef __TBB_TASK_PRIORITY - #define __TBB_TASK_PRIORITY (!(__TBB_CPF_BUILD||TBB_USE_PREVIEW_BINARY)&&__TBB_TASK_GROUP_CONTEXT) // TODO: it will be enabled for CPF in the next versions -#endif /* __TBB_TASK_PRIORITY */ - -#if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT - #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enabled -#endif - -#if TBB_PREVIEW_WAITING_FOR_WORKERS || __TBB_BUILD - #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1 -#endif - -#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && \ - (_WIN32 || _WIN64 || __APPLE__ || (__linux__ && !__ANDROID__)) - #define __TBB_SURVIVE_THREAD_SWITCH 1 -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -#ifndef __TBB_DEFAULT_PARTITIONER -#if TBB_DEPRECATED -/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ -#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner -#else -/** Default partitioner for parallel loop templates since TBB 2.2 */ -#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner -#endif /* TBB_DEPRECATED */ -#endif /* !defined(__TBB_DEFAULT_PARTITIONER */ - -#ifndef __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES -#define __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES 1 -#endif - -#ifdef _VARIADIC_MAX -#define __TBB_VARIADIC_MAX _VARIADIC_MAX -#else -#if _MSC_VER >= 1700 -#define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */ -#else -#define __TBB_VARIADIC_MAX 10 -#endif -#endif - -/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load - shared libraries at run time only from application container **/ -#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP - #define __TBB_WIN8UI_SUPPORT 1 -#else - #define __TBB_WIN8UI_SUPPORT 0 -#endif - -/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by - the bugs in compilers, standard or OS specific libraries. They should be - removed as soon as the corresponding bugs are fixed or the buggy OS/compiler - versions go out of the support list. -**/ - -#if __ANDROID__ && __TBB_GCC_VERSION <= 40403 && !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 - /** Necessary because on Android 8-byte CAS and F&A are not available for some processor architectures, - but no mandatory warning message appears from GCC 4.4.3. Instead, only a linkage error occurs when - these atomic operations are used (such as in unit test test_atomic.exe). **/ - #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1 -#elif __TBB_x86_32 && __TBB_GCC_VERSION == 40102 && ! __GNUC_RH_RELEASE__ - /** GCC 4.1.2 erroneously emit call to external function for 64 bit sync_ intrinsics. - However these functions are not defined anywhere. It seems that this problem was fixed later on - and RHEL got an updated version of gcc 4.1.2. **/ - #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1 -#endif - -#if __GNUC__ && __TBB_x86_64 && __INTEL_COMPILER == 1200 - #define __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN 1 -#endif - -#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012) - /** Necessary to avoid ICL error (or warning in non-strict mode): - "exception specification for implicitly declared virtual destructor is - incompatible with that of overridden one". 
**/ - #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 -#endif - -#if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) - /** VS2005 and earlier do not allow declaring template class as a friend - of classes defined in other namespaces. **/ - #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 -#endif - -//TODO: recheck for different clang versions -#if __GLIBC__==2 && __GLIBC_MINOR__==3 || (__APPLE__ && ( __INTEL_COMPILER==1200 && !TBB_USE_DEBUG)) - /** Macro controlling EH usages in TBB tests. - Some older versions of glibc crash when exception handling happens concurrently. **/ - #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 -#else - #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 0 -#endif - -#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 - /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/ - #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 -#endif - -#if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER)) - /** Bugs with access to nested classes declared in protected area */ - #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1 -#endif - -#if __MINGW32__ && __TBB_GCC_VERSION < 40200 - /** MinGW has a bug with stack alignment for routines invoked from MS RTLs. - Since GCC 4.2, the bug can be worked around via a special attribute. **/ - #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 -#else - #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0 -#endif - -#if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0 - /* GCC of this version may rashly ignore control dependencies */ - #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1 -#endif - -#if __FreeBSD__ - /** A bug in FreeBSD 8.0 results in kernel panic when there is contention - on a mutex created with this attribute. **/ - #define __TBB_PRIO_INHERIT_BROKEN 1 - - /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs - during (concurrent?) object construction by means of placement new operator. **/ - #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 -#endif /* __FreeBSD__ */ - -#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) - /** The Intel compiler for IA-32 (Linux|OS X) crashes or generates - incorrect code when __asm__ arguments have a cast to volatile. **/ - #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 -#endif - -#if !__INTEL_COMPILER && (_MSC_VER || __GNUC__==3 && __GNUC_MINOR__<=2) - /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __alignof(T) - when T has not yet been instantiated. **/ - #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1 -#endif - -#if __TBB_DEFINE_MIC - /** Main thread and user's thread have different default thread affinity masks. **/ - #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 -#endif - -#if __GXX_EXPERIMENTAL_CXX0X__ && !defined(__EXCEPTIONS) && \ - ((!__INTEL_COMPILER && !__clang__ && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<40600)) || \ - (__INTEL_COMPILER<=1400 && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<=40801))) -/* There is an issue for specific GCC toolchain when C++11 is enabled - and exceptions are disabled: - exceprion_ptr.h/nested_exception.h use throw unconditionally. - GCC can ignore 'throw' since 4.6; but with ICC the issue still exists. 
- */
-    #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1
-#else
-    #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 0
-#endif
-
-#if __INTEL_COMPILER==1300 && __TBB_GCC_VERSION>=40700 && defined(__GXX_EXPERIMENTAL_CXX0X__)
-/* Some C++11 features used inside libstdc++ are not supported by Intel compiler.
- * Checking version of gcc instead of libstdc++ because
- *  - they are directly connected,
- *  - for now it is not possible to check version of any standard library in this file
- */
-    #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 1
-#else
-    #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 0
-#endif
-
-#if (__GNUC__==4 && __GNUC_MINOR__==4 ) && !defined(__INTEL_COMPILER) && !defined(__clang__)
-    /** excessive warnings related to strict aliasing rules in GCC 4.4 **/
-    #define __TBB_GCC_STRICT_ALIASING_BROKEN 1
-    /* topical remedy: #pragma GCC diagnostic ignored "-Wstrict-aliasing" */
-    #if !__TBB_GCC_WARNING_SUPPRESSION_PRESENT
-        #error Warning suppression is not supported, while should.
-    #endif
-#endif
-
-/*In a PIC mode some versions of GCC 4.1.2 generate incorrect inlined code for 8 byte __sync_val_compare_and_swap intrinsic */
-#if __TBB_GCC_VERSION == 40102 && __PIC__ && !defined(__INTEL_COMPILER) && !defined(__clang__)
-    #define __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 1
-#endif
-
-#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun || __ANDROID__) && (__INTEL_COMPILER || (__GNUC__==3 && __GNUC_MINOR__==3 ) || __SUNPRO_CC)
-    // Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack,
-    // even if the object specifies 8-byte alignment. On such platforms, the IA-32 implementation
-    // of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon
-    // whether the object is properly aligned or not.
-    #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1
-#else
-    #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0
-#endif
-
-#if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && __TBB_GCC_VERSION < 40700 && !defined(__INTEL_COMPILER) && !defined (__clang__)
-    #define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1
-#endif
-
-#if _MSC_VER && _MSC_VER <= 1800 && !__INTEL_COMPILER
-    // With MSVC, when an array is passed by const reference to a template function,
-    // constness from the function parameter may get propagated to the template parameter.
-    #define __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN 1
-#endif
-
-// A compiler bug: a disabled copy constructor prevents use of the moving constructor
-#define __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN (_MSC_VER && (__INTEL_COMPILER >= 1300 && __INTEL_COMPILER <= 1310) && !__INTEL_CXX11_MODE__)
-
-// MSVC 2013 and ICC 15 seems do not generate implicit move constructor for empty derived class while should
-#define __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN (__TBB_CPP11_RVALUE_REF_PRESENT && \
-    ( !__INTEL_COMPILER && _MSC_VER && _MSC_VER <=1800 || __INTEL_COMPILER && __INTEL_COMPILER <= 1500 ))
-
-/** End of __TBB_XXX_BROKEN macro section **/
-
-#if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER)
-    // A macro to suppress erroneous or benign "unreachable code" MSVC warning (4702)
-    #define __TBB_MSVC_UNREACHABLE_CODE_IGNORED 1
-#endif
-
-#define __TBB_ATOMIC_CTORS (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && (!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN))
-
-#define __TBB_ALLOCATOR_CONSTRUCT_VARIADIC (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)
-
-#define __TBB_VARIADIC_PARALLEL_INVOKE (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)
-#endif /* __TBB_tbb_config_H */
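Client code consumed these detection macros through ordinary preprocessor conditionals. A hypothetical sketch that compiles a move-aware overload only when r-value reference support was detected:

#include <string>
#include "tbb/tbb_config.h"

#if __TBB_CPP11_RVALUE_REF_PRESENT
void push(std::string&& s);       // move-aware path, C++11 toolchains only
#else
void push(const std::string& s);  // fallback for pre-C++11 toolchains
#endif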
diff --git a/src/tbb/include/tbb/tbb_exception.h b/src/tbb/include/tbb/tbb_exception.h
deleted file mode 100644
index cfef55ef3..000000000
--- a/src/tbb/include/tbb/tbb_exception.h
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License.  This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_exception_H
-#define __TBB_exception_H
-
-#include "tbb_stddef.h"
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
-    #pragma warning (push)
-    #pragma warning (disable: 4530)
-#endif
-
-#include <stdexcept>
-#include <new>    //required for bad_alloc definition, operators new
-#include <string> // required to construct std exception classes
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    #pragma warning (pop)
-#endif
-
-namespace tbb {
-
-//! Exception for concurrent containers
-class bad_last_alloc : public std::bad_alloc {
-public:
-    /*override*/ const char* what() const throw();
-#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN
-    /*override*/ ~bad_last_alloc() throw() {}
-#endif
-};
-
-//! Exception for PPL locks
-class improper_lock : public std::exception {
-public:
-    /*override*/ const char* what() const throw();
-};
-
-//! Exception for user-initiated abort
-class user_abort : public std::exception {
-public:
-    /*override*/ const char* what() const throw();
-};
-
-//! Exception for missing wait on structured_task_group
-class missing_wait : public std::exception {
-public:
-    /*override*/ const char* what() const throw();
-};
-
-//! Exception for repeated scheduling of the same task_handle
-class invalid_multiple_scheduling : public std::exception {
-public:
-    /*override*/ const char* what() const throw();
-};
-
-namespace internal {
-//! Obsolete
-void __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4();
-
-enum exception_id {
-    eid_bad_alloc = 1,
-    eid_bad_last_alloc,
-    eid_nonpositive_step,
-    eid_out_of_range,
-    eid_segment_range_error,
-    eid_index_range_error,
-    eid_missing_wait,
-    eid_invalid_multiple_scheduling,
-    eid_improper_lock,
-    eid_possible_deadlock,
-    eid_operation_not_permitted,
-    eid_condvar_wait_failed,
-    eid_invalid_load_factor,
-    eid_reserved, // free slot for backward compatibility, can be reused.
-    eid_invalid_swap,
-    eid_reservation_length_error,
-    eid_invalid_key,
-    eid_user_abort,
-    eid_reserved1,
-#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
-    // This id is used only inside library and only for support of CPF functionality.
-    // So, if we drop the functionality, eid_reserved1 can be safely renamed and reused.
-    eid_blocking_sch_init = eid_reserved1,
-#endif
-    eid_bad_tagged_msg_cast,
-    //! The last enumerator tracks the number of defined IDs. It must remain the last one.
-    /** When adding new IDs, place them immediately _before_ this comment (that is
-        _after_ all the existing IDs. NEVER insert new IDs between the existing ones. **/
-    eid_max
-};
-
-//! Gathers all throw operators in one place.
-/** Its purpose is to minimize code bloat that can be caused by throw operators
-    scattered in multiple places, especially in templates. **/
-void __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id );
-
-//! Versionless convenience wrapper for throw_exception_v4()
-inline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); }
-
-} // namespace internal
-} // namespace tbb
-
-#if __TBB_TASK_GROUP_CONTEXT
-#include "tbb_allocator.h"
-#include <typeinfo> //for typeid
-
-namespace tbb {
-
-//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.
-/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted
-    by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in
-    the root thread. The root thread is the thread that has started the outermost algorithm
-    or root task sharing the same task_group_context with the guilty algorithm/task (the one
-    that threw the exception first).
-
-    Note: when documentation mentions workers with respect to exception handling,
-    masters are implied as well, because they are completely equivalent in this context.
-    Consequently a root thread can be master or worker thread.
-
-    NOTE: In case of nested algorithms or complex task hierarchies when the nested
-    levels share (explicitly or by means of implicit inheritance) the task group
-    context of the outermost level, the exception may be (re-)thrown multiple times
-    (ultimately - in each worker on each nesting level) before reaching the root
-    thread at the outermost level. IMPORTANT: if you intercept an exception derived
-    from this class on a nested level, you must re-throw it in the catch block by means
-    of the "throw;" operator.
-
-    TBB provides two implementations of this interface: tbb::captured_exception and
-    template class tbb::movable_exception. See their declarations for more info. **/
-class tbb_exception : public std::exception
-{
-    /** No operator new is provided because the TBB usage model assumes dynamic
-        creation of the TBB exception objects only by means of applying move()
-        operation on an exception thrown out of TBB scheduler. **/
-    void* operator new ( size_t );
-
-public:
-#if __clang__
-    // At -O3 or even -O2 optimization level, Clang may fully throw away an empty destructor
-    // of tbb_exception from destructors of derived classes. As a result, it does not create
-    // vtable for tbb_exception, which is a required part of TBB binary interface.
-    // Making the destructor non-empty (with just a semicolon) prevents that optimization.
-    ~tbb_exception() throw() { /* keep the semicolon! */ ; }
-#endif
-
-    //! Creates and returns pointer to the deep copy of this exception object.
-    /** Move semantics is allowed. **/
-    virtual tbb_exception* move () throw() = 0;
-
-    //! Destroys objects created by the move() method.
-    /** Frees memory and calls destructor for this exception object.
-        Can and must be used only on objects created by the move method. **/
-    virtual void destroy () throw() = 0;
-
-    //! Throws this exception object.
-    /** Make sure that if you have several levels of derivation from this interface
-        you implement or override this method on the most derived level. The implementation
-        is as simple as "throw *this;". Failure to do this will result in exception
-        of a base class type being thrown. **/
-    virtual void throw_self () = 0;
-
-    //! Returns RTTI name of the originally intercepted exception
-    virtual const char* name() const throw() = 0;
-
-    //! Returns the result of originally intercepted exception's what() method.
-    virtual const char* what() const throw() = 0;
-
-    /** Operator delete is provided only to allow using existing smart pointers
-        with TBB exception objects obtained as the result of applying move()
-        operation on an exception thrown out of TBB scheduler.
-
-        When overriding method move() make sure to override operator delete as well
-        if memory is allocated not by TBB's scalable allocator. **/
-    void operator delete ( void* p ) {
-        internal::deallocate_via_handler_v3(p);
-    }
-};
-
-//! This class is used by TBB to propagate information about unhandled exceptions into the root thread.
-/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel
-    algorithm ) if an unhandled exception was intercepted during the algorithm execution in one
-    of the workers.
-    \sa tbb::tbb_exception **/
-class captured_exception : public tbb_exception
-{
-public:
-    captured_exception ( const captured_exception& src )
-        : tbb_exception(src), my_dynamic(false)
-    {
-        set(src.my_exception_name, src.my_exception_info);
-    }
-
-    captured_exception ( const char* name_, const char* info )
-        : my_dynamic(false)
-    {
-        set(name_, info);
-    }
-
-    __TBB_EXPORTED_METHOD ~captured_exception () throw();
-
-    captured_exception& operator= ( const captured_exception& src ) {
-        if ( this != &src ) {
-            clear();
-            set(src.my_exception_name, src.my_exception_info);
-        }
-        return *this;
-    }
-
-    /*override*/
-    captured_exception* __TBB_EXPORTED_METHOD move () throw();
-
-    /*override*/
-    void __TBB_EXPORTED_METHOD destroy () throw();
-
-    /*override*/
-    void throw_self () { __TBB_THROW(*this); }
-
-    /*override*/
-    const char* __TBB_EXPORTED_METHOD name() const throw();
-
-    /*override*/
-    const char* __TBB_EXPORTED_METHOD what() const throw();
-
-    void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw();
-    void __TBB_EXPORTED_METHOD clear () throw();
-
-private:
-    //! Used only by method clone().
-    captured_exception() {}
-
-    //! Functionally equivalent to {captured_exception e(name,info); return e.clone();}
-    static captured_exception* allocate ( const char* name, const char* info );
-
-    bool my_dynamic;
-    const char* my_exception_name;
-    const char* my_exception_info;
-};
-
-//! Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread
-/** Code using TBB can instantiate this template with an arbitrary ExceptionData type
-    and throw this exception object. Such exceptions are intercepted by the TBB scheduler
-    and delivered to the root thread ().
-    \sa tbb::tbb_exception **/
-template<typename ExceptionData>
-class movable_exception : public tbb_exception
-{
-    typedef movable_exception<ExceptionData> self_type;
-
-public:
-    movable_exception ( const ExceptionData& data_ )
-        : my_exception_data(data_)
-        , my_dynamic(false)
-        , my_exception_name(
-#if TBB_USE_EXCEPTIONS
-        typeid(self_type).name()
-#else /* !TBB_USE_EXCEPTIONS */
-        "movable_exception"
-#endif /* !TBB_USE_EXCEPTIONS */
-        )
-    {}
-
-    movable_exception ( const movable_exception& src ) throw ()
-        : tbb_exception(src)
-        , my_exception_data(src.my_exception_data)
-        , my_dynamic(false)
-        , my_exception_name(src.my_exception_name)
-    {}
-
-    ~movable_exception () throw() {}
-
-    const movable_exception& operator= ( const movable_exception& src ) {
-        if ( this != &src ) {
-            my_exception_data = src.my_exception_data;
-            my_exception_name = src.my_exception_name;
-        }
-        return *this;
-    }
-
-    ExceptionData& data () throw() { return my_exception_data; }
-
-    const ExceptionData& data () const throw() { return my_exception_data; }
-
-    /*override*/ const char* name () const throw() { return my_exception_name; }
-
-    /*override*/ const char* what () const throw() { return "tbb::movable_exception"; }
-
-    /*override*/
-    movable_exception* move () throw() {
-        void* e = internal::allocate_via_handler_v3(sizeof(movable_exception));
-        if ( e ) {
-            ::new (e) movable_exception(*this);
-            ((movable_exception*)e)->my_dynamic = true;
-        }
-        return (movable_exception*)e;
-    }
-    /*override*/
-    void destroy () throw() {
-        __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" );
-        if ( my_dynamic ) {
-            this->~movable_exception();
-            internal::deallocate_via_handler_v3(this);
-        }
-    }
-    /*override*/
-    void throw_self () { __TBB_THROW( *this ); }
-
-protected:
-    //! User data
-    ExceptionData my_exception_data;
-
-private:
-    //! Flag specifying whether this object has been dynamically allocated (by the move method)
-    bool my_dynamic;
-
-    //! RTTI name of this class
-    /** We rely on the fact that RTTI names are static string constants. **/
-    const char* my_exception_name;
-};
-
-#if !TBB_USE_CAPTURED_EXCEPTION
-namespace internal {
-
-//! Exception container that preserves the exact copy of the original exception
-/** This class can be used only when the appropriate runtime support (mandated
-    by C++0x) is present **/
-class tbb_exception_ptr {
-    std::exception_ptr my_ptr;
-
-public:
-    static tbb_exception_ptr* allocate ();
-    static tbb_exception_ptr* allocate ( const tbb_exception& tag );
-    //! This overload uses move semantics (i.e. it empties src)
-    static tbb_exception_ptr* allocate ( captured_exception& src );
-
-    //! Destroys this objects
-    /** Note that objects of this type can be created only by the allocate() method. **/
-    void destroy () throw();
-
-    //! Throws the contained exception .
-    void throw_self () { std::rethrow_exception(my_ptr); }
-
-private:
-    tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {}
-    tbb_exception_ptr ( const captured_exception& src ) :
-        #if __TBB_MAKE_EXCEPTION_PTR_PRESENT
-            my_ptr(std::make_exception_ptr(src))  // the final function name in C++11
-        #else
-            my_ptr(std::copy_exception(src))      // early C++0x drafts name
-        #endif
-    {}
-}; // class tbb::internal::tbb_exception_ptr
-
-} // namespace internal
-#endif /* !TBB_USE_CAPTURED_EXCEPTION */
-
-} // namespace tbb
-
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-#endif /* __TBB_exception_H */
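As the comments above describe, an exception thrown inside a parallel algorithm is re-thrown in the thread that started it: as the original type when std::exception_ptr support is present, or as a tbb::captured_exception summary otherwise. An illustrative sketch (run_and_report is hypothetical):

#include <iostream>
#include <stdexcept>
#include "tbb/parallel_for.h"
#include "tbb/tbb_exception.h"

void run_and_report() {
    try {
        tbb::parallel_for(0, 100, [](int) { throw std::runtime_error("boom"); });
    } catch (tbb::captured_exception& e) {
        // summary object: original RTTI name plus what() text
        std::cerr << e.name() << ": " << e.what() << '\n';
    } catch (std::exception& e) {
        // exact exception, preserved via std::exception_ptr
        std::cerr << e.what() << '\n';
    }
}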
-    If a plug-in header does not implement all the required APIs, it must specify
-    the missing ones by setting one or more of the following macros:
-
-    __TBB_USE_GENERIC_PART_WORD_CAS
-    __TBB_USE_GENERIC_PART_WORD_FETCH_ADD
-    __TBB_USE_GENERIC_PART_WORD_FETCH_STORE
-    __TBB_USE_GENERIC_FETCH_ADD
-    __TBB_USE_GENERIC_FETCH_STORE
-    __TBB_USE_GENERIC_DWORD_FETCH_ADD
-    __TBB_USE_GENERIC_DWORD_FETCH_STORE
-    __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
-    __TBB_USE_GENERIC_FULL_FENCED_LOAD_STORE
-    __TBB_USE_GENERIC_RELAXED_LOAD_STORE
-    __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
-
-    In this case tbb_machine.h will add missing functionality based on a minimal set
-    of APIs that are required to be implemented by all plug-in headers as described
-    further.
-    Note that these generic implementations may be sub-optimal for a particular
-    architecture, and thus should be relied upon only after careful evaluation
-    or as the last resort.
-
-    Additionally __TBB_64BIT_ATOMICS can be set to 0 on a 32-bit architecture to
-    indicate that the port is not going to support double word atomics. It may also
-    be set to 1 explicitly, though normally this is not necessary as tbb_machine.h
-    will set it automatically.
-
-    __TBB_ENDIANNESS macro can be defined by the implementation as well.
-    It is used only if __TBB_USE_GENERIC_PART_WORD_CAS is set (or for testing),
-    and must specify the layout of aligned 16-bit and 32-bit data anywhere within a process
-    (while the details of unaligned 16-bit or 32-bit data or of 64-bit data are irrelevant).
-    The layout must be the same at all relevant memory locations within the current process;
-    in case of page-specific endianness, one endianness must be kept "out of sight".
-    Possible settings, reflecting hardware and possibly O.S. convention, are:
-    -  __TBB_ENDIAN_BIG for big-endian data,
-    -  __TBB_ENDIAN_LITTLE for little-endian data,
-    -  __TBB_ENDIAN_DETECT for run-time detection iff exactly one of the above,
-    -  __TBB_ENDIAN_UNSUPPORTED to prevent undefined behavior if none of the above.
-
-    Prerequisites for each architecture port
-    ----------------------------------------
-    The following functions and macros have no generic implementation. Therefore they must be
-    implemented in each machine architecture specific header either as a conventional
-    function or as a functional macro.
-
-    __TBB_WORDSIZE
-        This is the size of machine word in bytes, i.e. for 32 bit systems it
-        should be defined to 4.
-
-    __TBB_Yield()
-        Signals OS that the current thread is willing to relinquish the remainder
-        of its time quantum.
-
-    __TBB_full_memory_fence()
-        Must prevent all memory operations from being reordered across it (both
-        by hardware and compiler). All such fences must be totally ordered (or
-        sequentially consistent).
-
-    __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand )
-        Must be provided if __TBB_USE_FENCED_ATOMICS is not set.
-
-    __TBB_machine_cmpswp8( volatile void *ptr, int64_t value, int64_t comparand )
-        Must be provided for 64-bit architectures if __TBB_USE_FENCED_ATOMICS is not set,
-        and for 32-bit architectures if __TBB_64BIT_ATOMICS is set
-
-    __TBB_machine_<op><S><fence>(...), where
-        <op> = {cmpswp, fetchadd, fetchstore}
-        <S> = {1, 2, 4, 8}
-        <fence> = {full_fence, acquire, release, relaxed}
-        Must be provided if __TBB_USE_FENCED_ATOMICS is set.
-
-    __TBB_control_consistency_helper()
-        Bridges the memory-semantics gap between architectures providing only
-        implicit C++0x "consume" semantics (like Power Architecture) and those
-        also implicitly obeying control dependencies (like IA-64 architecture).
-        It must be used only in conditional code where the condition is itself
-        data-dependent, and will then make subsequent code behave as if the
-        original data dependency were acquired.
-        It needs only a compiler fence where implied by the architecture
-        either specifically (like IA-64 architecture) or because generally stronger
-        "acquire" semantics are enforced (like x86).
-        It is always valid, though potentially suboptimal, to replace
-        control with acquire on the load and then remove the helper.
-
-    __TBB_acquire_consistency_helper(), __TBB_release_consistency_helper()
-        Must be provided if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE is set.
-        Enforce acquire and release semantics in generic implementations of fenced
-        store and load operations. Depending on the particular architecture/compiler
-        combination they may be a hardware fence, a compiler fence, both or nothing.
- **/
-
-#include "tbb_stddef.h"
-
-namespace tbb {
-namespace internal { //< @cond INTERNAL
-
-////////////////////////////////////////////////////////////////////////////////
-// Overridable helpers declarations
-//
-// A machine/*.h file may choose to define these templates, otherwise it must
-// request default implementation by setting appropriate __TBB_USE_GENERIC_XXX macro(s).
-//
-template <typename T, size_t S>
-struct machine_load_store;
-
-template <typename T, size_t S>
-struct machine_load_store_relaxed;
-
-template <typename T, size_t S>
-struct machine_load_store_seq_cst;
-//
-// End of overridable helpers declarations
-////////////////////////////////////////////////////////////////////////////////
-
-template<size_t S> struct atomic_selector;
-
-template<> struct atomic_selector<1> {
-    typedef int8_t word;
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-template<> struct atomic_selector<2> {
-    typedef int16_t word;
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-template<> struct atomic_selector<4> {
-#if _MSC_VER && !_WIN64
-    // Work-around that avoids spurious /Wp64 warnings
-    typedef intptr_t word;
-#else
-    typedef int32_t word;
-#endif
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-template<> struct atomic_selector<8> {
-    typedef int64_t word;
-    inline static word fetch_store ( volatile void* location, word value );
-};
-
-}} //< namespaces internal @endcond, tbb
-
-#define __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(M)                                    \
-    inline void __TBB_machine_generic_store8##M(volatile void *ptr, int64_t value) {     \
-        for(;;) {                                                                        \
-            int64_t result = *(volatile int64_t *)ptr;                                   \
-            if( __TBB_machine_cmpswp8##M(ptr,value,result)==result ) break;              \
-        }                                                                                \
-    }                                                                                    \
-
-#define __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(M)                                     \
-    inline int64_t __TBB_machine_generic_load8##M(const volatile void *ptr) {            \
-        /* Comparand and new value may be anything, they only must be equal, and */      \
-        /* the value should have a low probability to be actually found in 'location'.*/ \
-        const int64_t anyvalue = 2305843009213693951LL;                                  \
-        return __TBB_machine_cmpswp8##M(const_cast<volatile void *>(ptr),anyvalue,anyvalue); \
-    }                                                                                    \
-
-// The set of allowed values for __TBB_ENDIANNESS (see above for details)
-#define __TBB_ENDIAN_UNSUPPORTED -1
-#define __TBB_ENDIAN_LITTLE       0
-#define __TBB_ENDIAN_BIG          1
-#define __TBB_ENDIAN_DETECT       2
-
-#if _WIN32||_WIN64
-
-#ifdef _MANAGED
-#pragma managed(push,
off) -#endif - - #if __MINGW64__ || __MINGW32__ - extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); - #define __TBB_Yield() SwitchToThread() - #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) - #include "machine/gcc_generic.h" - #elif __MINGW64__ - #include "machine/linux_intel64.h" - #elif __MINGW32__ - #include "machine/linux_ia32.h" - #endif - #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #elif defined(_M_IX86) && !defined(__TBB_WIN32_USE_CL_BUILTINS) - #include "machine/windows_ia32.h" - #elif defined(_M_X64) - #include "machine/windows_intel64.h" - #elif defined(_XBOX) - #include "machine/xbox360_ppc.h" - #elif defined(_M_ARM) || defined(__TBB_WIN32_USE_CL_BUILTINS) - #include "machine/msvc_armv7.h" - #endif - -#ifdef _MANAGED -#pragma managed(pop) -#endif - -#elif __TBB_DEFINE_MIC - - #include "machine/mic_common.h" - #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #else - #include "machine/linux_intel64.h" - #endif - -#elif __linux__ || __FreeBSD__ || __NetBSD__ - - #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) - #include "machine/gcc_generic.h" - #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #elif __i386__ - #include "machine/linux_ia32.h" - #elif __x86_64__ - #include "machine/linux_intel64.h" - #elif __ia64__ - #include "machine/linux_ia64.h" - #elif __powerpc__ - #include "machine/mac_ppc.h" - #elif __arm__ - #include "machine/gcc_armv7.h" - #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT - #include "machine/gcc_generic.h" - #endif - #include "machine/linux_common.h" - -#elif __APPLE__ - //TODO: TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix - #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) - #include "machine/icc_generic.h" - #elif __i386__ - #include "machine/linux_ia32.h" - #elif __x86_64__ - #include "machine/linux_intel64.h" - #elif __POWERPC__ - #include "machine/mac_ppc.h" - #endif - #include "machine/macos_common.h" - -#elif _AIX - - #include "machine/ibm_aix51.h" - -#elif __sun || __SUNPRO_CC - - #define __asm__ asm - #define __volatile__ volatile - - #if __i386 || __i386__ - #include "machine/linux_ia32.h" - #elif __x86_64__ - #include "machine/linux_intel64.h" - #elif __sparc - #include "machine/sunos_sparc.h" - #endif - #include - - #define __TBB_Yield() sched_yield() - -#endif /* OS selection */ - -#ifndef __TBB_64BIT_ATOMICS - #define __TBB_64BIT_ATOMICS 1 -#endif - -//TODO: replace usage of these functions with usage of tbb::atomic, and then remove them -//TODO: map functions with W suffix to use cast to tbb::atomic and according op, i.e. 
as_atomic().op() -// Special atomic functions -#if __TBB_USE_FENCED_ATOMICS - #define __TBB_machine_cmpswp1 __TBB_machine_cmpswp1full_fence - #define __TBB_machine_cmpswp2 __TBB_machine_cmpswp2full_fence - #define __TBB_machine_cmpswp4 __TBB_machine_cmpswp4full_fence - #define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8full_fence - - #if __TBB_WORDSIZE==8 - #define __TBB_machine_fetchadd8 __TBB_machine_fetchadd8full_fence - #define __TBB_machine_fetchstore8 __TBB_machine_fetchstore8full_fence - #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd8release(P,V) - #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd8acquire(P,1) - #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd8release(P,(-1)) - #else - #define __TBB_machine_fetchadd4 __TBB_machine_fetchadd4full_fence - #define __TBB_machine_fetchstore4 __TBB_machine_fetchstore4full_fence - #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd4release(P,V) - #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd4acquire(P,1) - #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd4release(P,(-1)) - #endif /* __TBB_WORDSIZE==4 */ -#else /* !__TBB_USE_FENCED_ATOMICS */ - #define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) - #define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) - #define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1)) -#endif /* !__TBB_USE_FENCED_ATOMICS */ - -#if __TBB_WORDSIZE==4 - #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) - #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V) - #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V) -#elif __TBB_WORDSIZE==8 - #if __TBB_USE_GENERIC_DWORD_LOAD_STORE || __TBB_USE_GENERIC_DWORD_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_STORE - #error These macros should only be used on 32-bit platforms. - #endif - - #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) - #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) - #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) -#else /* __TBB_WORDSIZE != 8 */ - #error Unsupported machine word size. -#endif /* __TBB_WORDSIZE */ - -#ifndef __TBB_Pause - inline void __TBB_Pause(int32_t) { - __TBB_Yield(); - } -#endif - -namespace tbb { - -//! Sequentially consistent full memory fence. -inline void atomic_fence () { __TBB_full_memory_fence(); } - -namespace internal { //< @cond INTERNAL - -//! Class that implements exponential backoff. -/** See implementation of spin_wait_while_eq for an example. */ -class atomic_backoff : no_copy { - //! Time delay, in units of "pause" instructions. - /** Should be equal to approximately the number of "pause" instructions - that take the same time as an context switch. */ - static const int32_t LOOPS_BEFORE_YIELD = 16; - int32_t count; -public: - // In many cases, an object of this type is initialized eagerly on hot path, - // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ } - // For this reason, the construction cost must be very small! - atomic_backoff() : count(1) {} - // This constructor pauses immediately; do not use on hot paths! - atomic_backoff( bool ) : count(1) { pause(); } - - //! Pause for a while. - void pause() { - if( count<=LOOPS_BEFORE_YIELD ) { - __TBB_Pause(count); - // Pause twice as long the next time. - count*=2; - } else { - // Pause is so long that we might as well yield CPU to scheduler. - __TBB_Yield(); - } - } - - // pause for a few times and then return false immediately. 
-    bool bounded_pause() {
-        if( count<=LOOPS_BEFORE_YIELD ) {
-            __TBB_Pause(count);
-            // Pause twice as long the next time.
-            count*=2;
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    void reset() {
-        count = 1;
-    }
-};
-
-//! Spin WHILE the value of the variable is equal to a given value
-/** T and U should be comparable types. */
-template<typename T, typename U>
-void spin_wait_while_eq( const volatile T& location, U value ) {
-    atomic_backoff backoff;
-    while( location==value ) backoff.pause();
-}
-
-//! Spin UNTIL the value of the variable is equal to a given value
-/** T and U should be comparable types. */
-template<typename T, typename U>
-void spin_wait_until_eq( const volatile T& location, const U value ) {
-    atomic_backoff backoff;
-    while( location!=value ) backoff.pause();
-}
-
-template <typename predicate_type>
-void spin_wait_while(predicate_type condition){
-    atomic_backoff backoff;
-    while( condition() ) backoff.pause();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Generic compare-and-swap applied to only a part of a machine word.
-//
-#ifndef __TBB_ENDIANNESS
-#define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT
-#endif
-
-#if __TBB_USE_GENERIC_PART_WORD_CAS && __TBB_ENDIANNESS==__TBB_ENDIAN_UNSUPPORTED
-#error Generic implementation of part-word CAS may not be used with __TBB_ENDIAN_UNSUPPORTED
-#endif
-
-#if __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED
-//
-// This function is the only use of __TBB_ENDIANNESS.
-// The following restrictions/limitations apply for this operation:
-//  - T must be an integer type of at most 4 bytes for the casts and calculations to work
-//  - T must also be less than 4 bytes to avoid compiler warnings when computing mask
-//      (and for the operation to be useful at all, so no workaround is applied)
-//  - the architecture must consistently use either little-endian or big-endian (same for all locations)
-//
-// TODO: static_assert for the type requirements stated above
-template<typename T>
-inline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value, const T comparand ) {
-    struct endianness{ static bool is_big_endian(){
-        #if __TBB_ENDIANNESS==__TBB_ENDIAN_DETECT
-            const uint32_t probe = 0x03020100;
-            return (((const char*)(&probe))[0]==0x03);
-        #elif __TBB_ENDIANNESS==__TBB_ENDIAN_BIG || __TBB_ENDIANNESS==__TBB_ENDIAN_LITTLE
-            return __TBB_ENDIANNESS==__TBB_ENDIAN_BIG;
-        #else
-            #error Unexpected value of __TBB_ENDIANNESS
-        #endif
-    }};
-
-    const uint32_t byte_offset   = (uint32_t) ((uintptr_t)ptr & 0x3);
-    volatile uint32_t * const aligned_ptr = (uint32_t*)((uintptr_t)ptr - byte_offset );
-
-    // location of T within uint32_t for a C++ shift operation
-    const uint32_t bits_to_shift = 8*(endianness::is_big_endian() ? (4 - sizeof(T) - (byte_offset)) : byte_offset);
-    const uint32_t mask          = (((uint32_t)1<<(sizeof(T)*8)) - 1 )<<bits_to_shift;
-    // for signed T, any sign extension bits in cast value/comparand are immediately clipped by mask
-    const uint32_t shifted_comparand = ((uint32_t)comparand << bits_to_shift)&mask;
-    const uint32_t shifted_value     = ((uint32_t)value     << bits_to_shift)&mask;
-
-    for( atomic_backoff b;;b.pause() ) {
-        const uint32_t surroundings  = *aligned_ptr & ~mask; // may have changed during the pause
-        const uint32_t big_comparand = surroundings | shifted_comparand;
-        const uint32_t big_value     = surroundings | shifted_value;
-        // __TBB_machine_cmpswp4 presumed to have full fence.
-        // Cast shuts up /Wp64 warning
-        const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligned_ptr, big_value, big_comparand );
-        if( big_result == big_comparand // CAS succeeded
-          || ((big_result ^ big_comparand) & mask) != 0) // CAS failed and the bits of interest have changed
-        {
-            return T((big_result & mask) >> bits_to_shift);
-        }
-        else continue; // CAS failed but the bits of interest were not changed
-    }
-}
-#endif // __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED
-////////////////////////////////////////////////////////////////////////////////
-
-template<size_t S, typename T>
-inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand );
-
-template<>
-inline int8_t __TBB_CompareAndSwapGeneric <1,int8_t> (volatile void *ptr, int8_t value, int8_t comparand ) {
-#if __TBB_USE_GENERIC_PART_WORD_CAS
-    return __TBB_MaskedCompareAndSwap<int8_t>((volatile int8_t *)ptr,value,comparand);
-#else
-    return __TBB_machine_cmpswp1(ptr,value,comparand);
-#endif
-}
-
-template<>
-inline int16_t __TBB_CompareAndSwapGeneric <2,int16_t> (volatile void *ptr, int16_t value, int16_t comparand ) {
-#if __TBB_USE_GENERIC_PART_WORD_CAS
-    return __TBB_MaskedCompareAndSwap<int16_t>((volatile int16_t *)ptr,value,comparand);
-#else
-    return __TBB_machine_cmpswp2(ptr,value,comparand);
-#endif
-}
-
-template<>
-inline int32_t __TBB_CompareAndSwapGeneric <4,int32_t> (volatile void *ptr, int32_t value, int32_t comparand ) {
-    // Cast shuts up /Wp64 warning
-    return (int32_t)__TBB_machine_cmpswp4(ptr,value,comparand);
-}
-
-#if __TBB_64BIT_ATOMICS
-template<>
-inline int64_t __TBB_CompareAndSwapGeneric <8,int64_t> (volatile void *ptr, int64_t value, int64_t comparand ) {
-    return __TBB_machine_cmpswp8(ptr,value,comparand);
-}
-#endif
-
-template<size_t S, typename T>
-inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) {
-    T result;
-    for( atomic_backoff b;;b.pause() ) {
-        result = *reinterpret_cast<volatile T *>(ptr);
-        // __TBB_CompareAndSwapGeneric presumed to have full fence.
-        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, result+addend, result )==result )
-            break;
-    }
-    return result;
-}
-
-template<size_t S, typename T>
-inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) {
-    T result;
-    for( atomic_backoff b;;b.pause() ) {
-        result = *reinterpret_cast<volatile T *>(ptr);
-        // __TBB_CompareAndSwapGeneric presumed to have full fence.
-        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, value, result )==result )
-            break;
-    }
-    return result;
-}
-
-#if __TBB_USE_GENERIC_PART_WORD_CAS
-#define __TBB_machine_cmpswp1 tbb::internal::__TBB_CompareAndSwapGeneric<1,int8_t>
-#define __TBB_machine_cmpswp2 tbb::internal::__TBB_CompareAndSwapGeneric<2,int16_t>
-#endif
-
-#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_PART_WORD_FETCH_ADD
-#define __TBB_machine_fetchadd1 tbb::internal::__TBB_FetchAndAddGeneric<1,int8_t>
-#define __TBB_machine_fetchadd2 tbb::internal::__TBB_FetchAndAddGeneric<2,int16_t>
-#endif
-
-#if __TBB_USE_GENERIC_FETCH_ADD
-#define __TBB_machine_fetchadd4 tbb::internal::__TBB_FetchAndAddGeneric<4,int32_t>
-#endif
-
-#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_ADD
-#define __TBB_machine_fetchadd8 tbb::internal::__TBB_FetchAndAddGeneric<8,int64_t>
-#endif
-
-#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_PART_WORD_FETCH_STORE
-#define __TBB_machine_fetchstore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,int8_t>
-#define __TBB_machine_fetchstore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,int16_t>
-#endif
-
-#if __TBB_USE_GENERIC_FETCH_STORE
-#define __TBB_machine_fetchstore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,int32_t>
-#endif
-
-#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_DWORD_FETCH_STORE
-#define __TBB_machine_fetchstore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,int64_t>
-#endif
-
-#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
-#define __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(S)                                             \
-    atomic_selector<S>::word atomic_selector<S>::fetch_store ( volatile void* location, word value ) { \
-        return __TBB_machine_fetchstore##S( location, value );                                          \
-    }
-
-__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(1)
-__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(2)
-__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(4)
-__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(8)
-
-#undef __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE
-#endif /* __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
-
-#if __TBB_USE_GENERIC_DWORD_LOAD_STORE
-/*TODO: find a more elegant way to handle function names difference*/
-#if ! __TBB_USE_FENCED_ATOMICS
-    /* This name forwarding is needed for generic implementation of
-     * load8/store8 defined below (via macro) to pick the right CAS function*/
-    #define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8
-#endif
-__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence)
-__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence)
-
-#if ! __TBB_USE_FENCED_ATOMICS
-    #undef __TBB_machine_cmpswp8full_fence
-#endif
-
-#define __TBB_machine_store8 tbb::internal::__TBB_machine_generic_store8full_fence
-#define __TBB_machine_load8  tbb::internal::__TBB_machine_generic_load8full_fence
-#endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */
-
-#if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
-/** Fenced operations use volatile qualifier to prevent compiler from optimizing
-    them out, and on architectures with weak memory ordering to induce compiler
-    to generate code with appropriate acquire/release semantics.
-    On architectures like IA32, Intel64 (and likely Sparc TSO) volatile has
-    no effect on code gen, and consistency helpers serve as a compiler fence (the
-    latter being true for IA64/gcc as well to fix a bug in some gcc versions).
-    This code assumes that the generated instructions will operate atomically,
-    which typically requires a type that can be moved in a single instruction,
-    cooperation from the compiler for effective use of such an instruction,
-    and appropriate alignment of the data. **/
-template <typename T, size_t S>
-struct machine_load_store {
-    static T load_with_acquire ( const volatile T& location ) {
-        T to_return = location;
-        __TBB_acquire_consistency_helper();
-        return to_return;
-    }
-    static void store_with_release ( volatile T &location, T value ) {
-        __TBB_release_consistency_helper();
-        location = value;
-    }
-};
-
-//in general, plain load and store of 32bit compiler is not atomic for 64bit types
-#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
-template <typename T>
-struct machine_load_store<T,8> {
-    static T load_with_acquire ( const volatile T& location ) {
-        return (T)__TBB_machine_load8( (const volatile void*)&location );
-    }
-    static void store_with_release ( volatile T& location, T value ) {
-        __TBB_machine_store8( (volatile void*)&location, (int64_t)value );
-    }
-};
-#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
-#endif /* __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE */
-
-#if __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE
-template <typename T, size_t S>
-struct machine_load_store_seq_cst {
-    static T load ( const volatile T& location ) {
-        __TBB_full_memory_fence();
-        return machine_load_store<T,S>::load_with_acquire( location );
-    }
-#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
-    static void store ( volatile T &location, T value ) {
-        atomic_selector<S>::fetch_store( (volatile void*)&location, (typename atomic_selector<S>::word)value );
-    }
-#else /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
-    static void store ( volatile T &location, T value ) {
-        machine_load_store<T,S>::store_with_release( location, value );
-        __TBB_full_memory_fence();
-    }
-#endif /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
-};
-
-#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
-/** The implementation does not use functions __TBB_machine_load8/store8 as they
-    are not required to be sequentially consistent. **/
-template <typename T>
-struct machine_load_store_seq_cst<T,8> {
-    static T load ( const volatile T& location ) {
-        // Comparand and new value may be anything, they only must be equal, and
-        // the value should have a low probability to be actually found in 'location'.
-        const int64_t anyvalue = 2305843009213693951LL;
-        return __TBB_machine_cmpswp8( (volatile void*)const_cast<T*>(&location), anyvalue, anyvalue );
-    }
-    static void store ( volatile T &location, T value ) {
-        int64_t result = (volatile int64_t&)location;
-        while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result )
-            result = (volatile int64_t&)location;
-    }
-};
-#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
-#endif /*__TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE */
-
-#if __TBB_USE_GENERIC_RELAXED_LOAD_STORE
-// Relaxed operations add volatile qualifier to prevent compiler from optimizing them out.
-/** Volatile should not incur any additional cost on IA32, Intel64, and Sparc TSO
-    architectures. However on architectures with weak memory ordering compiler may
-    generate code with acquire/release semantics for operations on volatile data. **/
-template <typename T, size_t S>
-struct machine_load_store_relaxed {
-    static inline T load ( const volatile T& location ) {
-        return location;
-    }
-    static inline void store ( volatile T& location, T value ) {
-        location = value;
-    }
-};
-
-#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
-template <typename T>
-struct machine_load_store_relaxed<T,8> {
-    static inline T load ( const volatile T& location ) {
-        return (T)__TBB_machine_load8( (const volatile void*)&location );
-    }
-    static inline void store ( volatile T& location, T value ) {
-        __TBB_machine_store8( (volatile void*)&location, (int64_t)value );
-    }
-};
-#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
-#endif /* __TBB_USE_GENERIC_RELAXED_LOAD_STORE */
-
-#undef __TBB_WORDSIZE //this macro is forbidden to use outside of atomic machinery
-
-template<typename T>
-inline T __TBB_load_with_acquire(const volatile T &location) {
-    return machine_load_store<T,sizeof(T)>::load_with_acquire( location );
-}
-template<typename T, typename V>
-inline void __TBB_store_with_release(volatile T& location, V value) {
-    machine_load_store<T,sizeof(T)>::store_with_release( location, T(value) );
-}
-//! Overload that exists solely to avoid /Wp64 warnings.
-inline void __TBB_store_with_release(volatile size_t& location, size_t value) {
-    machine_load_store<size_t,sizeof(size_t)>::store_with_release( location, value );
-}
-
-template<typename T>
-inline T __TBB_load_full_fence(const volatile T &location) {
-    return machine_load_store_seq_cst<T,sizeof(T)>::load( location );
-}
-template<typename T, typename V>
-inline void __TBB_store_full_fence(volatile T& location, V value) {
-    machine_load_store_seq_cst<T,sizeof(T)>::store( location, T(value) );
-}
-//! Overload that exists solely to avoid /Wp64 warnings.
-inline void __TBB_store_full_fence(volatile size_t& location, size_t value) {
-    machine_load_store_seq_cst<size_t,sizeof(size_t)>::store( location, value );
-}
-
-template<typename T>
-inline T __TBB_load_relaxed (const volatile T& location) {
-    return machine_load_store_relaxed<T,sizeof(T)>::load( const_cast<const volatile T&>(location) );
-}
-template<typename T, typename V>
-inline void __TBB_store_relaxed ( volatile T& location, V value ) {
-    machine_load_store_relaxed<T,sizeof(T)>::store( const_cast<volatile T&>(location), T(value) );
-}
-//! Overload that exists solely to avoid /Wp64 warnings.
-inline void __TBB_store_relaxed ( volatile size_t& location, size_t value ) {
-    machine_load_store_relaxed<size_t,sizeof(size_t)>::store( const_cast<volatile size_t&>(location), value );
-}
-
-// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as
-// strict as type T.  The type should have a trivial default constructor and destructor, so that
-// arrays of that type can be declared without initializers.
-// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands
-// to a type bigger than T.
-// The default definition here works on machines where integers are naturally aligned and the
-// strictest alignment is 64.
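On compilers with C++11 support, the guarantee that __TBB_TypeWithAlignmentAtLeastAsStrict(T) documents above (and defines just below) can be expressed directly with alignas/alignof. A minimal sketch of the standard-C++ equivalent, illustrative only and not the macro the header actually defines:

#include <new>

// Raw storage at least as strictly aligned as T, declarable without initializers.
template <typename T>
struct aligned_slot {
    alignas(T) unsigned char bytes[sizeof(T)];
};

static_assert(alignof(aligned_slot<long double>) >= alignof(long double),
              "slot is at least as strictly aligned as T");

int main() {
    aligned_slot<double> slot;  // uninitialized but correctly aligned storage
    double* d = ::new (static_cast<void*>(slot.bytes)) double(1.5);  // placement-new into it
    return (*d == 1.5) ? 0 : 1; // double is trivially destructible, so no destructor call needed
}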
-#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict
-
-#if __TBB_ATTRIBUTE_ALIGNED_PRESENT
-
-#define __TBB_DefineTypeWithAlignment(PowerOf2)       \
-struct __TBB_machine_type_with_alignment_##PowerOf2 { \
-    uint32_t member[PowerOf2/sizeof(uint32_t)];       \
-} __attribute__((aligned(PowerOf2)));
-#define __TBB_alignof(T) __alignof__(T)
-
-#elif __TBB_DECLSPEC_ALIGN_PRESENT
-
-#define __TBB_DefineTypeWithAlignment(PowerOf2)       \
-__declspec(align(PowerOf2))                           \
-struct __TBB_machine_type_with_alignment_##PowerOf2 { \
-    uint32_t member[PowerOf2/sizeof(uint32_t)];       \
-};
-#define __TBB_alignof(T) __alignof(T)
-
-#else /* A compiler with unknown syntax for data alignment */
-#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T)
-#endif
-
-/* Now declare types aligned to useful powers of two */
-// TODO: Is __TBB_DefineTypeWithAlignment(8) needed on 32 bit platforms?
-__TBB_DefineTypeWithAlignment(16)
-__TBB_DefineTypeWithAlignment(32)
-__TBB_DefineTypeWithAlignment(64)
-
-typedef __TBB_machine_type_with_alignment_64 __TBB_machine_type_with_strictest_alignment;
-
-// Primary template is a declaration of incomplete type so that it fails with unknown alignments
-template<size_t N> struct type_with_alignment;
-
-// Specializations for allowed alignments
-template<> struct type_with_alignment<1> { char member; };
-template<> struct type_with_alignment<2> { uint16_t member; };
-template<> struct type_with_alignment<4> { uint32_t member; };
-template<> struct type_with_alignment<8> { uint64_t member; };
-template<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; };
-template<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; };
-template<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; };
-
-#if __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN
-//! Work around for bug in GNU 3.2 and MSVC compilers.
-/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated.
-    The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */
-template<size_t Size, typename T>
-struct work_around_alignment_bug {
-    static const size_t alignment = __TBB_alignof(T);
-};
-#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<tbb::internal::work_around_alignment_bug<sizeof(T),T>::alignment>
-#else
-#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__TBB_alignof(T)>
-#endif /* __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN */
-
-#endif /* __TBB_TypeWithAlignmentAtLeastAsStrict */
-
-// Template class here is to avoid instantiation of the static data for modules that don't use it
-template<typename T>
-struct reverse {
-    static const T byte_table[256];
-};
-// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed
-// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.
-template<typename T>
-const T reverse<T>::byte_table[256] = {
-    0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
-    0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
-    0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
-    0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
-    0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
-    0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
-    0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
-    0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
-    0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
-    0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
-    0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
-    0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
-    0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
-    0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
-    0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
-    0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
-};
-
-} // namespace internal @endcond
-} // namespace tbb
-
-// Preserving access to legacy APIs
-using tbb::internal::__TBB_load_with_acquire;
-using tbb::internal::__TBB_store_with_release;
-
-// Mapping historically used names to the ones expected by atomic_load_store_traits
-#define __TBB_load_acquire  __TBB_load_with_acquire
-#define __TBB_store_release __TBB_store_with_release
-
-#ifndef __TBB_Log2
-inline intptr_t __TBB_Log2( uintptr_t x ) {
-    if( x==0 ) return -1;
-    intptr_t result = 0;
-
-#if !defined(_M_ARM)
-    uintptr_t tmp;
-    if( sizeof(x)>4 && (tmp = ((uint64_t)x)>>32) ) { x=tmp; result += 32; }
-#endif
-    if( uintptr_t tmp = x>>16 ) { x=tmp; result += 16; }
-    if( uintptr_t tmp = x>>8 )  { x=tmp; result += 8; }
-    if( uintptr_t tmp = x>>4 )  { x=tmp; result += 4; }
-    if( uintptr_t tmp = x>>2 )  { x=tmp; result += 2; }
-
-    return (x&2)? result+1: result;
-}
-#endif
-
-#ifndef __TBB_AtomicOR
-inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) {
-    for( tbb::internal::atomic_backoff b;;b.pause() ) {
-        uintptr_t tmp = *(volatile uintptr_t *)operand;
-        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);
-        if( result==tmp ) break;
-    }
-}
-#endif
-
-#ifndef __TBB_AtomicAND
-inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) {
-    for( tbb::internal::atomic_backoff b;;b.pause() ) {
-        uintptr_t tmp = *(volatile uintptr_t *)operand;
-        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp);
-        if( result==tmp ) break;
-    }
-}
-#endif
-
-#if __TBB_PREFETCHING
-#ifndef __TBB_cl_prefetch
-#error This platform does not define cache management primitives required for __TBB_PREFETCHING
-#endif
-
-#ifndef __TBB_cl_evict
-#define __TBB_cl_evict(p)
-#endif
-#endif
-
-#ifndef __TBB_Flag
-typedef unsigned char __TBB_Flag;
-#endif
-typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;
-
-#ifndef __TBB_TryLockByte
-inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) {
-    return __TBB_machine_cmpswp1(&flag,1,0)==0;
-}
-#endif
-
-#ifndef __TBB_LockByte
-inline __TBB_Flag __TBB_LockByte( __TBB_atomic_flag& flag ) {
-    tbb::internal::atomic_backoff backoff;
-    while( !__TBB_TryLockByte(flag) ) backoff.pause();
-    return 0;
-}
-#endif
-
-#ifndef __TBB_UnlockByte
-#define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0)
-#endif
-
-// lock primitives with TSX
-#if ( __TBB_x86_32 || __TBB_x86_64 ) /* only on ia32/intel64 */
-inline void __TBB_TryLockByteElidedCancel() { __TBB_machine_try_lock_elided_cancel(); }
-
-inline bool __TBB_TryLockByteElided( __TBB_atomic_flag& flag ) {
-    bool res = __TBB_machine_try_lock_elided( &flag )!=0;
-    // to avoid the "lemming" effect, we need to abort the transaction
-    // if __TBB_machine_try_lock_elided returns false (i.e., someone else
-    // has acquired the mutex non-speculatively).
-    if( !res ) __TBB_TryLockByteElidedCancel();
-    return res;
-}
-
-inline void __TBB_LockByteElided( __TBB_atomic_flag& flag )
-{
-    for(;;) {
-        tbb::internal::spin_wait_while_eq( flag, 1 );
-        if( __TBB_machine_try_lock_elided( &flag ) )
-            return;
-        // Another thread acquired the lock "for real".
-        // To avoid the "lemming" effect, we abort the transaction.
-        __TBB_TryLockByteElidedCancel();
-    }
-}
-
-inline void __TBB_UnlockByteElided( __TBB_atomic_flag& flag ) {
-    __TBB_machine_unlock_elided( &flag );
-}
-#endif
-
-#ifndef __TBB_ReverseByte
-inline unsigned char __TBB_ReverseByte(unsigned char src) {
-    return tbb::internal::reverse<unsigned char>::byte_table[src];
-}
-#endif
-
-template<typename T>
-T __TBB_ReverseBits(T src) {
-    T dst;
-    unsigned char *original = (unsigned char *) &src;
-    unsigned char *reversed = (unsigned char *) &dst;
-
-    for( int i = sizeof(T)-1; i >= 0; i-- )
-        reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] );
-
-    return dst;
-}
-
-#endif /* __TBB_machine_H */
diff --git a/src/tbb/include/tbb/tbb_profiling.h b/src/tbb/include/tbb/tbb_profiling.h
deleted file mode 100644
index 5a0830f75..000000000
--- a/src/tbb/include/tbb/tbb_profiling.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.
-    Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_profiling_H
-#define __TBB_profiling_H
-
-namespace tbb {
-    namespace internal {
-
-        //
-        // This is not under __TBB_ITT_STRUCTURE_API because these values are used directly in flow_graph.h.
-        //
-
-        // include list of index names
-        #define TBB_STRING_RESOURCE(index_name,str) index_name,
-        enum string_index {
-           #include "internal/_tbb_strings.h"
-           NUM_STRINGS
-        };
-        #undef TBB_STRING_RESOURCE
-
-        enum itt_relation
-        {
-        __itt_relation_is_unknown = 0,
-        __itt_relation_is_dependent_on,         /**< "A is dependent on B" means that A cannot start until B completes */
-        __itt_relation_is_sibling_of,           /**< "A is sibling of B" means that A and B were created as a group */
-        __itt_relation_is_parent_of,            /**< "A is parent of B" means that A created B */
-        __itt_relation_is_continuation_of,      /**< "A is continuation of B" means that A assumes the dependencies of B */
-        __itt_relation_is_child_of,             /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */
-        __itt_relation_is_continued_by,         /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */
-        __itt_relation_is_predecessor_to        /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */
-        };
-
-    }
-}
-
-// Check if the tools support is enabled
-#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS
-
-#if _WIN32||_WIN64
-#include <stdlib.h>  /* mbstowcs_s */
-#endif
-#include "tbb_stddef.h"
-
-namespace tbb {
-    namespace internal {
-
-#if _WIN32||_WIN64
-        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name );
-        inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) {
-#if _MSC_VER>=1400
-            size_t len;
-            mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE );
-            return len;   // mbstowcs_s counts null terminator
-#else
-            size_t len = mbstowcs( wcs, mbs, bufsize );
-            if(wcs && len!=size_t(-1) )
-                wcs[len<bufsize-1? len: bufsize-1] = wchar_t('\0');
-            return len+1; // mbstowcs counts the string length, adjust it to be consistent with mbstowcs_s
-#endif
-        }
-#endif /* WIN */
-
-    } // namespace internal
-} // namespace tbb
-
-#endif /* tools support check */
-
-namespace tbb {
-    namespace internal {
-
-        enum notify_type {prepare=0, cancel, acquired, releasing};
-
-        const uintptr_t NUM_NOTIFY_TYPES = 4; // set to # elements in enum above
-
-        void __TBB_EXPORTED_FUNC call_itt_notify_v5(int t, void *ptr);
-        void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3(void *dst, void *src);
-        void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3(const void *src);
-        void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
-
-#if __TBB_ITT_STRUCTURE_API
-        enum itt_domain_enum { ITT_DOMAIN_FLOW=0 };
-
-        void __TBB_EXPORTED_FUNC itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra,
-                                                         void *parent, unsigned long long parent_extra, string_index name_index );
-        void __TBB_EXPORTED_FUNC itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra,
-                                                          string_index key, const char *value );
-        void __TBB_EXPORTED_FUNC itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra,
-                                                      itt_relation relation, void *addr1, unsigned long long addr1_extra );
-        void __TBB_EXPORTED_FUNC itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra,
-                                                    void *parent, unsigned long long parent_extra, string_index name_index );
-        void __TBB_EXPORTED_FUNC itt_task_end_v7( itt_domain_enum domain );
-#endif // __TBB_ITT_STRUCTURE_API
-
-        // two template arguments are to workaround /Wp64 warning with tbb::atomic specialized for unsigned type
-        template <typename T, typename U>
-        inline void itt_store_word_with_release(tbb::atomic<T>& dst, U src) {
-#if TBB_USE_THREADING_TOOLS
-            // This assertion should be replaced with static_assert
-            __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized.");
-            itt_store_pointer_with_release_v3(&dst, (void *)uintptr_t(src));
-#else
-            dst = src;
-#endif // TBB_USE_THREADING_TOOLS
-        }
-
-        template <typename T>
-        inline T itt_load_word_with_acquire(const tbb::atomic<T>& src) {
-#if TBB_USE_THREADING_TOOLS
-            // This assertion should be replaced with static_assert
-            __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized.");
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-            // Workaround for overzealous compiler warnings
-            #pragma warning (push)
-            #pragma warning (disable: 4311)
-#endif
-            T result = (T)itt_load_pointer_with_acquire_v3(&src);
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
-            #pragma warning (pop)
-#endif
-            return result;
-#else
-            return src;
-#endif // TBB_USE_THREADING_TOOLS
-        }
-
-        template <typename T>
-        inline void itt_store_word_with_release(T& dst, T src) {
-#if TBB_USE_THREADING_TOOLS
-            // This assertion should be replaced with static_assert
-            __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized.");
-            itt_store_pointer_with_release_v3(&dst, (void *)src);
-#else
-            __TBB_store_with_release(dst, src);
-#endif // TBB_USE_THREADING_TOOLS
-        }
-
-        template <typename T>
-        inline T itt_load_word_with_acquire(const T& src) {
-#if TBB_USE_THREADING_TOOLS
-            // This assertion should be replaced with static_assert
-            __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized");
-            return (T)itt_load_pointer_with_acquire_v3(&src);
-#else
-            return __TBB_load_with_acquire(src);
-#endif // TBB_USE_THREADING_TOOLS
-        }
-
-        template <typename T>
-        inline void itt_hide_store_word(T& dst, T src) {
-#if TBB_USE_THREADING_TOOLS
-            //TODO: This assertion should be replaced with static_assert
-            __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized");
-            itt_store_pointer_with_release_v3(&dst, (void *)src);
-#else
-            dst = src;
-#endif
-        }
-
-        //TODO: rename to itt_hide_load_word_relaxed
-        template <typename T>
-        inline T itt_hide_load_word(const T& src) {
-#if TBB_USE_THREADING_TOOLS
-            //TODO: This assertion should be replaced with static_assert
-            __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized.");
-            return (T)itt_load_pointer_v3(&src);
-#else
-            return src;
-#endif
-        }
-
-#if TBB_USE_THREADING_TOOLS
-        inline void call_itt_notify(notify_type t, void *ptr) {
-            call_itt_notify_v5((int)t, ptr);
-        }
-
-#else
-        inline void call_itt_notify(notify_type /*t*/, void * /*ptr*/) {}
-
-#endif // TBB_USE_THREADING_TOOLS
-
-#if __TBB_ITT_STRUCTURE_API
-        inline void itt_make_task_group( itt_domain_enum domain, void *group, unsigned long long group_extra,
-                                         void *parent, unsigned long long parent_extra, string_index name_index ) {
-            itt_make_task_group_v7( domain, group, group_extra, parent, parent_extra, name_index );
-        }
-
-        inline void itt_metadata_str_add( itt_domain_enum domain, void *addr, unsigned long long addr_extra,
-                                          string_index key, const char *value ) {
-            itt_metadata_str_add_v7( domain, addr, addr_extra, key, value );
-        }
-
-        inline void itt_relation_add( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra,
-                                      itt_relation relation, void *addr1, unsigned long long addr1_extra ) {
-            itt_relation_add_v7( domain, addr0, addr0_extra, relation, addr1, addr1_extra );
-        }
-
-        inline void itt_task_begin( itt_domain_enum domain, void *task, unsigned long long task_extra,
-                                    void *parent, unsigned long long parent_extra, string_index name_index ) {
-            itt_task_begin_v7( domain, task, task_extra, parent, parent_extra, name_index );
-        }
-
-        inline void itt_task_end( itt_domain_enum domain ) {
-            itt_task_end_v7( domain );
-        }
-#endif // __TBB_ITT_STRUCTURE_API
-
-    } // namespace internal
-} // namespace tbb
-
-#endif /* __TBB_profiling_H */
diff --git a/src/tbb/include/tbb/tbb_stddef.h b/src/tbb/include/tbb/tbb_stddef.h
deleted file mode 100644
index bab11ac5a..000000000
--- a/src/tbb/include/tbb/tbb_stddef.h
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.
All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_tbb_stddef_H -#define __TBB_tbb_stddef_H - -// Marketing-driven product version -#define TBB_VERSION_MAJOR 4 -#define TBB_VERSION_MINOR 3 - -// Engineering-focused interface version -#define TBB_INTERFACE_VERSION 8000 -#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 - -// The oldest major interface version still supported -// To be used in SONAME, manifests, etc. -#define TBB_COMPATIBLE_INTERFACE_VERSION 2 - -#define __TBB_STRING_AUX(x) #x -#define __TBB_STRING(x) __TBB_STRING_AUX(x) - -// We do not need defines below for resource processing on windows -#if !defined RC_INVOKED - -// Define groups for Doxygen documentation -/** - * @defgroup algorithms Algorithms - * @defgroup containers Containers - * @defgroup memory_allocation Memory Allocation - * @defgroup synchronization Synchronization - * @defgroup timing Timing - * @defgroup task_scheduling Task Scheduling - */ - -// Simple text that is displayed on the main page of Doxygen documentation. -/** - * \mainpage Main Page - * - * Click the tabs above for information about the - * - Modules (groups of functionality) implemented by the library - * - Classes provided by the library - * - Files constituting the library. - * . - * Please note that significant part of TBB functionality is implemented in the form of - * template functions, descriptions of which are not accessible on the Classes - * tab. Use Modules or Namespace/Namespace Members - * tabs to find them. - * - * Additional pieces of information can be found here - * - \subpage concepts - * . - */ - -/** \page concepts TBB concepts - - A concept is a set of requirements to a type, which are necessary and sufficient - for the type to model a particular behavior or a set of behaviors. Some concepts - are specific to a particular algorithm (e.g. algorithm body), while other ones - are common to several algorithms (e.g. range concept). - - All TBB algorithms make use of different classes implementing various concepts. - Implementation classes are supplied by the user as type arguments of template - parameters and/or as objects passed as function call arguments. The library - provides predefined implementations of some concepts (e.g. 
several kinds of
-    \ref range_req "ranges"), while other ones must always be implemented by the user.
-
-    TBB defines a set of minimal requirements each concept must conform to. Here is
-    the list of different concepts hyperlinked to the corresponding requirements specifications:
-    - \subpage range_req
-    - \subpage parallel_do_body_req
-    - \subpage parallel_for_body_req
-    - \subpage parallel_reduce_body_req
-    - \subpage parallel_scan_body_req
-    - \subpage parallel_sort_iter_req
-**/
-
-// tbb_config.h should be included the first since it contains macro definitions used in other headers
-#include "tbb_config.h"
-
-#if _MSC_VER >=1400
-    #define __TBB_EXPORTED_FUNC   __cdecl
-    #define __TBB_EXPORTED_METHOD __thiscall
-#else
-    #define __TBB_EXPORTED_FUNC
-    #define __TBB_EXPORTED_METHOD
-#endif
-
-#if __INTEL_COMPILER || _MSC_VER
-#define __TBB_NOINLINE(decl) __declspec(noinline) decl
-#elif __GNUC__
-#define __TBB_NOINLINE(decl) decl __attribute__ ((noinline))
-#else
-#define __TBB_NOINLINE(decl) decl
-#endif
-
-#if __TBB_NOEXCEPT_PRESENT
-#define __TBB_NOEXCEPT(expression) noexcept(expression)
-#else
-#define __TBB_NOEXCEPT(expression)
-#endif
-
-#include <stddef.h>      /* Need size_t and ptrdiff_t */
-
-#if _MSC_VER
-    #define __TBB_tbb_windef_H
-    #include "internal/_tbb_windef.h"
-    #undef __TBB_tbb_windef_H
-#endif
-#if !defined(_MSC_VER) || _MSC_VER>=1600
-    #include <stdint.h>
-#endif
-
-//! Type for an assertion handler
-typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment );
-
-#if TBB_USE_ASSERT
-
-    #define __TBB_ASSERT_NS(predicate,message,ns) ((predicate)?((void)0) : ns::assertion_failure(__FILE__,__LINE__,#predicate,message))
-    //! Assert that x is true.
-    /** If x is false, print assertion failure message.
-        If the comment argument is not NULL, it is printed as part of the failure message.
-        The comment argument has no other effect. */
-#if __TBBMALLOC_BUILD
-namespace rml { namespace internal {
-    #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,rml::internal)
-#else
-namespace tbb {
-    #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,tbb)
-#endif
-
-    #define __TBB_ASSERT_EX __TBB_ASSERT
-
-    //! Set assertion handler and return previous value of it.
-    assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler );
-
-    //! Process an assertion failure.
-    /** Normally called from __TBB_ASSERT macro.
-        If assertion handler is null, print message for assertion failure and abort.
-        Otherwise call the assertion handler. */
-    void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment );
-
-#if __TBBMALLOC_BUILD
-}} // namespace rml::internal
-#else
-} // namespace tbb
-#endif
-#else /* !TBB_USE_ASSERT */
-
-    //! No-op version of __TBB_ASSERT.
-    #define __TBB_ASSERT(predicate,comment) ((void)0)
-    //! "Extended" version is useful to suppress warnings if a variable is only used with an assert
-    #define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate)))
-
-#endif /* !TBB_USE_ASSERT */
-
-//! The namespace tbb contains all components of the library.
-namespace tbb { - -#if _MSC_VER && _MSC_VER<1600 - namespace internal { - typedef __int8 int8_t; - typedef __int16 int16_t; - typedef __int32 int32_t; - typedef __int64 int64_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - typedef unsigned __int64 uint64_t; - } // namespace internal -#else /* Posix */ - namespace internal { - using ::int8_t; - using ::int16_t; - using ::int32_t; - using ::int64_t; - using ::uint8_t; - using ::uint16_t; - using ::uint32_t; - using ::uint64_t; - } // namespace internal -#endif /* Posix */ - - using std::size_t; - using std::ptrdiff_t; - -//! The function returns the interface version of the TBB shared library being used. -/** - * The version it returns is determined at runtime, not at compile/link time. - * So it can be different than the value of TBB_INTERFACE_VERSION obtained at compile time. - */ -extern "C" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version(); - -//! Dummy type that distinguishes splitting constructor from copy constructor. -/** - * See description of parallel_for and parallel_reduce for example usages. - * @ingroup algorithms - */ -class split { -}; - -//! Type enables transmission of splitting proportion from partitioners to range objects -/** - * In order to make use of such facility Range objects must implement - * splitting constructor with this type passed and initialize static - * constant boolean field 'is_divisible_in_proportion' with the value - * of 'true' - */ -class proportional_split { -public: - proportional_split(size_t _left = 1, size_t _right = 1) : my_left(_left), my_right(_right) { } - proportional_split(split) : my_left(1), my_right(1) { } - - size_t left() const { return my_left; } - size_t right() const { return my_right; } - - void set_proportion(size_t _left, size_t _right) { - my_left = _left; - my_right = _right; - } - - // used when range does not support proportional split - operator split() const { return split(); } -private: - size_t my_left, my_right; -}; - -/** - * @cond INTERNAL - * @brief Identifiers declared inside namespace internal should never be used directly by client code. - */ -namespace internal { - -//! Compile-time constant that is upper bound on cache line/sector size. -/** It should be used only in situations where having a compile-time upper - bound is more useful than a run-time exact answer. - @ingroup memory_allocation */ -const size_t NFS_MaxLineSize = 128; - -/** Label for data that may be accessed from different threads, and that may eventually become wrapped - in a formal atomic type. - - Note that no problems have yet been observed relating to the definition currently being empty, - even if at least "volatile" would seem to be in order to avoid data sometimes temporarily hiding - in a register (although "volatile" as a "poor man's atomic" lacks several other features of a proper - atomic, some of which are now provided instead through specialized functions). - - Note that usage is intentionally compatible with a definition as qualifier "volatile", - both as a way to have the compiler help enforce use of the label and to quickly rule out - one potential issue. - - Note however that, with some architecture/compiler combinations, e.g. on IA-64 architecture, "volatile" - also has non-portable memory semantics that are needlessly expensive for "relaxed" operations. 
-
-    Note that this must only be applied to data that will not change bit patterns when cast to/from
-    an integral type of the same length; tbb::atomic must be used instead for, e.g., floating-point types.
-
-    TODO: apply wherever relevant **/
-#define __TBB_atomic // intentionally empty, see above
-
-template<typename T, size_t S, size_t R>
-struct padded_base : T {
-    char pad[S - R];
-};
-template<typename T, size_t S> struct padded_base<T, S, 0> : T {};
-
-//! Pads type T to fill out to a multiple of cache line size.
-template<typename T>
-struct padded : padded_base<T, NFS_MaxLineSize, sizeof(T) % NFS_MaxLineSize> {};
-
-//! Extended variant of the standard offsetof macro
-/** The standard offsetof macro is not sufficient for TBB as it can be used for
-    POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/
-#define __TBB_offsetof(class_name, member_name) \
-    ((ptrdiff_t)&(reinterpret_cast<class_name*>(0x1000)->member_name) - 0x1000)
-
-//! Returns address of the object containing a member with the given name and address
-#define __TBB_get_object_ref(class_name, member_name, member_addr) \
-    (*reinterpret_cast<class_name*>((char*)member_addr - __TBB_offsetof(class_name, member_name)))
-
-//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info
-void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info );
-
-#if TBB_USE_EXCEPTIONS
-    #define __TBB_TRY try
-    #define __TBB_CATCH(e) catch(e)
-    #define __TBB_THROW(e) throw e
-    #define __TBB_RETHROW() throw
-#else /* !TBB_USE_EXCEPTIONS */
-    inline bool __TBB_false() { return false; }
-    #define __TBB_TRY
-    #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() )
-    #define __TBB_THROW(e) ((void)0)
-    #define __TBB_RETHROW() ((void)0)
-#endif /* !TBB_USE_EXCEPTIONS */
-
-//! Report a runtime warning.
-void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... );
-
-#if TBB_USE_ASSERT
-static void* const poisoned_ptr = reinterpret_cast<void*>(-1);
-
-//! Set p to invalid pointer value.
-//  Also works for regular (non-__TBB_atomic) pointers.
-template<typename T>
-inline void poison_pointer( T* __TBB_atomic & p ) { p = reinterpret_cast<T*>(poisoned_ptr); }
-
-/** Expected to be used in assertions only, thus no empty form is defined. **/
-template<typename T>
-inline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }
-#else
-template<typename T>
-inline void poison_pointer( T* __TBB_atomic & ) {/*do nothing*/}
-#endif /* !TBB_USE_ASSERT */
-
-//! Cast between unrelated pointer types.
-/** This method should be used sparingly as a last resort for dealing with
-    situations that inherently break strict ISO C++ aliasing rules. */
-// T is a pointer type because it will be explicitly provided by the programmer as a template argument;
-// U is a referent type to enable the compiler to check that "ptr" is a pointer, deducing U in the process.
-template<typename T, typename U>
-inline T punned_cast( U* ptr ) {
-    uintptr_t x = reinterpret_cast<uintptr_t>(ptr);
-    return reinterpret_cast<T>(x);
-}
-
-//! Base class for types that should not be assigned.
-class no_assign {
-    // Deny assignment
-    void operator=( const no_assign& );
-public:
-#if __GNUC__
-    //! Explicitly define default construction, because otherwise gcc issues gratuitous warning.
-    no_assign() {}
-#endif /* __GNUC__ */
-};
-
-//! Base class for types that should not be copied or assigned.
-class no_copy: no_assign {
-    //! Deny copy construction
-    no_copy( const no_copy& );
-public:
-    //! Allow default construction
-    no_copy() {}
-};
-
-#if TBB_DEPRECATED_MUTEX_COPYING
-class mutex_copy_deprecated_and_disabled {};
-#else
-// By default various implementations of mutexes are not copy constructible
-// and not copy assignable.
-class mutex_copy_deprecated_and_disabled : no_copy {};
-#endif
-
-//! A function to check if passed in pointer is aligned on a specific border
-template<typename T>
-inline bool is_aligned(T* pointer, uintptr_t alignment) {
-    return 0==((uintptr_t)pointer & (alignment-1));
-}
-
-//! A function to check if passed integer is a power of 2
-template<typename integer_type>
-inline bool is_power_of_two(integer_type arg) {
-    return arg && (0 == (arg & (arg - 1)));
-}
-
-//! A function to compute arg modulo divisor where divisor is a power of 2.
-template<typename argument_integer_type, typename divisor_integer_type>
-inline argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor) {
-    // Divisor is assumed to be a power of two (which is valid for current uses).
-    __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" );
-    return (arg & (divisor - 1));
-}
-
-
-//! A function to determine if "arg is a multiplication of a number and a power of 2".
-// i.e. for strictly positive i and j, with j a power of 2,
-// determines whether i==j<<k for some nonnegative k (so i==j yields true).
-template<typename argument_integer_type, typename divisor_integer_type>
-inline bool is_power_of_two_factor(argument_integer_type arg, divisor_integer_type divisor) {
-    // Divisor is assumed to be a power of two (which is valid for current uses).
-    __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" );
-    return 0 == (arg & (arg - divisor));
-}
-
-//! Utility template function to prevent "unused" warnings by various compilers.
-template<typename T>
-void suppress_unused_warning( const T& ) {}
-
-// Struct to be used as a version tag for inline functions.
-/** Version tag can be necessary to prevent loader on Linux from using the wrong
-    symbol in debug builds (when inline functions are compiled as out-of-line). **/
-struct version_tag_v3 {};
-
-typedef version_tag_v3 version_tag;
-
-} // internal
-} // tbb
-
-// Following is a set of classes and functions typically used in compile-time "metaprogramming".
-// TODO: move all that to a separate header
-
-#if __TBB_ALLOCATOR_TRAITS_PRESENT
-#include <memory> //for allocator_traits
-#endif
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT || _LIBCPP_VERSION
-#include <utility> // for std::move
-#endif
-
-namespace tbb {
-namespace internal {
-
-//! Class for determining type of std::allocator<T>::value_type.
-template<typename T>
-struct allocator_type {
-    typedef T value_type;
-};
-
-#if _MSC_VER
-//! Microsoft std::allocator has non-standard extension that strips const from a type.
-template<typename T>
-struct allocator_type<const T> {
-    typedef T value_type;
-};
-#endif
-
-// Ad-hoc implementation of true_type & false_type
-// Intended strictly for internal use! For public APIs (traits etc), use C++11 analogues.
-template <bool v>
-struct bool_constant {
-    static /*constexpr*/ const bool value = v;
-};
-typedef bool_constant<true> true_type;
-typedef bool_constant<false> false_type;
-
-#if __TBB_ALLOCATOR_TRAITS_PRESENT
-using std::allocator_traits;
-#else
-template<typename allocator>
-struct allocator_traits{
-    typedef tbb::internal::false_type propagate_on_container_move_assignment;
-};
-#endif
-
-//! A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size.
-template <unsigned u, unsigned long long ull>
-struct select_size_t_constant {
-    //Explicit cast is needed to avoid compiler warnings about possible truncation.
-    //The value of the right size, which is selected by ?:, is anyway not truncated or promoted.
-    static const size_t value = (size_t)((sizeof(size_t)==sizeof(u)) ? u : ull);
-};
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-using std::move;
-#elif defined(_LIBCPP_NAMESPACE)
-// libc++ defines "pre-C++11 move" similarly to our; use it to avoid name conflicts in some cases.
-using std::_LIBCPP_NAMESPACE::move;
-#else
-template <typename T>
-T& move( T& x ) { return x; }
-#endif
-
-template <bool condition>
-struct STATIC_ASSERTION_FAILED;
-
-template <>
-struct STATIC_ASSERTION_FAILED<false> { enum {value=1};};
-
-template<>
-struct STATIC_ASSERTION_FAILED<true>; //intentionally left undefined to cause compile time error
-
-//! @endcond
-}} // namespace tbb::internal
-
-#if __TBB_STATIC_ASSERT_PRESENT
-#define __TBB_STATIC_ASSERT(condition,msg) static_assert(condition,msg)
-#else
-//please note condition is intentionally inverted to get a bit more understandable error msg
-#define __TBB_STATIC_ASSERT_IMPL1(condition,msg,line) \
-    enum {static_assert_on_line_##line = tbb::internal::STATIC_ASSERTION_FAILED<!(condition)>::value}
-
-#define __TBB_STATIC_ASSERT_IMPL(condition,msg,line) __TBB_STATIC_ASSERT_IMPL1(condition,msg,line)
-//! Verify at compile time that passed in condition is hold
-#define __TBB_STATIC_ASSERT(condition,msg) __TBB_STATIC_ASSERT_IMPL(condition,msg,__LINE__)
-#endif
-
-#endif /* RC_INVOKED */
-#endif /* __TBB_tbb_stddef_H */
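The cache-line padding idiom deleted above is compact but easy to misread, so a worked form may help. The following is a minimal, self-contained C++ sketch of the same technique; the 64-byte line size and the counter type are assumptions chosen for illustration (TBB derived the size from its NFS_MaxLineSize constant), not the library's actual code.

    #include <cstddef>

    const std::size_t kLineSize = 64; // assumed cache-line size

    // Pad T out to S bytes; R is the remainder sizeof(T) % S.
    template <typename T, std::size_t S, std::size_t R>
    struct padded_base : T {
        char pad[S - R]; // fill the rest of the last cache line
    };
    // If sizeof(T) is already a multiple of S, add no padding at all.
    template <typename T, std::size_t S>
    struct padded_base<T, S, 0> : T {};

    template <typename T>
    struct padded : padded_base<T, kLineSize, sizeof(T) % kLineSize> {};

    struct counter { long value; };

    // Adjacent elements no longer share a cache line, which avoids
    // false sharing when each thread updates its own slot.
    static padded<counter> per_thread_counters[8];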
-*/
-
-#ifndef __TBB_tbb_thread_H
-#define __TBB_tbb_thread_H
-
-#include "tbb_stddef.h"
-#if _WIN32||_WIN64
-#include "machine/windows_api.h"
-#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI
-#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* )
-#if __TBB_WIN8UI_SUPPORT
-typedef size_t thread_id_type;
-#else  // __TBB_WIN8UI_SUPPORT
-typedef DWORD thread_id_type;
-#endif // __TBB_WIN8UI_SUPPORT
-#else
-#define __TBB_NATIVE_THREAD_ROUTINE void*
-#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* )
-#include <pthread.h>
-#endif // _WIN32||_WIN64
-
-#include "tick_count.h"
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
-    #pragma warning (push)
-    #pragma warning (disable: 4530)
-#endif
-
-#include <iosfwd>
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    #pragma warning (pop)
-#endif
-
-namespace tbb {
-
-namespace internal {
-    class tbb_thread_v3;
-}
-
-inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true);
-
-namespace internal {
-
-    //! Allocate a closure
-    void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size );
-    //! Free a closure allocated by allocate_closure_v3
-    void __TBB_EXPORTED_FUNC free_closure_v3( void* );
-
-    struct thread_closure_base {
-        void* operator new( size_t size ) {return allocate_closure_v3(size);}
-        void operator delete( void* ptr ) {free_closure_v3(ptr);}
-    };
-
-    template<class F> struct thread_closure_0: thread_closure_base {
-        F function;
-
-        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {
-            thread_closure_0 *self = static_cast<thread_closure_0*>(c);
-            self->function();
-            delete self;
-            return 0;
-        }
-        thread_closure_0( const F& f ) : function(f) {}
-    };
-    //! Structure used to pass user function with 1 argument to thread.
-    template<class F, class X> struct thread_closure_1: thread_closure_base {
-        F function;
-        X arg1;
-        //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll
-        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {
-            thread_closure_1 *self = static_cast<thread_closure_1*>(c);
-            self->function(self->arg1);
-            delete self;
-            return 0;
-        }
-        thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {}
-    };
-    template<class F, class X, class Y> struct thread_closure_2: thread_closure_base {
-        F function;
-        X arg1;
-        Y arg2;
-        //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll
-        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {
-            thread_closure_2 *self = static_cast<thread_closure_2*>(c);
-            self->function(self->arg1, self->arg2);
-            delete self;
-            return 0;
-        }
-        thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {}
-    };
-
-    //! Versioned thread class.
-    class tbb_thread_v3 {
-#if __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN
-        // Workaround for a compiler bug: declaring the copy constructor as public
-        // enables use of the moving constructor.
-        // The definition is not provided in order to prohibit copying.
-    public:
-#endif
-        tbb_thread_v3(const tbb_thread_v3&); // = delete;   // Deny access
-    public:
-#if _WIN32||_WIN64
-        typedef HANDLE native_handle_type;
-#else
-        typedef pthread_t native_handle_type;
-#endif // _WIN32||_WIN64
-
-        class id;
-        //! Constructs a thread object that does not represent a thread of execution.
-        tbb_thread_v3() __TBB_NOEXCEPT(true) : my_handle(0)
-#if _WIN32||_WIN64
-            , my_thread_id(0)
-#endif // _WIN32||_WIN64
-        {}
-
-        //! Constructs an object and executes f() in a new thread
-        template <class F> explicit tbb_thread_v3(F f) {
-            typedef internal::thread_closure_0<F> closure_type;
-            internal_start(closure_type::start_routine, new closure_type(f));
-        }
-        //! Constructs an object and executes f(x) in a new thread
-        template <class F, class X> tbb_thread_v3(F f, X x) {
-            typedef internal::thread_closure_1<F,X> closure_type;
-            internal_start(closure_type::start_routine, new closure_type(f,x));
-        }
-        //! Constructs an object and executes f(x,y) in a new thread
-        template <class F, class X, class Y> tbb_thread_v3(F f, X x, Y y) {
-            typedef internal::thread_closure_2<F,X,Y> closure_type;
-            internal_start(closure_type::start_routine, new closure_type(f,x,y));
-        }
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-        tbb_thread_v3(tbb_thread_v3&& x) __TBB_NOEXCEPT(true)
-            : my_handle(x.my_handle)
-#if _WIN32||_WIN64
-            , my_thread_id(x.my_thread_id)
-#endif
-        {
-            x.internal_wipe();
-        }
-        tbb_thread_v3& operator=(tbb_thread_v3&& x) __TBB_NOEXCEPT(true) {
-            internal_move(x);
-            return *this;
-        }
-    private:
-        tbb_thread_v3& operator=(const tbb_thread_v3& x); // = delete;
-    public:
-#else  // __TBB_CPP11_RVALUE_REF_PRESENT
-        tbb_thread_v3& operator=(tbb_thread_v3& x) {
-            internal_move(x);
-            return *this;
-        }
-#endif // __TBB_CPP11_RVALUE_REF_PRESENT
-
-        void swap( tbb_thread_v3& t ) __TBB_NOEXCEPT(true) {tbb::swap( *this, t );}
-        bool joinable() const __TBB_NOEXCEPT(true) {return my_handle!=0; }
-        //! The completion of the thread represented by *this happens before join() returns.
-        void __TBB_EXPORTED_METHOD join();
-        //! When detach() returns, *this no longer represents the possibly continuing thread of execution.
-        void __TBB_EXPORTED_METHOD detach();
-        ~tbb_thread_v3() {if( joinable() ) detach();}
-        inline id get_id() const __TBB_NOEXCEPT(true);
-        native_handle_type native_handle() { return my_handle; }
-
-        //! The number of hardware thread contexts.
-        /** Before TBB 3.0 U4 this methods returned the number of logical CPU in
-            the system. Currently on Windows, Linux and FreeBSD it returns the
-            number of logical CPUs available to the current process in accordance
-            with its affinity mask.
-
-            NOTE: The return value of this method never changes after its first
-            invocation. This means that changes in the process affinity mask that
-            took place after this method was first invoked will not affect the
-            number of worker threads in the TBB worker threads pool. **/
-        static unsigned __TBB_EXPORTED_FUNC hardware_concurrency() __TBB_NOEXCEPT(true);
-    private:
-        native_handle_type my_handle;
-#if _WIN32||_WIN64
-        thread_id_type my_thread_id;
-#endif // _WIN32||_WIN64
-
-        void internal_wipe() __TBB_NOEXCEPT(true) {
-            my_handle = 0;
-#if _WIN32||_WIN64
-            my_thread_id = 0;
-#endif
-        }
-        void internal_move(tbb_thread_v3& x) __TBB_NOEXCEPT(true) {
-            if (joinable()) detach();
-            my_handle = x.my_handle;
-#if _WIN32||_WIN64
-            my_thread_id = x.my_thread_id;
-#endif // _WIN32||_WIN64
-            x.internal_wipe();
-        }
-
-        /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread. */
-        void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine),
-                                                   void* closure );
-        friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );
-        friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true);
-    };
-
-    class tbb_thread_v3::id {
-#if _WIN32||_WIN64
-        thread_id_type my_id;
-        id( thread_id_type id_ ) : my_id(id_) {}
-#else
-        pthread_t my_id;
-        id( pthread_t id_ ) : my_id(id_) {}
-#endif // _WIN32||_WIN64
-        friend class tbb_thread_v3;
-    public:
-        id() __TBB_NOEXCEPT(true) : my_id(0) {}
-
-        friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);
-        friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);
-        friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);
-        friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);
-        friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);
-        friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true);
-
-        template <class charT, class traits>
-        friend std::basic_ostream<charT, traits>&
-        operator<< (std::basic_ostream<charT, traits> &out,
-                    tbb_thread_v3::id id)
-        {
-            out << id.my_id;
-            return out;
-        }
-        friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
-    }; // tbb_thread_v3::id
-
-    tbb_thread_v3::id tbb_thread_v3::get_id() const __TBB_NOEXCEPT(true) {
-#if _WIN32||_WIN64
-        return id(my_thread_id);
-#else
-        return id(my_handle);
-#endif // _WIN32||_WIN64
-    }
-    void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );
-    tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
-    void __TBB_EXPORTED_FUNC thread_yield_v3();
-    void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i);
-
-    inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)
-    {
-        return x.my_id == y.my_id;
-    }
-    inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)
-    {
-        return x.my_id != y.my_id;
-    }
-    inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)
-    {
-        return x.my_id < y.my_id;
-    }
-    inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)
-    {
-        return x.my_id <= y.my_id;
-    }
-    inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)
-    {
-        return x.my_id > y.my_id;
-    }
-    inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true)
-    {
-        return x.my_id >= y.my_id;
-    }
-
-} // namespace internal;
-
-//! Users reference thread class by name tbb_thread
-typedef internal::tbb_thread_v3 tbb_thread;
-
-using internal::operator==;
-using internal::operator!=;
-using internal::operator<;
-using internal::operator>;
-using internal::operator<=;
-using internal::operator>=;
-
-inline void move( tbb_thread& t1, tbb_thread& t2 ) {
-    internal::move_v3(t1, t2);
-}
-
-inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true) {
-    tbb::tbb_thread::native_handle_type h = t1.my_handle;
-    t1.my_handle = t2.my_handle;
-    t2.my_handle = h;
-#if _WIN32||_WIN64
-    thread_id_type i = t1.my_thread_id;
-    t1.my_thread_id = t2.my_thread_id;
-    t2.my_thread_id = i;
-#endif /* _WIN32||_WIN64 */
-}
-
-namespace this_tbb_thread {
-    inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); }
-    //! Offers the operating system the opportunity to schedule another thread.
-    inline void yield() { internal::thread_yield_v3(); }
-    //! The current thread blocks at least until the time specified.
-    inline void sleep(const tick_count::interval_t &i) {
-        internal::thread_sleep_v3(i);
-    }
-} // namespace this_tbb_thread
-
-} // namespace tbb
-
-#endif /* __TBB_tbb_thread_H */
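tbb_thread above predates C++11 and emulates what std::thread now provides natively, so its removal leaves a direct migration path. A minimal sketch of that migration, assuming a C++11 compiler (the work function is hypothetical, not from this codebase):

    #include <cstdio>
    #include <thread>

    static void work(int id) { std::printf("worker %d\n", id); }

    int main() {
        // previously: tbb::tbb_thread t(work, 42);
        std::thread t(work, 42);
        // One behavioral difference visible in the deleted code:
        // ~tbb_thread_v3() silently detached a joinable thread, whereas
        // destroying an unjoined std::thread calls std::terminate, so an
        // explicit join() or detach() is now mandatory.
        t.join();
        return 0;
    }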
diff --git a/src/tbb/include/tbb/tbbmalloc_proxy.h b/src/tbb/include/tbb/tbbmalloc_proxy.h
index dde395411..93eaa18e8 100644
--- a/src/tbb/include/tbb/tbbmalloc_proxy.h
+++ b/src/tbb/include/tbb/tbbmalloc_proxy.h
@@ -1,66 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-/*
-Replacing the standard memory allocation routines in Microsoft* C/C++ RTL
-(malloc/free, global new/delete, etc.) with the TBB memory allocator.
-
-Include the following header to a source of any binary which is loaded during
-application startup
+        http://www.apache.org/licenses/LICENSE-2.0
-
-#include "tbb/tbbmalloc_proxy.h"
-
-or add following parameters to the linker options for the binary which is
-loaded during application startup. It can be either exe-file or dll.
-
-For win32
-tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy"
-win64
-tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy"
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-#ifndef __TBB_tbbmalloc_proxy_H
-#define __TBB_tbbmalloc_proxy_H
-
-#if _MSC_VER
-
-#ifdef _DEBUG
-    #pragma comment(lib, "tbbmalloc_proxy_debug.lib")
-#else
-    #pragma comment(lib, "tbbmalloc_proxy.lib")
-#endif
-
-#if defined(_WIN64)
-    #pragma comment(linker, "/include:__TBB_malloc_proxy")
-#else
-    #pragma comment(linker, "/include:___TBB_malloc_proxy")
-#endif
-
-#else
-/* Primarily to support MinGW */
-
-extern "C" void __TBB_malloc_proxy();
-struct __TBB_malloc_proxy_caller {
-    __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); }
-} volatile __TBB_malloc_proxy_helper_object;
-
-#endif // _MSC_VER
-
-#endif //__TBB_tbbmalloc_proxy_H
+#include "../oneapi/tbb/tbbmalloc_proxy.h"
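The deleted comment block above already states the proxy's contract: include the header from one translation unit of a binary loaded at startup (or force the __TBB_malloc_proxy symbol via linker flags) and every malloc/free and global new/delete in the process is redirected to the TBB allocator. A minimal sketch of that usage; linking against the tbbmalloc_proxy library is assumed on non-MSVC toolchains, where the pragmas above do not apply:

    #include "tbb/tbbmalloc_proxy.h"

    #include <cstdlib>

    int main() {
        void* p = std::malloc(64); // serviced by tbbmalloc, not the C runtime
        std::free(p);
        int* q = new int(1);       // global new/delete are replaced as well
        delete q;
        return 0;
    }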
diff --git a/src/tbb/include/tbb/tick_count.h b/src/tbb/include/tbb/tick_count.h
index b5520f9b0..170074aeb 100644
--- a/src/tbb/include/tbb/tick_count.h
+++ b/src/tbb/include/tbb/tick_count.h
@@ -1,140 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+    Copyright (c) 2005-2021 Intel Corporation
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#ifndef __TBB_tick_count_H
-#define __TBB_tick_count_H
-
-#include "tbb_stddef.h"
-
-#if _WIN32||_WIN64
-#include "machine/windows_api.h"
-#elif __linux__
-#include <ctime>
-#else /* generic Unix */
-#include <sys/time.h>
-#endif /* (choice of OS) */
-
-namespace tbb {
-
-//! Absolute timestamp
-/** @ingroup timing */
-class tick_count {
-public:
-    //! Relative time interval.
-    class interval_t {
-        long long value;
-        explicit interval_t( long long value_ ) : value(value_) {}
-    public:
-        //! Construct a time interval representing zero time duration
-        interval_t() : value(0) {};
-
-        //! Construct a time interval representing sec seconds time duration
-        explicit interval_t( double sec );
-
-        //! Return the length of a time interval in seconds
-        double seconds() const;
-
-        friend class tbb::tick_count;
-
-        //! Extract the intervals from the tick_counts and subtract them.
-        friend interval_t operator-( const tick_count& t1, const tick_count& t0 );
-
-        //! Add two intervals.
-        friend interval_t operator+( const interval_t& i, const interval_t& j ) {
-            return interval_t(i.value+j.value);
-        }
+        http://www.apache.org/licenses/LICENSE-2.0
-
-        //! Subtract two intervals.
-        friend interval_t operator-( const interval_t& i, const interval_t& j ) {
-            return interval_t(i.value-j.value);
-        }
-
-        //! Accumulation operator
-        interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;}
-
-        //! Subtraction operator
-        interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;}
-    private:
-        static long long ticks_per_second(){
-#if _WIN32||_WIN64
-            LARGE_INTEGER qpfreq;
-            int rval = QueryPerformanceFrequency(&qpfreq);
-            __TBB_ASSERT_EX(rval, "QueryPerformanceFrequency returned zero");
-            return static_cast<long long>(qpfreq.QuadPart);
-#elif __linux__
-            return static_cast<long long>(1E9);
-#else /* generic Unix */
-            return static_cast<long long>(1E6);
-#endif /* (choice of OS) */
-        }
-    };
-
-    //! Construct an absolute timestamp initialized to zero.
-    tick_count() : my_count(0) {};
-
-    //! Return current time.
-    static tick_count now();
-
-    //! Subtract two timestamps to get the time interval between
-    friend interval_t operator-( const tick_count& t1, const tick_count& t0 );
-
-    //! Return the resolution of the clock in seconds per tick.
-    static double resolution() { return 1.0 / interval_t::ticks_per_second(); }
-
-private:
-    long long my_count;
-};
-
-inline tick_count tick_count::now() {
-    tick_count result;
-#if _WIN32||_WIN64
-    LARGE_INTEGER qpcnt;
-    int rval = QueryPerformanceCounter(&qpcnt);
-    __TBB_ASSERT_EX(rval, "QueryPerformanceCounter failed");
-    result.my_count = qpcnt.QuadPart;
-#elif __linux__
-    struct timespec ts;
-    int status = clock_gettime( CLOCK_REALTIME, &ts );
-    __TBB_ASSERT_EX( status==0, "CLOCK_REALTIME not supported" );
-    result.my_count = static_cast<long long>(1000000000UL)*static_cast<long long>(ts.tv_sec) + static_cast<long long>(ts.tv_nsec);
-#else /* generic Unix */
-    struct timeval tv;
-    int status = gettimeofday(&tv, NULL);
-    __TBB_ASSERT_EX( status==0, "gettimeofday failed" );
-    result.my_count = static_cast<long long>(1000000)*static_cast<long long>(tv.tv_sec) + static_cast<long long>(tv.tv_usec);
-#endif /*(choice of OS) */
-    return result;
-}
-
-inline tick_count::interval_t::interval_t( double sec ) {
-    value = static_cast<long long>(sec*interval_t::ticks_per_second());
-}
-
-inline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) {
-    return tick_count::interval_t( t1.my_count-t0.my_count );
-}
-
-inline double tick_count::interval_t::seconds() const {
-    return value*tick_count::resolution();
-}
-
-} // namespace tbb
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
-#endif /* __TBB_tick_count_H */
+#include "../oneapi/tbb/tick_count.h"
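tick_count itself survives in oneTBB; the header now merely forwards to oneapi/tbb/tick_count.h, so code written against the deleted interface keeps compiling. A minimal timing sketch against the interface shown above (the loop is a placeholder workload):

    #include "tbb/tick_count.h"

    #include <cstdio>

    int main() {
        tbb::tick_count t0 = tbb::tick_count::now();
        volatile double x = 0.0;
        for (int i = 0; i < 1000000; ++i) x = x + 1.0;
        tbb::tick_count t1 = tbb::tick_count::now();
        // Subtracting two timestamps yields an interval_t, convertible to seconds.
        std::printf("elapsed: %f s\n", (t1 - t0).seconds());
        return 0;
    }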
diff --git a/src/tbb/include/tbb/version.h b/src/tbb/include/tbb/version.h
new file mode 100644
index 000000000..cd13a83a1
--- /dev/null
+++ b/src/tbb/include/tbb/version.h
@@ -0,0 +1,17 @@
+/*
+    Copyright (c) 2005-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "../oneapi/tbb/version.h"
diff --git a/src/tbb/integration/cmake/generate_vars.cmake b/src/tbb/integration/cmake/generate_vars.cmake
new file mode 100644
index 000000000..90690610c
--- /dev/null
+++ b/src/tbb/integration/cmake/generate_vars.cmake
@@ -0,0 +1,51 @@
+# Copyright (c) 2020-2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Required parameters:
+# SOURCE_DIR - incoming path to oneTBB source directory.
+# BINARY_DIR - incoming path to oneTBB build directory.
+# BIN_PATH - incoming path to oneTBB binaries directory.
+# TBB_INSTALL_VARS - install vars generation trigger
+# TBB_CMAKE_INSTALL_LIBDIR - subdir for shared object files installation path (used only in TBB_INSTALL_VARS mode)
+# VARS_TEMPLATE - path to the vars template file
+# VARS_NAME - name of the output vars script
+
+set(INPUT_FILE "${SOURCE_DIR}/integration/${VARS_TEMPLATE}")
+set(OUTPUT_FILE "${BIN_PATH}/${VARS_NAME}")
+
+file(TO_NATIVE_PATH "${SOURCE_DIR}" TBBROOT_REPLACEMENT)
+file(TO_NATIVE_PATH "${BIN_PATH}" LIBRARY_PATH_REPLACEMENT)
+if (WIN32)
+    file(TO_NATIVE_PATH "${BIN_PATH}" BINARY_PATH_REPLACEMENT)
+endif()
+
+if (NOT EXISTS ${OUTPUT_FILE})
+    configure_file(${INPUT_FILE} ${OUTPUT_FILE} @ONLY)
+endif()
+
+if (TBB_INSTALL_VARS)
+    set(OUTPUT_FILE "${BINARY_DIR}/internal_install_vars")
+    if (UNIX)
+        set(TBBROOT_REPLACEMENT "$(cd $(dirname \${BASH_SOURCE}) && pwd -P)/..")
+        set(LIBRARY_PATH_REPLACEMENT "$TBBROOT/${TBB_CMAKE_INSTALL_LIBDIR}/")
+        set(CMAKE_ENVIRONMENT_SOURCING_STRING "CMAKE_PREFIX_PATH=\"\${TBBROOT}/${TBB_CMAKE_INSTALL_LIBDIR}/cmake/TBB:${CMAKE_PREFIX_PATH}\"; export CMAKE_PREFIX_PATH")
+    else()
+        set(TBBROOT_REPLACEMENT "%~d0%~p0..")
+        set(LIBRARY_PATH_REPLACEMENT "%TBBROOT%\\${TBB_CMAKE_INSTALL_LIBDIR}")
+        set(BINARY_PATH_REPLACEMENT "%TBBROOT%\\bin")
+        set(CMAKE_ENVIRONMENT_SOURCING_STRING "set \"CMAKE_PREFIX_PATH=%TBBROOT%\\${TBB_CMAKE_INSTALL_LIBDIR}\\cmake\\TBB;%CMAKE_PREFIX_PATH%\"")
+    endif()
+
+    configure_file( ${INPUT_FILE} ${OUTPUT_FILE} @ONLY )
+endif()
diff --git a/src/tbb/integration/linux/env/vars.sh b/src/tbb/integration/linux/env/vars.sh
new file mode 100644
index 000000000..5d913bb61
--- /dev/null
+++ b/src/tbb/integration/linux/env/vars.sh
@@ -0,0 +1,183 @@
+#!/bin/sh
+# shellcheck shell=sh
+#
+# Copyright (c) 2005-2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The script is setting up environment for oneTBB. +# Supported arguments: +# intel64|ia32 - architecture, intel64 is default. + +# Get absolute path to script. Gets a relative path as argument and outputs an absolute path. +get_script_path() ( + script_path="$1" + while [ -L "$script_path" ] ; do + script_dir=$(command dirname -- "$script_path") + script_dir=$(cd "$script_dir" && command pwd -P) + script_path="$(readlink "$script_path")" + case $script_path in + (/*) ;; + (*) script_path="$script_dir/$script_path" ;; + esac + done + script_dir=$(command dirname -- "$script_path") + script_dir=$(cd "$script_dir" && command pwd -P) + printf "%s" "$script_dir" +) + +_vars_get_proc_name() { + if [ -n "${ZSH_VERSION:-}" ] ; then + script="$(ps -p "$$" -o comm=)" + else + script="$1" + while [ -L "$script" ] ; do + script="$(readlink "$script")" + done + fi + basename -- "$script" +} + +_vars_this_script_name="vars.sh" +if [ "$_vars_this_script_name" = "$(_vars_get_proc_name "$0")" ] ; then + echo ":: ERROR: Incorrect usage: this script must be sourced." + echo " Usage: . path/to/${_vars_this_script_name}" + return 255 2>/dev/null || exit 255 +fi + +# Prepend path segment(s) to path-like env vars (PATH, CPATH, etc.). + +# prepend_path() avoids dangling ":" that affects some env vars (PATH and CPATH) +# PATH > https://www.gnu.org/software/libc/manual/html_node/Standard-Environment.html + +# Usage: +# env_var=$(prepend_path "$prepend_to_var" "$existing_env_var") +# export env_var +# +# Inputs: +# $1 == path segment to be prepended to $2 +# $2 == value of existing path-like environment variable + +prepend_path() ( + path_to_add="$1" + path_is_now="$2" + + if [ "" = "${path_is_now}" ] ; then # avoid dangling ":" + printf "%s" "${path_to_add}" + else + printf "%s" "${path_to_add}:${path_is_now}" + fi +) + +# Extract the name and location of this sourced script. + +# Generally, "ps -o comm=" is limited to a 15 character result, but it works +# fine for this usage, because we are primarily interested in finding the name +# of the execution shell, not the name of any calling script. 
+ +vars_script_name="" +vars_script_shell="$(ps -p "$$" -o comm=)" +# ${var:-} needed to pass "set -eu" checks +if [ -n "${ZSH_VERSION:-}" ] && [ -n "${ZSH_EVAL_CONTEXT:-}" ] ; then # zsh 5.x and later + # shellcheck disable=2249 + case $ZSH_EVAL_CONTEXT in (*:file*) vars_script_name="${(%):-%x}" ;; esac ; +elif [ -n "${KSH_VERSION:-}" ] ; then # ksh, mksh or lksh + if [ "$(set | grep -Fq "KSH_VERSION=.sh.version" ; echo $?)" -eq 0 ] ; then # ksh + vars_script_name="${.sh.file}" ; + else # mksh or lksh or [lm]ksh masquerading as ksh or sh + # force [lm]ksh to issue error msg; which contains this script's path/filename, e.g.: + # mksh: /home/ubuntu/intel/oneapi/vars.sh[137]: ${.sh.file}: bad substitution + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + vars_script_name="$(expr "${vars_script_name:-}" : '^.*sh: \(.*\)\[[0-9]*\]:')" ; + fi +elif [ -n "${BASH_VERSION:-}" ] ; then # bash + # shellcheck disable=2128 + (return 0 2>/dev/null) && vars_script_name="${BASH_SOURCE}" ; +elif [ "dash" = "$vars_script_shell" ] ; then # dash + # force dash to issue error msg; which contains this script's rel/path/filename, e.g.: + # dash: 146: /home/ubuntu/intel/oneapi/vars.sh: Bad substitution + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + vars_script_name="$(expr "${vars_script_name:-}" : '^.*dash: [0-9]*: \(.*\):')" ; +elif [ "sh" = "$vars_script_shell" ] ; then # could be dash masquerading as /bin/sh + # force a shell error msg; which should contain this script's path/filename + # sample error msg shown; assume this file is named "vars.sh"; as required by setvars.sh + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + if [ "$(printf "%s" "$vars_script_name" | grep -Eq "sh: [0-9]+: .*vars\.sh: " ; echo $?)" -eq 0 ] ; then # dash as sh + # sh: 155: /home/ubuntu/intel/oneapi/vars.sh: Bad substitution + vars_script_name="$(expr "${vars_script_name:-}" : '^.*sh: [0-9]*: \(.*\):')" ; + fi +else # unrecognized shell or dash being sourced from within a user's script + # force a shell error msg; which should contain this script's path/filename + # sample error msg shown; assume this file is named "vars.sh"; as required by setvars.sh + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + if [ "$(printf "%s" "$vars_script_name" | grep -Eq "^.+: [0-9]+: .*vars\.sh: " ; echo $?)" -eq 0 ] ; then # dash + # .*: 164: intel/oneapi/vars.sh: Bad substitution + vars_script_name="$(expr "${vars_script_name:-}" : '^.*: [0-9]*: \(.*\):')" ; + else + vars_script_name="" ; + fi +fi + +if [ "" = "$vars_script_name" ] ; then + >&2 echo ":: ERROR: Unable to proceed: possible causes listed below." + >&2 echo " This script must be sourced. Did you execute or source this script?" ; + >&2 echo " Unrecognized/unsupported shell (supported: bash, zsh, ksh, m/lksh, dash)." ; + >&2 echo " Can be caused by sourcing from ZSH version 4.x or older." ; + return 255 2>/dev/null || exit 255 +fi + +TBBROOT=$(get_script_path "${vars_script_name:-}")/.. 
+ +TBB_TARGET_ARCH="intel64" +TBB_ARCH_SUFFIX="" + +if [ -n "${SETVARS_ARGS:-}" ]; then + tbb_arg_ia32="$(expr "${SETVARS_ARGS:-}" : '^.*\(ia32\)')" || true + if [ -n "${tbb_arg_ia32:-}" ]; then + TBB_TARGET_ARCH="ia32" + fi +else + for arg do + case "$arg" in + (intel64|ia32) + TBB_TARGET_ARCH="${arg}" + ;; + (*) ;; + esac + done +fi + +TBB_LIB_NAME="libtbb.so.12" + +# Parse layout +if [ -e "$TBBROOT/lib/$TBB_TARGET_ARCH" ]; then + TBB_LIB_DIR="$TBB_TARGET_ARCH/gcc4.8" +else + if [ "$TBB_TARGET_ARCH" = "ia32" ] ; then + TBB_ARCH_SUFFIX="32" + fi + TBB_LIB_DIR="" +fi + +if [ -e "$TBBROOT/lib$TBB_ARCH_SUFFIX/$TBB_LIB_DIR/$TBB_LIB_NAME" ]; then + export TBBROOT + + LIBRARY_PATH=$(prepend_path "${TBBROOT}/lib$TBB_ARCH_SUFFIX/$TBB_LIB_DIR" "${LIBRARY_PATH:-}") ; export LIBRARY_PATH + LD_LIBRARY_PATH=$(prepend_path "${TBBROOT}/lib$TBB_ARCH_SUFFIX/$TBB_LIB_DIR" "${LD_LIBRARY_PATH:-}") ; export LD_LIBRARY_PATH + CPATH=$(prepend_path "${TBBROOT}/include" "${CPATH:-}") ; export CPATH + CMAKE_PREFIX_PATH=$(prepend_path "${TBBROOT}" "${CMAKE_PREFIX_PATH:-}") ; export CMAKE_PREFIX_PATH + PKG_CONFIG_PATH=$(prepend_path "${TBBROOT}/lib$TBB_ARCH_SUFFIX/pkgconfig" "${PKG_CONFIG_PATH:-}") ; export PKG_CONFIG_PATH +else + >&2 echo "ERROR: $TBB_LIB_NAME library does not exist in $TBBROOT/lib$TBB_ARCH_SUFFIX/$TBB_LIB_DIR." + return 255 2>/dev/null || exit 255 +fi diff --git a/src/tbb/integration/linux/env/vars.sh.in b/src/tbb/integration/linux/env/vars.sh.in new file mode 100644 index 000000000..18774e0d2 --- /dev/null +++ b/src/tbb/integration/linux/env/vars.sh.in @@ -0,0 +1,24 @@ +#!/bin/sh +# +# Copyright (c) 2005-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export TBBROOT=@TBBROOT_REPLACEMENT@ + +LD_LIBRARY_PATH="@LIBRARY_PATH_REPLACEMENT@:${LD_LIBRARY_PATH}"; export LD_LIBRARY_PATH +LIBRARY_PATH="@LIBRARY_PATH_REPLACEMENT@:${LIBRARY_PATH}"; export LIBRARY_PATH +CPATH="${TBBROOT}/include:${CPATH}"; export CPATH +PKG_CONFIG_PATH="@LIBRARY_PATH_REPLACEMENT@/pkgconfig:${PKG_CONFIG_PATH}"; export PKG_CONFIG_PATH + +@CMAKE_ENVIRONMENT_SOURCING_STRING@ diff --git a/src/tbb/integration/linux/modulefiles/tbb b/src/tbb/integration/linux/modulefiles/tbb new file mode 100644 index 000000000..b8c695ed2 --- /dev/null +++ b/src/tbb/integration/linux/modulefiles/tbb @@ -0,0 +1,69 @@ +#%Module1.0################################################################### +# +# Copyright (c) 2020-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This modulefile requires Environment Modules 4.1 or later. +# Type `module --version` to determine the current installed version. + +############################################################################## + +set min_tcl_ver 8.4 +if { $tcl_version < $min_tcl_ver } { + puts stderr " " + puts stderr "ERROR: This modulefile requires tcl $min_tcl_ver or greater." + puts stderr "Your system reports that tclsh version $tcl_version is installed." + exit 1 +} + +# if modulefile script name is a symlink, resolve it to get the fully +# qualified pathname that points to the actual modulefile script +# see: https://wiki.tcl-lang.org/page/file+normalize +set scriptpath "${ModulesCurrentModulefile}" +set scriptpath "[file dirname [file normalize "$scriptpath/___"]]" + +# define componentroot, modulefilepath, modulefilename and modulefilever +set modulefilename "[file tail [file dirname "${scriptpath}"]]" +set modulefilever "[file tail "${scriptpath}"]" +set modulefilepath "${scriptpath}" +set componentroot "[file dirname [file dirname [file dirname [file dirname "${scriptpath}"]]]]" + +############################################################################## + +module-whatis "Name: Intel(R) oneAPI Threading Building Blocks" +module-whatis "Version: $modulefilename/$modulefilever" +module-whatis "Description: Flexible threading library for adding parallelism to complex applications across accelerated architectures." +module-whatis "URL: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onetbb.html" +module-whatis "Dependencies: none" + +proc ModulesHelp { } { + global modulefilename + global modulefilever + module whatis "${modulefilename}/${modulefilever}" +} + +############################################################################## + +# Define environment variables needed for an isolated component install. + +set tbbroot "$componentroot" +set tbb_target_arch "intel64" + +setenv TBBROOT "$tbbroot" + +prepend-path CPATH "$tbbroot/include" +prepend-path LIBRARY_PATH "$tbbroot/lib" +prepend-path LD_LIBRARY_PATH "$tbbroot/lib" +prepend-path CMAKE_PREFIX_PATH "$tbbroot" +prepend-path PKG_CONFIG_PATH "$tbbroot/lib/pkgconfig" diff --git a/src/tbb/integration/linux/modulefiles/tbb32 b/src/tbb/integration/linux/modulefiles/tbb32 new file mode 100644 index 000000000..db3413517 --- /dev/null +++ b/src/tbb/integration/linux/modulefiles/tbb32 @@ -0,0 +1,69 @@ +#%Module1.0################################################################### +# +# Copyright (c) 2020-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This modulefile requires Environment Modules 4.1 or later. +# Type `module --version` to determine the current installed version. + +############################################################################## + +set min_tcl_ver 8.4 +if { $tcl_version < $min_tcl_ver } { + puts stderr " " + puts stderr "ERROR: This modulefile requires tcl $min_tcl_ver or greater." + puts stderr "Your system reports that tclsh version $tcl_version is installed." 
+ exit 1 +} + +# if modulefile script name is a symlink, resolve it to get the fully +# qualified pathname that points to the actual modulefile script +# see: https://wiki.tcl-lang.org/page/file+normalize +set scriptpath "${ModulesCurrentModulefile}" +set scriptpath "[file dirname [file normalize "$scriptpath/___"]]" + +# define componentroot, modulefilepath, modulefilename and modulefilever +set modulefilename "[file tail [file dirname "${scriptpath}"]]" +set modulefilever "[file tail "${scriptpath}"]" +set modulefilepath "${scriptpath}" +set componentroot "[file dirname [file dirname [file dirname [file dirname "${scriptpath}"]]]]" + +############################################################################## + +module-whatis "Name: Intel(R) oneAPI Threading Building Blocks" +module-whatis "Version: $modulefilename/$modulefilever" +module-whatis "Description: Flexible threading library for adding parallelism to complex applications across accelerated architectures." +module-whatis "URL: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onetbb.html" +module-whatis "Dependencies: none" + +proc ModulesHelp { } { + global modulefilename + global modulefilever + module whatis "${modulefilename}/${modulefilever}" +} + +############################################################################## + +# Define environment variables needed for an isolated component install. + +set tbbroot "$componentroot" +set tbb_target_arch "ia32" + +setenv TBBROOT "$tbbroot" + +prepend-path CPATH "$tbbroot/include32:$tbbroot/include" +prepend-path LIBRARY_PATH "$tbbroot/lib32" +prepend-path LD_LIBRARY_PATH "$tbbroot/lib32" +prepend-path CMAKE_PREFIX_PATH "$tbbroot" +prepend-path PKG_CONFIG_PATH "$tbbroot/lib32/pkgconfig" diff --git a/src/tbb/integration/linux/oneapi/vars.sh b/src/tbb/integration/linux/oneapi/vars.sh new file mode 100644 index 000000000..ffcf56a55 --- /dev/null +++ b/src/tbb/integration/linux/oneapi/vars.sh @@ -0,0 +1,34 @@ +#!/bin/sh +# shellcheck shell=sh +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ -z "${SETVARS_CALL:-}" ] ; then + >&2 echo " " + >&2 echo ":: ERROR: This script must be sourced by setvars.sh." + >&2 echo " Try 'source /setvars.sh --help' for help." + >&2 echo " " + return 255 +fi + +if [ -z "${ONEAPI_ROOT:-}" ] ; then + >&2 echo " " + >&2 echo ":: ERROR: This script requires that the ONEAPI_ROOT env variable is set." + >&2 echo " Try 'source \setvars.sh --help' for help." + >&2 echo " " + return 254 +fi + +TBBROOT="${ONEAPI_ROOT}"; export TBBROOT diff --git a/src/tbb/integration/linux/sys_check/sys_check.sh b/src/tbb/integration/linux/sys_check/sys_check.sh new file mode 100644 index 000000000..a279c58ee --- /dev/null +++ b/src/tbb/integration/linux/sys_check/sys_check.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# +# Copyright (c) 2019-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +LOC=$(realpath $(dirname "${BASH_SOURCE[0]}")) +source $LOC/../../../common.sh $@ + +ERRORSTATE=0 +return $ERRORSTATE diff --git a/src/tbb/integration/mac/env/vars.sh b/src/tbb/integration/mac/env/vars.sh new file mode 100644 index 000000000..12be0e52a --- /dev/null +++ b/src/tbb/integration/mac/env/vars.sh @@ -0,0 +1,149 @@ +#!/bin/sh +# shellcheck shell=sh +# +# Copyright (c) 2005-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Get absolute path to script. Gets a relative path as argument and outputs an absolute path. +get_script_path() ( + script_path="$1" + while [ -L "$script_path" ] ; do + script_dir=$(command dirname -- "$script_path") + script_dir=$(cd "$script_dir" && command pwd -P) + script_path="$(readlink "$script_path")" + case $script_path in + (/*) ;; + (*) script_path="$script_dir/$script_path" ;; + esac + done + script_dir=$(command dirname -- "$script_path") + script_dir=$(cd "$script_dir" && command pwd -P) + printf "%s" "$script_dir" +) + +_vars_get_proc_name() { + if [ -n "${ZSH_VERSION:-}" ] ; then + script="$(ps -p "$$" -o comm=)" + else + script="$1" + while [ -L "$script" ] ; do + script="$(readlink "$script")" + done + fi + basename -- "$script" +} + +_vars_this_script_name="vars.sh" +if [ "$_vars_this_script_name" = "$(_vars_get_proc_name "$0")" ] ; then + echo ":: ERROR: Incorrect usage: this script must be sourced." + echo " Usage: . path/to/${_vars_this_script_name}" + return 255 2>/dev/null || exit 255 +fi + +# Prepend path segment(s) to path-like env vars (PATH, CPATH, etc.). + +# prepend_path() avoids dangling ":" that affects some env vars (PATH and CPATH) +# PATH > https://www.gnu.org/software/libc/manual/html_node/Standard-Environment.html + +# Usage: +# env_var=$(prepend_path "$prepend_to_var" "$existing_env_var") +# export env_var +# +# Inputs: +# $1 == path segment to be prepended to $2 +# $2 == value of existing path-like environment variable + +prepend_path() ( + path_to_add="$1" + path_is_now="$2" + + if [ "" = "${path_is_now}" ] ; then # avoid dangling ":" + printf "%s" "${path_to_add}" + else + printf "%s" "${path_to_add}:${path_is_now}" + fi +) + +# Extract the name and location of this sourced script. + +# Generally, "ps -o comm=" is limited to a 15 character result, but it works +# fine for this usage, because we are primarily interested in finding the name +# of the execution shell, not the name of any calling script. 
+ +vars_script_name="" +vars_script_shell="$(ps -p "$$" -o comm=)" +# ${var:-} needed to pass "set -eu" checks +if [ -n "${ZSH_VERSION:-}" ] && [ -n "${ZSH_EVAL_CONTEXT:-}" ] ; then # zsh 5.x and later + # shellcheck disable=2249 + case $ZSH_EVAL_CONTEXT in (*:file*) vars_script_name="${(%):-%x}" ;; esac ; +elif [ -n "${KSH_VERSION:-}" ] ; then # ksh, mksh or lksh + if [ "$(set | grep -Fq "KSH_VERSION=.sh.version" ; echo $?)" -eq 0 ] ; then # ksh + vars_script_name="${.sh.file}" ; + else # mksh or lksh or [lm]ksh masquerading as ksh or sh + # force [lm]ksh to issue error msg; which contains this script's path/filename, e.g.: + # mksh: /home/ubuntu/intel/oneapi/vars.sh[137]: ${.sh.file}: bad substitution + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + vars_script_name="$(expr "${vars_script_name:-}" : '^.*sh: \(.*\)\[[0-9]*\]:')" ; + fi +elif [ -n "${BASH_VERSION:-}" ] ; then # bash + # shellcheck disable=2128 + (return 0 2>/dev/null) && vars_script_name="${BASH_SOURCE}" ; +elif [ "dash" = "$vars_script_shell" ] ; then # dash + # force dash to issue error msg; which contains this script's rel/path/filename, e.g.: + # dash: 146: /home/ubuntu/intel/oneapi/vars.sh: Bad substitution + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + vars_script_name="$(expr "${vars_script_name:-}" : '^.*dash: [0-9]*: \(.*\):')" ; +elif [ "sh" = "$vars_script_shell" ] ; then # could be dash masquerading as /bin/sh + # force a shell error msg; which should contain this script's path/filename + # sample error msg shown; assume this file is named "vars.sh"; as required by setvars.sh + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + if [ "$(printf "%s" "$vars_script_name" | grep -Eq "sh: [0-9]+: .*vars\.sh: " ; echo $?)" -eq 0 ] ; then # dash as sh + # sh: 155: /home/ubuntu/intel/oneapi/vars.sh: Bad substitution + vars_script_name="$(expr "${vars_script_name:-}" : '^.*sh: [0-9]*: \(.*\):')" ; + fi +else # unrecognized shell or dash being sourced from within a user's script + # force a shell error msg; which should contain this script's path/filename + # sample error msg shown; assume this file is named "vars.sh"; as required by setvars.sh + vars_script_name="$( (echo "${.sh.file}") 2>&1 )" || : ; + if [ "$(printf "%s" "$vars_script_name" | grep -Eq "^.+: [0-9]+: .*vars\.sh: " ; echo $?)" -eq 0 ] ; then # dash + # .*: 164: intel/oneapi/vars.sh: Bad substitution + vars_script_name="$(expr "${vars_script_name:-}" : '^.*: [0-9]*: \(.*\):')" ; + else + vars_script_name="" ; + fi +fi + +if [ "" = "$vars_script_name" ] ; then + >&2 echo ":: ERROR: Unable to proceed: possible causes listed below." + >&2 echo " This script must be sourced. Did you execute or source this script?" ; + >&2 echo " Unrecognized/unsupported shell (supported: bash, zsh, ksh, m/lksh, dash)." ; + >&2 echo " Can be caused by sourcing from ZSH version 4.x or older." ; + return 255 2>/dev/null || exit 255 +fi + +TBBROOT=$(get_script_path "${vars_script_name:-}")/.. 
+LIBTBB_NAME="libtbb.dylib" + +if [ -e "$TBBROOT/lib/$LIBTBB_NAME" ]; then + export TBBROOT + + LIBRARY_PATH=$(prepend_path "${TBBROOT}/lib" "${LIBRARY_PATH:-}") ; export LIBRARY_PATH + DYLD_LIBRARY_PATH=$(prepend_path "${TBBROOT}/lib" "${DYLD_LIBRARY_PATH:-}") ; export DYLD_LIBRARY_PATH + CPATH=$(prepend_path "${TBBROOT}/include" "${CPATH:-}") ; export CPATH + CMAKE_PREFIX_PATH=$(prepend_path "${TBBROOT}" "${CMAKE_PREFIX_PATH:-}") ; export CMAKE_PREFIX_PATH + PKG_CONFIG_PATH=$(prepend_path "${TBBROOT}/lib/pkgconfig" "${PKG_CONFIG_PATH:-}") ; export PKG_CONFIG_PATH +else + >&2 echo "ERROR: $LIBTBB_NAME library does not exist in $TBBROOT/lib." + return 255 2>/dev/null || exit 255 +fi diff --git a/src/tbb/integration/mac/env/vars.sh.in b/src/tbb/integration/mac/env/vars.sh.in new file mode 100644 index 000000000..2a4ca33cb --- /dev/null +++ b/src/tbb/integration/mac/env/vars.sh.in @@ -0,0 +1,24 @@ +#!/bin/sh +# +# Copyright (c) 2005-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export TBBROOT=@TBBROOT_REPLACEMENT@ + +DYLD_LIBRARY_PATH="@LIBRARY_PATH_REPLACEMENT@:${DYLD_LIBRARY_PATH}"; export DYLD_LIBRARY_PATH +LIBRARY_PATH="@LIBRARY_PATH_REPLACEMENT@:${LIBRARY_PATH}"; export LIBRARY_PATH +CPATH="${TBBROOT}/include:${CPATH}"; export CPATH +PKG_CONFIG_PATH="@LIBRARY_PATH_REPLACEMENT@/pkgconfig:${PKG_CONFIG_PATH}"; export PKG_CONFIG_PATH + +@CMAKE_ENVIRONMENT_SOURCING_STRING@ diff --git a/src/tbb/integration/pkg-config/tbb.pc.in b/src/tbb/integration/pkg-config/tbb.pc.in new file mode 100644 index 000000000..34ea3bea1 --- /dev/null +++ b/src/tbb/integration/pkg-config/tbb.pc.in @@ -0,0 +1,24 @@ +# Copyright (c) 2021-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +prefix=@_prefix_for_pc_file@ +libdir=@_libdir_for_pc_file@ +includedir=@_includedir_for_pc_file@ + +Name: oneAPI Threading Building Blocks (oneTBB) +Description: C++ library for parallel programming on multi-core processors. 
+URL: https://github.com/oneapi-src/oneTBB
+Version: @TBB_VERSION@
+Libs: -L${libdir} @_tbb_pc_extra_libdir@ -l@_tbb_pc_lib_name@
+Cflags: -I${includedir}
diff --git a/src/tbb/integration/windows/env/vars.bat b/src/tbb/integration/windows/env/vars.bat
new file mode 100644
index 000000000..c5ec0ddc4
--- /dev/null
+++ b/src/tbb/integration/windows/env/vars.bat
@@ -0,0 +1,93 @@
+@echo off
+REM
+REM Copyright (c) 2005-2023 Intel Corporation
+REM
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM
+REM     http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+REM
+
+REM Syntax:
+REM  %SCRIPT_NAME% [^<arch^>] [^<vs^>]
+REM    ^<arch^> should be one of the following
+REM        ia32         : Set up for IA-32 architecture
+REM        intel64      : Set up for Intel(R) 64 architecture
+REM    if ^<arch^> is not set Intel(R) 64 architecture will be used
+REM    ^<vs^> should be one of the following
+REM        vs2019       : Set to use with Microsoft Visual Studio 2019 runtime DLLs
+REM        vs2022       : Set to use with Microsoft Visual Studio 2022 runtime DLLs
+REM        all          : Set to use oneTBB statically linked with Microsoft Visual C++ runtime
+REM    if ^<vs^> is not set oneTBB dynamically linked with Microsoft Visual C++ runtime will be used.
+
+set "SCRIPT_NAME=%~nx0"
+set "TBB_SCRIPT_DIR=%~d0%~p0"
+set "TBBROOT=%TBB_SCRIPT_DIR%.."
+
+:: Set the default arguments
+set TBB_TARGET_ARCH=intel64
+set TBB_ARCH_SUFFIX=
+set TBB_TARGET_VS=vc14
+
+:ParseArgs
+:: Parse the incoming arguments
+if /i "%1"=="" goto ParseLayout
+if /i "%1"=="ia32" (set TBB_TARGET_ARCH=ia32) & shift & goto ParseArgs
+if /i "%1"=="intel64" (set TBB_TARGET_ARCH=intel64) & shift & goto ParseArgs
+if /i "%1"=="vs2019" (set TBB_TARGET_VS=vc14) & shift & goto ParseArgs
+if /i "%1"=="vs2022" (set TBB_TARGET_VS=vc14) & shift & goto ParseArgs
+if /i "%1"=="all" (set TBB_TARGET_VS=vc_mt) & shift & goto ParseArgs
+
+:ParseLayout
+if exist "%TBBROOT%\redist\" (
+    set "TBB_BIN_DIR=%TBBROOT%\redist"
+    set "TBB_SUBDIR=%TBB_TARGET_ARCH%"
+    goto SetEnv
+)
+
+if "%TBB_TARGET_ARCH%" == "ia32" (
+    set TBB_ARCH_SUFFIX=32
+)
+if exist "%TBBROOT%\bin%TBB_ARCH_SUFFIX%" (
+    set "TBB_BIN_DIR=%TBBROOT%\bin%TBB_ARCH_SUFFIX%"
+    if "%TBB_TARGET_VS%" == "vc14" (
+        set TBB_TARGET_VS=
+    )
+    goto SetEnv
+)
+:: Couldn't parse TBBROOT/bin, unset variable
+set TBB_ARCH_SUFFIX=
+
+if exist "%TBBROOT%\..\redist\" (
+    set "TBB_BIN_DIR=%TBBROOT%\..\redist"
+    set "TBB_SUBDIR=%TBB_TARGET_ARCH%\tbb"
+    goto SetEnv
+)
+
+:SetEnv
+if exist "%TBB_BIN_DIR%\%TBB_SUBDIR%\%TBB_TARGET_VS%\tbb12.dll" (
+    set "TBB_DLL_PATH=%TBB_BIN_DIR%\%TBB_SUBDIR%\%TBB_TARGET_VS%"
+) else (
+    echo:
+    echo :: ERROR: tbb12.dll library does not exist in "%TBB_BIN_DIR%\%TBB_SUBDIR%\%TBB_TARGET_VS%\"
+    echo:
+    exit /b 255
+)
+
+set "PATH=%TBB_DLL_PATH%;%PATH%"
+
+set "LIB=%TBBROOT%\lib%TBB_ARCH_SUFFIX%\%TBB_SUBDIR%\%TBB_TARGET_VS%;%LIB%"
+set "INCLUDE=%TBBROOT%\include;%INCLUDE%"
+set "CPATH=%TBBROOT%\include;%CPATH%"
+set "CMAKE_PREFIX_PATH=%TBBROOT%;%CMAKE_PREFIX_PATH%"
+set "PKG_CONFIG_PATH=%TBBROOT%\lib%TBB_ARCH_SUFFIX%\pkgconfig;%PKG_CONFIG_PATH%"
+
+:End
+exit /B 0
diff --git a/src/tbb/integration/windows/env/vars.bat.in
b/src/tbb/integration/windows/env/vars.bat.in new file mode 100644 index 000000000..5d94588fa --- /dev/null +++ b/src/tbb/integration/windows/env/vars.bat.in @@ -0,0 +1,29 @@ +@echo off +REM +REM Copyright (c) 2005-2021 Intel Corporation +REM +REM Licensed under the Apache License, Version 2.0 (the "License"); +REM you may not use this file except in compliance with the License. +REM You may obtain a copy of the License at +REM +REM http://www.apache.org/licenses/LICENSE-2.0 +REM +REM Unless required by applicable law or agreed to in writing, software +REM distributed under the License is distributed on an "AS IS" BASIS, +REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +REM See the License for the specific language governing permissions and +REM limitations under the License. +REM + +@echo off + +set "TBBROOT=@TBBROOT_REPLACEMENT@" +set "TBB_DLL_PATH=@BINARY_PATH_REPLACEMENT@" + +set "INCLUDE=%TBBROOT%\include;%INCLUDE%" +set "CPATH=%TBBROOT%\include;%CPATH%" +set "LIB=@LIBRARY_PATH_REPLACEMENT@;%LIB%" +set "PATH=@BINARY_PATH_REPLACEMENT@;%PATH%" +set "PKG_CONFIG_PATH=@LIBRARY_PATH_REPLACEMENT@\pkgconfig;%PKG_CONFIG_PATH%" + +@CMAKE_ENVIRONMENT_SOURCING_STRING@ diff --git a/src/tbb/integration/windows/nuget/inteltbb.devel.win.targets b/src/tbb/integration/windows/nuget/inteltbb.devel.win.targets new file mode 100644 index 000000000..1c94a12c1 --- /dev/null +++ b/src/tbb/integration/windows/nuget/inteltbb.devel.win.targets @@ -0,0 +1,64 @@ + + + + + + + + $(MSBuildThisFileDirectory)..\..\build\native\include;%(AdditionalIncludeDirectories) + TBB_USE_DEBUG;%(PreprocessorDefinitions) + + + + + + + $(MSBuildThisFileDirectory)..\..\build\native\win-x86;%(AdditionalLibraryDirectories) + tbb12.lib;tbbmalloc.lib;tbbmalloc_proxy.lib;%(AdditionalDependencies) + + + + + $(MSBuildThisFileDirectory)..\..\build\native\win-x64;%(AdditionalLibraryDirectories) + tbb12.lib;tbbmalloc.lib;tbbmalloc_proxy.lib;%(AdditionalDependencies) + + + + + $(MSBuildThisFileDirectory)..\..\build\native\win-x86;%(AdditionalLibraryDirectories) + tbb12_debug.lib;tbbmalloc_debug.lib;tbbmalloc_proxy_debug.lib;%(AdditionalDependencies) + + + + + $(MSBuildThisFileDirectory)..\..\build\native\win-x64;%(AdditionalLibraryDirectories) + tbb12_debug.lib;tbbmalloc_debug.lib;tbbmalloc_proxy_debug.lib;%(AdditionalDependencies) + + + + + + + + + + + + + diff --git a/src/tbb/integration/windows/nuget/inteltbb.redist.win.targets b/src/tbb/integration/windows/nuget/inteltbb.redist.win.targets new file mode 100644 index 000000000..29b61acfa --- /dev/null +++ b/src/tbb/integration/windows/nuget/inteltbb.redist.win.targets @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + diff --git a/src/tbb/integration/windows/oneapi/vars.bat b/src/tbb/integration/windows/oneapi/vars.bat new file mode 100644 index 000000000..9c53c7105 --- /dev/null +++ b/src/tbb/integration/windows/oneapi/vars.bat @@ -0,0 +1,56 @@ +@echo off +REM +REM Copyright (c) 2023 Intel Corporation +REM +REM Licensed under the Apache License, Version 2.0 (the "License"); +REM you may not use this file except in compliance with the License. +REM You may obtain a copy of the License at +REM +REM http://www.apache.org/licenses/LICENSE-2.0 +REM +REM Unless required by applicable law or agreed to in writing, software +REM distributed under the License is distributed on an "AS IS" BASIS, +REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+REM See the License for the specific language governing permissions and +REM limitations under the License. +REM + +if not defined SETVARS_CALL ( + echo: + echo :: ERROR: This script must be executed by setvars.bat. + echo: Try '[install-dir]\setvars.bat --help' for help. + echo: + exit /b 255 +) + +if not defined ONEAPI_ROOT ( + echo: + echo :: ERROR: This script requires that the ONEAPI_ROOT env variable is set." + echo: Try '[install-dir]\setvars.bat --help' for help. + echo: + exit /b 254 +) + +set "TBBROOT=%ONEAPI_ROOT%" + +:: Set the default arguments +set "TBB_TARGET_ARCH=%INTEL_TARGET_ARCH%" +set TBB_TARGET_VS= +set ARCH_SUFFIX= + +:ParseArgs +:: Parse the incoming arguments +if /i "%1"=="" goto SetEnv +if /i "%1"=="vs2019" (set TBB_TARGET_VS= ) & shift & goto ParseArgs +if /i "%1"=="vs2022" (set TBB_TARGET_VS= ) & shift & goto ParseArgs +if /i "%1"=="all" (set TBB_TARGET_VS=vc_mt) & shift & goto ParseArgs + +if "%TBB_TARGET_ARCH%"=="ia32" set ARCH_SUFFIX=32 + +:SetEnv +if exist "%TBBROOT%\bin%ARCH_SUFFIX%\%TBB_TARGET_VS%\tbb12.dll" ( + set "TBB_DLL_PATH=%TBBROOT%\bin%ARCH_SUFFIX%\%TBB_TARGET_VS%" +) + +:End +exit /B 0 diff --git a/src/tbb/integration/windows/sys_check/sys_check.bat b/src/tbb/integration/windows/sys_check/sys_check.bat new file mode 100644 index 000000000..202bf72c1 --- /dev/null +++ b/src/tbb/integration/windows/sys_check/sys_check.bat @@ -0,0 +1,18 @@ +@echo off +REM +REM Copyright (c) 2019-2021 Intel Corporation +REM +REM Licensed under the Apache License, Version 2.0 (the "License"); +REM you may not use this file except in compliance with the License. +REM You may obtain a copy of the License at +REM +REM http://www.apache.org/licenses/LICENSE-2.0 +REM +REM Unless required by applicable law or agreed to in writing, software +REM distributed under the License is distributed on an "AS IS" BASIS, +REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +REM See the License for the specific language governing permissions and +REM limitations under the License. +REM + +exit /B 0 diff --git a/src/tbb/src/Makefile b/src/tbb/src/Makefile deleted file mode 100644 index 9a361574b..000000000 --- a/src/tbb/src/Makefile +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2005-2014 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. Threading Building Blocks is free software; -# you can redistribute it and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. Threading Building Blocks is -# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU General Public License for more details. You should have received a copy of -# the GNU General Public License along with Threading Building Blocks; if not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software library without -# restriction. Specifically, if other files instantiate templates or use macros or inline -# functions from this file, or you compile this file and link it with other files to produce -# an executable, this file does not by itself cause the resulting executable to be covered -# by the GNU General Public License. 
This exception does not however invalidate any other -# reasons why the executable file might be covered by the GNU General Public License. - -tbb_root?=.. -examples_root:=$(tbb_root)/examples -include $(tbb_root)/build/common.inc - -#workaround for non-depend targets tbb and tbbmalloc which both depend on version_string.ver -#According to documentation, recursively invoked make commands can process their targets in parallel -.NOTPARALLEL: - -.PHONY: all tbb tbbmalloc tbbproxy test test_no_depends release debug examples clean - -all: release debug examples - -tbb: tbb_release tbb_debug - -tbbmalloc: tbbmalloc_release tbbmalloc_debug - -tbbproxy: tbbproxy_release tbbproxy_debug - -rml: rml_release rml_debug - -test: tbbmalloc_test_release $(if $(use_proxy),tbbproxy_test_release) tbb_test_release tbbmalloc_test_debug $(if $(use_proxy),tbbproxy_test_debug) tbb_test_debug -ifeq (,$(findstring skip,$(target:android=skip) $(offload:mic=skip))) -test: rml_test_debug rml_test_release -endif - -tbb_test_no_depends: tbbmalloc_test_release_no_depends $(if $(use_proxy),tbbproxy_test_release_no_depends) tbb_test_release_no_depends tbbmalloc_test_debug_no_depends $(if $(use_proxy),tbbproxy_test_debug_no_depends) tbb_test_debug_no_depends - @echo done - -release: tbb_release tbbmalloc_release $(if $(use_proxy),tbbproxy_release) -release: $(call cross_cfg,tbbmalloc_test_release) $(call cross_cfg,test_release) - -debug: tbb_debug tbbmalloc_debug $(if $(use_proxy),tbbproxy_debug) -debug: $(call cross_cfg,tbbmalloc_test_debug) $(call cross_cfg, test_debug) - -examples: tbb tbbmalloc examples_debug clean_examples examples_release - -examples_no_depends: examples_release_no_depends examples_debug_no_depends - -clean: clean_release clean_debug clean_examples - @echo clean done - -.PHONY: full -full: - $(MAKE) -sir --no-print-directory -f Makefile tbb_root=.. clean all -ifeq ($(tbb_os),windows) - $(MAKE) -sir --no-print-directory -f Makefile tbb_root=.. compiler=icl clean all native_examples -else - $(MAKE) -sir --no-print-directory -f Makefile tbb_root=.. compiler=icc clean all native_examples -endif -ifeq ($(arch),intel64) - $(MAKE) -sir --no-print-directory -f Makefile tbb_root=.. arch=ia32 clean all -endif -# it doesn't test compiler=icc arch=ia32 on intel64 systems due to enviroment settings of icc - -native_examples: tbb tbbmalloc - $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. compiler=$(native_compiler) debug test - $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. compiler=$(native_compiler) clean release test - -../examples/% examples/%:: - $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. 
$(subst examples/,,$(subst ../,,$@)) - -debug_%:: cfg:=$(if $(findstring file,$(origin cfg)),debug,$(cfg)) -debug_%:: export run_cmd=$(debugger) -debug_malloc_% test_malloc_% debug_ScalableAlloc% test_ScalableAlloc%:: TESTFILE=tbbmalloc -debug_rml_% test_rml_%:: TESTFILE=rml -debug_runtime_load% test_runtime_load%:: TESTFILE=tbbproxy -debug_% test_% stress_% time_% perf_%:: TESTFILE?=test -debug_% test_% stress_% time_% perf_%:: - $(MAKE) -C "$(work_dir)_$(cfg)" -r -f $(tbb_root)/build/Makefile.$(TESTFILE) cfg=$(cfg) $@ - -clean_%:: -ifeq ($(origin cfg),file) - @$(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.test cfg=release $@ - @$(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.test cfg=debug $@ -else - @$(MAKE) -C "$(work_dir)_$(cfg)" -r -f $(tbb_root)/build/Makefile.test $@ -endif - -.PHONY: test_release test_debug test_release_no_depends test_debug_no_depends -.PHONY: tbb_release tbb_debug tbb_test_release tbb_test_debug tbb_test_release_no_depends tbb_test_debug_no_depends -# do not delete double-space after -C option -tbb_release: mkdir_release - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbb cfg=release - -tbb_debug: mkdir_debug - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbb cfg=debug - -tbb_test_release: $(call cross_cfg,tbb_release) $(if $(use_proxy),$(call cross_cfg,tbbproxy_release)) tbb_test_release_no_depends -tbb_test_release_no_depends:$(call cross_cfg,mkdir_release) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)" -r -f $(tbb_root)/build/Makefile.test cfg=release - -tbb_test_debug: $(call cross_cfg,tbb_debug) $(if $(use_proxy),$(call cross_cfg,tbbproxy_debug)) tbb_test_debug_no_depends -tbb_test_debug_no_depends:$(call cross_cfg,mkdir_debug) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)" -r -f $(tbb_root)/build/Makefile.test cfg=debug -# backward compatibility -test_release: tbb_test_release -test_debug: tbb_test_debug -test_release_no_depends: tbb_test_release_no_depends -test_debug_no_depends: tbb_test_debug_no_depends - -.PHONY: tbbmalloc_release tbbmalloc_debug -.PHONY: tbbmalloc_dll_release tbbmalloc_dll_debug tbbmalloc_proxy_dll_release tbbmalloc_proxy_dll_debug -.PHONY: tbbmalloc_test tbbmalloc_test_release tbbmalloc_test_debug tbbmalloc_test_release_no_depends tbbmalloc_test_debug_no_depends - -tbbmalloc_release: mkdir_release - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc - -tbbmalloc_debug: mkdir_debug - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc - -tbbmalloc_dll_release: mkdir_release - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_dll - -tbbmalloc_proxy_dll_release: mkdir_release - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_proxy_dll - -tbbmalloc_dll_debug: mkdir_debug - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_dll - -tbbmalloc_proxy_dll_debug: mkdir_debug - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_proxy_dll - -tbbmalloc_test: tbbmalloc_test_release tbbmalloc_test_debug - -tbbmalloc_test_release: $(call cross_cfg,tbbmalloc_release) tbbmalloc_test_release_no_depends -tbbmalloc_test_release_no_depends: $(call cross_cfg,mkdir_release) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_test_no_depends - 
-tbbmalloc_test_debug: $(call cross_cfg,tbbmalloc_debug) tbbmalloc_test_debug_no_depends -tbbmalloc_test_debug_no_depends: $(call cross_cfg,mkdir_debug) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_test_no_depends - -.PHONY: tbbproxy_release tbbproxy_debug -.PHONY: tbbproxy_test tbbproxy_test_release tbbproxy_test_debug tbbproxy_test_release_no_depends tbbproxy_test_debug_no_depends - -tbbproxy_release: mkdir_release tbb_release - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=release tbbproxy - -tbbproxy_debug: mkdir_debug tbb_debug - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=debug tbbproxy - -tbbproxy_test: tbbproxy_test_release tbbproxy_test_debug - -tbbproxy_test_release: $(call cross_cfg,tbb_release) $(call cross_cfg,tbbproxy_release) tbbproxy_test_release_no_depends -tbbproxy_test_release_no_depends:$(call cross_cfg,mkdir_release) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=release tbbproxy_test - -tbbproxy_test_debug: $(call cross_cfg,tbb_debug) $(call cross_cfg,tbbproxy_debug) tbbproxy_test_debug_no_depends -tbbproxy_test_debug_no_depends: $(call cross_cfg,mkdir_debug) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=debug tbbproxy_test - -.PHONY: rml_release rml_debug rml_test_release rml_test_debug -.PHONY: rml_test_release_no_depends rml_test_debug_no_depends - -rml_release: mkdir_release - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.rml cfg=release rml - -rml_debug: mkdir_debug - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.rml cfg=debug rml - -rml_test_release: $(call cross_cfg,rml_release) rml_test_release_no_depends -rml_test_release_no_depends: $(call cross_cfg,mkdir_release) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)" -r -f $(tbb_root)/build/Makefile.rml cfg=release rml_test - -rml_test_debug: $(call cross_cfg,rml_debug) rml_test_debug_no_depends -rml_test_debug_no_depends: $(call cross_cfg,mkdir_debug) - $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)" -r -f $(tbb_root)/build/Makefile.rml cfg=debug rml_test - -.PHONY: examples_release examples_debug examples_release_no_depends examples_debug_no_depends - -examples_release: tbb_release tbbmalloc_release examples_release_no_depends -examples_release_no_depends: - $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. release test UI=con - -examples_debug: tbb_debug tbbmalloc_debug examples_debug_no_depends -examples_debug_no_depends: - $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. debug test UI=con - -.PHONY: clean_release clean_debug clean_examples - -clean_release: - $(shell $(RM) $(work_dir)_release$(SLASH)*.* >$(NUL) 2>$(NUL)) - $(shell $(RD) $(work_dir)_release >$(NUL) 2>$(NUL)) - -clean_debug: - $(shell $(RM) $(work_dir)_debug$(SLASH)*.* >$(NUL) 2>$(NUL)) - $(shell $(RD) $(work_dir)_debug >$(NUL) 2>$(NUL)) - -clean_examples: - $(shell $(MAKE) -s -i -r -C $(examples_root) -f Makefile tbb_root=.. 
clean >$(NUL) 2>$(NUL)) - -.PHONY: mkdir_release mkdir_debug codecov do_codecov info tbbvars shell - -mkdir_release: - $(shell $(MD) "$(work_dir)_release" >$(NUL) 2>$(NUL)) - @echo Created $(work_dir)_release directory - -mkdir_debug: - $(shell $(MD) "$(work_dir)_debug" >$(NUL) 2>$(NUL)) - @echo Created $(work_dir)_debug directory - -ifeq ($(compiler),$(if $(findstring windows,$(tbb_os)),icl,icc)) -codecov: codecov=yes -codecov: do_codecov - $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.test cfg=release codecov_gen -else -codecov: - $(error Only Intel(R) C++ Compiler is supported for code coverage) -endif - -export codecov - -do_codecov: tbb_root=.. -do_codecov: - $(MAKE) RML=yes tbbmalloc_test_release test_release - $(MAKE) clean_test_* cfg=release - $(MAKE) RML=yes crosstest=yes tbbmalloc_test_debug test_debug - $(MAKE) clean_test_* cfg=release - $(MAKE) rml_test_release - $(MAKE) clean_test_* cfg=release - $(MAKE) crosstest=yes rml_test_debug - -info: - @echo OS: $(tbb_os) - @echo arch=$(arch) - @echo compiler=$(compiler) - @echo runtime=$(runtime) - @echo tbb_build_prefix=$(tbb_build_prefix) - @echo work_dir=$(abspath $(tbb_build_dir)$(SLASH)$(tbb_build_prefix)_$(cfg)) - -# [usage]$ source `make tbbvars`.sh -tbbvars: - @echo $(tbb_build_dir)$(SLASH)$(tbb_build_prefix)_$(cfg)$(SLASH)tbbvars - -symbols: args=$(if $(findstring cl,$(compiler)), dumpbin /section:.text *.obj|findstr COMDAT , nm -Pg *.o|grep ' T '|cut -f1 -d' ') -symbols: shell - -shell: -ifdef BUILDING_PHASE - -$(run_cmd) $(shell_cmd) -else - @$(MAKE) -C "$(work_dir)_$(cfg)" -rf $(tbb_root)/src/Makefile BUILDING_PHASE=1 shell shell_cmd="$(if $(args),$(args),$(SHELL))" -endif - diff --git a/src/tbb/src/index.html b/src/tbb/src/index.html deleted file mode 100644 index 7605b5df4..000000000 --- a/src/tbb/src/index.html +++ /dev/null @@ -1,76 +0,0 @@ - - - -

-Overview
-
-This directory contains the source code and unit tests for Intel® Threading Building Blocks.
-
-Directories
-
-tbb
-    Source code of the TBB library core.
-tbbmalloc
-    Source code of the TBB scalable memory allocator.
-test
-    Source code of the TBB unit tests.
-rml
-    Source code of the Resource Management Layer (RML).
-perf
-    Source code of microbenchmarks.
-old
-    Source code of deprecated TBB entities that are still shipped as part of the TBB library
-    for the sake of backward compatibility.
-
-Files
-
-Makefile
-    Advanced Makefile for developing and debugging TBB. See the basic build directions.
-    Additional targets and options:
-
-    make test_{name} time_{name}
-        Make and run an individual test or benchmark.
-    make stress_{name}
-        Equivalent to 'make test_{name}' but runs until a failure is detected or it is
-        terminated by the user.
-    make run_cmd="{command}" [(above options or targets)]
-        Command prefix for test execution. Also, "run_cmd=-" will ignore test execution
-        failures. See also the -k and -i options of GNU make for more ways to keep
-        building and testing despite failures.
-    make debug_{name}
-        Equivalent to 'make test_{name}' but compiles in debug mode and runs under a
-        debugger ("run_cmd=$(debugger)").
-    make args="{command-line arguments}" [(above options or targets)]
-        Additional arguments for the run.
-    make repeat="{N}" [(above options or targets)]
-        Repeats execution N times.
-    make clean_{filename}
-        Removes executable, object, and other intermediate files with the specified
-        filename ('*' also works).
-    make cfg={debug|release} [(above options or targets)]
-        Specifies a build mode or the corresponding directory to work in.
-    make tbb_strict=1 [(above options or targets)]
-        Enables warnings as errors.
-    make examples/{target}
-        Invokes examples/Makefile with the specified target.
-    make clean_release clean_debug clean_examples
-        Removes the release or debug build directories, or cleans all examples.
-    make test_no_depends
-        Equivalent to 'make test' but does not check for library updates.
-    make info
-        Outputs information about the build configuration and directories.
-    make cpp0x=1 [(above options or targets)]
-        Enables C++0x extensions such as lambdas for compilers that implement them as
-        experimental features.
-    make CXXFLAGS={Flags} [(above options or targets)]
-        Specifies additional options for the compiler.
-    make target={name} [(above options or targets)]
-        Includes an additional build/{name}.inc file after the OS-specific one.
-    make extra_inc={filename} [(above options or targets)]
-        Includes an additional makefile.
-
-Up to parent directory
-
-Copyright © 2005-2014 Intel Corporation. All Rights Reserved.
-
-Intel is a registered trademark or trademark of Intel Corporation
-or its subsidiaries in the United States and other countries.

-* Other names and brands may be claimed as the property of others. - - diff --git a/src/tbb/src/old/concurrent_queue_v2.cpp b/src/tbb/src/old/concurrent_queue_v2.cpp deleted file mode 100644 index d52287be9..000000000 --- a/src/tbb/src/old/concurrent_queue_v2.cpp +++ /dev/null @@ -1,367 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "concurrent_queue_v2.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/spin_mutex.h" -#include "tbb/atomic.h" -#include -#include - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -#define RECORD_EVENTS 0 - -using namespace std; - -namespace tbb { - -namespace internal { - -class concurrent_queue_rep; - -//! A queue using simple locking. -/** For efficiency, this class has no constructor. - The caller is expected to zero-initialize it. */ -struct micro_queue { - typedef concurrent_queue_base::page page; - typedef size_t ticket; - - atomic head_page; - atomic head_counter; - - atomic tail_page; - atomic tail_counter; - - spin_mutex page_mutex; - - class push_finalizer: no_copy { - ticket my_ticket; - micro_queue& my_queue; - public: - push_finalizer( micro_queue& queue, ticket k ) : - my_ticket(k), my_queue(queue) - {} - ~push_finalizer() { - my_queue.tail_counter = my_ticket; - } - }; - - void push( const void* item, ticket k, concurrent_queue_base& base ); - - class pop_finalizer: no_copy { - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - public: - pop_finalizer( micro_queue& queue, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p) - {} - ~pop_finalizer() { - page* p = my_page; - if( p ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !q ) { - my_queue.tail_page = NULL; - } - } - my_queue.head_counter = my_ticket; - if( p ) - operator delete(p); - } - }; - - bool pop( void* dst, ticket k, concurrent_queue_base& base ); -}; - -//! Internal representation of a ConcurrentQueue. -/** For efficiency, this class has no constructor. - The caller is expected to zero-initialize it. 
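    Tickets are scattered across the n_queue micro-queues via index(k) = k*phi % n_queue;
    since phi (3) is coprime to n_queue (8), the mapping is a bijection mod n_queue, which
    spreads concurrent pushes and pops over different micro-queues to reduce contention.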
*/ -class concurrent_queue_rep { -public: - typedef size_t ticket; - -private: - friend struct micro_queue; - - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - //! Must be power of 2 - static const size_t n_queue = 8; - - //! Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - atomic head_counter; - char pad1[NFS_MaxLineSize-sizeof(atomic)]; - - atomic tail_counter; - char pad2[NFS_MaxLineSize-sizeof(atomic)]; - micro_queue array[n_queue]; - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } - - //! Value for effective_capacity that denotes unbounded queue. - static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2); -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( push ) - #pragma warning( disable: 4146 ) -#endif - -//------------------------------------------------------------------------ -// micro_queue -//------------------------------------------------------------------------ -void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) { - k &= -concurrent_queue_rep::n_queue; - page* p = NULL; - size_t index = modulo_power_of_two( k/concurrent_queue_rep::n_queue, base.items_per_page ); - if( !index ) { - size_t n = sizeof(page) + base.items_per_page*base.item_size; - p = static_cast(operator new( n )); - p->mask = 0; - p->next = NULL; - } - { - push_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue ); - spin_wait_until_eq( tail_counter, k ); - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - if( page* q = tail_page ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - base.copy_item( *p, index, item ); - // If no exception was thrown, mark item as present. - p->mask |= uintptr_t(1)<1 ? item_sz : 2); - my_rep = cache_aligned_allocator().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - this->item_size = item_sz; -} - -concurrent_queue_base::~concurrent_queue_base() { - size_t nq = my_rep->n_queue; - for( size_t i=0; iarray[i].tail_page; - __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" ); - if( tp!=NULL ) - delete tp; - } - cache_aligned_allocator().deallocate(my_rep,1); -} - -void concurrent_queue_base::internal_push( const void* src ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k = r.tail_counter++; - if( my_capacity=const_cast(my_capacity) ) - backoff.pause(); - } - r.choose(k).push(src,k,*this); -} - -void concurrent_queue_base::internal_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k; - do { - k = r.head_counter++; - } while( !r.choose(k).pop(dst,k,*this) ); -} - -bool concurrent_queue_base::internal_pop_if_present( void* dst ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k; - do { - for( atomic_backoff b;;b.pause() ) { - k = r.head_counter; - if( r.tail_counter<=k ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. 
Attempt to get that item. - if( r.head_counter.compare_and_swap(k+1,k)==k ) { - break; - } - // Another thread snatched the item, so pause and retry. - } - } while( !r.choose(k).pop(dst,k,*this) ); - return true; -} - -bool concurrent_queue_base::internal_push_if_not_full( const void* src ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k; - for( atomic_backoff b;;b.pause() ) { - k = r.tail_counter; - if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) { - // Queue is full - return false; - } - // Queue had empty slot with ticket k when we looked. Attempt to claim that slot. - if( r.tail_counter.compare_and_swap(k+1,k)==k ) - break; - // Another thread claimed the slot, so pause and retry. - } - r.choose(k).push(src,k,*this); - return true; -} - -ptrdiff_t concurrent_queue_base::internal_size() const { - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter); -} - -void concurrent_queue_base::internal_set_capacity( ptrdiff_t capacity, size_t /*item_sz*/ ) { - my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity; -} - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_rep -//------------------------------------------------------------------------ -class concurrent_queue_iterator_rep: no_assign { -public: - typedef concurrent_queue_rep::ticket ticket; - ticket head_counter; - const concurrent_queue_base& my_queue; - concurrent_queue_base::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base& queue ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue) - { - const concurrent_queue_rep& rep = *queue.my_rep; - for( size_t k=0; ktail_counter ) - return NULL; - else { - concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, my_queue.items_per_page ); - return static_cast(static_cast(p+1)) + my_queue.item_size*i; - } - } -}; - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_base -//------------------------------------------------------------------------ -concurrent_queue_iterator_base::concurrent_queue_iterator_base( const concurrent_queue_base& queue ) { - my_rep = new concurrent_queue_iterator_rep(queue); - my_item = my_rep->choose(my_rep->head_counter); -} - -void concurrent_queue_iterator_base::assign( const concurrent_queue_iterator_base& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - delete my_rep; - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = new concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -void concurrent_queue_iterator_base::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base& queue = my_rep->my_queue; - __TBB_ASSERT( my_item==my_rep->choose(k), NULL ); - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, queue.items_per_page ); - if( i==queue.items_per_page-1 ) { - concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - my_rep->head_counter = k+1; - my_item = my_rep->choose(k+1); -} - -concurrent_queue_iterator_base::~concurrent_queue_iterator_base() { - delete my_rep; - my_rep = NULL; -} - -} // namespace internal - -} // namespace tbb diff --git 
a/src/tbb/src/old/concurrent_queue_v2.h b/src/tbb/src/old/concurrent_queue_v2.h deleted file mode 100644 index faa8bbc12..000000000 --- a/src/tbb/src/old/concurrent_queue_v2.h +++ /dev/null @@ -1,324 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_H -#define __TBB_concurrent_queue_H - -#include "tbb/tbb_stddef.h" -#include - -namespace tbb { - -template class concurrent_queue; - -//! @cond INTERNAL -namespace internal { - -class concurrent_queue_rep; -class concurrent_queue_iterator_rep; -template class concurrent_queue_iterator; - -//! For internal use only. -/** Type-independent portion of concurrent_queue. - @ingroup containers */ -class concurrent_queue_base: no_copy { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend class concurrent_queue_rep; - friend struct micro_queue; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base; - - // In C++ 1998/2003 (but quite likely not beyond), friend micro_queue's rights - // do not apply to the declaration of micro_queue::pop_finalizer::my_page, - // as a member of a class nested within that friend class, so... -public: - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - -protected: - //! Capacity of the queue - ptrdiff_t my_capacity; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; -private: - virtual void copy_item( page& dst, size_t index, const void* src ) = 0; - virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; -protected: - __TBB_EXPORTED_METHOD concurrent_queue_base( size_t item_size ); - virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base(); - - //! Enqueue item at tail of queue - void __TBB_EXPORTED_METHOD internal_push( const void* src ); - - //! Dequeue item from head of queue - void __TBB_EXPORTED_METHOD internal_pop( void* dst ); - - //! Attempt to enqueue item onto queue. - bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); - - //! 
Get size of queue - ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - - void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); -}; - -//! Type-independent portion of concurrent_queue_iterator. -/** @ingroup containers */ -class concurrent_queue_iterator_base : no_assign{ - //! concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); -protected: - //! Pointer to current item - mutable void* my_item; - - //! Default constructor - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base() : my_rep(NULL), my_item(NULL) {} - - //! Copy constructor - concurrent_queue_iterator_base( const concurrent_queue_iterator_base& i ) : my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator_base( const concurrent_queue_base& queue ); - - //! Assignment - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base& i ); - - //! Advance iterator one step towards tail of queue. - void __TBB_EXPORTED_METHOD advance(); - - //! Destructor - __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base(); -}; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base { -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class ::tbb::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base& queue ) : - concurrent_queue_iterator_base(queue) - { - } -public: - concurrent_queue_iterator() {} - - /** If Value==Container::value_type, then this routine is the copy constructor. - If Value==const Container::value_type, then this routine is a conversion constructor. */ - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal; -//! @endcond - -//! A high-performance thread-safe queue. -/** Multiple threads may each push and pop concurrently. - Assignment and copy construction are not allowed. - @ingroup containers */ -template -class concurrent_queue: public internal::concurrent_queue_base { - template friend class internal::concurrent_queue_iterator; - - //! 
Class used to ensure exception-safety of method "pop" - class destroyer { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - T& get_ref( page& pg, size_t index ) { - __TBB_ASSERT( index(static_cast(&pg+1))[index]; - } - - /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - - /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = from; - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - /** Note that the size_type is a signed integral type. - This is because the size can be negative if there are pending pops without corresponding pushes. */ - typedef std::ptrdiff_t size_type; - - //! Difference type for iterator - typedef std::ptrdiff_t difference_type; - - //! Construct empty queue - concurrent_queue() : - concurrent_queue_base( sizeof(T) ) - { - } - - //! Destroy queue - ~concurrent_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - - //! Dequeue item from head of queue. - /** Block until an item becomes available, and then dequeue it. */ - void pop( T& destination ) { - internal_pop( &destination ); - } - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool push_if_not_full( const T& source ) { - return internal_push_if_not_full( &source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool pop_if_present( T& destination ) { - return internal_pop_if_present( &destination ); - } - - //! Return number of pushes minus number of pops. - /** Note that the result can be negative if there are pops waiting for the - corresponding pushes. The result can also exceed capacity() if there - are push operations in flight. */ - size_type size() const {return internal_size();} - - //! Equivalent to size()<=0. - bool empty() const {return size()<=0;} - - //! Maximum number of allowed elements - size_type capacity() const { - return my_capacity; - } - - //! Set the capacity - /** Setting the capacity to 0 causes subsequent push_if_not_full operations to always fail, - and subsequent push operations to block forever. */ - void set_capacity( size_type new_capacity ) { - internal_set_capacity( new_capacity, sizeof(T) ); - } - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. 
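For concreteness, a minimal usage sketch of this legacy v2 queue interface (the header name and blocking pop() semantics are taken from the declarations above; the example itself is illustrative, not part of the original sources):

    #include "concurrent_queue_v2.h"

    int main() {
        tbb::concurrent_queue<int> q;       // unbounded until set_capacity() is called
        q.push(42);                         // enqueue at tail
        int value = 0;
        q.pop(value);                       // dequeue from head; blocks while empty
        bool got = q.pop_if_present(value); // non-blocking variant; false when empty
        return got ? 1 : 0;                 // queue is already drained here, so got is false
    }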
- //------------------------------------------------------------------------ - iterator begin() {return iterator(*this);} - iterator end() {return iterator();} - const_iterator begin() const {return const_iterator(*this);} - const_iterator end() const {return const_iterator();} - -}; - -template -concurrent_queue::~concurrent_queue() { - while( !empty() ) { - T value; - internal_pop(&value); - } -} - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_H */ diff --git a/src/tbb/src/old/concurrent_vector_v2.cpp b/src/tbb/src/old/concurrent_vector_v2.cpp deleted file mode 100644 index dfd29efb6..000000000 --- a/src/tbb/src/old/concurrent_vector_v2.cpp +++ /dev/null @@ -1,266 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "concurrent_vector_v2.h" -#include "tbb/tbb_machine.h" -#include "../tbb/itt_notify.h" -#include "tbb/task.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include // std::length_error -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -namespace tbb { - -namespace internal { - -void concurrent_vector_base::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init ) { - size_type e = my_early_size; - while( e=pointers_per_short_segment && v.my_segment==v.my_storage ) { - extend_segment(v); - } - } -}; - -void concurrent_vector_base::helper::extend_segment( concurrent_vector_base& v ) { - const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64; - segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_segment, sizeof(segment_t), NULL ); - std::memset( s, 0, pointers_per_long_segment*sizeof(segment_t) ); - // If other threads are trying to set pointers in the short segment, wait for them to finish their - // assignments before we copy the short segment to the long segment. 
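    // The wait that follows is TBB's standard spin-backoff idiom; schematically
    // (a sketch using std::atomic in place of the volatile segment pointers):
    //
    //     std::atomic<void*> slot{nullptr};
    //     tbb::internal::atomic_backoff backoff;
    //     while (slot.load(std::memory_order_acquire) == nullptr)
    //         backoff.pause();             // exponential spinning, then an OS yield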
- atomic_backoff backoff; - while( !v.my_storage[0].array || !v.my_storage[1].array ) backoff.pause(); - s[0] = v.my_storage[0]; - s[1] = v.my_storage[1]; - if( v.my_segment.compare_and_swap( s, v.my_storage )!=v.my_storage ) - NFS_Free(s); -} - -concurrent_vector_base::size_type concurrent_vector_base::internal_capacity() const { - return segment_base( helper::find_segment_end(*this) ); -} - -void concurrent_vector_base::internal_reserve( size_type n, size_type element_size, size_type max_size ) { - if( n>max_size ) { - __TBB_THROW( std::length_error("argument to concurrent_vector::reserve exceeds concurrent_vector::max_size()") ); - } - for( segment_index_t k = helper::find_segment_end(*this); segment_base(k)n-b ) m = n-b; - copy( my_segment[k].array, src.my_segment[k].array, m ); - } - } -} - -void concurrent_vector_base::internal_assign( const concurrent_vector_base& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) { - size_type n = src.my_early_size; - while( my_early_size>n ) { - segment_index_t k = segment_index_of( my_early_size-1 ); - size_type b=segment_base(k); - size_type new_end = b>=n ? b : n; - __TBB_ASSERT( my_early_size>new_end, NULL ); - destroy( (char*)my_segment[k].array+element_size*(new_end-b), my_early_size-new_end ); - my_early_size = new_end; - } - size_type dst_initialized_size = my_early_size; - my_early_size = n; - size_type b; - for( segment_index_t k=0; (b=segment_base(k))n-b ) m = n-b; - size_type a = 0; - if( dst_initialized_size>b ) { - a = dst_initialized_size-b; - if( a>m ) a = m; - assign( my_segment[k].array, src.my_segment[k].array, a ); - m -= a; - a *= element_size; - } - if( m>0 ) - copy( (char*)my_segment[k].array+a, (char*)src.my_segment[k].array+a, m ); - } - __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" ); -} - -void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) { - __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL ); - //size_t tmp = __TBB_FetchAndIncrementWacquire(*(tbb::internal::reference_count*)&my_early_size); - size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size); - index = tmp; - segment_index_t k_old = segment_index_of( tmp ); - size_type base = segment_base(k_old); - helper::extend_segment_if_necessary(*this,k_old); - segment_t& s = my_segment[k_old]; - void* array = s.array; - if( !array ) { - // FIXME - consider factoring this out and share with internal_grow_by - if( base==tmp ) { - __TBB_ASSERT( !s.array, NULL ); - size_t n = segment_size(k_old); - array = NFS_Allocate( n, element_size, NULL ); - ITT_NOTIFY( sync_releasing, &s.array ); - s.array = array; - } else { - ITT_NOTIFY(sync_prepare, &s.array); - spin_wait_while_eq( s.array, (void*)0 ); - ITT_NOTIFY(sync_acquired, &s.array); - array = s.array; - } - } - size_type j_begin = tmp-base; - return (void*)((char*)array+element_size*j_begin); -} - -concurrent_vector_base::size_type concurrent_vector_base::internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init ) { - size_type result = my_early_size.fetch_and_add(delta); - internal_grow( result, result+delta, element_size, init ); - return result; -} - -void concurrent_vector_base::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op1 init ) { - __TBB_ASSERT( start finish-base ? 
finish-base : n; - (*init)( (void*)((char*)array+element_size*j_begin), j_end-j_begin ); - tmp = base+j_end; - } while( tmp0 ) { - segment_index_t k_old = segment_index_of(finish-1); - segment_t& s = my_segment[k_old]; - __TBB_ASSERT( s.array, NULL ); - size_type base = segment_base(k_old); - size_type j_end = finish-base; - __TBB_ASSERT( j_end, NULL ); - (*destroy)( s.array, j_end ); - finish = base; - } - - // Free the arrays - if( reclaim_storage ) { - size_t k = helper::find_segment_end(*this); - while( k>0 ) { - --k; - segment_t& s = my_segment[k]; - void* array = s.array; - s.array = NULL; - NFS_Free( array ); - } - // Clear short segment. - my_storage[0].array = NULL; - my_storage[1].array = NULL; - segment_t* s = my_segment; - if( s!=my_storage ) { - my_segment = my_storage; - NFS_Free( s ); - } - } -} - -} // namespace internal - -} // tbb diff --git a/src/tbb/src/old/concurrent_vector_v2.h b/src/tbb/src/old/concurrent_vector_v2.h deleted file mode 100644 index 40543d457..000000000 --- a/src/tbb/src/old/concurrent_vector_v2.h +++ /dev/null @@ -1,523 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_vector_H -#define __TBB_concurrent_vector_H - -#include "tbb/tbb_stddef.h" -#include "tbb/atomic.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/blocked_range.h" -#include "tbb/tbb_machine.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -template -class concurrent_vector; - -//! @cond INTERNAL -namespace internal { - - //! Base class of concurrent vector implementation. - /** @ingroup containers */ - class concurrent_vector_base { - protected: - - // Basic types declarations - typedef unsigned long segment_index_t; - typedef size_t size_type; - - //! Log2 of "min_segment_size". - static const int lg_min_segment_size = 4; - - //! Minimum size (in physical items) of a segment. - static const int min_segment_size = segment_index_t(1)<>1< my_early_size; - - /** Can be zero-initialized. 
*/ - struct segment_t { - /** Declared volatile because in weak memory model, must have ld.acq/st.rel */ - void* volatile array; -#if TBB_USE_ASSERT - ~segment_t() { - __TBB_ASSERT( !array, "should have been set to NULL by clear" ); - } -#endif /* TBB_USE_ASSERT */ - }; - - // Data fields - - //! Pointer to the segments table - atomic my_segment; - - //! embedded storage of segment pointers - segment_t my_storage[2]; - - // Methods - - concurrent_vector_base() { - my_early_size = 0; - my_storage[0].array = NULL; - my_storage[1].array = NULL; - my_segment = my_storage; - } - - //! An operation on an n-element array starting at begin. - typedef void(__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n ); - - //! An operation on n-element destination array and n-element source array. - typedef void(__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n ); - - void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init ); - void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op1 init ); - size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init ); - void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index ); - void __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy, bool reclaim_storage ); - void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base& src, size_type element_size, internal_array_op2 copy ); - void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base& src, size_type element_size, - internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ); -private: - //! Private functionality that does not cross DLL boundary. - class helper; - friend class helper; - }; - - //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/ - /** Value is either the T or const T type of the container. - @ingroup containers */ - template - class vector_iterator -#if defined(_WIN64) && defined(_MSC_VER) - // Ensure that Microsoft's internal template function _Val_type works correctly. - : public std::iterator -#endif /* defined(_WIN64) && defined(_MSC_VER) */ - { - //! concurrent_vector over which we are iterating. - Container* my_vector; - - //! Index into the vector - size_t my_index; - - //! Caches my_vector->internal_subscript(my_index) - /** NULL if cached value is not available */ - mutable Value* my_item; - - template - friend bool operator==( const vector_iterator& i, const vector_iterator& j ); - - template - friend bool operator<( const vector_iterator& i, const vector_iterator& j ); - - template - friend ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ); - - template - friend class internal::vector_iterator; - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class tbb::concurrent_vector; -#else -public: // workaround for MSVC -#endif - - vector_iterator( const Container& vector, size_t index ) : - my_vector(const_cast(&vector)), - my_index(index), - my_item(NULL) - {} - - public: - //! 
Default constructor - vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {} - - vector_iterator( const vector_iterator& other ) : - my_vector(other.my_vector), - my_index(other.my_index), - my_item(other.my_item) - {} - - vector_iterator operator+( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index+offset ); - } - friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) { - return vector_iterator( *v.my_vector, v.my_index+offset ); - } - vector_iterator operator+=( ptrdiff_t offset ) { - my_index+=offset; - my_item = NULL; - return *this; - } - vector_iterator operator-( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index-offset ); - } - vector_iterator operator-=( ptrdiff_t offset ) { - my_index-=offset; - my_item = NULL; - return *this; - } - Value& operator*() const { - Value* item = my_item; - if( !item ) { - item = my_item = &my_vector->internal_subscript(my_index); - } - __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" ); - return *item; - } - Value& operator[]( ptrdiff_t k ) const { - return my_vector->internal_subscript(my_index+k); - } - Value* operator->() const {return &operator*();} - - //! Pre increment - vector_iterator& operator++() { - size_t k = ++my_index; - if( my_item ) { - // Following test uses 2's-complement wizardry and fact that - // min_segment_size is a power of 2. - if( (k& k-concurrent_vector::min_segment_size)==0 ) { - // k is a power of two that is at least k-min_segment_size - my_item= NULL; - } else { - ++my_item; - } - } - return *this; - } - - //! Pre decrement - vector_iterator& operator--() { - __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); - size_t k = my_index--; - if( my_item ) { - // Following test uses 2's-complement wizardry and fact that - // min_segment_size is a power of 2. - if( (k& k-concurrent_vector::min_segment_size)==0 ) { - // k is a power of two that is at least k-min_segment_size - my_item= NULL; - } else { - --my_item; - } - } - return *this; - } - - //! Post increment - vector_iterator operator++(int) { - vector_iterator result = *this; - operator++(); - return result; - } - - //! Post decrement - vector_iterator operator--(int) { - vector_iterator result = *this; - operator--(); - return result; - } - - // STL support - - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - bool operator==( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index==j.my_index; - } - - template - bool operator!=( const vector_iterator& i, const vector_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index - bool operator>( const vector_iterator& i, const vector_iterator& j ) { - return j - bool operator>=( const vector_iterator& i, const vector_iterator& j ) { - return !(i - bool operator<=( const vector_iterator& i, const vector_iterator& j ) { - return !(j - ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ) { - return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index); - } - -} // namespace internal -//! @endcond - -//! 
Concurrent vector -/** @ingroup containers */ -template -class concurrent_vector: private internal::concurrent_vector_base { -public: - using internal::concurrent_vector_base::size_type; -private: - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ ) : blocked_range(begin_,end_,grainsize_) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - template - friend class internal::vector_iterator; -public: - typedef T& reference; - typedef const T& const_reference; - typedef T value_type; - typedef ptrdiff_t difference_type; - - //! Construct empty vector. - concurrent_vector() {} - - //! Copy a vector. - concurrent_vector( const concurrent_vector& vector ) : internal::concurrent_vector_base() - { internal_copy(vector,sizeof(T),©_array); } - - //! Assignment - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( this!=&vector ) - internal_assign(vector,sizeof(T),&destroy_array,&assign_array,©_array); - return *this; - } - - //! Clear and destroy vector. - ~concurrent_vector() {internal_clear(&destroy_array,/*reclaim_storage=*/true);} - - //------------------------------------------------------------------------ - // Concurrent operations - //------------------------------------------------------------------------ - //! Grow by "delta" elements. - /** Returns old size. */ - size_type grow_by( size_type delta ) { - return delta ? internal_grow_by( delta, sizeof(T), &initialize_array ) : my_early_size.load(); - } - - //! Grow array until it has at least n elements. - void grow_to_at_least( size_type n ) { - if( my_early_size iterator; - typedef internal::vector_iterator const_iterator; - -#if !defined(_MSC_VER) || _CPPLIB_VER>=300 - // Assume ISO standard definition of std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#else - // Use non-standard std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */ - - // Forward sequence - iterator begin() {return iterator(*this,0);} - iterator end() {return iterator(*this,size());} - const_iterator begin() const {return const_iterator(*this,0);} - const_iterator end() const {return const_iterator(*this,size());} - - // Reverse sequence - reverse_iterator rbegin() {return reverse_iterator(end());} - reverse_iterator rend() {return reverse_iterator(begin());} - const_reverse_iterator rbegin() const {return const_reverse_iterator(end());} - const_reverse_iterator rend() const {return const_reverse_iterator(begin());} - - //------------------------------------------------------------------------ - // Support for TBB algorithms (ranges) - //------------------------------------------------------------------------ - typedef generic_range_type range_type; - typedef generic_range_type const_range_type; - - //! Get range to use with parallel algorithms - range_type range( size_t grainsize = 1 ) { - return range_type( begin(), end(), grainsize ); - } - - //! 
Get const range for iterating with parallel algorithms - const_range_type range( size_t grainsize = 1 ) const { - return const_range_type( begin(), end(), grainsize ); - } - - //------------------------------------------------------------------------ - // Size and capacity - //------------------------------------------------------------------------ - //! Return size of vector. - size_type size() const {return my_early_size;} - - //! Return false if vector is not empty. - bool empty() const {return !my_early_size;} - - //! Maximum size to which array can grow without allocating more memory. - size_type capacity() const {return internal_capacity();} - - //! Allocate enough space to grow to size n without having to allocate more memory later. - /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. - The capacity afterwards may be bigger than the requested reservation. */ - void reserve( size_type n ) { - if( n ) - internal_reserve(n, sizeof(T), max_size()); - } - - //! Upper bound on argument to reserve. - size_type max_size() const {return (~size_t(0))/sizeof(T);} - - //! Not thread safe - /** Does not change capacity. */ - void clear() {internal_clear(&destroy_array,/*reclaim_storage=*/false);} -private: - //! Get reference to element at given index. - T& internal_subscript( size_type index ) const; - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array( void* begin, size_type n ); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n ); - - //! Assign n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n ); - - //! Destroy n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n ); -}; - -template -T& concurrent_vector::internal_subscript( size_type index ) const { - __TBB_ASSERT( index(my_segment[k].array)[j]; -} - -template -void concurrent_vector::initialize_array( void* begin, size_type n ) { - T* array = static_cast(begin); - for( size_type j=0; j -void concurrent_vector::copy_array( void* dst, const void* src, size_type n ) { - T* d = static_cast(dst); - const T* s = static_cast(src); - for( size_type j=0; j -void concurrent_vector::assign_array( void* dst, const void* src, size_type n ) { - T* d = static_cast(dst); - const T* s = static_cast(src); - for( size_type j=0; j -void concurrent_vector::destroy_array( void* begin, size_type n ) { - T* array = static_cast(begin); - for( size_type j=n; j>0; --j ) - array[j-1].~T(); -} - -} // namespace tbb - -#endif /* __TBB_concurrent_vector_H */ diff --git a/src/tbb/src/old/spin_rw_mutex_v2.cpp b/src/tbb/src/old/spin_rw_mutex_v2.cpp deleted file mode 100644 index 52fcc9b97..000000000 --- a/src/tbb/src/old/spin_rw_mutex_v2.cpp +++ /dev/null @@ -1,156 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
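Editorial note on the deleted concurrent_vector code above: internal_subscript and the iterator's cache-invalidation test both rely on the vector's segmented layout, where segment sizes grow as powers of two, so the (segment, offset) pair for a flat index falls out of bit arithmetic. Below is a minimal, self-contained sketch of that math; the constant and function names are mine, not TBB's, and min_segment_size is assumed to be a power of two, as the deleted comments state.

```cpp
#include <cstddef>
#include <cstdio>

// Sketch of the segmented layout behind internal_subscript: segment 0 holds
// the first min elements, and every later segment doubles, so segment k >= 1
// covers flat indices [min * 2^(k-1), min * 2^k). Names are illustrative.
constexpr std::size_t kMinSegmentSize = 8; // assumed a power of two

std::size_t log2_floor(std::size_t x) {   // position of the highest set bit (x > 0)
    std::size_t r = 0;
    while (x >>= 1) ++r;
    return r;
}

// Map a flat index to its (segment, offset) pair with pure bit arithmetic.
void locate(std::size_t index, std::size_t& segment, std::size_t& offset) {
    if (index < kMinSegmentSize) {
        segment = 0;
        offset  = index;
    } else {
        std::size_t msb = log2_floor(index);
        segment = msb - log2_floor(kMinSegmentSize) + 1;
        offset  = index - (std::size_t(1) << msb);
    }
}

// The iterator's cache-invalidation test: true exactly when k is a power of
// two that is at least kMinSegmentSize, i.e. the first index of a new segment.
// (k == 0 also returns true, but the deleted iterator only tests k >= 1.)
bool starts_new_segment(std::size_t k) {
    return (k & (k - kMinSegmentSize)) == 0;
}

int main() {
    const std::size_t samples[] = {0, 7, 8, 23, 24, 56};
    for (std::size_t idx : samples) {
        std::size_t s, o;
        locate(idx, s, o);
        std::printf("index %2zu -> segment %zu, offset %2zu (boundary: %d)\n",
                    idx, s, o, int(starts_new_segment(idx)));
    }
}
```

With min = 8 the segment sizes run 8, 8, 16, 32, ..., so a fresh segment begins exactly at each power of two >= 8; that is the boundary the iterator's two's-complement test detects before invalidating its cached element pointer.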
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "spin_rw_mutex_v2.h" -#include "tbb/tbb_machine.h" -#include "../tbb/itt_notify.h" -#include "tbb/atomic.h" - -namespace tbb { - -using namespace internal; - -static inline bool CAS(volatile uintptr_t &addr, uintptr_t newv, uintptr_t oldv) { - return as_atomic(addr).compare_and_swap(newv, oldv) == oldv; -} - -//! Signal that write lock is released -void spin_rw_mutex::internal_itt_releasing(spin_rw_mutex *mutex) { - __TBB_ASSERT_EX(mutex, NULL); // To prevent compiler warnings - ITT_NOTIFY(sync_releasing, mutex); -} - -//! Acquire write (exclusive) lock on the given mutex. -bool spin_rw_mutex::internal_acquire_writer(spin_rw_mutex *mutex) -{ - ITT_NOTIFY(sync_prepare, mutex); - for( atomic_backoff backoff;;backoff.pause() ) { - state_t s = mutex->state; - if( !(s & BUSY) ) { // no readers, no writers - if( CAS(mutex->state, WRITER, s) ) - break; // successfully stored writer flag - backoff.reset(); // we could be very close to complete op. - } else if( !(s & WRITER_PENDING) ) { // no pending writers - __TBB_AtomicOR(&mutex->state, WRITER_PENDING); - } - } - ITT_NOTIFY(sync_acquired, mutex); - __TBB_ASSERT( (mutex->state & BUSY)==WRITER, "invalid state of a write lock" ); - return false; -} - -//! Release write lock on the given mutex -void spin_rw_mutex::internal_release_writer(spin_rw_mutex *mutex) { - __TBB_ASSERT( (mutex->state & BUSY)==WRITER, "invalid state of a write lock" ); - ITT_NOTIFY(sync_releasing, mutex); - mutex->state = 0; -} - -//! Acquire read (shared) lock on the given mutex. -void spin_rw_mutex::internal_acquire_reader(spin_rw_mutex *mutex) { - ITT_NOTIFY(sync_prepare, mutex); - for( atomic_backoff backoff;;backoff.pause() ) { - state_t s = mutex->state; - if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests - if( CAS(mutex->state, s+ONE_READER, s) ) - break; // successfully stored increased number of readers - backoff.reset(); // we could be very close to complete op. - } - } - ITT_NOTIFY(sync_acquired, mutex); - __TBB_ASSERT( mutex->state & READERS, "invalid state of a read lock: no readers" ); - __TBB_ASSERT( !(mutex->state & WRITER), "invalid state of a read lock: active writer" ); -} - -//! Upgrade reader to become a writer. 
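The writer-acquire loop in the deleted spin_rw_mutex_v2.cpp above is a classic test-and-CAS spin loop with a writer-preference hint. A portable sketch of the same protocol with std::atomic, where the state encoding is copied from the deleted header and TBB's atomic_backoff is approximated by yield():

```cpp
#include <atomic>
#include <cstdint>
#include <thread>

// Sketch of internal_acquire_writer/internal_release_writer above, using
// std::atomic in place of TBB's atomics.
constexpr std::uintptr_t WRITER         = 1;
constexpr std::uintptr_t WRITER_PENDING = 2;
constexpr std::uintptr_t READERS        = ~(WRITER | WRITER_PENDING);
constexpr std::uintptr_t BUSY           = WRITER | READERS;

std::atomic<std::uintptr_t> state{0};

void acquire_writer() {
    for (;;) {
        std::uintptr_t s = state.load(std::memory_order_relaxed);
        if (!(s & BUSY)) {
            // No readers, no writer: try to claim the lock in one CAS.
            // Storing WRITER also clears any WRITER_PENDING hint.
            if (state.compare_exchange_weak(s, WRITER, std::memory_order_acquire))
                return;
        } else if (!(s & WRITER_PENDING)) {
            // Busy: raise the writer-pending bit so new readers back off;
            // this is the writer-preference part of the protocol.
            state.fetch_or(WRITER_PENDING, std::memory_order_relaxed);
        }
        std::this_thread::yield(); // stand-in for TBB's atomic_backoff
    }
}

void release_writer() {
    state.store(0, std::memory_order_release); // drops WRITER and any pending hint
}

int main() {
    acquire_writer();
    release_writer();
}
```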
-/** Returns whether the upgrade happened without releasing and re-acquiring the lock */ -bool spin_rw_mutex::internal_upgrade(spin_rw_mutex *mutex) { - state_t s = mutex->state; - __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " ); - __TBB_ASSERT( !(s & WRITER), "invalid state before upgrade: active writer " ); - // check and set writer-pending flag - // required conditions: either no pending writers, or we are the only reader - // (with multiple readers and pending writer, another upgrade could have been requested) - while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) { - if( CAS(mutex->state, s | WRITER_PENDING, s) ) - { - ITT_NOTIFY(sync_prepare, mutex); - atomic_backoff backoff; - while( (mutex->state & READERS) != ONE_READER ) backoff.pause(); - __TBB_ASSERT(mutex->state == (ONE_READER | WRITER_PENDING),"invalid state when upgrading to writer"); - // both new readers and writers are blocked at this time - mutex->state = WRITER; - ITT_NOTIFY(sync_acquired, mutex); - __TBB_ASSERT( (mutex->state & BUSY) == WRITER, "invalid state after upgrade" ); - return true; // successfully upgraded - } else { - s = mutex->state; // re-read - } - } - // slow reacquire - internal_release_reader(mutex); - return internal_acquire_writer(mutex); // always returns false -} - -//! Downgrade writer to a reader -void spin_rw_mutex::internal_downgrade(spin_rw_mutex *mutex) { - __TBB_ASSERT( (mutex->state & BUSY) == WRITER, "invalid state before downgrade" ); - ITT_NOTIFY(sync_releasing, mutex); - mutex->state = ONE_READER; - __TBB_ASSERT( mutex->state & READERS, "invalid state after downgrade: no readers" ); - __TBB_ASSERT( !(mutex->state & WRITER), "invalid state after downgrade: active writer" ); -} - -//! Release read lock on the given mutex -void spin_rw_mutex::internal_release_reader(spin_rw_mutex *mutex) -{ - __TBB_ASSERT( mutex->state & READERS, "invalid state of a read lock: no readers" ); - __TBB_ASSERT( !(mutex->state & WRITER), "invalid state of a read lock: active writer" ); - ITT_NOTIFY(sync_releasing, mutex); // release reader - __TBB_FetchAndAddWrelease((volatile void *)&(mutex->state),-(intptr_t)ONE_READER); -} - -//! Try to acquire write lock on the given mutex -bool spin_rw_mutex::internal_try_acquire_writer( spin_rw_mutex * mutex ) -{ - // for a writer: only possible to acquire if no active readers or writers - state_t s = mutex->state; // on IA-64 architecture, this volatile load has acquire semantic - if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101 - if( CAS(mutex->state, WRITER, s) ) { - ITT_NOTIFY(sync_acquired, mutex); - return true; // successfully stored writer flag - } - return false; -} - -//! Try to acquire read lock on the given mutex -bool spin_rw_mutex::internal_try_acquire_reader( spin_rw_mutex * mutex ) -{ - // for a reader: acquire if no active or waiting writers - state_t s = mutex->state; // on IA-64 architecture, a load of volatile variable has acquire semantic - while( !(s & (WRITER|WRITER_PENDING)) ) // no writers - if( CAS(mutex->state, s+ONE_READER, s) ) { - ITT_NOTIFY(sync_acquired, mutex); - return true; // successfully stored increased number of readers - } - return false; -} - -} // namespace tbb diff --git a/src/tbb/src/old/spin_rw_mutex_v2.h b/src/tbb/src/old/spin_rw_mutex_v2.h deleted file mode 100644 index 501fa8cb1..000000000 --- a/src/tbb/src/old/spin_rw_mutex_v2.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
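The return value of internal_upgrade encodes an important contract: false means the lock was released and re-acquired, so anything observed under the reader lock may be stale. A usage sketch against the current tbb::spin_rw_mutex API, which inherits this contract from the deleted v2 code (the container and function names here are mine):

```cpp
#include <tbb/spin_rw_mutex.h>
#include <algorithm>
#include <vector>

// Sketch: read-mostly lookup that upgrades to a writer only when it must
// insert, re-validating when upgrade_to_writer() reports the lock was
// dropped and re-taken.
tbb::spin_rw_mutex mutex;
std::vector<int> data;

void append_if_missing(int value) {
    tbb::spin_rw_mutex::scoped_lock lock(mutex, /*write =*/ false);
    bool present = std::find(data.begin(), data.end(), value) != data.end();
    if (!present) {
        if (!lock.upgrade_to_writer()) {
            // The mutex was released while upgrading: another writer may
            // have inserted the value in the window, so look again.
            present = std::find(data.begin(), data.end(), value) != data.end();
        }
        if (!present)
            data.push_back(value);
    }
}

int main() {
    append_if_missing(7);
    append_if_missing(7); // second call sees the value and never writes
}
```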
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_spin_rw_mutex_H -#define __TBB_spin_rw_mutex_H - -#include "tbb/tbb_stddef.h" - -namespace tbb { - -//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference -/** @ingroup synchronization */ -class spin_rw_mutex { - //! @cond INTERNAL - - //! Present so that 1.0 headers work with 1.1 dynamic library. - static void __TBB_EXPORTED_FUNC internal_itt_releasing(spin_rw_mutex *); - - //! Internal acquire write lock. - static bool __TBB_EXPORTED_FUNC internal_acquire_writer(spin_rw_mutex *); - - //! Out of line code for releasing a write lock. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - static void __TBB_EXPORTED_FUNC internal_release_writer(spin_rw_mutex *); - - //! Internal acquire read lock. - static void __TBB_EXPORTED_FUNC internal_acquire_reader(spin_rw_mutex *); - - //! Internal upgrade reader to become a writer. - static bool __TBB_EXPORTED_FUNC internal_upgrade(spin_rw_mutex *); - - //! Out of line code for downgrading a writer to a reader. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - static void __TBB_EXPORTED_FUNC internal_downgrade(spin_rw_mutex *); - - //! Internal release read lock. - static void __TBB_EXPORTED_FUNC internal_release_reader(spin_rw_mutex *); - - //! Internal try_acquire write lock. - static bool __TBB_EXPORTED_FUNC internal_try_acquire_writer(spin_rw_mutex *); - - //! Internal try_acquire read lock. - static bool __TBB_EXPORTED_FUNC internal_try_acquire_reader(spin_rw_mutex *); - - //! @endcond -public: - //! Construct unacquired mutex. - spin_rw_mutex() : state(0) {} - -#if TBB_USE_ASSERT - //! Destructor asserts if the mutex is acquired, i.e. state is zero. - ~spin_rw_mutex() { - __TBB_ASSERT( !state, "destruction of an acquired mutex"); - }; -#endif /* TBB_USE_ASSERT */ - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : mutex(NULL) {} - - //! Construct and acquire lock on given mutex. 
- scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - mutex = &m; - is_writer = write; - if( write ) internal_acquire_writer(mutex); - else internal_acquire_reader(mutex); - } - - //! Upgrade reader to become a writer. - /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ - bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); - is_writer = true; - return internal_upgrade(mutex); - } - - //! Release lock. - void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - spin_rw_mutex *m = mutex; - mutex = NULL; - if( is_writer ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release_writer(m); -#else - m->state = 0; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } else { - internal_release_reader(m); - } - }; - - //! Downgrade writer to become a reader. - bool downgrade_to_reader() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_downgrade(mutex); -#else - mutex->state = 4; // Bit 2 - reader, 00..00100 -#endif - is_writer = false; - return true; - } - - //! Try acquire lock on given mutex. - bool try_acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - bool result; - is_writer = write; - result = write? internal_try_acquire_writer(&m) - : internal_try_acquire_reader(&m); - if( result ) mutex = &m; - return result; - } - - private: - //! The pointer to the current mutex that is held, or NULL if no mutex is held. - spin_rw_mutex* mutex; - - //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock. - /** Not defined if not holding a lock. */ - bool is_writer; - }; - -private: - typedef uintptr_t state_t; - static const state_t WRITER = 1; - static const state_t WRITER_PENDING = 2; - static const state_t READERS = ~(WRITER | WRITER_PENDING); - static const state_t ONE_READER = 4; - static const state_t BUSY = WRITER | READERS; - /** Bit 0 = writer is holding lock - Bit 1 = request by a writer to acquire lock (hint to readers to wait) - Bit 2..N = number of readers holding lock */ - volatile state_t state; -}; - -} // namespace tbb - -#endif /* __TBB_spin_rw_mutex_H */ diff --git a/src/tbb/src/old/task_v2.cpp b/src/tbb/src/old/task_v2.cpp deleted file mode 100644 index 248327879..000000000 --- a/src/tbb/src/old/task_v2.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* This compilation unit provides definition of task::destroy( task& ) - that is binary compatible with TBB 2.x. In TBB 3.0, the method became - static, and its name decoration changed, though the definition remained. - - The macro switch should be set prior to including task.h - or any TBB file that might bring task.h up. -*/ -#define __TBB_DEPRECATED_TASK_INTERFACE 1 -#include "tbb/task.h" - -namespace tbb { - -void task::destroy( task& victim ) { - // Forward to static version - task_base::destroy( victim ); -} - -} // namespace tbb diff --git a/src/tbb/src/old/test_concurrent_queue_v2.cpp b/src/tbb/src/old/test_concurrent_queue_v2.cpp deleted file mode 100644 index 9b98400de..000000000 --- a/src/tbb/src/old/test_concurrent_queue_v2.cpp +++ /dev/null @@ -1,349 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
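The deleted task_v2.cpp above illustrates a general ABI-preservation technique: when a member function changes linkage (here, becoming static, which changes its mangled name), the library keeps an out-of-line definition with the old signature that simply forwards to the new entry point. A hedged sketch with invented names:

```cpp
#include <cstdio>

// In v2 of a hypothetical library, log() became static, changing its mangled
// symbol; the old non-static member is kept and defined out of line so that
// binaries built against v1 still resolve their call. All names invented.
struct logger {
    static void log(const char* msg);   // current interface
    void log_compat(const char* msg);   // retained for old binaries only
};

void logger::log(const char* msg) { std::puts(msg); }

// The old symbol's definition just forwards to the current implementation,
// mirroring how task::destroy() forwards to task_base::destroy() above.
void logger::log_compat(const char* msg) { logger::log(msg); }

int main() {
    logger l;
    l.log_compat("old call path");      // what a v1-era caller resolves to
    logger::log("new call path");
}
```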
-*/ - -#include "old/concurrent_queue_v2.h" -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -#include "../test/harness_assert.h" -#include "../test/harness.h" - -static tbb::atomic FooConstructed; -static tbb::atomic FooDestroyed; - -enum state_t{ - LIVE=0x1234, - DEAD=0xDEAD -}; - -class Foo { - state_t state; -public: - int thread_id; - int serial; - Foo() : state(LIVE) { - ++FooConstructed; - } - Foo( const Foo& item ) : state(LIVE) { - ASSERT( item.state==LIVE, NULL ); - ++FooConstructed; - thread_id = item.thread_id; - serial = item.serial; - } - ~Foo() { - ASSERT( state==LIVE, NULL ); - ++FooDestroyed; - state=DEAD; - thread_id=DEAD; - serial=DEAD; - } - void operator=( Foo& item ) { - ASSERT( item.state==LIVE, NULL ); - ASSERT( state==LIVE, NULL ); - thread_id = item.thread_id; - serial = item.serial; - } - bool is_const() {return false;} - bool is_const() const {return true;} -}; - -const size_t MAXTHREAD = 256; - -static int Sum[MAXTHREAD]; - -//! Count of various pop operations -/** [0] = pop_if_present that failed - [1] = pop_if_present that succeeded - [2] = pop */ -static tbb::atomic PopKind[3]; - -const int M = 10000; - -struct Body: NoAssign { - tbb::concurrent_queue* queue; - const int nthread; - Body( int nthread_ ) : nthread(nthread_) {} - void operator()( long thread_id ) const { - long pop_kind[3] = {0,0,0}; - int serial[MAXTHREAD+1]; - memset( serial, 0, nthread*sizeof(unsigned) ); - ASSERT( thread_idpop_if_present(f); - ++pop_kind[prepopped]; - } - Foo g; - g.thread_id = thread_id; - g.serial = j+1; - queue->push( g ); - if( !prepopped ) { - queue->pop(f); - ++pop_kind[2]; - } - ASSERT( f.thread_id<=nthread, NULL ); - ASSERT( f.thread_id==nthread || serial[f.thread_id]0, "nthread must be positive" ); - if( prefill+1>=capacity ) - return; - bool success = false; - for( int k=0; k<3; ++k ) - PopKind[k] = 0; - for( int trial=0; !success; ++trial ) { - FooConstructed = 0; - FooDestroyed = 0; - Body body(nthread); - tbb::concurrent_queue queue; - queue.set_capacity( capacity ); - body.queue = &queue; - for( int i=0; i=0; ) { - ASSERT( !queue.empty(), NULL ); - Foo f; - queue.pop(f); - ASSERT( queue.size()==i, NULL ); - sum += f.serial-1; - } - ASSERT( queue.empty(), NULL ); - ASSERT( queue.size()==0, NULL ); - if( sum!=expected ) - printf("sum=%d expected=%d\n",sum,expected); - ASSERT( FooConstructed==FooDestroyed, NULL ); - - success = true; - if( nthread>1 && prefill==0 ) { - // Check that pop_if_present got sufficient exercise - for( int k=0; k<2; ++k ) { -#if (_WIN32||_WIN64) - // The TBB library on Windows seems to have a tough time generating - // the desired interleavings for pop_if_present, so the code tries longer, and settles - // for fewer desired interleavings. - const int max_trial = 100; - const int min_requirement = 20; -#else - const int min_requirement = 100; - const int max_trial = 20; -#endif /* _WIN32||_WIN64 */ - if( PopKind[k]=max_trial ) { - if( Verbose ) - printf("Warning: %d threads had only %ld pop_if_present operations %s after %d trials (expected at least %d). " - "This problem may merely be unlucky scheduling. " - "Investigate only if it happens repeatedly.\n", - nthread, long(PopKind[k]), k==0?"failed":"succeeded", max_trial, min_requirement); - else - printf("Warning: the number of %s pop_if_present operations is less than expected for %d threads. 
Investigate if it happens repeatedly.\n", - k==0?"failed":"succeeded", nthread ); - } else { - success = false; - } - } - } - } - } -} - -template -void TestIteratorAux( Iterator1 i, Iterator2 j, int size ) { - // Now test iteration - Iterator1 old_i; - for( int k=0; k" - ASSERT( k+2==i->serial, NULL ); - } - // Test assignment - old_i = i; - } - ASSERT( k+1==f.serial, NULL ); - } - ASSERT( !(i!=j), NULL ); - ASSERT( i==j, NULL ); -} - -template -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j); - ASSERT( i==j, NULL ); - ASSERT( !(i!=j), NULL ); - Iterator1 k; - k = j; - ASSERT( k==j, NULL ); - ASSERT( !(k!=j), NULL ); -} - -//! Test the iterators for concurrent_queue -void TestIterator() { - tbb::concurrent_queue queue; - tbb::concurrent_queue& const_queue = queue; - for( int j=0; j<500; ++j ) { - TestIteratorAux( queue.begin(), queue.end(), j ); - TestIteratorAux( const_queue.begin(), const_queue.end(), j ); - TestIteratorAux( const_queue.begin(), queue.end(), j ); - TestIteratorAux( queue.begin(), const_queue.end(), j ); - Foo f; - f.serial = j+1; - queue.push(f); - } - TestIteratorAssignment::const_iterator>( const_queue.begin() ); - TestIteratorAssignment::const_iterator>( queue.begin() ); - TestIteratorAssignment:: iterator>( queue.begin() ); -} - -void TestConcurrentQueueType() { - AssertSameType( tbb::concurrent_queue::value_type(), Foo() ); - Foo f; - const Foo g; - tbb::concurrent_queue::reference r = f; - ASSERT( &r==&f, NULL ); - ASSERT( !r.is_const(), NULL ); - tbb::concurrent_queue::const_reference cr = g; - ASSERT( &cr==&g, NULL ); - ASSERT( cr.is_const(), NULL ); -} - -template -void TestEmptyQueue() { - const tbb::concurrent_queue queue; - ASSERT( queue.size()==0, NULL ); - ASSERT( queue.capacity()>0, NULL ); - ASSERT( size_t(queue.capacity())>=size_t(-1)/(sizeof(void*)+sizeof(T)), NULL ); -} - -void TestFullQueue() { - for( int n=0; n<10; ++n ) { - FooConstructed = 0; - FooDestroyed = 0; - tbb::concurrent_queue queue; - queue.set_capacity(n); - for( int i=0; i<=n; ++i ) { - Foo f; - f.serial = i; - bool result = queue.push_if_not_full( f ); - ASSERT( result==(i -struct TestNegativeQueueBody: NoAssign { - tbb::concurrent_queue& queue; - const int nthread; - TestNegativeQueueBody( tbb::concurrent_queue& q, int n ) : queue(q), nthread(n) {} - void operator()( int k ) const { - if( k==0 ) { - int number_of_pops = nthread-1; - // Wait for all pops to pend. - while( queue.size()>-number_of_pops ) { - __TBB_Yield(); - } - for( int i=0; ; ++i ) { - ASSERT( queue.size()==i-number_of_pops, NULL ); - ASSERT( queue.empty()==(queue.size()<=0), NULL ); - if( i==number_of_pops ) break; - // Satisfy another pop - queue.push( T() ); - } - } else { - // Pop item from queue - T item; - queue.pop(item); - } - } -}; - -//! Test a queue with a negative size. 
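TestFullQueue above exercises the bounded-capacity contract: once the queue holds capacity items, non-blocking pushes fail. A sketch of the same behaviour with today's tbb::concurrent_bounded_queue, where the v2 queue's push_if_not_full corresponds to try_push (assuming the current oneTBB API):

```cpp
#include <tbb/concurrent_queue.h>
#include <cstdio>

// Bounded-queue sketch: pushes beyond the capacity are rejected rather than
// blocking, and try_pop drains the accepted items in FIFO order.
int main() {
    tbb::concurrent_bounded_queue<int> queue;
    queue.set_capacity(3);

    for (int i = 0; i < 5; ++i) {
        bool accepted = queue.try_push(i);  // fails once 3 items are queued
        std::printf("push %d -> %s\n", i, accepted ? "ok" : "full");
    }

    int value;
    while (queue.try_pop(value))            // non-blocking drain
        std::printf("pop  -> %d\n", value);
}
```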
-template -void TestNegativeQueue( int nthread ) { - tbb::concurrent_queue queue; - NativeParallelFor( nthread, TestNegativeQueueBody(queue,nthread) ); -} - -int TestMain () { - TestEmptyQueue(); - TestEmptyQueue(); - TestFullQueue(); - TestConcurrentQueueType(); - TestIterator(); - - // Test concurrent operations - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - TestNegativeQueue(nthread); - for( int prefill=0; prefill<64; prefill+=(1+prefill/3) ) { - TestPushPop(prefill,ptrdiff_t(-1),nthread); - TestPushPop(prefill,ptrdiff_t(1),nthread); - TestPushPop(prefill,ptrdiff_t(2),nthread); - TestPushPop(prefill,ptrdiff_t(10),nthread); - TestPushPop(prefill,ptrdiff_t(100),nthread); - } - } - return Harness::Done; -} diff --git a/src/tbb/src/old/test_concurrent_vector_v2.cpp b/src/tbb/src/old/test_concurrent_vector_v2.cpp deleted file mode 100644 index cad4de566..000000000 --- a/src/tbb/src/old/test_concurrent_vector_v2.cpp +++ /dev/null @@ -1,558 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "concurrent_vector_v2.h" -#include -#include -#include "../test/harness_assert.h" - -tbb::atomic FooCount; - -//! 
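The "negative size" test above relies on behaviour that survives in today's tbb::concurrent_bounded_queue: size() is the number of pushes minus the number of pops, so it goes negative while blocking pop() calls are pending. A minimal demonstration, assuming the current API:

```cpp
#include <tbb/concurrent_queue.h>
#include <cstdio>
#include <thread>

// One consumer blocks in pop() on an empty queue; its pending pop shows up
// as a size of -1 until the producer pushes an item.
int main() {
    tbb::concurrent_bounded_queue<int> queue;

    std::thread consumer([&] {
        int value;
        queue.pop(value);     // blocks until the producer pushes
    });

    // Spin until the pending pop is visible as a negative size.
    while (queue.size() > -1) std::this_thread::yield();
    std::printf("size with one blocked pop: %ld\n", long(queue.size()));

    queue.push(42);           // satisfies the blocked pop
    consumer.join();
    std::printf("final size: %ld\n", long(queue.size()));
}
```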
Problem size -const size_t N = 500000; - -struct Foo { - int my_bar; -public: - enum State { - DefaultInitialized=0x1234, - CopyInitialized=0x89ab, - Destroyed=0x5678 - } state; - int& bar() { - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - return my_bar; - } - int bar() const { - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - return my_bar; - } - static const int initial_value_of_bar = 42; - Foo() { - state = DefaultInitialized; - ++FooCount; - my_bar = initial_value_of_bar; - } - Foo( const Foo& foo ) { - state = CopyInitialized; - ++FooCount; - my_bar = foo.my_bar; - } - ~Foo() { - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - state = Destroyed; - my_bar = ~initial_value_of_bar; - --FooCount; - } - bool is_const() const {return true;} - bool is_const() {return false;} -}; - -class FooWithAssign: public Foo { -public: - void operator=( const FooWithAssign& x ) { - ASSERT( x.state==DefaultInitialized||x.state==CopyInitialized, NULL ); - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - my_bar = x.my_bar; - } -}; - -inline void NextSize( int& s ) { - if( s<=32 ) ++s; - else s += s/10; -} - -static void CheckVector( const tbb::concurrent_vector& cv, size_t expected_size, size_t old_size ) { - ASSERT( cv.size()==expected_size, NULL ); - ASSERT( cv.empty()==(expected_size==0), NULL ); - for( int j=0; j vector_t; - for( int old_size=0; old_size<=128; NextSize( old_size ) ) { - for( int new_size=old_size; new_size<=128; NextSize( new_size ) ) { - long count = FooCount; - vector_t v; - ASSERT( count==FooCount, NULL ); - v.grow_by(old_size); - ASSERT( count+old_size==FooCount, NULL ); - for( int j=0; j vector_t; - vector_t v; - v.reserve( old_size ); - ASSERT( v.capacity()>=old_size, NULL ); - v.reserve( new_size ); - ASSERT( v.capacity()>=old_size, NULL ); - ASSERT( v.capacity()>=new_size, NULL ); - for( size_t i=0; i<2*new_size; ++i ) { - ASSERT( size_t(FooCount)==count+i, NULL ); - size_t j = v.grow_by(1); - ASSERT( j==i, NULL ); - } - } - ASSERT( FooCount==count, NULL ); - } - } -} - -struct AssignElement { - typedef tbb::concurrent_vector::range_type::iterator iterator; - iterator base; - void operator()( const tbb::concurrent_vector::range_type& range ) const { - for( iterator i=range.begin(); i!=range.end(); ++i ) { - if( *i!=0 ) - std::printf("ERROR for v[%ld]\n", long(i-base)); - *i = int(i-base); - } - } - AssignElement( iterator base_ ) : base(base_) {} -}; - -struct CheckElement { - typedef tbb::concurrent_vector::const_range_type::iterator iterator; - iterator base; - void operator()( const tbb::concurrent_vector::const_range_type& range ) const { - for( iterator i=range.begin(); i!=range.end(); ++i ) - if( *i != int(i-base) ) - std::printf("ERROR for v[%ld]\n", long(i-base)); - } - CheckElement( iterator base_ ) : base(base_) {} -}; - -#include "tbb/tick_count.h" -#include "tbb/parallel_for.h" -#include "../test/harness.h" - -//! 
Test parallel access by iterators -void TestParallelFor( int nthread ) { - typedef tbb::concurrent_vector vector_t; - vector_t v; - v.grow_to_at_least(N); - tbb::tick_count t0 = tbb::tick_count::now(); - if( Verbose ) - std::printf("Calling parallel_for.h with %ld threads\n",long(nthread)); - tbb::parallel_for( v.range(10000), AssignElement(v.begin()) ); - tbb::tick_count t1 = tbb::tick_count::now(); - const vector_t& u = v; - tbb::parallel_for( u.range(10000), CheckElement(u.begin()) ); - tbb::tick_count t2 = tbb::tick_count::now(); - if( Verbose ) - std::printf("Time for parallel_for.h: assign time = %8.5f, check time = %8.5f\n", - (t1-t0).seconds(),(t2-t1).seconds()); - for( long i=0; size_t(i) -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j); - ASSERT( i==j, NULL ); - ASSERT( !(i!=j), NULL ); - Iterator1 k; - k = j; - ASSERT( k==j, NULL ); - ASSERT( !(k!=j), NULL ); -} - -template -void TestIteratorTraits() { - AssertSameType( static_cast(0), static_cast(0) ); - AssertSameType( static_cast(0), static_cast(0) ); - AssertSameType( static_cast(0), static_cast(0) ); - AssertSameType( static_cast(0), static_cast(0) ); - T x; - typename Iterator::reference xr = x; - typename Iterator::pointer xp = &x; - ASSERT( &xr==xp, NULL ); -} - -template -void CheckConstIterator( const Vector& u, int i, const Iterator& cp ) { - typename Vector::const_reference pref = *cp; - if( pref.bar()!=i ) - std::printf("ERROR for u[%ld] using const_iterator\n", long(i)); - typename Vector::difference_type delta = cp-u.begin(); - ASSERT( delta==i, NULL ); - if( u[i].bar()!=i ) - std::printf("ERROR for u[%ld] using subscripting\n", long(i)); - ASSERT( u.begin()[i].bar()==i, NULL ); -} - -template -void CheckIteratorComparison( V& u ) { - Iterator1 i = u.begin(); - for( int i_count=0; i_count<100; ++i_count ) { - Iterator2 j = u.begin(); - for( int j_count=0; j_count<100; ++j_count ) { - ASSERT( (i==j)==(i_count==j_count), NULL ); - ASSERT( (i!=j)==(i_count!=j_count), NULL ); - ASSERT( (i-j)==(i_count-j_count), NULL ); - ASSERT( (ij)==(i_count>j_count), NULL ); - ASSERT( (i<=j)==(i_count<=j_count), NULL ); - ASSERT( (i>=j)==(i_count>=j_count), NULL ); - ++j; - } - ++i; - } -} - -//! Test sequential iterators for vector type V. -/** Also does timing. 
*/ -template -void TestSequentialFor() { - V v; - v.grow_by(N); - - // Check iterator - tbb::tick_count t0 = tbb::tick_count::now(); - typename V::iterator p = v.begin(); - ASSERT( !(*p).is_const(), NULL ); - ASSERT( !p->is_const(), NULL ); - for( int i=0; size_t(i)is_const(), NULL ); - for( int i=0; size_t(i)0; ) { - --i; - --cp; - if( i>0 ) { - typename V::const_iterator cp_old = cp--; - int here = (*cp_old).bar(); - ASSERT( here==u[i].bar(), NULL ); - typename V::const_iterator cp_new = cp++; - int prev = (*cp_new).bar(); - ASSERT( prev==u[i-1].bar(), NULL ); - } - CheckConstIterator(u,i,cp); - } - - // Now go forwards and backwards - cp = u.begin(); - ptrdiff_t k = 0; - for( size_t i=0; i(v); - CheckIteratorComparison(v); - CheckIteratorComparison(v); - CheckIteratorComparison(v); - - TestIteratorAssignment( u.begin() ); - TestIteratorAssignment( v.begin() ); - TestIteratorAssignment( v.begin() ); - - // Check reverse_iterator - typename V::reverse_iterator rp = v.rbegin(); - for( size_t i=v.size(); i>0; --i, ++rp ) { - typename V::reference pref = *rp; - ASSERT( size_t(pref.bar())==i-1, NULL ); - ASSERT( rp!=v.rend(), NULL ); - } - ASSERT( rp==v.rend(), NULL ); - - // Check const_reverse_iterator - typename V::const_reverse_iterator crp = u.rbegin(); - for( size_t i=v.size(); i>0; --i, ++crp ) { - typename V::const_reference cpref = *crp; - ASSERT( size_t(cpref.bar())==i-1, NULL ); - ASSERT( crp!=u.rend(), NULL ); - } - ASSERT( crp==u.rend(), NULL ); - - TestIteratorAssignment( u.rbegin() ); - TestIteratorAssignment( v.rbegin() ); -} - -static const size_t Modulus = 7; - -typedef tbb::concurrent_vector MyVector; - -class GrowToAtLeast { - MyVector& my_vector; -public: - void operator()( const tbb::blocked_range& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - size_t n = my_vector.size(); - size_t k = n==0 ? 0 : i % (2*n+1); - my_vector.grow_to_at_least(k+1); - ASSERT( my_vector.size()>=k+1, NULL ); - } - } - GrowToAtLeast( MyVector& vector ) : my_vector(vector) {} -}; - -void TestConcurrentGrowToAtLeast() { - MyVector v; - for( size_t s=1; s<1000; s*=10 ) { - tbb::parallel_for( tbb::blocked_range(0,1000000,100), GrowToAtLeast(v) ); - } -} - -//! Test concurrent invocations of method concurrent_vector::grow_by -class GrowBy { - MyVector& my_vector; -public: - void operator()( const tbb::blocked_range& range ) const { - for( int i=range.begin(); i!=range.end(); ++i ) { - if( i%3 ) { - Foo& element = my_vector[my_vector.grow_by(1)]; - element.bar() = i; - } else { - Foo f; - f.bar() = i; - size_t k = my_vector.push_back( f ); - ASSERT( my_vector[k].bar()==i, NULL ); - } - } - } - GrowBy( MyVector& vector ) : my_vector(vector) {} -}; - -//! 
Test concurrent invocations of method concurrent_vector::grow_by -void TestConcurrentGrowBy( int nthread ) { - int m = 100000; - MyVector v; - tbb::parallel_for( tbb::blocked_range(0,m,1000), GrowBy(v) ); - ASSERT( v.size()==size_t(m), NULL ); - - // Verify that v is a permutation of 0..m - int inversions = 0; - bool* found = new bool[m]; - memset( found, 0, m ); - for( int i=0; i0 ) - inversions += v[i].bar()1 || v[i].bar()==i, "sequential execution is wrong" ); - } - delete[] found; - if( nthread>1 && inversions vector_t; - for( int dst_size=1; dst_size<=128; NextSize( dst_size ) ) { - for( int src_size=2; src_size<=128; NextSize( src_size ) ) { - vector_t u; - u.grow_to_at_least(src_size); - for( int i=0; i - -typedef unsigned long Number; - -static tbb::concurrent_vector Primes; - -class FindPrimes { - bool is_prime( Number val ) const { - int limit, factor = 3; - if( val<5u ) - return val==2; - else { - limit = long(sqrtf(float(val))+0.5f); - while( factor<=limit && val % factor ) - ++factor; - return factor>limit; - } - } -public: - void operator()( const tbb::blocked_range& r ) const { - for( Number i=r.begin(); i!=r.end(); ++i ) { - if( i%2 && is_prime(i) ) { - Primes[Primes.grow_by(1)] = i; - } - } - } -}; - -static double TimeFindPrimes( int nthread ) { - Primes.clear(); - tbb::task_scheduler_init init(nthread); - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for( tbb::blocked_range(0,1000000,500), FindPrimes() ); - tbb::tick_count t1 = tbb::tick_count::now(); - return (t1-t0).seconds(); -} - -static void TestFindPrimes() { - // Time fully subscribed run. - double t2 = TimeFindPrimes( tbb::task_scheduler_init::automatic ); - - // Time parallel run that is very likely oversubscribed. - double t128 = TimeFindPrimes(128); - - if( Verbose ) - std::printf("TestFindPrimes: t2==%g t128=%g\n", t2, t128 ); - - // We allow the 128-thread run a little extra time to allow for thread overhead. - // Theoretically, following test will fail on machine with >128 processors. - // But that situation is not going to come up in the near future, - // and the generalization to fix the issue is not worth the trouble. - if( t128>1.10*t2 ) { - std::printf("Warning: grow_by is pathetically slow: t2==%g t128=%g\n", t2, t128); - } -} - -//------------------------------------------------------------------------ -// Test compatibility with STL sort. -//------------------------------------------------------------------------ - -#include - -void TestSort() { - for( int n=1; n<100; n*=3 ) { - tbb::concurrent_vector array; - array.grow_by( n ); - for( int i=0; i::iterator,Foo>(); - TestIteratorTraits::const_iterator,const Foo>(); - TestSequentialFor > (); - TestResizeAndCopy(); - TestAssign(); - TestCapacity(); - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - tbb::task_scheduler_init init( nthread ); - TestParallelFor( nthread ); - TestConcurrentGrowToAtLeast(); - TestConcurrentGrowBy( nthread ); - } - TestFindPrimes(); - TestSort(); - return Harness::Done; -} diff --git a/src/tbb/src/old/test_mutex_v2.cpp b/src/tbb/src/old/test_mutex_v2.cpp deleted file mode 100644 index 76174913f..000000000 --- a/src/tbb/src/old/test_mutex_v2.cpp +++ /dev/null @@ -1,239 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
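FindPrimes above appends with Primes[Primes.grow_by(1)] = i; in this v2 interface grow_by returned the old size as an index, whereas the current concurrent_vector returns an iterator and push_back is the idiomatic concurrent append. A sketch of the same pattern against the current API:

```cpp
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#include <tbb/concurrent_vector.h>
#include <cstdio>

// Collect primes from a parallel loop into a concurrent_vector; push_back
// is safe to call from many threads at once.
bool is_prime(unsigned long v) {
    if (v < 2) return false;
    for (unsigned long f = 2; f * f <= v; ++f)
        if (v % f == 0) return false;
    return true;
}

int main() {
    tbb::concurrent_vector<unsigned long> primes;
    tbb::parallel_for(tbb::blocked_range<unsigned long>(0, 100000, 500),
        [&](const tbb::blocked_range<unsigned long>& r) {
            for (unsigned long i = r.begin(); i != r.end(); ++i)
                if (is_prime(i))
                    primes.push_back(i);   // thread-safe append
        });
    std::printf("found %zu primes\n", primes.size());
}
```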
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -//------------------------------------------------------------------------ -// Test TBB mutexes when used with parallel_for.h -// -// Usage: test_Mutex.exe [-v] nthread -// -// The -v option causes timing information to be printed. -// -// Compile with _OPENMP and -openmp -//------------------------------------------------------------------------ -#include "../test/harness_defs.h" -#include "tbb/atomic.h" -#include "tbb/blocked_range.h" -#include "tbb/parallel_for.h" -#include "tbb/tick_count.h" -#include "../test/harness.h" -#include "spin_rw_mutex_v2.h" -#include -#include - -// This test deliberately avoids a "using tbb" statement, -// so that the error of putting types in the wrong namespace will be caught. - -template -struct Counter { - typedef M mutex_type; - M mutex; - volatile long value; -}; - -//! Function object for use with parallel_for.h. -template -struct AddOne: NoAssign { - C& counter; - /** Increments counter once for each iteration in the iteration space. */ - void operator()( tbb::blocked_range& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - if( i&1 ) { - // Try implicit acquire and explicit release - typename C::mutex_type::scoped_lock lock(counter.mutex); - counter.value = counter.value+1; - lock.release(); - } else { - // Try explicit acquire and implicit release - typename C::mutex_type::scoped_lock lock; - lock.acquire(counter.mutex); - counter.value = counter.value+1; - } - } - } - AddOne( C& counter_ ) : counter(counter_) {} -}; - -//! Generic test of a TBB mutex type M. -/** Does not test features specific to reader-writer locks. */ -template -void Test( const char * name ) { - if( Verbose ) { - printf("%s time = ",name); - fflush(stdout); - } - Counter counter; - counter.value = 0; - const int n = 100000; - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range(0,n,n/10),AddOne >(counter)); - tbb::tick_count t1 = tbb::tick_count::now(); - if( Verbose ) - printf("%g usec\n",(t1-t0).seconds()); - if( counter.value!=n ) - printf("ERROR for %s: counter.value=%ld\n",name,counter.value); -} - -template -struct Invariant { - typedef M mutex_type; - M mutex; - const char* mutex_name; - volatile long value[N]; - Invariant( const char* mutex_name_ ) : - mutex_name(mutex_name_) - { - for( size_t k=0; k -struct TwiddleInvariant: NoAssign { - I& invariant; - TwiddleInvariant( I& invariant_ ) : invariant(invariant_) {} - - /** Increments counter once for each iteration in the iteration space. 
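AddOne in the deleted mutex test deliberately alternates two scoped_lock styles so both acquisition paths get covered. The same two idioms with the current tbb::spin_mutex (a sketch; the counter and function names are mine):

```cpp
#include <tbb/spin_mutex.h>

// Both functions leave the mutex released; alternating them exercises the
// constructor/destructor path and the explicit acquire/release path.
tbb::spin_mutex m;
long counter = 0;

void increment_implicit_acquire() {
    tbb::spin_mutex::scoped_lock lock(m); // acquired by the constructor
    ++counter;
    lock.release();                       // released explicitly, early
}

void increment_explicit_acquire() {
    tbb::spin_mutex::scoped_lock lock;    // constructed without a mutex
    lock.acquire(m);                      // acquired explicitly
    ++counter;
}                                         // released by the destructor

int main() {
    increment_implicit_acquire();
    increment_explicit_acquire();
    return counter == 2 ? 0 : 1;
}
```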
*/ - void operator()( tbb::blocked_range& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - //! Every 8th access is a write access - const bool write = (i%8)==7; - bool okay = true; - bool lock_kept = true; - if( (i/8)&1 ) { - // Try implicit acquire and explicit release - typename I::mutex_type::scoped_lock lock(invariant.mutex,write); - execute_aux(lock, i, write, okay, lock_kept); - lock.release(); - } else { - // Try explicit acquire and implicit release - typename I::mutex_type::scoped_lock lock; - lock.acquire(invariant.mutex,write); - execute_aux(lock, i, write, okay, lock_kept); - } - if( !okay ) { - printf( "ERROR for %s at %ld: %s %s %s %s\n",invariant.mutex_name, long(i), - write ? "write," : "read,", - write ? (i%16==7?"downgrade,":"") : (i%8==3?"upgrade,":""), - lock_kept ? "lock kept," : "lock not kept,", // TODO: only if downgrade/upgrade - (i/8)&1 ? "impl/expl" : "expl/impl" ); - } - } - } -private: - void execute_aux(typename I::mutex_type::scoped_lock & lock, const size_t i, const bool write, bool & okay, bool & lock_kept) const { - if( write ) { - long my_value = invariant.value[0]; - invariant.update(); - if( i%16==7 ) { - lock_kept = lock.downgrade_to_reader(); - if( !lock_kept ) - my_value = invariant.value[0] - 1; - okay = invariant.value_is(my_value+1); - } - } else { - okay = invariant.is_okay(); - if( i%8==3 ) { - long my_value = invariant.value[0]; - lock_kept = lock.upgrade_to_writer(); - if( !lock_kept ) - my_value = invariant.value[0]; - invariant.update(); - okay = invariant.value_is(my_value+1); - } - } - } -}; - -/** This test is generic so that we can test any other kinds of ReaderWriter locks we write later. */ -template -void TestReaderWriterLock( const char * mutex_name ) { - if( Verbose ) { - printf("%s readers & writers time = ",mutex_name); - fflush(stdout); - } - Invariant invariant(mutex_name); - const size_t n = 500000; - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range(0,n,n/100),TwiddleInvariant >(invariant)); - tbb::tick_count t1 = tbb::tick_count::now(); - // There is either a writer or a reader upgraded to a writer for each 4th iteration - long expected_value = n/4; - if( !invariant.value_is(expected_value) ) - printf("ERROR for %s: final invariant value is wrong\n",mutex_name); - if( Verbose ) - printf("%g usec\n", (t1-t0).seconds()); -} - -/** Test try_acquire functionality of a non-reenterable mutex */ -template -void TestTryAcquire_OneThread( const char * mutex_name ) { - M tested_mutex; - typename M::scoped_lock lock1; - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - printf("ERROR for %s: try_acquire failed though it should not\n", mutex_name); - { - typename M::scoped_lock lock2(tested_mutex); - if( lock1.try_acquire(tested_mutex) ) - printf("ERROR for %s: try_acquire succeeded though it should not\n", mutex_name); - } - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - printf("ERROR for %s: try_acquire failed though it should not\n", mutex_name); -} - -#include "tbb/task_scheduler_init.h" - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - if( Verbose ) - printf( "testing with %d workers\n", static_cast(p) ); - const int n = 3; - // Run each test several times. 
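TestTryAcquire_OneThread below checks the non-blocking contract: try_acquire succeeds on a free mutex and fails, rather than spinning, on a held one. Restated against the current tbb::spin_rw_mutex (a sketch assuming the current API):

```cpp
#include <tbb/spin_rw_mutex.h>
#include <cassert>

// try_acquire: succeeds on a free mutex, fails while a writer holds it,
// and succeeds again once the writer releases.
int main() {
    tbb::spin_rw_mutex m;

    tbb::spin_rw_mutex::scoped_lock first;
    bool ok = first.try_acquire(m, /*write =*/ true);
    assert(ok);                        // free mutex: must succeed
    (void)ok;

    tbb::spin_rw_mutex::scoped_lock second;
    assert(!second.try_acquire(m));    // writer held: must fail

    first.release();
    assert(second.try_acquire(m));     // free again: succeeds
}
```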
- for( int i=0; i( "Spin RW Mutex" ); - TestTryAcquire_OneThread("Spin RW Mutex"); // only tests try_acquire for writers - TestReaderWriterLock( "Spin RW Mutex" ); - } - if( Verbose ) - printf( "calling destructor for task_scheduler_init\n" ); - } - return Harness::Done; -} diff --git a/src/tbb/src/old/test_task_scheduler_observer_v3.cpp b/src/tbb/src/old/test_task_scheduler_observer_v3.cpp deleted file mode 100644 index eaaa4524d..000000000 --- a/src/tbb/src/old/test_task_scheduler_observer_v3.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -//TODO: when removing TBB_PREVIEW_LOCAL_OBSERVER, change the header or defines here -#include "tbb/task_scheduler_observer.h" - -typedef uintptr_t FlagType; -const int MaxFlagIndex = sizeof(FlagType)*8-1; - -class MyObserver: public tbb::task_scheduler_observer { - FlagType flags; - /*override*/ void on_scheduler_entry( bool is_worker ); - /*override*/ void on_scheduler_exit( bool is_worker ); -public: - MyObserver( FlagType flags_ ) : flags(flags_) { - observe(true); - } -}; - -#include "harness_assert.h" -#include "tbb/atomic.h" - -tbb::atomic EntryCount; -tbb::atomic ExitCount; - -struct State { - FlagType MyFlags; - bool IsMaster; - State() : MyFlags(), IsMaster() {} -}; - -#include "../tbb/tls.h" -tbb::internal::tls LocalState; - -void MyObserver::on_scheduler_entry( bool is_worker ) { - State& state = *LocalState; - ASSERT( is_worker==!state.IsMaster, NULL ); - ++EntryCount; - state.MyFlags |= flags; -} - -void MyObserver::on_scheduler_exit( bool is_worker ) { - State& state = *LocalState; - ASSERT( is_worker==!state.IsMaster, NULL ); - ++ExitCount; - state.MyFlags &= ~flags; -} - -#include "tbb/task.h" - -class FibTask: public tbb::task { - const int n; - FlagType flags; -public: - FibTask( int n_, FlagType flags_ ) : n(n_), flags(flags_) {} - /*override*/ tbb::task* execute() { - ASSERT( !(~LocalState->MyFlags & flags), NULL ); - if( n>=2 ) { - set_ref_count(3); - spawn(*new( allocate_child() ) FibTask(n-1,flags)); - spawn_and_wait_for_all(*new( allocate_child() ) FibTask(n-2,flags)); - } - return NULL; - } -}; - -void DoFib( FlagType flags ) { - tbb::task* t = new( tbb::task::allocate_root() ) FibTask(10,flags); - tbb::task::spawn_root_and_wait(*t); -} - -#include "tbb/task_scheduler_init.h" -#include "harness.h" - -class DoTest { - int nthread; -public: - DoTest( int n ) : nthread(n) {} - void operator()( int i ) const { - LocalState->IsMaster = true; - if( i==0 ) { - tbb::task_scheduler_init init(nthread); - DoFib(0); - } else { - FlagType f = i<=MaxFlagIndex? 1<0, "on_scheduler_entry not exercised" ); - ASSERT( ExitCount>0, "on_scheduler_exit not exercised" ); - return Harness::Done; -} diff --git a/src/tbb/src/perf/coarse_grained_raii_lru_cache.h b/src/tbb/src/perf/coarse_grained_raii_lru_cache.h deleted file mode 100644 index 4208d9aa5..000000000 --- a/src/tbb/src/perf/coarse_grained_raii_lru_cache.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
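The observer test above hooks thread entry and exit through task_scheduler_observer, which still exists in oneTBB with essentially this shape. A minimal sketch against the current interface (the counting is illustrative; exact callback timing is scheduler-dependent):

```cpp
#include <tbb/task_scheduler_observer.h>
#include <tbb/parallel_for.h>
#include <atomic>
#include <cstdio>

// Register callbacks with observe(true); they fire as threads join and
// leave the arena, as in the deleted MyObserver.
class counting_observer : public tbb::task_scheduler_observer {
    std::atomic<int> entries_{0}, exits_{0};
public:
    counting_observer() { observe(true); }
    ~counting_observer() override { observe(false); } // stop observing first
    void on_scheduler_entry(bool /*is_worker*/) override { ++entries_; }
    void on_scheduler_exit(bool /*is_worker*/) override { ++exits_; }
    int entries() const { return entries_.load(); }
};

int main() {
    counting_observer obs;
    tbb::parallel_for(0, 1000, [](int) { /* some parallel work */ });
    std::printf("scheduler entries observed: %d\n", obs.entries());
}
```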
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef coarse_grained_raii_lru_cache_H -#define coarse_grained_raii_lru_cache_H - -#include -#include - -#include "tbb/spin_mutex.h" -#include "tbb/tbb_stddef.h" -template -class coarse_grained_raii_lru_cache : tbb::internal::no_assign{ - typedef value_functor_type value_function_type; - - typedef std::size_t ref_counter_type; - struct map_value_type; - typedef std::map map_storage_type; - typedef std::list lru_list_type; - struct map_value_type { - value_type my_value; - ref_counter_type my_ref_counter; - typename lru_list_type::iterator my_lru_list_iterator; - bool my_is_ready; - - map_value_type (value_type const& a_value, ref_counter_type a_ref_counter, typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready) - : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator) - ,my_is_ready(a_is_ready) - {} - }; - - class handle_object; -public: - typedef handle_object handle; - - coarse_grained_raii_lru_cache(value_function_type f, std::size_t number_of_lru_history_items): my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items){} - handle_object operator[](key_type k){ - tbb::spin_mutex::scoped_lock lock(my_mutex); - bool is_new_value_needed = false; - typename map_storage_type::iterator it = my_map_storage.find(k); - if (it == my_map_storage.end()){ - it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),false))); - is_new_value_needed = true; - }else { - typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator; - if (list_it!=my_lru_list.end()) { - my_lru_list.erase(list_it); - it->second.my_lru_list_iterator= my_lru_list.end(); - } - } - typename map_storage_type::reference value_ref = *it; - //increase ref count - ++(value_ref.second.my_ref_counter); - if (is_new_value_needed){ - lock.release(); - value_ref.second.my_value = my_value_function(k); - __TBB_store_with_release(value_ref.second.my_is_ready, true); - - }else{ - if (!value_ref.second.my_is_ready){ - lock.release(); - tbb::internal::spin_wait_while_eq(value_ref.second.my_is_ready,false); - } - } - return handle_object(*this,(value_ref)); - } -private: - void signal_end_of_usage(typename map_storage_type::reference value_ref){ - tbb::spin_mutex::scoped_lock lock(my_mutex); - typename map_storage_type::iterator it = my_map_storage.find(value_ref.first); - __TBB_ASSERT(it!=my_map_storage.end(),"cache should not return past-end iterators to outer world"); - __TBB_ASSERT(&(*it) == &value_ref,"dangling reference has been returned to outside world? data race ?"); - __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it), - "object in use should not be in list of unused objects "); - if (! 
--(it->second.my_ref_counter)){ //decrease ref count, and check if it was the last reference - if (my_lru_list.size()>=my_number_of_lru_history_items){ - size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items; - for (size_t i=0; isecond.my_lru_list_iterator = my_lru_list.begin(); - } - } -private: - value_function_type my_value_function; - std::size_t const my_number_of_lru_history_items; - map_storage_type my_map_storage; - lru_list_type my_lru_list; - tbb::spin_mutex my_mutex; -private: - struct handle_move_t:tbb::internal::no_assign{ - coarse_grained_raii_lru_cache & my_cache_ref; - typename map_storage_type::reference my_value_ref; - handle_move_t(coarse_grained_raii_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_value_ref(value_ref) {}; - }; - class handle_object { - coarse_grained_raii_lru_cache * my_cache_pointer; - typename map_storage_type::reference my_value_ref; - public: - handle_object(coarse_grained_raii_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_pointer(&cache_ref), my_value_ref(value_ref) {} - handle_object(handle_move_t m):my_cache_pointer(&m.my_cache_ref), my_value_ref(m.my_value_ref){} - operator handle_move_t(){ return move(*this);} - value_type& value(){return my_value_ref.second.my_value;} - ~handle_object(){ - if (my_cache_pointer){ - my_cache_pointer->signal_end_of_usage(my_value_ref); - } - } - private: - friend handle_move_t move(handle_object& h){ - return handle_object::move(h); - } - static handle_move_t move(handle_object& h){ - __TBB_ASSERT(h.my_cache_pointer,"move from the same object twice ?"); - coarse_grained_raii_lru_cache * cache_pointer = NULL; - std::swap(cache_pointer,h.my_cache_pointer); - return handle_move_t(*cache_pointer,h.my_value_ref); - } - private: - void operator=(handle_object&); - handle_object(handle_object &); - }; -}; -#endif //coarse_grained_raii_lru_cache_H diff --git a/src/tbb/src/perf/cpq_pdes.cpp b/src/tbb/src/perf/cpq_pdes.cpp deleted file mode 100644 index 251705059..000000000 --- a/src/tbb/src/perf/cpq_pdes.cpp +++ /dev/null @@ -1,231 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
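handle_object's handle_move_t dance above is the pre-C++11 move-emulation idiom, the same trick std::auto_ptr used via auto_ptr_ref: a temporary cannot bind to a non-const reference, so ownership travels through a small proxy type that can be passed by value. A stripped-down sketch of the idiom with invented names; C++11 and later would express this with a && move constructor:

```cpp
#include <cstdio>

struct resource_move;                  // proxy carrying the transferred pointer

class resource {
    int* payload_;
public:
    explicit resource(int v) : payload_(new int(v)) {}
    ~resource() { delete payload_; }

    resource(resource_move m);         // C++03-style "move constructor"
    operator resource_move();          // relinquish ownership into the proxy

    int value() const { return *payload_; }
private:
    resource(resource&);               // plain copying is disabled, as in handle_object
    void operator=(resource&);
};

struct resource_move { int* payload; };

resource::resource(resource_move m) : payload_(m.payload) {}

resource::operator resource_move() {
    resource_move m = { payload_ };
    payload_ = 0;                      // the source no longer owns the payload
    return m;
}

// Counterpart of the deleted file's move(handle_object&) friend.
resource_move move_from(resource& r) { return r; }

int main() {
    resource a(42);
    resource b(move_from(a));          // ownership moves; a now deletes nothing
    std::printf("b holds %d\n", b.value());
}
```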
-*/
-
-#include
-#include
-#include
-#include "tbb/tbb_stddef.h"
-#include "tbb/spin_mutex.h"
-#include "tbb/task_scheduler_init.h"
-#include "tbb/parallel_for.h"
-#include "tbb/tick_count.h"
-#include "tbb/blocked_range.h"
-#include "../test/harness.h"
-#include "tbb/concurrent_priority_queue.h"
-
-#pragma warning(disable: 4996)
-
-#define IMPL_STL 0
-#define IMPL_CPQ 1
-
-using namespace tbb;
-
-//const int contention = 75; // degree contention. 100 = 0 us busy_wait, 50 = 50*contention_unit us
-const double contention_unit = 0.025; // in microseconds (us)
-const double throughput_window = 30; // in seconds
-const int num_initial_events = 10000; // number of initial events in the queue
-const int min_elapse = 20; // min contention_units to elapse between event spawns
-const int max_elapse = 40; // max contention_units to elapse between event spawns
-const int min_spawn = 0; // min number of events to spawn
-const int max_spawn = 2; // max number of events to spawn
-
-tbb::atomic<unsigned int> operation_count;
-tbb::tick_count start;
-bool done;
-
-class event {
-public:
-    int timestamp;
-    int elapse;
-    int spawn;
-};
-
-class timestamp_compare {
-public:
-    bool operator()(event e1, event e2) {
-        return e2.timestamp<e1.timestamp;
-    }
-};
-
-spin_mutex *my_mutex;
-std::priority_queue<event, std::vector<event>, timestamp_compare > *stl_cpq;
-concurrent_priority_queue<event, timestamp_compare> *lfc_pq;
-
-unsigned int one_us_iters = 429; // default value
-
-// if user wants to calibrate to microseconds on particular machine, call this at beginning of program
-// sets one_us_iters to number of iters to busy_wait for approx. 1 us
-void calibrate_busy_wait() {
-    tbb::tick_count t0, t1;
-
-    t0 = tbb::tick_count::now();
-    for (volatile unsigned int i=0; i<1000000; ++i) continue;
-    t1 = tbb::tick_count::now();
-
-    one_us_iters = (1000000.0/(t1-t0).seconds())*0.000001;
-    printf("one_us_iters: %d\n", one_us_iters);
-}
-
-void busy_wait(double us)
-{
-    unsigned int iter = us*one_us_iters;
-    for (volatile unsigned int i=0; i<iter; ++i) continue;
-}
-
-void do_push(event elem, int nThr, int impl) {
-    if (impl == IMPL_STL) {
-        if (nThr == 1) {
-            stl_cpq->push(elem);
-        }
-        else {
-            tbb::spin_mutex::scoped_lock myLock(*my_mutex);
-            stl_cpq->push(elem);
-        }
-    }
-    else {
-        lfc_pq->push(elem);
-    }
-}
-
-bool do_pop(event& elem, int nThr, int impl) {
-    if (impl == IMPL_STL) {
-        if (nThr == 1) {
-            if (!stl_cpq->empty()) {
-                elem = stl_cpq->top();
-                stl_cpq->pop();
-                return true;
-            }
-        }
-        else {
-            tbb::spin_mutex::scoped_lock myLock(*my_mutex);
-            if (!stl_cpq->empty()) {
-                elem = stl_cpq->top();
-                stl_cpq->pop();
-                return true;
-            }
-        }
-    }
-    else {
-        if (lfc_pq->try_pop(elem)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-struct TestPDESloadBody : NoAssign {
-    int nThread;
-    int implementation;
-
-    TestPDESloadBody(int nThread_, int implementation_) :
-        nThread(nThread_), implementation(implementation_) {}
-
-    void operator()(const int threadID) const {
-        if (threadID == nThread) {
-            sleep(throughput_window);
-            done = true;
-        }
-        else {
-            event e, tmp;
-            unsigned int num_operations = 0;
-            for (;;) {
-                // pop an event
-                if (do_pop(e, nThread, implementation)) {
-                    num_operations++;
-                    // do the event
-                    busy_wait(e.elapse*contention_unit);
-                    while (e.spawn > 0) {
-                        tmp.spawn = ((e.spawn+1-min_spawn) % ((max_spawn-min_spawn)+1))+min_spawn;
-                        tmp.timestamp = e.timestamp + e.elapse;
-                        e.timestamp = tmp.timestamp;
-                        e.elapse = ((e.elapse+1-min_elapse) % ((max_elapse-min_elapse)+1))+min_elapse;
-                        tmp.elapse = e.elapse;
-                        do_push(tmp, nThread, implementation);
-                        num_operations++;
-                        e.spawn--;
-                        busy_wait(e.elapse*contention_unit);
-                        if (done) break;
-                    }
-                }
-                if (done) break;
-            }
-            operation_count += num_operations;
-        }
-    }
-};
-
-void preload_queue(int nThr, int impl) {
-    event an_event;
-    for (int i=0; i, timestamp_compare >;
-    preload_queue(nThreads, IMPL_STL);
-    TestPDESloadBody my_stl_test(nThreads, IMPL_STL);
-    start = tbb::tick_count::now();
-    NativeParallelFor(nThreads+1, my_stl_test);
-    delete stl_cpq;
-
-    REPORT(" %10d", operation_count/throughput_window);
-
-    operation_count = 0;
-    done = false;
-    lfc_pq = new concurrent_priority_queue<event, timestamp_compare>;
-    preload_queue(nThreads, IMPL_CPQ);
-    TestPDESloadBody my_cpq_test(nThreads, IMPL_CPQ);
-    start = tbb::tick_count::now();
-    NativeParallelFor(nThreads+1, my_cpq_test);
-    delete lfc_pq;
-
-    REPORT(" %10d\n", operation_count/throughput_window);
-}
-
-int TestMain() {
-    srand(42);
-    if (MinThread < 1)
-        MinThread = 1;
-    //calibrate_busy_wait();
-    cache_aligned_allocator<spin_mutex> my_mutex_allocator;
-    my_mutex = (spin_mutex *)my_mutex_allocator.allocate(1);
-
-    REPORT("#Thr ");
-    REPORT("STL ");
-#ifdef LINEARIZABLE
-    REPORT("CPQ_L\n");
-#else
-    REPORT("CPQ_N\n");
-#endif
-    for (int p = MinThread; p <= MaxThread; ++p) {
-        TestPDESload(p);
-    }
-
-    return Harness::Done;
-}
diff --git a/src/tbb/src/perf/cpq_throughput_test.cpp b/src/tbb/src/perf/cpq_throughput_test.cpp
deleted file mode 100644
index 2799ff67c..000000000
--- a/src/tbb/src/perf/cpq_throughput_test.cpp
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/ - -#define HARNESS_CUSTOM_MAIN 1 -#define HARNESS_NO_PARSE_COMMAND_LINE 1 - -#include -#include -#include -#include "tbb/tbb_stddef.h" -#include "tbb/spin_mutex.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/concurrent_priority_queue.h" -#include "../test/harness.h" -#include "../examples/common/utility/utility.h" -#if _MSC_VER -#pragma warning(disable: 4996) -#endif - -#define IMPL_SERIAL 0 -#define IMPL_STL 1 -#define IMPL_CPQ 2 - -using namespace tbb; - -// test parameters & defaults -int impl = IMPL_CPQ; // which implementation to test -int contention = 1; // busywork between operations in us -int preload = 0; // # elements to pre-load queue with -double throughput_window = 30.0; // in seconds -int ops_per_iteration = 20; // minimum: 2 (1 push, 1 pop) -const int sample_operations = 1000; // for timing checks - -// global data & types -int pushes_per_iter; -int pops_per_iter; -tbb::atomic operation_count; -tbb::tick_count start; - -// a non-trivial data element to use in the priority queue -const int padding_size = 15; // change to get cache line size for test machine -class padding_type { -public: - int p[padding_size]; - padding_type& operator=(const padding_type& other) { - if (this != &other) { - for (int i=0; i, my_less > *serial_cpq; - -// Coarse-locked priority queue -spin_mutex *my_mutex; -std::priority_queue, my_less > *stl_cpq; - -// TBB concurrent_priority_queue -concurrent_priority_queue *agg_cpq; - -// Busy work and calibration helpers -unsigned int one_us_iters = 345; // default value - -// if user wants to calibrate to microseconds on particular machine, call -// this at beginning of program; sets one_us_iters to number of iters to -// busy_wait for approx. 
1 us -void calibrate_busy_wait() { - tbb::tick_count t0, t1; - - t0 = tbb::tick_count::now(); - for (volatile unsigned int i=0; i<1000000; ++i) continue; - t1 = tbb::tick_count::now(); - - one_us_iters = (unsigned int)((1000000.0/(t1-t0).seconds())*0.000001); - printf("one_us_iters: %d\n", one_us_iters); -} - -void busy_wait(int us) -{ - unsigned int iter = us*one_us_iters; - for (volatile unsigned int i=0; ipush(elem); - } - else if (impl == IMPL_STL) { - tbb::spin_mutex::scoped_lock myLock(*my_mutex); - stl_cpq->push(elem); - } - else if (impl == IMPL_CPQ) { - agg_cpq->push(elem); - } -} - -// Pop from priority queue, depending on implementation -my_data_type do_pop(int nThr, int impl) { - my_data_type elem; - if (impl == IMPL_SERIAL) { - if (!serial_cpq->empty()) { - elem = serial_cpq->top(); - serial_cpq->pop(); - return elem; - } - } - else if (impl == IMPL_STL) { - tbb::spin_mutex::scoped_lock myLock(*my_mutex); - if (!stl_cpq->empty()) { - elem = stl_cpq->top(); - stl_cpq->pop(); - return elem; - } - } - else if (impl == IMPL_CPQ) { - if (agg_cpq->try_pop(elem)) { - return elem; - } - } - return elem; -} - - -struct TestThroughputBody : NoAssign { - int nThread; - int implementation; - - TestThroughputBody(int nThread_, int implementation_) : - nThread(nThread_), implementation(implementation_) {} - - void operator()(const int threadID) const { - tbb::tick_count now; - size_t pos_in = threadID, pos_out = threadID; - my_data_type elem; - while (1) { - for (int i=0; i= arrsz) pos_in = pos_in % arrsz; - } - // do pops - for (int j=0; j= arrsz) pos_out = pos_out % arrsz; - } - } - now = tbb::tick_count::now(); - operation_count += sample_operations; - if ((now-start).seconds() >= throughput_window) break; - } - } -}; - -void TestSerialThroughput() { - tbb::tick_count now; - - serial_cpq = new std::priority_queue, my_less >; - for (int i=0; i, my_less >; - for (int i=0; i; - for (int i=0; i my_mutex_allocator; - my_mutex = (spin_mutex *)my_mutex_allocator.allocate(1); - - if (impl == IMPL_SERIAL) { - TestSerialThroughput(); - } - else { - for( int p=threads.first; p<=threads.last; p = threads.step(p) ) { - TestThroughputCpqOnNThreads(p); - } - } - return Harness::Done; -} diff --git a/src/tbb/src/perf/fibonacci_impl_tbb.cpp b/src/tbb/src/perf/fibonacci_impl_tbb.cpp deleted file mode 100644 index 835fe95e7..000000000 --- a/src/tbb/src/perf/fibonacci_impl_tbb.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include -#include - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" -#include "tbb/tick_count.h" - -extern long CutOff; - -long SerialFib( const long n ) { - if( n<2 ) - return n; - else - return SerialFib(n-1)+SerialFib(n-2); -} - -struct FibContinuation: public tbb::task { - long* const sum; - long x, y; - FibContinuation( long* sum_ ) : sum(sum_) {} - tbb::task* execute() { - *sum = x+y; - return NULL; - } -}; - -struct FibTask: public tbb::task { - long n; - long * sum; - FibTask( const long n_, long * const sum_ ) : - n(n_), sum(sum_) - {} - tbb::task* execute() { - if( n -#include -#include -#include -#include - -#include "tbb/tick_count.h" - -#define HARNESS_CUSTOM_MAIN 1 -#include "../src/test/harness.h" -#include "../src/test/harness_barrier.h" - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" -#include "tbb/atomic.h" - -#if __linux__ || __APPLE__ || __FreeBSD__ || __NetBSD__ - #include -#endif - -__TBB_PERF_API int NumCpus = tbb::task_scheduler_init::default_num_threads(), - NumThreads, - MaxConcurrency; - -namespace Perf { - -SessionSettings theSettings; - -namespace internal { - - typedef std::vector durations_t; - - static uintptr_t NumRuns = 7; - static duration_t RunDuration = 0.01; - - static const int RateFieldLen = 10; - static const int OvhdFieldLen = 12; - - const char* TestNameColumnTitle = "Test name"; - const char* WorkloadNameColumnTitle = "Workload"; - - size_t TitleFieldLen = 0; - size_t WorkloadFieldLen = 0; - - int TotalConfigs = 0; - int MaxTbbMasters = 1; - - //! Defines the mapping between threads and cores in the undersubscription mode - /** When adding new enumerator, insert it before amLast, and do not specify - its value explicitly. **/ - enum AffinitizationMode { - amFirst = 0, - amDense = amFirst, - amSparse, - //! Used to track the number of supported affinitization modes - amLast - }; - - static const int NumAffinitizationModes = amLast - amFirst; - - const char* AffinitizationModeNames[] = { "dense", "sparse" }; - - int NumActiveAffModes = 1; - - //! Settings of a test run configuration - struct RunConfig { - int my_maxConcurrency; - int my_numThreads; // For task scheduler tests this is number of workers + 1 - int my_numMasters; // Used for task scheduler tests only - int my_affinityMode; // Used for task scheduler tests only - int my_workloadID; - - int NumMasters () const { - return theSettings.my_opts & UseTaskScheduler ? 
my_numMasters : my_numThreads; - } - }; - - double StandardDeviation ( double avg, const durations_t& d ) { - double std_dev = 0; - for ( uintptr_t i = 0; i < d.size(); ++i ) { - double dev = fabs(d[i] - avg); - std_dev += dev * dev; - } - std_dev = sqrt(std_dev / d.size()); - return std_dev / avg * 100; - } - - void Statistics ( const durations_t& d, - duration_t& avgTime, double& stdDev, - duration_t& minTime, duration_t& maxTime ) - { - minTime = maxTime = avgTime = d[0]; - for ( size_t i = 1; i < d.size(); ++i ) { - avgTime += d[i]; - if ( minTime > d[i] ) - minTime = d[i]; - else if ( maxTime < d[i] ) - maxTime = d[i]; - } - avgTime = avgTime / d.size(); - stdDev = StandardDeviation( avgTime, d ); - } - - //! Timing data for the series of repeated runs and results of their statistical processing - struct TimingSeries { - //! Statistical timing series - durations_t my_durations; - - //! Average time obtained from my_durations data - duration_t my_avgTime; - - //! Minimal time obtained from my_durations data - duration_t my_minTime; - - //! Minimal time obtained from my_durations data - duration_t my_maxTime; - - //! Standard deviation of my_avgTime value (per cent) - double my_stdDev; - - TimingSeries ( uintptr_t nruns = NumRuns ) - : my_durations(nruns), my_avgTime(0), my_minTime(0), my_maxTime(0) - {} - - void CalculateStatistics () { - Statistics( my_durations, my_avgTime, my_stdDev, my_minTime, my_maxTime ); - } - }; // struct TimingSeries - - //! Settings and timing results for a test run configuration - struct RunResults { - //! Run configuration settings - RunConfig my_config; - - //! Timing results for this run configuration - TimingSeries my_timing; - }; - - typedef std::vector names_t; - typedef std::vector timings_t; - typedef std::vector test_results_t; - - enum TestMethods { - idRunSerial = 0x01, - idOnStart = 0x02, - idOnFinish = 0x04, - idPrePostProcess = idOnStart | idOnFinish - }; - - //! Set of flags identifying methods not overridden by the currently active test - /** Used as a scratch var. **/ - uintptr_t g_absentMethods; - - //! Test object and timing results for all of its configurations - struct TestResults { - //! Pointer to the test object interface - Test* my_test; - - //! Set of flags identifying optional methods overridden by my_test - /** A set of ORed TestMethods flags **/ - uintptr_t my_availableMethods; - - //! Vector of serial times for each workload supported by this test - /** Element index in the vector serves as a zero based workload ID. **/ - timings_t my_serialBaselines; - - //! Common baselines for both parallel and serial variants - /** Element index in the vector serves as a zero based workload ID. **/ - timings_t my_baselines; - - //! Strings identifying workloads to be used in output - names_t my_workloadNames; - - //! 
Vector of timings for all run configurations of my_test - test_results_t my_results; - - const char* my_testName; - - mutable bool my_hasOwnership; - - TestResults ( Test* t, const char* className, bool takeOwnership ) - : my_test(t), my_availableMethods(0), my_testName(className), my_hasOwnership(takeOwnership) - {} - - TestResults ( const TestResults& tr ) - : my_test(tr.my_test) - , my_availableMethods(0) - , my_testName(tr.my_testName) - , my_hasOwnership(tr.my_hasOwnership) - { - tr.my_hasOwnership = false; - } - - ~TestResults () { - for ( size_t i = 0; i < my_workloadNames.size(); ++i ) - delete my_workloadNames[i]; - if ( my_hasOwnership ) - delete my_test; - } - }; // struct TestResults - - typedef std::vector session_t; - - session_t theSession; - - TimingSeries CalibrationTiming; - - const uintptr_t CacheSize = 8*1024*1024; - volatile intptr_t W[CacheSize]; - - struct WiperBody { - void operator()( int ) const { - volatile intptr_t sink = 0; - for ( uintptr_t i = 0; i < CacheSize; ++i ) - sink += W[i]; - } - }; - - void TraceHistogram ( const durations_t& t, const char* histogramFileName ) { - FILE* f = histogramFileName ? fopen(histogramFileName, "wt") : stdout; - uintptr_t n = t.size(); - const uintptr_t num_buckets = 100; - double min_val = *std::min_element(t.begin(), t.end()), - max_val = *std::max_element(t.begin(), t.end()), - bucket_size = (max_val - min_val) / num_buckets; - std::vector hist(num_buckets + 1, 0); - for ( uintptr_t i = 0; i < n; ++i ) - ++hist[uintptr_t((t[i]-min_val)/bucket_size)]; - ASSERT (hist[num_buckets] == 1, ""); - ++hist[num_buckets - 1]; - hist.resize(num_buckets); - fprintf (f, "Histogram: nvals = %u, min = %g, max = %g, nbuckets = %u\n", (unsigned)n, min_val, max_val, (unsigned)num_buckets); - double bucket = min_val; - for ( uintptr_t i = 0; i < num_buckets; ++i, bucket+=bucket_size ) - fprintf (f, "%12g\t%u\n", bucket, (unsigned)hist[i]); - fclose(f); - } - -#if _MSC_VER - typedef DWORD_PTR cpu_set_t; - - class AffinityHelper { - static const unsigned MaxAffinitySetSize = sizeof(cpu_set_t) * 8; - static unsigned AffinitySetSize; - - //! Mapping from a CPU index to a valid affinity cpu_mask - /** The first element is not used. **/ - static cpu_set_t m_affinities[MaxAffinitySetSize + 1]; - - static cpu_set_t m_processMask; - - class Initializer { - public: - Initializer () { - SYSTEM_INFO si; - GetNativeSystemInfo(&si); - ASSERT( si.dwNumberOfProcessors <= MaxAffinitySetSize, "Too many CPUs" ); - AffinitySetSize = min (si.dwNumberOfProcessors, MaxAffinitySetSize); - cpu_set_t systemMask = 0; - GetProcessAffinityMask( GetCurrentProcess(), &m_processMask, &systemMask ); - cpu_set_t cpu_mask = 1; - for ( DWORD i = 0; i < AffinitySetSize; ++i ) { - while ( !(cpu_mask & m_processMask) && cpu_mask ) - cpu_mask <<= 1; - ASSERT( cpu_mask != 0, "Process affinity set is culled?" 
); - m_affinities[i] = cpu_mask; - cpu_mask <<= 1; - } - } - }; // class AffinityHelper::Initializer - - static Initializer m_initializer; - - public: - static cpu_set_t CpuAffinity ( int cpuIndex ) { - return m_affinities[cpuIndex % AffinitySetSize]; - } - - static const cpu_set_t& ProcessMask () { return m_processMask; } - }; // class AffinityHelper - - unsigned AffinityHelper::AffinitySetSize = 0; - cpu_set_t AffinityHelper::m_affinities[AffinityHelper::MaxAffinitySetSize + 1] = {0}; - cpu_set_t AffinityHelper::m_processMask = 0; - AffinityHelper::Initializer AffinityHelper::m_initializer; - - #define CPU_ZERO(cpu_mask) (*cpu_mask = 0) - #define CPU_SET(cpu_idx, cpu_mask) (*cpu_mask |= AffinityHelper::CpuAffinity(cpu_idx)) - #define CPU_CLR(cpu_idx, cpu_mask) (*cpu_mask &= ~AffinityHelper::CpuAffinity(cpu_idx)) - #define CPU_ISSET(cpu_idx, cpu_mask) ((*cpu_mask & AffinityHelper::CpuAffinity(cpu_idx)) != 0) - -#elif __linux__ /* end of _MSC_VER */ - - #include - #include - #include - - pid_t gettid() { return (pid_t)syscall(__NR_gettid); } - - #define GET_MASK(cpu_set) (*(unsigned*)(void*)&cpu_set) - #define RES_STAT(res) (res != 0 ? "failed" : "ok") - - class AffinityHelper { - static cpu_set_t m_processMask; - - class Initializer { - public: - Initializer () { - CPU_ZERO (&m_processMask); - int res = sched_getaffinity( getpid(), sizeof(cpu_set_t), &m_processMask ); - ASSERT ( res == 0, "sched_getaffinity failed" ); - } - }; // class AffinityHelper::Initializer - - static Initializer m_initializer; - - public: - static const cpu_set_t& ProcessMask () { return m_processMask; } - }; // class AffinityHelper - - cpu_set_t AffinityHelper::m_processMask; - AffinityHelper::Initializer AffinityHelper::m_initializer; -#endif /* __linux__ */ - - bool PinTheThread ( int cpu_idx, tbb::atomic& nThreads ) { - #if _MSC_VER || __linux__ - cpu_set_t orig_mask, target_mask; - CPU_ZERO( &target_mask ); - CPU_SET( cpu_idx, &target_mask ); - ASSERT ( CPU_ISSET(cpu_idx, &target_mask), "CPU_SET failed" ); - #endif - #if _MSC_VER - orig_mask = SetThreadAffinityMask( GetCurrentThread(), target_mask ); - if ( !orig_mask ) - return false; - #elif __linux__ - CPU_ZERO( &orig_mask ); - int res = sched_getaffinity( gettid(), sizeof(cpu_set_t), &orig_mask ); - ASSERT ( res == 0, "sched_getaffinity failed" ); - res = sched_setaffinity( gettid(), sizeof(cpu_set_t), &target_mask ); - ASSERT ( res == 0, "sched_setaffinity failed" ); - #endif /* _MSC_VER */ - --nThreads; - while ( nThreads ) - __TBB_Yield(); - #if _MSC_VER - SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST); - #endif - return true; - } - - class AffinitySetterTask : tbb::task { - static bool m_result; - static tbb::atomic m_nThreads; - int m_idx; - - tbb::task* execute () { - //TestAffinityOps(); - m_result = PinTheThread( m_idx, m_nThreads ); - return NULL; - } - - public: - AffinitySetterTask ( int idx ) : m_idx(idx) {} - - friend bool AffinitizeTBB ( int, int /*mode*/ ); - }; - - bool AffinitySetterTask::m_result = true; - tbb::atomic AffinitySetterTask::m_nThreads; - - bool AffinitizeTBB ( int p, int affMode ) { - #if _MSC_VER - SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST); - SetPriorityClass (GetCurrentProcess(), HIGH_PRIORITY_CLASS); - #endif - AffinitySetterTask::m_result = true; - AffinitySetterTask::m_nThreads = p; - tbb::task_list tl; - for ( int i = 0; i < p; ++i ) { - tbb::task &t = *new( tbb::task::allocate_root() ) AffinitySetterTask( affMode == amSparse ? 
i * NumCpus / p : i ); - t.set_affinity( tbb::task::affinity_id(i + 1) ); - tl.push_back( t ); - } - tbb::task::spawn_root_and_wait(tl); - return AffinitySetterTask::m_result; - } - - inline - void Affinitize ( int p, int affMode ) { - if ( !AffinitizeTBB (p, affMode) ) - REPORT("Warning: Failed to set affinity for %d TBB threads\n", p); - } - - class TbbWorkersTrapper { - tbb::atomic my_refcount; - tbb::task *my_root; - tbb::task_group_context my_context; - Harness::SpinBarrier my_barrier; - - friend class TrapperTask; - - class TrapperTask : public tbb::task { - TbbWorkersTrapper& my_owner; - - tbb::task* execute () { - my_owner.my_barrier.wait(); - my_owner.my_root->wait_for_all(); - my_owner.my_barrier.wait(); - return NULL; - } - public: - TrapperTask ( TbbWorkersTrapper& owner ) : my_owner(owner) {} - }; - - public: - TbbWorkersTrapper () - : my_context(tbb::task_group_context::bound, - tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait) - { - my_root = new ( tbb::task::allocate_root(my_context) ) tbb::empty_task; - my_root->set_ref_count(2); - my_barrier.initialize(NumThreads); - for ( int i = 1; i < NumThreads; ++i ) - tbb::task::spawn( *new(tbb::task::allocate_root()) TrapperTask(*this) ); - my_barrier.wait(); // Wait util all workers are ready - } - - ~TbbWorkersTrapper () { - my_root->decrement_ref_count(); - my_barrier.wait(); // Make sure no tasks are referencing us - tbb::task::destroy(*my_root); - } - }; // TbbWorkersTrapper - - -#if __TBB_STATISTICS - static bool StatisticsMode = true; -#else - static bool StatisticsMode = false; -#endif - -//! Suppresses silly warning -inline bool __TBB_bool( bool b ) { return b; } - -#define START_WORKERS(needScheduler, p, a, setWorkersAffinity, trapWorkers) \ - tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); \ - TbbWorkersTrapper *trapper = NULL; \ - if ( theSettings.my_opts & UseTaskScheduler \ - && (needScheduler) && ((setWorkersAffinity) || (trapWorkers)) ) \ - { \ - init.initialize( p ); \ - if ( __TBB_bool(setWorkersAffinity) ) \ - Affinitize( p, a ); \ - if ( __TBB_bool(trapWorkers) ) \ - trapper = new TbbWorkersTrapper; \ - } - -#define STOP_WORKERS() \ - if ( theSettings.my_opts & UseTaskScheduler && init.is_active() ) { \ - if ( trapper ) \ - delete trapper; \ - init.terminate(); \ - /* Give asynchronous deinitialization time to complete */ \ - Harness::Sleep(50); \ - } - - typedef void (Test::*RunMemFnPtr)( Test::ThreadInfo& ); - - TimingSeries *TlsTimings; - Harness::SpinBarrier multipleMastersBarrier; - - class TimingFunctor { - Test* my_test; - RunConfig *my_cfg; - RunMemFnPtr my_fnRun; - size_t my_numRuns; - size_t my_numRepeats; - uintptr_t my_availableMethods; - - duration_t TimeSingleRun ( Test::ThreadInfo& ti ) const { - if ( my_availableMethods & idOnStart ) - my_test->OnStart(ti); - // Warming run - (my_test->*my_fnRun)(ti); - multipleMastersBarrier.wait(); - tbb::tick_count t0 = tbb::tick_count::now(); - (my_test->*my_fnRun)(ti); - duration_t t = (tbb::tick_count::now() - t0).seconds(); - if ( my_availableMethods & idOnFinish ) - my_test->OnFinish(ti); - return t; - } - - public: - TimingFunctor ( Test* test, RunConfig *cfg, RunMemFnPtr fnRun, - size_t numRuns, size_t nRepeats, uintptr_t availableMethods ) - : my_test(test), my_cfg(cfg), my_fnRun(fnRun) - , my_numRuns(numRuns), my_numRepeats(nRepeats), my_availableMethods(availableMethods) - {} - - void operator()( int tid ) const { - Test::ThreadInfo ti = { tid, NULL }; - durations_t &d = 
TlsTimings[tid].my_durations; - bool singleMaster = my_cfg->my_numMasters == 1; - START_WORKERS( (!singleMaster || (singleMaster && StatisticsMode)) && my_fnRun != &Test::RunSerial, - my_cfg->my_numThreads, my_cfg->my_affinityMode, singleMaster, singleMaster ); - for ( uintptr_t k = 0; k < my_numRuns; ++k ) { - if ( my_numRepeats > 1 ) { - d[k] = 0; - if ( my_availableMethods & idPrePostProcess ) { - for ( uintptr_t i = 0; i < my_numRepeats; ++i ) - d[k] += TimeSingleRun(ti); - } - else { - multipleMastersBarrier.wait(); - tbb::tick_count t0 = tbb::tick_count::now(); - for ( uintptr_t i = 0; i < my_numRepeats; ++i ) - (my_test->*my_fnRun)(ti); - d[k] = (tbb::tick_count::now() - t0).seconds(); - } - d[k] /= my_numRepeats; - } - else - d[k] = TimeSingleRun(ti); - } - STOP_WORKERS(); - TlsTimings[tid].CalculateStatistics(); - } - }; // class TimingFunctor - - void DoTiming ( TestResults& tr, RunConfig &cfg, RunMemFnPtr fnRun, size_t nRepeats, TimingSeries& ts ) { - int numThreads = cfg.NumMasters(); - size_t numRuns = ts.my_durations.size() / numThreads; - TimingFunctor body( tr.my_test, &cfg, fnRun, numRuns, nRepeats, tr.my_availableMethods ); - multipleMastersBarrier.initialize(numThreads); - tr.my_test->SetWorkload(cfg.my_workloadID); - if ( numThreads == 1 ) { - TimingSeries *t = TlsTimings; - TlsTimings = &ts; - body(0); - TlsTimings = t; - } - else { - ts.my_durations.resize(numThreads * numRuns); - NativeParallelFor( numThreads, body ); - for ( int i = 0, j = 0; i < numThreads; ++i ) { - durations_t &d = TlsTimings[i].my_durations; - for ( size_t k = 0; k < numRuns; ++k, ++j ) - ts.my_durations[j] = d[k]; - } - ts.CalculateStatistics(); - } - } - - //! Runs the test function, does statistical processing, and, if title is nonzero, prints results. - /** If histogramFileName is a string, the histogram of individual runs is generated and stored - in a file with the given name. If it is NULL then the histogram is printed on the console. - By default no histogram is generated. - The histogram format is: "rate bucket start" "number of tests in this bucket". **/ - void RunTestImpl ( TestResults& tr, RunConfig &cfg, RunMemFnPtr pfnTest, TimingSeries& ts ) { - // nRepeats is a number of repeated calls to the test function made as - // part of the same run. It is determined experimentally by the following - // calibration process so that the total run time was approx. RunDuration. - // This is helpful to increase the measurement precision in case of very - // short tests. - size_t nRepeats = 1; - // A minimal stats is enough when doing calibration - CalibrationTiming.my_durations.resize( (NumRuns < 4 ? NumRuns : 3) * cfg.NumMasters() ); - // There's no need to be too precise when calculating nRepeats. And reasonably - // far extrapolation can speed up the process significantly. 
- for (;;) { - DoTiming( tr, cfg, pfnTest, nRepeats, CalibrationTiming ); - if ( CalibrationTiming.my_avgTime * nRepeats > 1e-4 ) - break; - nRepeats *= 2; - } - nRepeats *= (uintptr_t)ceil( RunDuration / (CalibrationTiming.my_avgTime * nRepeats) ); - - DoTiming(tr, cfg, pfnTest, nRepeats, ts); - - // No histogram for baseline measurements - if ( pfnTest != &Test::RunSerial && pfnTest != &Test::Baseline ) { - const char* histogramName = theSettings.my_histogramName; - if ( histogramName != NoHistogram && tr.my_test->HistogramName() != DefaultHistogram ) - histogramName = tr.my_test->HistogramName(); - if ( histogramName != NoHistogram ) - TraceHistogram( ts.my_durations, histogramName ); - } - } // RunTestImpl - - typedef void (*TestActionFn) ( TestResults&, int mastersRange, int w, int p, int m, int a, int& numTests ); - - int TestResultIndex ( int mastersRange, int w, int p, int m, int a ) { - return ((w * (MaxThread - MinThread + 1) + (p - MinThread)) * mastersRange + m) * NumActiveAffModes + a; - } - - void RunTest ( TestResults& tr, int mastersRange, int w, int p, int m, int a, int& numTests ) { - size_t r = TestResultIndex(mastersRange, w, p, m, a); - ASSERT( r < tr.my_results.size(), NULL ); - RunConfig &rc = tr.my_results[r].my_config; - rc.my_maxConcurrency = MaxConcurrency; - rc.my_numThreads = p; - rc.my_numMasters = m + tr.my_test->MinNumMasters(); - rc.my_affinityMode = a; - rc.my_workloadID = w; - RunTestImpl( tr, rc, &Test::Run, tr.my_results[r].my_timing ); - printf( "Running tests: %04.1f%%\r", ++numTests * 100. / TotalConfigs ); fflush(stdout); - } - - void WalkTests ( TestActionFn fn, int& numTests, bool setAffinity, bool trapWorkers, bool multipleMasters ) { - for ( int p = MinThread; p <= MaxThread; ++p ) { - NumThreads = p; - MaxConcurrency = p < NumCpus ? p : NumCpus; - for ( int a = 0; a < NumActiveAffModes; ++a ) { - START_WORKERS( multipleMasters || !StatisticsMode, p, a, setAffinity, trapWorkers ); - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - Test *t = tr.my_test; - int mastersRange = t->MaxNumMasters() - t->MinNumMasters() + 1; - int numWorkloads = theSettings.my_opts & UseSmallestWorkloadOnly ? 1 : t->NumWorkloads(); - for ( int w = 0; w < numWorkloads; ++w ) { - if ( multipleMasters ) - for ( int m = 1; m < mastersRange; ++m ) - fn( tr, mastersRange, w, p, m, a, numTests ); - else - fn( tr, mastersRange, w, p, 0, a, numTests ); - } - } - STOP_WORKERS(); - } - } - } - - void RunTests () { - int numTests = 0; - WalkTests( &RunTest, numTests, !StatisticsMode, !StatisticsMode, false ); - if ( MaxTbbMasters > 1 ) - WalkTests( &RunTest, numTests, true, false, true ); - } - - void InitTestData ( TestResults& tr, int mastersRange, int w, int p, int m, int a, int& ) { - size_t r = TestResultIndex(mastersRange, w, p, m, a); - ASSERT( r < tr.my_results.size(), NULL ); - tr.my_results[r].my_timing.my_durations.resize( - (theSettings.my_opts & UseTaskScheduler ? tr.my_test->MinNumMasters() + m : p) * NumRuns ); - } - - char WorkloadName[MaxWorkloadNameLen + 1]; - - void PrepareTests () { - printf( "Initializing...\r" ); - NumActiveAffModes = theSettings.my_opts & UseAffinityModes ? 
NumAffinitizationModes : 1; - TotalConfigs = 0; - TitleFieldLen = strlen( TestNameColumnTitle ); - WorkloadFieldLen = strlen( WorkloadNameColumnTitle ); - int numThreads = MaxThread - MinThread + 1; - int numConfigsBase = numThreads * NumActiveAffModes; - int totalWorkloads = 0; - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - Test &t = *tr.my_test; - int numWorkloads = theSettings.my_opts & UseSmallestWorkloadOnly ? 1 : t.NumWorkloads(); - int numConfigs = numConfigsBase * numWorkloads; - if ( t.MaxNumMasters() > 1 ) { - ASSERT( theSettings.my_opts & UseTaskScheduler, "Multiple masters mode is only valid for task scheduler tests" ); - if ( MaxTbbMasters < t.MaxNumMasters() ) - MaxTbbMasters = t.MaxNumMasters(); - numConfigs *= t.MaxNumMasters() - t.MinNumMasters() + 1; - } - totalWorkloads += numWorkloads; - TotalConfigs += numConfigs; - - const char* testName = t.Name(); - if ( testName ) - tr.my_testName = testName; - ASSERT( tr.my_testName, "Neither Test::Name() is implemented, nor RTTI is enabled" ); - TitleFieldLen = max( TitleFieldLen, strlen(tr.my_testName) ); - - tr.my_results.resize( numConfigs ); - tr.my_serialBaselines.resize( numWorkloads ); - tr.my_baselines.resize( numWorkloads ); - tr.my_workloadNames.resize( numWorkloads ); - } - TimingSeries tmpTiming; - TlsTimings = &tmpTiming; // All measurements are serial here - int n = 0; - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - Test &t = *tr.my_test; - // Detect which methods are overridden by the test implementation - g_absentMethods = 0; - Test::ThreadInfo ti = { 0 }; - t.SetWorkload(0); - t.OnStart(ti); - t.RunSerial(ti); - t.OnFinish(ti); - if ( theSettings.my_opts & UseSerialBaseline && !(g_absentMethods & idRunSerial) ) - tr.my_availableMethods |= idRunSerial; - if ( !(g_absentMethods & idOnStart) ) - tr.my_availableMethods |= idOnStart; - - RunConfig rc = { 1, 1, 1, 0, 0 }; - int numWorkloads = theSettings.my_opts & UseSmallestWorkloadOnly ? 1 : t.NumWorkloads(); - for ( int w = 0; w < numWorkloads; ++w ) { - WorkloadName[0] = 0; - t.SetWorkload(w); - if ( !WorkloadName[0] ) - sprintf( WorkloadName, "%d", w ); - size_t len = strlen(WorkloadName); - tr.my_workloadNames[w] = new char[len + 1]; - strcpy ( (char*)tr.my_workloadNames[w], WorkloadName ); - WorkloadFieldLen = max( WorkloadFieldLen, len ); - - rc.my_workloadID = w; - if ( theSettings.my_opts & UseBaseline ) - RunTestImpl( tr, rc, &Test::Baseline, tr.my_baselines[w] ); - if ( tr.my_availableMethods & idRunSerial ) - RunTestImpl( tr, rc, &Test::RunSerial, tr.my_serialBaselines[w] ); - printf( "Measuring baselines: %04.1f%%\r", ++n * 100. / totalWorkloads ); fflush(stdout); - } - } - TlsTimings = new TimingSeries[MaxThread + MaxTbbMasters - 1]; - if ( theSettings.my_opts & UseTaskScheduler ? MaxTbbMasters : MaxThread ) - WalkTests( &InitTestData, n, false, false, theSettings.my_opts & UseTaskScheduler ? true : false ); - CalibrationTiming.my_durations.reserve( MaxTbbMasters * 3 ); - printf( " \r"); - } - - FILE* ResFile = NULL; - - void Report ( char const* fmt, ... ) { - va_list args; - if ( ResFile ) { - va_start( args, fmt ); - vfprintf( ResFile, fmt, args ); - va_end( args ); - } - va_start( args, fmt ); - vprintf( fmt, args ); - va_end( args ); - } - - void PrintResults () { - if ( theSettings.my_resFile ) - ResFile = fopen( theSettings.my_resFile, "w" ); - Report( "%-*s %-*s %s", TitleFieldLen, "Test-name", WorkloadFieldLen, "Workload", - MaxTbbMasters > 1 ? 
"W M " : "T " ); - if ( theSettings.my_opts & UseAffinityModes ) - Report( "Aff " ); - Report( "%-*s SD,%% %-*s %-*s %-*s ", - RateFieldLen, "Avg.time", OvhdFieldLen, "Par.ovhd,%", - RateFieldLen, "Min.time", RateFieldLen, "Max.time" ); - Report( " | Repeats = %lu, CPUs %d\n", (unsigned long)NumRuns, NumCpus ); - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - for ( size_t j = 0; j < tr.my_results.size(); ++j ) { - RunResults &rr = tr.my_results[j]; - RunConfig &rc = rr.my_config; - int w = rc.my_workloadID; - TimingSeries &ts = rr.my_timing; - duration_t baselineTime = tr.my_baselines[w].my_avgTime, - cleanTime = ts.my_avgTime - baselineTime; - Report( "%-*s %-*s ", TitleFieldLen, tr.my_testName, WorkloadFieldLen, tr.my_workloadNames[w] ); - if ( MaxTbbMasters > 1 ) - Report( "%-4d %-4d ", rc.my_numThreads - 1, rc.my_numMasters ); - else - Report( "%-4d ", rc.my_numThreads ); - if ( theSettings.my_opts & UseAffinityModes ) - Report( "%%-8s ", AffinitizationModeNames[rc.my_affinityMode] ); - Report( "%-*.2e %-6.1f ", RateFieldLen, cleanTime, ts.my_stdDev); - if ( tr.my_availableMethods & idRunSerial ) { - duration_t serialTime = (tr.my_serialBaselines[w].my_avgTime - baselineTime) / rc.my_maxConcurrency; - Report( "%-*.1f ", OvhdFieldLen, 100*(cleanTime - serialTime)/serialTime ); - } - else - Report( "%*s%*s ", OvhdFieldLen/2, "-", OvhdFieldLen - OvhdFieldLen/2, "" ); - Report( "%-*.2e %-*.2e ", RateFieldLen, ts.my_minTime - baselineTime, RateFieldLen, ts.my_maxTime - baselineTime); - Report( "\n" ); - } - } - delete [] TlsTimings; - if ( ResFile ) - fclose(ResFile); - } - - __TBB_PERF_API void RegisterTest ( Test* t, const char* className, bool takeOwnership ) { - // Just collect test objects at this stage - theSession.push_back( TestResults(t, className, takeOwnership) ); - } - -} // namespace internal - -__TBB_PERF_API void Test::Baseline ( ThreadInfo& ) {} - -__TBB_PERF_API void Test::RunSerial ( ThreadInfo& ) { internal::g_absentMethods |= internal::idRunSerial; } - -__TBB_PERF_API void Test::OnStart ( ThreadInfo& ) { internal::g_absentMethods |= internal::idOnStart; } - -__TBB_PERF_API void Test::OnFinish ( ThreadInfo& ) { internal::g_absentMethods |= internal::idOnFinish; } - -__TBB_PERF_API void WipeCaches () { NativeParallelFor( NumCpus, internal::WiperBody() ); } - -__TBB_PERF_API void EmptyFunc () {} -__TBB_PERF_API void AnchorFunc ( void* ) {} -__TBB_PERF_API void AnchorFunc2 ( void*, void* ) {} - -__TBB_PERF_API void SetWorkloadName( const char* format, ... 
) { - internal::WorkloadName[MaxWorkloadNameLen] = 0; - va_list args; - va_start(args, format); - vsnprintf( internal::WorkloadName, MaxWorkloadNameLen, format, args ); - va_end(args); -} - - -__TBB_PERF_API int TestMain( int argc, char* argv[], const SessionSettings* defaultSettings ) { -#if _MSC_VER - HANDLE hMutex = CreateMutex( NULL, FALSE, "Global\\TBB_OMP_PerfSession" ); - WaitForSingleObject( hMutex, INFINITE ); -#endif - MinThread = MaxThread = NumCpus; - if ( defaultSettings ) - theSettings = *defaultSettings; - ParseCommandLine( argc, argv ); // May override data in theSettings - - internal::PrepareTests (); - internal::RunTests (); - internal::PrintResults(); - REPORT("\n"); -#if _MSC_VER - ReleaseMutex( hMutex ); - CloseHandle( hMutex ); -#endif - return 0; -} - -} // namespace Perf diff --git a/src/tbb/src/perf/perf.h b/src/tbb/src/perf/perf.h deleted file mode 100644 index 341e11f34..000000000 --- a/src/tbb/src/perf/perf.h +++ /dev/null @@ -1,257 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/
-
-#ifndef __tbb_perf_h__
-#define __tbb_perf_h__
-
-#ifndef TBB_PERF_TYPEINFO
-#define TBB_PERF_TYPEINFO 1
-#endif
-
-#if TBB_PERF_TYPEINFO
-    #include <typeinfo>
-    #define __TBB_PERF_TEST_CLASS_NAME(T) typeid(T).name()
-#else /* !TBB_PERF_TYPEINFO */
-    #define __TBB_PERF_TEST_CLASS_NAME(T) NULL
-#endif /* !TBB_PERF_TYPEINFO */
-
-
-#include "tbb/tick_count.h"
-
-// TODO: Fix build scripts to provide more reliable build phase identification means
-#ifndef __TBB_PERF_API
-#if _USRDLL
-    #if _MSC_VER
-        #define __TBB_PERF_API __declspec(dllexport)
-    #else /* !_MSC_VER */
-        #define __TBB_PERF_API
-    #endif /* !_MSC_VER */
-#else /* !_USRDLL */
-    #if _MSC_VER
-        #define __TBB_PERF_API __declspec(dllimport)
-    #else /* !_MSC_VER */
-        #define __TBB_PERF_API
-    #endif /* !_MSC_VER */
-#endif /* !_USRDLL */
-#endif /* !__TBB_PERF_API */
-
-#if _WIN32||_WIN64
-
-namespace Perf {
-    typedef unsigned __int64 tick_t;
-    #if defined(_M_X64)
-        inline tick_t rdtsc () { return __rdtsc(); }
-    #elif _M_IX86
-        inline tick_t rdtsc () { __asm { rdtsc } }
-    #else
-        #error Unsupported ISA
-    #endif
-} // namespace Perf
-
-#elif __linux__ || __APPLE__
-
-#include <stdint.h>
-
-namespace Perf {
-    typedef uint64_t tick_t;
-    #if __x86_64__ || __i386__ || __i386
-        inline tick_t rdtsc () {
-            uint32_t lo, hi;
-            __asm__ __volatile__ ( "rdtsc" : "=a" (lo), "=d" (hi) );
-            return (tick_t)lo | ((tick_t)hi) << 32;
-        }
-    #else
-        #error Unsupported ISA
-    #endif
-} // namespace Perf
-
-#else
-    #error Unsupported OS
-#endif /* OS */
-
-__TBB_PERF_API extern int NumThreads,
-                          MaxConcurrency,
-                          NumCpus;
-
-// Functions and global variables provided by the benchmarking framework
-namespace Perf {
-
-typedef double duration_t;
-
-static const int MaxWorkloadNameLen = 64;
-
-static const char* NoHistogram = (char*)-1;
-static const char* DefaultHistogram = (char*)-2;
-
-__TBB_PERF_API void AnchorFunc ( void* );
-__TBB_PERF_API void AnchorFunc2 ( void*, void* );
-
-//! Helper that can be used in the preprocess handler to clean caches
-/** Cleaning caches is necessary to obtain reproducible results when a test
-    accesses significant ranges of memory. **/
-__TBB_PERF_API void WipeCaches ();
-
-//! Specifies the name to be used to designate the current workload in output
-/** Should be used from Test::SetWorkload(). If necessary workload name will be
-    truncated to MaxWorkloadNameLen characters. **/
-__TBB_PERF_API void SetWorkloadName( const char* format, ... );
-
-class __TBB_PERF_API Test {
-public:
-    virtual ~Test () {}
-
-    //! Struct used by tests running in multiple masters mode
-    struct ThreadInfo {
-        //! Zero based thread ID
-        int tid;
-        //! Pointer to test specific data
-        /** If used by the test, should be initialized by OnStartLocal(), and
-            finalized by OnFinishLocal(). **/
-        void* data;
-    };
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Mandatory methods
-
-    //! Returns the number of workloads supported
-    virtual int NumWorkloads () = 0;
-
-    //! Set workload info for the subsequent calls to Run() and RunSerial()
-    /** This method can use global helper function Perf::SetWorkloadName() in order
-        to specify the name of the current workload, which will be used in output
-        to designate the workload. If SetWorkloadName is not called, workloadIndex
-        will be used for this purpose.
-
-        When testing task scheduler, make sure that this method does not trigger
-        its automatic initialization. **/
-    virtual void SetWorkload ( int workloadIndex ) = 0;
-
-    //! Test implementation
-    /** Called by the timing framework several times in a loop to achieve approx.
-        RunDuration time, and this loop is timed NumRuns times to collect statistics.
-        Argument ti specifies information about the master thread calling this method. **/
-    virtual void Run ( ThreadInfo& ti ) = 0;
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Optional methods
-
-    //! Returns short title string to be used in the regular output to identify the test
-    /** Should uniquely identify the test among other ones in the given benchmark suite.
-        If not implemented, the test implementation class' RTTI name is used. **/
-    virtual const char* Name () { return NULL; };
-
-    //! Returns minimal number of master threads
-    /** Used for task scheduler tests only (when UseTbbScheduler option is specified
-        in session settings). **/
-    virtual int MinNumMasters () { return 1; }
-
-    //! Returns maximal number of master threads
-    /** Used for task scheduler tests only (when UseTbbScheduler option is specified
-        in session settings). **/
-    virtual int MaxNumMasters () { return 1; }
-
-    //! Executes serial workload equivalent to the one processed by Run()
-    /** Called by the timing framework several times in a loop to collect statistics. **/
-    virtual void RunSerial ( ThreadInfo& ti );
-
-    //! Invoked before each call to Run()
-    /** Can be used to preinitialize data necessary for the test, clean up
-        caches (see Perf::WipeCaches), etc.
-        In multiple masters mode this method is called on each thread. **/
-    virtual void OnStart ( ThreadInfo& ti );
-
-    //! Invoked after each call to Run()
-    /** Can be used to free resources allocated by OnStart().
-        Note that this method must work correctly independently of whether Run(),
-        RunSerial() or nothing is called between OnStart() and OnFinish().
-        In multiple masters mode this method is called on each thread. **/
-    virtual void OnFinish ( ThreadInfo& ti );
-
-    //! Functionality, the cost of which has to be factored out from timing results
-    /** Applies to both parallel and serial versions. **/
-    virtual void Baseline ( ThreadInfo& );
-
-    //! Returns description string to be used in the benchmark info/summary output
-    virtual const char* Description () { return NULL; }
-
-    //! Specifies if the histogram of individual run times in a series
-    /** If the method is not overridden, histogramName argument of TestMain is used. **/
-    virtual const char* HistogramName () { return DefaultHistogram; }
-}; // class Test
-
-namespace internal {
-    __TBB_PERF_API void RegisterTest ( Test*, const char* testClassName, bool takeOwnership );
-}
-
-template <class T>
-void RegisterTest() { internal::RegisterTest( new T, __TBB_PERF_TEST_CLASS_NAME(T), true ); }
-
-template <class T>
-void RegisterTest( T& t ) { internal::RegisterTest( &t, __TBB_PERF_TEST_CLASS_NAME(T), false ); }
-
-enum SessionOptions {
-    //! Use Test::RunSerial if present
-    UseBaseline = 0x01,
-    UseSerialBaseline = 0x02,
-    UseBaselines = UseBaseline | UseSerialBaseline,
-    UseTaskScheduler = 0x10,
-    UseAffinityModes = 0x20,
-    UseSmallestWorkloadOnly = 0x40
-};

-struct SessionSettings {
-    //! A combination of SessionOptions flags
-    uintptr_t my_opts;
-
-    //! Name of a file to store performance results
-    /** These results are duplicates of what is printed on the console. **/
-    const char* my_resFile;
-
-    //! Output destination for the histogram of individual run times in a series
-    /** If it is a string, the histogram is stored in a file with such name.
-        If it is NULL, the histogram is printed on the console. By default histograms
-        are suppressed.
-
-        The histogram is formatted as two column table:
-        "time bucket start" "number of tests in this bucket"
-
-        When this setting enables histogram generation, an individual test
-        can override it by implementing HistogramName method. **/
-    const char* my_histogramName;
-
-    SessionSettings ( uintptr_t opts = 0, const char* resFile = NULL, const char* histogram = NoHistogram )
-        : my_opts(opts)
-        , my_resFile(resFile)
-        , my_histogramName(histogram)
-    {}
-}; // struct SessionSettings
-
-//! Benchmarking session entry point
-/** Executes all the individual tests registered previously by means of
-    RegisterTest **/
-__TBB_PERF_API int TestMain( int argc, char* argv[],
-                             const SessionSettings* defaultSettings = NULL );
-
-
-} // namespace Perf
-
-#endif /* __tbb_perf_h__ */
-
-
diff --git a/src/tbb/src/perf/perf_sched.cpp b/src/tbb/src/perf/perf_sched.cpp
deleted file mode 100644
index a03e06f29..000000000
--- a/src/tbb/src/perf/perf_sched.cpp
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/ - -#include "perf.h" - -#include - -#include "tbb/blocked_range.h" -#include "tbb/parallel_for.h" -#include "tbb/parallel_reduce.h" - -#define NUM_CHILD_TASKS 2096 -#define NUM_ROOT_TASKS 256 - -#define N 100000000 -#define FINEST_GRAIN 10 -#define FINE_GRAIN 50 -#define MED_GRAIN 200 -#define COARSE_GRAIN 1000 - - -typedef int count_t; - -const count_t N_finest = (count_t)(N/log((double)N)/10); -const count_t N_fine = N_finest * 20; -const count_t N_med = N_fine * (count_t)log((double)N) / 5; - -class StaticTaskHolder { -public: - tbb::task *my_leafTaskPtr; - StaticTaskHolder (); -}; - -static StaticTaskHolder s_tasks; - -static count_t NumIterations; -static count_t NumLeafTasks; -static count_t NumRootTasks; - -class LeafTaskBase : public tbb::task { -public: - count_t my_ID; - - LeafTaskBase () {} - LeafTaskBase ( count_t id ) : my_ID(id) {} -}; - -class SimpleLeafTask : public LeafTaskBase { - task* execute () { - volatile count_t anchor = 0; - for ( count_t i=0; i < NumIterations; ++i ) - anchor += i; - return NULL; - } -public: - SimpleLeafTask ( count_t ) {} -}; - -StaticTaskHolder::StaticTaskHolder () { - static SimpleLeafTask s_t1(0); - my_leafTaskPtr = &s_t1; -} - -class Test_SPMC : public Perf::Test { -protected: - static const int numWorkloads = 4; - static const count_t workloads[numWorkloads]; - - LeafTaskBase* my_leafTaskPtr; - - const char* Name () { return "SPMC"; } - - int NumWorkloads () { return numWorkloads; } - - void SetWorkload ( int idx ) { - NumRootTasks = 1; - NumIterations = workloads[idx]; - NumLeafTasks = NUM_CHILD_TASKS * NUM_ROOT_TASKS / (NumIterations > 1000 ? 32 : 8); - Perf::SetWorkloadName( "%dx%d", NumLeafTasks, NumIterations ); - } - - void Run ( ThreadInfo& ) { - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count( NumLeafTasks + 1 ); - for ( count_t i = 0; i < NumLeafTasks; ++i ) - r.spawn( *new(r.allocate_child()) SimpleLeafTask(0) ); - r.wait_for_all(); - tbb::task::destroy(r); - } - - void RunSerial ( ThreadInfo& ) { - const count_t n = NumLeafTasks * NumRootTasks; - for ( count_t i=0; i < n; ++i ) { - my_leafTaskPtr->my_ID = i; - my_leafTaskPtr->execute(); - } - } - -public: - Test_SPMC ( LeafTaskBase* leafTaskPtr = NULL ) { - static SimpleLeafTask t(0); - my_leafTaskPtr = leafTaskPtr ? leafTaskPtr : &t; - } -}; // class Test_SPMC - -const count_t Test_SPMC::workloads[Test_SPMC::numWorkloads] = { 1, 50, 500, 5000 }; - -template -class LeavesLauncherTask : public tbb::task { - count_t my_groupId; - - task* execute () { - count_t base = my_groupId * NumLeafTasks; - set_ref_count(NumLeafTasks + 1); - for ( count_t i = 0; i < NumLeafTasks; ++i ) - spawn( *new(allocate_child()) LeafTask(base + i) ); - wait_for_all(); - return NULL; - } -public: - LeavesLauncherTask ( count_t groupId ) : my_groupId(groupId) {} -}; - -template -void RunShallowTree () { - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count( NumRootTasks + 1 ); - for ( count_t i = 0; i < NumRootTasks; ++i ) - r.spawn( *new(r.allocate_child()) LeavesLauncherTask(i) ); - r.wait_for_all(); - tbb::task::destroy(r); -} - -class Test_ShallowTree : public Test_SPMC { - const char* Name () { return "ShallowTree"; } - - void SetWorkload ( int idx ) { - NumRootTasks = NUM_ROOT_TASKS; - NumIterations = workloads[idx]; - NumLeafTasks = NumIterations > 200 ? NUM_CHILD_TASKS / 10 : - (NumIterations > 50 ? 
NUM_CHILD_TASKS / 2 : NUM_CHILD_TASKS * 2); - Perf::SetWorkloadName( "%dx%d", NumRootTasks * NumLeafTasks, NumIterations ); - } - - void Run ( ThreadInfo& ) { - RunShallowTree(); - } -}; // class Test_ShallowTree - -class LeafTaskSkewed : public LeafTaskBase { - task* execute () { - volatile count_t anchor = 0; - double K = (double)NumRootTasks * NumLeafTasks; - count_t n = count_t(sqrt(double(my_ID)) * double(my_ID) * my_ID / (4 * K * K)); - for ( count_t i = 0; i < n; ++i ) - anchor += i; - return NULL; - } -public: - LeafTaskSkewed ( count_t id ) : LeafTaskBase(id) {} -}; - -class Test_ShallowTree_Skewed : public Test_SPMC { - static LeafTaskSkewed SerialTaskBody; - - const char* Name () { return "ShallowTree_Skewed"; } - - int NumWorkloads () { return 1; } - - void SetWorkload ( int ) { - NumRootTasks = NUM_ROOT_TASKS; - NumLeafTasks = NUM_CHILD_TASKS; - Perf::SetWorkloadName( "%d", NumRootTasks * NumLeafTasks ); - } - - void Run ( ThreadInfo& ) { - RunShallowTree(); - } - -public: - Test_ShallowTree_Skewed () : Test_SPMC(&SerialTaskBody) {} -}; // class Test_ShallowTree_Skewed - -LeafTaskSkewed Test_ShallowTree_Skewed::SerialTaskBody(0); - -typedef tbb::blocked_range range_t; - -static count_t IterRange = N, - IterGrain = 1; - -enum PartitionerType { - SimplePartitioner = 0, - AutoPartitioner = 1 -}; - -class Test_Algs : public Perf::Test { -protected: - static const int numWorkloads = 4; - static const count_t algRanges[numWorkloads]; - static const count_t algGrains[numWorkloads]; - - tbb::simple_partitioner my_simplePartitioner; - tbb::auto_partitioner my_autoPartitioner; - PartitionerType my_partitionerType; - - bool UseAutoPartitioner () const { return my_partitionerType == AutoPartitioner; } - - int NumWorkloads () { return UseAutoPartitioner() ? 3 : numWorkloads; } - - void SetWorkload ( int idx ) { - if ( UseAutoPartitioner() ) { - IterRange = algRanges[idx ? numWorkloads - 1 : 0]; - IterGrain = idx > 1 ? algGrains[numWorkloads - 1] : 1; - } - else { - IterRange = algRanges[idx]; - IterGrain = algGrains[idx]; - } - Perf::SetWorkloadName( "%d/%d", IterRange, IterGrain ); - } -public: - Test_Algs ( PartitionerType pt = SimplePartitioner ) : my_partitionerType(pt) {} -}; // class Test_Algs - -const count_t Test_Algs::algRanges[] = {N_finest, N_fine, N_med, N}; -const count_t Test_Algs::algGrains[] = {1, FINE_GRAIN, MED_GRAIN, COARSE_GRAIN}; - -template -class Test_PFor : public Test_Algs { -protected: - void Run ( ThreadInfo& ) { - if ( UseAutoPartitioner() ) - tbb::parallel_for( range_t(0, IterRange, IterGrain), Body(), my_autoPartitioner ); - else - tbb::parallel_for( range_t(0, IterRange, IterGrain), Body(), my_simplePartitioner ); - } - - void RunSerial ( ThreadInfo& ) { - Body body; - body( range_t(0, IterRange, IterGrain) ); - } -public: - Test_PFor ( PartitionerType pt = SimplePartitioner ) : Test_Algs(pt) {} -}; // class Test_PFor - -class SimpleForBody { -public: - void operator()( const range_t& r ) const { - count_t end = r.end(); - volatile count_t anchor = 0; - for( count_t i = r.begin(); i < end; ++i ) - anchor += i; - } -}; // class SimpleForBody - -class Test_PFor_Simple : public Test_PFor { -protected: - const char* Name () { return UseAutoPartitioner() ? 
"PFor-AP" : "PFor"; } -public: - Test_PFor_Simple ( PartitionerType pt = SimplePartitioner ) : Test_PFor(pt) {} -}; // class Test_PFor_Simple - -class SkewedForBody { -public: - void operator()( const range_t& r ) const { - count_t end = (r.end() + 1) * (r.end() + 1); - volatile count_t anchor = 0; - for( count_t i = r.begin() * r.begin(); i < end; ++i ) - anchor += i; - } -}; // class SkewedForBody - -class Test_PFor_Skewed : public Test_PFor { - typedef Test_PFor base_type; -protected: - const char* Name () { return UseAutoPartitioner() ? "PFor-Skewed-AP" : "PFor-Skewed"; } - - void SetWorkload ( int idx ) { - base_type::SetWorkload(idx); - IterRange = (count_t)(sqrt((double)IterRange) * sqrt(sqrt((double)N / IterRange))); - Perf::SetWorkloadName( "%d", IterRange ); - } - -public: - Test_PFor_Skewed ( PartitionerType pt = SimplePartitioner ) : base_type(pt) {} -}; // class Test_PFor_Skewed - -PartitionerType gPartitionerType; -count_t NestingRange; -count_t NestingGrain; - -class NestingForBody { - count_t my_depth; - tbb::simple_partitioner my_simplePartitioner; - tbb::auto_partitioner my_autoPartitioner; - - template - void run ( const range_t& r, Partitioner& p ) const { - count_t end = r.end(); - if ( my_depth > 1 ) - for ( count_t i = r.begin(); i < end; ++i ) - tbb::parallel_for( range_t(0, IterRange, IterGrain), NestingForBody(my_depth - 1), p ); - else - for ( count_t i = r.begin(); i < end; ++i ) - tbb::parallel_for( range_t(0, IterRange, IterGrain), SimpleForBody(), p ); - } -public: - void operator()( const range_t& r ) const { - if ( gPartitionerType == AutoPartitioner ) - run( r, my_autoPartitioner ); - else - run( r, my_simplePartitioner ); - } - NestingForBody ( count_t depth = 1 ) : my_depth(depth) {} -}; // class NestingForBody - -enum NestingType { - HollowNesting, - ShallowNesting, - DeepNesting -}; - -class Test_PFor_Nested : public Test_Algs { - typedef Test_Algs base_type; - - NestingType my_nestingType; - count_t my_nestingDepth; - -protected: - const char* Name () { - static const char* names[] = { "PFor-HollowNested", "PFor-HollowNested-AP", - "PFor-ShallowNested", "PFor-ShallowNested-AP", - "PFor-DeeplyNested", "PFor-DeeplyNested-AP" }; - return names[my_nestingType * 2 + my_partitionerType]; - } - - int NumWorkloads () { return my_nestingType == ShallowNesting ? (UseAutoPartitioner() ? 3 : 2) : 1; } - - void SetWorkload ( int idx ) { - gPartitionerType = my_partitionerType; - if ( my_nestingType == DeepNesting ) { - NestingRange = 1024; - IterGrain = NestingGrain = 1; - IterRange = 4; - my_nestingDepth = 4; - } - else if ( my_nestingType == ShallowNesting ) { - int i = idx ? numWorkloads - 1 : 0; - count_t baseRange = algRanges[i]; - count_t baseGrain = !UseAutoPartitioner() || idx > 1 ? 
-
-enum NestingType {
-    HollowNesting,
-    ShallowNesting,
-    DeepNesting
-};
-
-class Test_PFor_Nested : public Test_Algs {
-    typedef Test_Algs base_type;
-
-    NestingType my_nestingType;
-    count_t my_nestingDepth;
-
-protected:
-    const char* Name () {
-        static const char* names[] = { "PFor-HollowNested", "PFor-HollowNested-AP",
-                                       "PFor-ShallowNested", "PFor-ShallowNested-AP",
-                                       "PFor-DeeplyNested", "PFor-DeeplyNested-AP" };
-        return names[my_nestingType * 2 + my_partitionerType];
-    }
-
-    int NumWorkloads () { return my_nestingType == ShallowNesting ? (UseAutoPartitioner() ? 3 : 2) : 1; }
-
-    void SetWorkload ( int idx ) {
-        gPartitionerType = my_partitionerType;
-        if ( my_nestingType == DeepNesting ) {
-            NestingRange = 1024;
-            IterGrain = NestingGrain = 1;
-            IterRange = 4;
-            my_nestingDepth = 4;
-        }
-        else if ( my_nestingType == ShallowNesting ) {
-            int i = idx ? numWorkloads - 1 : 0;
-            count_t baseRange = algRanges[i];
-            count_t baseGrain = !UseAutoPartitioner() || idx > 1 ? algGrains[i] : 1;
-            NestingRange = IterRange = (count_t)sqrt((double)baseRange);
-            NestingGrain = IterGrain = (count_t)sqrt((double)baseGrain);
-        }
-        else {
-            NestingRange = N / 100;
-            NestingGrain = COARSE_GRAIN / 10;
-            IterRange = 2;
-            IterGrain = 1;
-        }
-        Perf::SetWorkloadName( "%d/%d", NestingRange, NestingGrain );
-    }
-
-    void Run ( ThreadInfo& ) {
-        if ( UseAutoPartitioner() )
-            tbb::parallel_for( range_t(0, NestingRange, NestingGrain), NestingForBody(my_nestingDepth), my_autoPartitioner );
-        else
-            tbb::parallel_for( range_t(0, NestingRange, NestingGrain), NestingForBody(my_nestingDepth), my_simplePartitioner );
-    }
-
-    void RunSerial ( ThreadInfo& ) {
-        for ( int i = 0; i < NestingRange; ++i ) {
-            SimpleForBody body;
-            body( range_t(0, IterRange, IterGrain) );
-        }
-    }
-public:
-    Test_PFor_Nested ( NestingType nt, PartitionerType pt ) : base_type(pt), my_nestingType(nt), my_nestingDepth(1) {}
-}; // class Test_PFor_Nested
-
-class SimpleReduceBody {
-public:
-    count_t my_sum;
-    SimpleReduceBody () : my_sum(0) {}
-    SimpleReduceBody ( SimpleReduceBody&, tbb::split ) : my_sum(0) {}
-    void join( SimpleReduceBody& rhs ) { my_sum += rhs.my_sum;}
-    void operator()( const range_t& r ) {
-        count_t end = r.end();
-        volatile count_t anchor = 0;
-        for( count_t i = r.begin(); i < end; ++i )
-            anchor += i;
-        my_sum = anchor;
-    }
-}; // class SimpleReduceBody
-
-class Test_PReduce : public Test_Algs {
-protected:
-    const char* Name () { return UseAutoPartitioner() ? "PReduce-AP" : "PReduce"; }
-
-    void Run ( ThreadInfo& ) {
-        SimpleReduceBody body;
-        if ( UseAutoPartitioner() )
-            tbb::parallel_reduce( range_t(0, IterRange, IterGrain), body, my_autoPartitioner );
-        else
-            tbb::parallel_reduce( range_t(0, IterRange, IterGrain), body, my_simplePartitioner );
-    }
-
-    void RunSerial ( ThreadInfo& ) {
-        SimpleReduceBody body;
-        body( range_t(0, IterRange, IterGrain) );
-    }
-public:
-    Test_PReduce ( PartitionerType pt = SimplePartitioner ) : Test_Algs(pt) {}
-}; // class Test_PReduce
-
-int main( int argc, char* argv[] ) {
-    Perf::SessionSettings opts (Perf::UseTaskScheduler | Perf::UseSerialBaseline, "perf_sched.txt"); // Perf::UseBaseline, Perf::UseSmallestWorkloadOnly
-    Perf::RegisterTest<Test_SPMC>();
-    Perf::RegisterTest<Test_ShallowTree>();
-    Perf::RegisterTest<Test_ShallowTree_Skewed>();
-    Test_PFor_Simple pf_sp(SimplePartitioner), pf_ap(AutoPartitioner);
-    Perf::RegisterTest(pf_sp);
-    Perf::RegisterTest(pf_ap);
-    Test_PReduce pr_sp(SimplePartitioner), pr_ap(AutoPartitioner);
-    Perf::RegisterTest(pr_sp);
-    Perf::RegisterTest(pr_ap);
-    Test_PFor_Skewed pf_s_sp(SimplePartitioner), pf_s_ap(AutoPartitioner);
-    Perf::RegisterTest(pf_s_sp);
-    Perf::RegisterTest(pf_s_ap);
-    Test_PFor_Nested pf_hn_sp(HollowNesting, SimplePartitioner), pf_hn_ap(HollowNesting, AutoPartitioner),
-                     pf_sn_sp(ShallowNesting, SimplePartitioner), pf_sn_ap(ShallowNesting, AutoPartitioner),
-                     pf_dn_sp(DeepNesting, SimplePartitioner), pf_dn_ap(DeepNesting, AutoPartitioner);
-    Perf::RegisterTest(pf_hn_sp);
-    Perf::RegisterTest(pf_hn_ap);
-    Perf::RegisterTest(pf_sn_sp);
-    Perf::RegisterTest(pf_sn_ap);
-    Perf::RegisterTest(pf_dn_sp);
-    Perf::RegisterTest(pf_dn_ap);
-    return Perf::TestMain(argc, argv, &opts);
-}
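[Aside: SimpleReduceBody above is the body-object form that tbb::parallel_reduce still expects today -- a splitting constructor creates a fresh accumulator for a stolen subrange, and join() merges partial results. A standalone sketch of the same protocol that actually accumulates a sum; SumBody and parallel_sum are illustrative names:

    #include "tbb/blocked_range.h"
    #include "tbb/parallel_reduce.h"

    struct SumBody {
        long sum;
        SumBody() : sum(0) {}
        SumBody(SumBody&, tbb::split) : sum(0) {}   // fresh accumulator per split
        void operator()(const tbb::blocked_range<long>& r) {
            long s = sum;                            // may be invoked repeatedly
            for (long i = r.begin(); i != r.end(); ++i)
                s += i;
            sum = s;
        }
        void join(SumBody& rhs) { sum += rhs.sum; }  // merge a stolen subrange
    };

    long parallel_sum(long n) {
        SumBody body;
        tbb::parallel_reduce(tbb::blocked_range<long>(0, n), body);
        return body.sum;
    }
]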
diff --git a/src/tbb/src/perf/run_statistics.sh b/src/tbb/src/perf/run_statistics.sh
deleted file mode 100644
index c384be17c..000000000
--- a/src/tbb/src/perf/run_statistics.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-#
-# This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-# you can redistribute it and/or modify it under the terms of the GNU General Public License
-# version 2 as published by the Free Software Foundation. Threading Building Blocks is
-# distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details. You should have received a copy of
-# the GNU General Public License along with Threading Building Blocks; if not, write to the
-# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
-# As a special exception, you may use this file as part of a free software library without
-# restriction. Specifically, if other files instantiate templates or use macros or inline
-# functions from this file, or you compile this file and link it with other files to produce
-# an executable, this file does not by itself cause the resulting executable to be covered
-# by the GNU General Public License. This exception does not however invalidate any other
-# reasons why the executable file might be covered by the GNU General Public License.
-
-export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH
-#setting output format .csv, 'pivot' - is pivot table mode, ++ means append
-export STAT_FORMAT=pivot-csv++
-#check existing files because of apend mode
-ls *.csv
-rm -i *.csv
-#setting a delimiter in txt or csv file
-#export STAT_DELIMITER=,
-export STAT_RUNINFO1=Host=`hostname -s`
-#append a suffix after the filename
-#export STAT_SUFFIX=$STAT_RUNINFO1
-for ((i=1;i<=${repeat:=100};++i)); do echo $i of $repeat: && STAT_RUNINFO2=Run=$i $* || break; done
diff --git a/src/tbb/src/perf/statistics.cpp b/src/tbb/src/perf/statistics.cpp
deleted file mode 100644
index 2f621ab3d..000000000
--- a/src/tbb/src/perf/statistics.cpp
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/ - -#include "statistics.h" -#include "statistics_xml.h" - -#define COUNT_PARAMETERS 3 - -#ifdef _MSC_VER -#define snprintf _snprintf -#endif - -void GetTime(char* buff,int size_buff) -{ - tm *newtime; - time_t timer; - time(&timer); - newtime=localtime(&timer); - strftime(buff,size_buff,"%H:%M:%S",newtime); -} - -void GetDate(char* buff,int size_buff) -{ - tm *newtime; - time_t timer; - time(&timer); - newtime=localtime(&timer); - strftime(buff,size_buff,"%Y-%m-%d",newtime); -} - - -StatisticsCollector::TestCase StatisticsCollector::SetTestCase(const char *name, const char *mode, int threads) -{ - string KeyName(name); - switch (SortMode) - { - case ByThreads: KeyName += Format("_%02d_%s", threads, mode); break; - default: - case ByAlg: KeyName += Format("_%s_%02d", mode, threads); break; - } - CurrentKey = Statistics[KeyName]; - if(!CurrentKey) { - CurrentKey = new StatisticResults; - CurrentKey->Mode = mode; - CurrentKey->Name = name; - CurrentKey->Threads = threads; - CurrentKey->Results.reserve(RoundTitles.size()); - Statistics[KeyName] = CurrentKey; - } - return TestCase(CurrentKey); -} - -StatisticsCollector::~StatisticsCollector() -{ - for(Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - delete i->second; -} - -void StatisticsCollector::ReserveRounds(size_t index) -{ - size_t i = RoundTitles.size(); - if (i > index) return; - char buf[16]; - RoundTitles.resize(index+1); - for(; i <= index; i++) { - snprintf( buf, 15, "%u", unsigned(i+1) ); - RoundTitles[i] = buf; - } - for(Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) { - if(!i->second) printf("!!!'%s' = NULL\n", i->first.c_str()); - else i->second->Results.reserve(index+1); - } -} - -void StatisticsCollector::AddRoundResult(const TestCase &key, value_t v) -{ - ReserveRounds(key.access->Results.size()); - key.access->Results.push_back(v); -} - -void StatisticsCollector::SetRoundTitle(size_t index, const char *fmt, ...) -{ - vargf2buff(buff, 128, fmt); - ReserveRounds(index); - RoundTitles[index] = buff; -} - -void StatisticsCollector::AddStatisticValue(const TestCase &key, const char *type, const char *fmt, ...) -{ - vargf2buff(buff, 128, fmt); - AnalysisTitles.insert(type); - key.access->Analysis[type] = buff; -} - -void StatisticsCollector::AddStatisticValue(const char *type, const char *fmt, ...) -{ - vargf2buff(buff, 128, fmt); - AnalysisTitles.insert(type); - CurrentKey->Analysis[type] = buff; -} - -void StatisticsCollector::SetRunInfo(const char *title, const char *fmt, ...) -{ - vargf2buff(buff, 256, fmt); - RunInfo.push_back(make_pair(title, buff)); -} - -void StatisticsCollector::SetStatisticFormula(const char *name, const char *formula) -{ - Formulas[name] = formula; -} - -void StatisticsCollector::SetTitle(const char *fmt, ...) 
-{ - vargf2buff(buff, 256, fmt); - Title = buff; -} - -string ExcelFormula(const string &fmt, size_t place, size_t rounds, bool is_horizontal) -{ - char buff[16]; - if(is_horizontal) - snprintf(buff, 15, "RC[%u]:RC[%u]", unsigned(place), unsigned(place+rounds-1)); - else - snprintf(buff, 15, "R[%u]C:R[%u]C", unsigned(place+1), unsigned(place+rounds)); - string result(fmt); size_t pos = 0; - while ( (pos = result.find("ROUNDS", pos, 6)) != string::npos ) - result.replace(pos, 6, buff); - return result; -} - -void StatisticsCollector::Print(int dataOutput, const char *ModeName) -{ - FILE *OutputFile; - const char *file_suffix = getenv("STAT_SUFFIX"); - if( !file_suffix ) file_suffix = ""; - const char *file_format = getenv("STAT_FORMAT"); - if( file_format ) { - dataOutput = 0; - if( strstr(file_format, "con")||strstr(file_format, "std") ) dataOutput |= StatisticsCollector::Stdout; - if( strstr(file_format, "txt")||strstr(file_format, "csv") ) dataOutput |= StatisticsCollector::TextFile; - if( strstr(file_format, "excel")||strstr(file_format, "xml") ) dataOutput |= StatisticsCollector::ExcelXML; - if( strstr(file_format, "htm") ) dataOutput |= StatisticsCollector::HTMLFile; - if( strstr(file_format, "pivot") ) dataOutput |= StatisticsCollector::PivotMode; - } - for(int i = 1; i < 10; i++) { - string env = Format("STAT_RUNINFO%d", i); - const char *info = getenv(env.c_str()); - if( info ) { - string title(info); - size_t pos = title.find('='); - if( pos != string::npos ) { - env = title.substr(pos+1); - title.resize(pos); - } else env = title; - RunInfo.push_back(make_pair(title, env)); - } - } - - if (dataOutput & StatisticsCollector::Stdout) - { - printf("\n-=# %s #=-\n", Title.c_str()); - if(SortMode == ByThreads) - printf(" Name | # | %s ", ModeName); - else - printf(" Name | %s | # ", ModeName); - for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++) - printf("|%s", i->c_str()+1); - - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - if(SortMode == ByThreads) - printf("\n%12s|% 5d|%6s", i->second->Name.c_str(), i->second->Threads, i->second->Mode.c_str()); - else - printf("\n%12s|%6s|% 5d", i->second->Name.c_str(), i->second->Mode.c_str(), i->second->Threads); - Analysis_t &analisis = i->second->Analysis; - AnalysisTitles_t::iterator t = AnalysisTitles.begin(); - for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); t++) - { - char fmt[8]; snprintf(fmt, 7, "|%% %us", unsigned(max(size_t(3), t->size()))); - if(*t != a->first) - printf(fmt, ""); - else { - printf(fmt, a->second.c_str()); a++; - } - } - } - printf("\n"); - } - if (dataOutput & StatisticsCollector::TextFile) - { - bool append = false; - const char *file_ext = ".txt"; - if( file_format && strstr(file_format, "++") ) append = true; - if( file_format && strstr(file_format, "csv") ) file_ext = ".csv"; - if ((OutputFile = fopen((Name+file_suffix+file_ext).c_str(), append?"at":"wt")) == NULL) { - printf("Can't open .txt file\n"); - } else { - const char *delim = getenv("STAT_DELIMITER"); - if( !delim || !delim[0] ) { - if( file_format && strstr(file_format, "csv") ) delim = ","; - else delim = "\t"; - } - if( !append || !ftell(OutputFile) ) { // header needed - append = false; - if(SortMode == ByThreads) fprintf(OutputFile, "Name%s#%s%s", delim, delim, ModeName); - else fprintf(OutputFile, "Name%s%s%s#", delim, ModeName, delim); - for( size_t k = 0; k < RunInfo.size(); k++ ) - fprintf(OutputFile, "%s%s", delim, RunInfo[k].first.c_str()); - } - 
-            if(dataOutput & StatisticsCollector::PivotMode) {
-                if( !append) fprintf(OutputFile, "%sColumn%sValue", delim, delim);
-                for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
-                {
-                    string RowHead;
-                    if(SortMode == ByThreads)
-                        RowHead = Format("\n%s%s%d%s%s%s", i->second->Name.c_str(), delim, i->second->Threads, delim, i->second->Mode.c_str(), delim);
-                    else
-                        RowHead = Format("\n%s%s%s%s%d%s", i->second->Name.c_str(), delim, i->second->Mode.c_str(), delim, i->second->Threads, delim);
-                    for( size_t k = 0; k < RunInfo.size(); k++ )
-                        RowHead.append(RunInfo[k].second + delim);
-                    Analysis_t &analisis = i->second->Analysis;
-                    for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); ++a)
-                        fprintf(OutputFile, "%s%s%s%s", RowHead.c_str(), a->first.c_str(), delim, a->second.c_str());
-                    Results_t &r = i->second->Results;
-                    for (size_t k = 0; k < r.size(); k++) {
-                        fprintf(OutputFile, "%s%s%s", RowHead.c_str(), RoundTitles[k].c_str(), delim);
-                        fprintf(OutputFile, ResultsFmt, r[k]);
-                    }
-                }
-            } else {
-                if( !append ) {
-                    for( size_t k = 0; k < RunInfo.size(); k++ )
-                        fprintf(OutputFile, "%s%s", delim, RunInfo[k].first.c_str());
-                    for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++)
-                        fprintf(OutputFile, "%s%s", delim, i->c_str()+1);
-                    for (size_t i = 0; i < RoundTitles.size(); i++)
-                        fprintf(OutputFile, "%s%s", delim, RoundTitles[i].c_str());
-                }
-                for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
-                {
-                    if(SortMode == ByThreads)
-                        fprintf(OutputFile, "\n%s%s%d%s%s", i->second->Name.c_str(), delim, i->second->Threads, delim, i->second->Mode.c_str());
-                    else
-                        fprintf(OutputFile, "\n%s%s%s%s%d", i->second->Name.c_str(), delim, i->second->Mode.c_str(), delim, i->second->Threads);
-                    for( size_t k = 0; k < RunInfo.size(); k++ )
-                        fprintf(OutputFile, "%s%s", delim, RunInfo[k].second.c_str());
-                    Analysis_t &analisis = i->second->Analysis;
-                    AnalysisTitles_t::iterator t = AnalysisTitles.begin();
-                    for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); ++t) {
-                        fprintf(OutputFile, "%s", delim);
-                        if(*t == a->first) {
-                            fprintf(OutputFile, "%s", a->second.c_str()); ++a;
-                        }
-                    }
-                    //data
-                    Results_t &r = i->second->Results;
-                    for (size_t k = 0; k < r.size(); k++)
-                    {
-                        fprintf(OutputFile, "%s", delim);
-                        fprintf(OutputFile, ResultsFmt, r[k]);
-                    }
-                }
-            }
-            fprintf(OutputFile, "\n");
-            fclose(OutputFile);
-        }
-    }
-    if (dataOutput & StatisticsCollector::HTMLFile)
-    {
-        if ((OutputFile = fopen((Name+file_suffix+".html").c_str(), "w+t")) == NULL) {
-            printf("Can't open .html file\n");
-        } else {
-            char TimerBuff[100], DateBuff[100];
-            GetTime(TimerBuff,sizeof(TimerBuff));
-            GetDate(DateBuff,sizeof(DateBuff));
-            /* [Several dozen deleted lines elided: this block wrote two HTML <table>
-               views of the collected data -- a horizontal "Flip[H]" layout (one row
-               per test case: Name, Threads, Mode, analysis columns, per-round
-               results) and a transposed vertical "Flip[V]" layout -- by iterating
-               over Statistics, AnalysisTitles, and RoundTitles. The HTML markup
-               embedded in the fprintf format strings did not survive extraction of
-               this diff, so the lines are summarized rather than reproduced.] */
-            fclose(OutputFile);
-        }
-    }
-    if (dataOutput & StatisticsCollector::ExcelXML)
-    {
-        if ((OutputFile = fopen((Name+file_suffix+".xml").c_str(), "w+t")) == NULL) {
-            printf("Can't open .xml file\n");
-        } else {
-            // TODO:PivotMode
-            char UserName[100];
-            char TimerBuff[100], DateBuff[100];
-#if _WIN32 || _WIN64
-            strcpy(UserName,getenv("USERNAME"));
-#else
-            strcpy(UserName,getenv("USER"));
-#endif
-            //--------------------------------
-            GetTime(TimerBuff,sizeof(TimerBuff));
-            GetDate(DateBuff,sizeof(DateBuff));
-            //--------------------------
-            fprintf(OutputFile, XMLHead, UserName, TimerBuff);
-            fprintf(OutputFile, XMLStyles);
-            fprintf(OutputFile, XMLBeginSheet, "Horizontal");
-            fprintf(OutputFile, XMLNames,1,1,1,int(AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS));
-            fprintf(OutputFile, XMLBeginTable, int(RoundTitles.size()+Formulas.size()+AnalysisTitles.size()+COUNT_PARAMETERS+1/*title*/), int(Statistics.size()+1));
-            fprintf(OutputFile, XMLBRow);
-            fprintf(OutputFile, XMLCellTopName);
-            fprintf(OutputFile, XMLCellTopThread);
-            fprintf(OutputFile, XMLCellTopMode, ModeName);
-            for (AnalysisTitles_t::iterator j = AnalysisTitles.begin(); j != AnalysisTitles.end(); j++)
-                fprintf(OutputFile, XMLAnalysisTitle, j->c_str()+1);
-            for (Formulas_t::iterator j = Formulas.begin(); j != Formulas.end(); j++)
-                fprintf(OutputFile, XMLAnalysisTitle, j->first.c_str()+1);
-            for (RoundTitles_t::iterator j = RoundTitles.begin(); j != RoundTitles.end(); j++)
-                fprintf(OutputFile, XMLAnalysisTitle, j->c_str());
-            string Info = Title;
-            for( size_t k = 0; k < RunInfo.size(); k++ )
-                Info.append("; " + RunInfo[k].first + "=" + RunInfo[k].second);
-            fprintf(OutputFile, XMLCellEmptyWhite, Info.c_str());
-            fprintf(OutputFile, XMLERow);
-            //------------------------
-            for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
-            {
-                fprintf(OutputFile, XMLBRow);
-                fprintf(OutputFile, XMLCellName, i->second->Name.c_str());
-                fprintf(OutputFile, XMLCellThread,i->second->Threads);
-                fprintf(OutputFile, XMLCellMode, i->second->Mode.c_str());
-                //statistics
-                AnalysisTitles_t::iterator at = AnalysisTitles.begin();
-                for (Analysis_t::iterator j = i->second->Analysis.begin(); j != i->second->Analysis.end(); at++)
-                {
-                    fprintf(OutputFile, XMLCellAnalysis, (*at != j->first)?"":(i->second->Analysis[j->first]).c_str());
-                    if(*at == j->first) j++;
-                }
-                //formulas
-                size_t place = 0;
-                Results_t &v = i->second->Results;
-                for (Formulas_t::iterator f = Formulas.begin(); f != Formulas.end(); f++, place++)
-                    fprintf(OutputFile, XMLCellFormula, ExcelFormula(f->second, Formulas.size()-place, v.size(), true).c_str());
-                //data
-                for (size_t k = 0; k < v.size(); k++)
-                {
-                    fprintf(OutputFile, XMLCellData, v[k]);
-                }
-                if(v.size() < RoundTitles.size())
-                    fprintf(OutputFile, XMLMergeRow, int(RoundTitles.size() - v.size()));
-                fprintf(OutputFile, XMLERow);
-            }
-            //------------------------
-            fprintf(OutputFile, XMLEndTable);
-            fprintf(OutputFile, XMLWorkSheetProperties,1,1,3,3,int(RoundTitles.size()+AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS));
-            fprintf(OutputFile, XMLAutoFilter,1,1,1,int(AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS));
-            fprintf(OutputFile, XMLEndWorkSheet);
-            //----------------------------------------
-            fprintf(OutputFile, XMLEndWorkbook);
-            fclose(OutputFile);
-        }
-    }
-}
diff --git a/src/tbb/src/perf/statistics.h b/src/tbb/src/perf/statistics.h
deleted file mode 100644
index cf5766cec..000000000
--- a/src/tbb/src/perf/statistics.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-// Internal Intel tool
-
-#ifndef __STATISTICS_H__
-#define __STATISTICS_H__
-
-#define _CRT_SECURE_NO_DEPRECATE 1
-
-/* [The <...> targets of the next eight #include directives were lost in
-   extraction; the names below are reconstructed from what the header and the
-   inline statistics.cpp use (printf/snprintf, va_list, memset/strcpy,
-   time/strftime, and the std containers).] */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <time.h>
-#include <vector>
-#include <map>
-#include <set>
-
-using namespace std;
-typedef double value_t;
-
-/*
-    Statistical collector class.
-
-    Resulting table output:
-    +---------------------------------------------------------------------------+
-    | [Date] ...                                                                 |
-    +----------+----v----+--v---+----------------+------------+-..-+------------+
-    | TestName | Threads | Mode | Rounds results | Stat_type1 | .. | Stat_typeN |
-    +----------+---------+------+-+-+-+-..-+-+-+-+------------+-..-+------------+
-    |          |         |      | | | |  ..| | | |            | .. |            |
-    ..        ...       ...    ..................  ......      ..
-    |          |         |      | | | |  ..| | | |            | .. |            |
-    +----------+---------+------+-+-+-+-..-+-+-+-+------------+-..-+------------+
-
-    Iterating table output:
-    +---------------------------------------------------------------------------+
-    | [Date] <TestName>, Threads: <N>, Mode: <M>; for <Title>...                 |
-    +----------+----v----+--v---+----------------+------------+-..-+------------+
-
-*/
-
-class StatisticsCollector
-{
-public:
-    typedef map<string, string> Analysis_t;
-    typedef vector<value_t> Results_t;
-
-protected:
-    StatisticsCollector(const StatisticsCollector &);
-
-    struct StatisticResults
-    {
-        string Name;
-        string Mode;
-        int Threads;
-        Results_t Results;
-        Analysis_t Analysis;
-    };
-
-    // internal members
-    //bool OpenFile;
-    StatisticResults *CurrentKey;
-    string Title;
-    const char /**Name,*/ *ResultsFmt;
-    string Name;
-
-    //!
Data - typedef map<string, StatisticResults*> Statistics_t; - Statistics_t Statistics; - typedef vector<string> RoundTitles_t; - RoundTitles_t RoundTitles; - //TODO: merge those into one structure - typedef map<string, string> Formulas_t; - Formulas_t Formulas; - typedef set<string> AnalysisTitles_t; - AnalysisTitles_t AnalysisTitles; - typedef vector<pair<string, string> > RunInfo_t; - RunInfo_t RunInfo; - -public: - struct TestCase { - StatisticResults *access; - TestCase() : access(0) {} - TestCase(StatisticResults *link) : access(link) {} - const char *getName() const { return access->Name.c_str(); } - const char *getMode() const { return access->Mode.c_str(); } - int getThreads() const { return access->Threads; } - const Results_t &getResults() const { return access->Results; } - const Analysis_t &getAnalysis() const { return access->Analysis; } - }; - - enum Sorting { - ByThreads, ByAlg - }; - - //! Data and output types - enum DataOutput { - // Verbosity level enumeration - Statistic = 1, //< Analytical data - computed after all iterations and rounds passed - Result = 2, //< Testing data - collected after all iterations passed - Iteration = 3, //< Verbose data - collected at each iteration (for each size - in case of containers) - // ExtraVerbose is not applicabe yet :) be happy, but flexibility is always welcome - - // Next constants are bit-fields - Stdout = 1<<8, //< Output to the console - TextFile = 1<<9, //< Output to plain text file "name.txt" (delimiter is TAB by default) - ExcelXML = 1<<10, //< Output to Excel-readable XML-file "name.xml" - HTMLFile = 1<<11, //< Output to HTML file "name.html" - PivotMode= 1<<15 //< Puts all the rounds into one columt to better fit for pivot table in Excel - }; - - //! Constructor. Specify tests set name which used as name of output files - StatisticsCollector(const char *name, Sorting mode = ByThreads, const char *fmt = "%g") - : CurrentKey(NULL), ResultsFmt(fmt), Name(name), SortMode(mode) {} - - ~StatisticsCollector(); - - //! Set tests set title, supporting printf-like arguments - void SetTitle(const char *fmt, ...); - - //! Specify next test key - TestCase SetTestCase(const char *name, const char *mode, int threads); - //! Specify next test key - void SetTestCase(const TestCase &t) { SetTestCase(t.getName(), t.getMode(), t.getThreads()); } - //! Reserve specified number of rounds. Use for effeciency. Used mostly internally - void ReserveRounds(size_t index); - //! Add result of the measure - void AddRoundResult(const TestCase &, value_t v); - //! Add result of the current measure - void AddRoundResult(value_t v) { if(CurrentKey) AddRoundResult(TestCase(CurrentKey), v); } - //! Add title of round - void SetRoundTitle(size_t index, const char *fmt, ...); - //! Add numbered title of round - void SetRoundTitle(size_t index, int num) { SetRoundTitle(index, "%d", num); } - //! Get number of rounds - size_t GetRoundsCount() const { return RoundTitles.size(); } - // Set statistic value for the test - void AddStatisticValue(const TestCase &, const char *type, const char *fmt, ...); - // Set statistic value for the current test - void AddStatisticValue(const char *type, const char *fmt, ...); - //! Add Excel-processing formulas. @arg formula can contain more than one instances of - //! ROUNDS template which transforms into the range of cells with result values - //TODO://! #1 .. #n templates represent data cells from the first to the last - //TODO: merge with Analisis - void SetStatisticFormula(const char *name, const char *formula); - //! 
Add information about run or compile parameters - void SetRunInfo(const char *title, const char *fmt, ...); - void SetRunInfo(const char *title, int num) { SetRunInfo(title, "%d", num); } - - //! Data output - void Print(int dataOutput, const char *ModeName = "Mode"); - -private: - Sorting SortMode; -}; - -//! using: Func(const char *fmt, ...) { vargf2buff(buff, 128, fmt);... -#define vargf2buff(name, size, fmt) \ - char name[size]; memset(name, 0, size); \ - va_list args; va_start(args, fmt); \ - vsnprintf(name, size-1, fmt, args); \ - va_end(args); - - -inline std::string Format(const char *fmt, ...) { - vargf2buff(buf, 1024, fmt); // from statistics.h - return std::string(buf); -} - -#ifdef STATISTICS_INLINE -#include "statistics.cpp" -#endif -#endif //__STATISTICS_H__ diff --git a/src/tbb/src/perf/statistics_xml.h b/src/tbb/src/perf/statistics_xml.h deleted file mode 100644 index b11b35a85..000000000 --- a/src/tbb/src/perf/statistics_xml.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -const char XMLBRow[]= -" <Row>\n"; - -const char XMLERow[]= -" </Row>\n"; - -const char XMLHead[]= -"<?xml version=\"1.0\"?>\n" -"<?mso-application progid=\"Excel.Sheet\"?>\n\ -<Workbook xmlns=\"urn:schemas-microsoft-com:office:spreadsheet\"\n\ - xmlns:o=\"urn:schemas-microsoft-com:office:office\"\n\ - xmlns:x=\"urn:schemas-microsoft-com:office:excel\"\n\ - xmlns:ss=\"urn:schemas-microsoft-com:office:spreadsheet\"\n\ - xmlns:html=\"http://www.w3.org/TR/REC-html40\">\n\ - <DocumentProperties xmlns=\"urn:schemas-microsoft-com:office:office\">\n\ - <Author>%s</Author>\n\ - <Created>%s</Created>\n\ - <Company>Intel Corporation</Company>\n\ - </DocumentProperties>\n\ - <ExcelWorkbook xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\ - <RefModeR1C1/>\n\ - </ExcelWorkbook>\n"; - - const char XMLStyles[]= - " <Styles>\n\ - <Style ss:ID=\"Default\" ss:Name=\"Normal\">\n\ - <Alignment ss:Vertical=\"Bottom\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s26\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - <Interior ss:Color=\"#FFFF99\" ss:Pattern=\"Solid\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s25\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - <Interior ss:Color=\"#CCFFFF\" ss:Pattern=\"Solid\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s24\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - <Interior ss:Color=\"#CCFFCC\" ss:Pattern=\"Solid\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s23\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - </Style>\n\ - </Styles>\n"; - -const char XMLBeginSheet[]= -" <Worksheet ss:Name=\"%s\">\n"; - -const char XMLNames[]= -" <Names>\n\ - <NamedRange ss:Name=\"_FilterDatabase\" ss:RefersTo=\"R%dC%d:R%dC%d\" ss:Hidden=\"1\"/>\n\ - </Names>\n"; - -const char XMLBeginTable[]= -" <Table ss:ExpandedColumnCount=\"%d\" ss:ExpandedRowCount=\"%d\" x:FullColumns=\"1\"\n\ - x:FullRows=\"1\">\n"; - -const char XMLColumsHorizontalTable[]= -" <Column ss:Index=\"1\" ss:Width=\"108.75\"/>\n\ - <Column ss:Index=\"%d\" 
ss:Width=\"77.25\" ss:Span=\"%d\"/>\n"; - -const char XMLColumsVerticalTable[]= -" <Column ss:Index=\"1\" ss:Width=\"77.25\" ss:Span=\"%d\"/>\n"; - -const char XMLNameAndTime[]= -" <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n\ - <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n\ - <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLTableParamAndTitle[]= -" <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\ - <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\ - <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\ - <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -//-------------- -const char XMLCellTopName[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">Name</Data></Cell>\n"; -const char XMLCellTopThread[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">Threads</Data></Cell>\n"; -const char XMLCellTopMode[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; -//--------------------- -const char XMLAnalysisTitle[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellName[]= -" <Cell ss:StyleID=\"s24\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellThread[]= -" <Cell ss:StyleID=\"s24\"><Data ss:Type=\"Number\">%d</Data></Cell>\n"; - -const char XMLCellMode[]= -" <Cell ss:StyleID=\"s24\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellAnalysis[]= -" <Cell ss:StyleID=\"s26\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellFormula[]= -" <Cell ss:StyleID=\"s26\" ss:Formula=\"%s\"><Data ss:Type=\"Number\"></Data></Cell>\n"; - -const char XMLCellData[]= -" <Cell ss:StyleID=\"s23\"><Data ss:Type=\"Number\">%g</Data></Cell>\n"; - -const char XMLMergeRow[]= -" <Cell ss:StyleID=\"s23\" ss:MergeAcross=\"%d\" ><Data ss:Type=\"String\"></Data></Cell>\n"; - -const char XMLCellEmptyWhite[]= -" <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellEmptyTitle[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\"></Data></Cell>\n"; - -const char XMLEndTable[]= -" </Table>\n"; - -const char XMLAutoFilter[]= -" <AutoFilter x:Range=\"R%dC%d:R%dC%d\" xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\ - </AutoFilter>\n"; - -const char XMLEndWorkSheet[]= - " </Worksheet>\n"; - -const char XMLWorkSheetProperties[]= -" <WorksheetOptions xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\ - <Unsynced/>\n\ - <Selected/>\n\ - <FreezePanes/>\n\ - <FrozenNoSplit/>\n\ - <SplitHorizontal>%d</SplitHorizontal>\n\ - <TopRowBottomPane>%d</TopRowBottomPane>\n\ - <SplitVertical>%d</SplitVertical>\n\ - <LeftColumnRightPane>%d</LeftColumnRightPane>\n\ - <ActivePane>0</ActivePane>\n\ - <Panes>\n\ - <Pane>\n\ - <Number>3</Number>\n\ - </Pane>\n\ - <Pane>\n\ - <Number>1</Number>\n\ - </Pane>\n\ - <Pane>\n\ - <Number>2</Number>\n\ - </Pane>\n\ - <Pane>\n\ - <Number>0</Number>\n\ - <ActiveRow>0</ActiveRow>\n\ - <ActiveCol>%d</ActiveCol>\n\ - </Pane>\n\ - </Panes>\n\ - <ProtectObjects>False</ProtectObjects>\n\ - <ProtectScenarios>False</ProtectScenarios>\n\ - </WorksheetOptions>\n"; - -const char XMLEndWorkbook[]= - "</Workbook>\n"; diff --git a/src/tbb/src/perf/time_fibonacci_cutoff.cpp b/src/tbb/src/perf/time_fibonacci_cutoff.cpp deleted file mode 100644 index 641fd89e5..000000000 --- a/src/tbb/src/perf/time_fibonacci_cutoff.cpp +++ /dev/null @@ -1,130 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <cstdio> -#include <cstdlib> - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" -#include "tbb/tick_count.h" -#include "fibonacci_impl_tbb.cpp" - -long CutOff = 1; - -long SerialFib( const long n ); - -long ParallelFib( const long n ); - -inline void dump_title() { - printf(" Mode, P, repeat, N, =fib value, cutoff, time, speedup\n"); -} - -inline void output(int P, long n, long c, int T, double serial_elapsed, double elapsed, long result) { - printf("%s,%4d,%7d,%3ld,%11ld,%7ld,%9.3g,%8.3g\n", ( (P == 0) ? " Serial" : "Parallel" ), - P, T, n, result, c, elapsed, serial_elapsed / elapsed); -} - -#define MOVE_BY_FOURTHS 1 -inline long calculate_new_cutoff(const long lo, const long hi) { -#if MOVE_BY_FOURTHS - return lo + (3 + hi - lo ) / 4; -#else - return (hi + lo)/2; -#endif -} - -void find_cutoff(const int P, const long n, const int T, const double serial_elapsed) { - long lo = 1, hi = n; - double elapsed = 0, lo_elapsed = 0, hi_elapsed = 0; - long final_cutoff = -1; - - tbb::task_scheduler_init init(P); - - while(true) { - CutOff = calculate_new_cutoff(lo, hi); - long result = 0; - tbb::tick_count t0; - for (int t = -1; t < T; ++t) { - if (t == 0) t0 = tbb::tick_count::now(); - result += ParallelFib(n); - } - elapsed = (tbb::tick_count::now() - t0).seconds(); - output(P,n,CutOff,T,serial_elapsed,elapsed,result); - - if (serial_elapsed / elapsed >= P/2.0) { - final_cutoff = CutOff; - if (hi == CutOff) { - if (hi == lo) { - // we have had this value at both above and below 50% - lo = 1; lo_elapsed = 0; - } else { - break; - } - } - hi = CutOff; - hi_elapsed = elapsed; - } else { - if (lo == CutOff) break; - lo = CutOff; - lo_elapsed = elapsed; - } - } - - double interpolated_cutoff = lo + ( P/2.0 - serial_elapsed/lo_elapsed ) * ( (hi - lo) / ( serial_elapsed/hi_elapsed - serial_elapsed/lo_elapsed )); - - if (final_cutoff != -1) { - printf("50%% efficiency cutoff is %ld ( linearly interpolated cutoff is %g )\n", final_cutoff, interpolated_cutoff); - } else { - printf("Cannot achieve 50%% efficiency\n"); - } - - return; -} - -int main(int argc, char *argv[]) { - if (argc < 4) { - printf("Usage: %s threads n repetitions\nWhere n make sense in range [25; 45]\n",argv[0]); - return 1; - } - - int P = atoi(argv[1]); - volatile long n = atol(argv[2]); - int T = atoi(argv[3]); - - // 
warmup parallel engine - ParallelFib(n); - - dump_title(); - - // collect serial time - long serial_result = 0; - tbb::tick_count t0; - for (int t = -1; t < T; ++t) { - if (t == 0) t0 = tbb::tick_count::now(); - serial_result += SerialFib(n); - } - double serial_elapsed = (tbb::tick_count::now() - t0).seconds(); - output(0,n,0,T,serial_elapsed,serial_elapsed,serial_result); - - // perform search - find_cutoff(P,n,T,serial_elapsed); - - return 0; -} diff --git a/src/tbb/src/perf/time_framework.h b/src/tbb/src/perf/time_framework.h deleted file mode 100644 index f78a50395..000000000 --- a/src/tbb/src/perf/time_framework.h +++ /dev/null @@ -1,351 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TIME_FRAMEWORK_H__ -#define __TIME_FRAMEWORK_H__ - -#include <cstdlib> -#include <math.h> -#include <vector> -#include <string> -#include <sstream> -#include "tbb/tbb_stddef.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" -#define HARNESS_CUSTOM_MAIN 1 -#include "../test/harness.h" -#include "../test/harness_barrier.h" -#define STATISTICS_INLINE -#include "statistics.h" - -#ifndef ARG_TYPE -typedef intptr_t arg_t; -#else -typedef ARG_TYPE arg_t; -#endif - -class Timer { - tbb::tick_count tick; -public: - Timer() { tick = tbb::tick_count::now(); } - double get_time() { return (tbb::tick_count::now() - tick).seconds(); } - double diff_time(const Timer &newer) { return (newer.tick - tick).seconds(); } - double mark_time() { tbb::tick_count t1(tbb::tick_count::now()), t2(tick); tick = t1; return (t1 - t2).seconds(); } - double mark_time(const Timer &newer) { tbb::tick_count t(tick); tick = newer.tick; return (tick - t).seconds(); } -}; - -class TesterBase /*: public tbb::internal::no_copy*/ { -protected: - friend class TestProcessor; - friend class TestRunner; - - //! it is barrier for synchronizing between threads - Harness::SpinBarrier *barrier; - - //! number of tests per this tester - const int tests_count; - - //! number of threads to operate - int threads_count; - - //! some value for tester - arg_t value; - - //! tester name - const char *tester_name; - - // avoid false sharing - char pad[128 - sizeof(arg_t) - sizeof(int)*2 - sizeof(void*)*2 ]; - -public: - //! init tester base. 
@arg ntests is number of embeded tests in this tester. - TesterBase(int ntests) - : barrier(NULL), tests_count(ntests) - {} - virtual ~TesterBase() {} - - //! internal function - void base_init(arg_t v, int t, Harness::SpinBarrier &b) { - threads_count = t; - barrier = &b; - value = v; - init(); - } - - //! optionally override to init after value and threads count were set. - virtual void init() { } - - //! Override to provide your names - virtual std::string get_name(int testn) { - return Format("test %d", testn); - } - - //! optionally override to init test mode just before execution for a given thread number. - virtual void test_prefix(int testn, int threadn) { } - - //! Override to provide main test's entry function returns a value to record - virtual value_t test(int testn, int threadn) = 0; - - //! Type of aggregation from results of threads - enum result_t { - SUM, AVG, MIN, MAX - }; - - //! Override to change result type for the test. Return postfix for test name or 0 if result type is not needed. - virtual const char *get_result_type(int /*testn*/, result_t type) const { - return type == AVG ? "" : 0; // only average result by default - } -}; - -/***** -a user's tester concept: - -class tester: public TesterBase { -public: - //! init tester with known amount of work - tester() : TesterBase(<user-specified tests count>) { ... } - - //! run a test with sequental number @arg test_number for @arg thread. - / *override* / value_t test(int test_number, int thread); -}; - -******/ - -template<typename Tester, int scale = 1> -class TimeTest : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - return timer.get_time() * double(scale); - } -}; - -template<typename Tester> -class NanosecPerValue : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - // return time (ns) per value - return timer.get_time()*1000000.0/double(Tester::value); - } -}; - -template<typename Tester, int scale = 1> -class ValuePerSecond : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - // return value per seconds/scale - return double(Tester::value)/(timer.get_time()*scale); - } -}; - -template<typename Tester, int scale = 1> -class NumberPerSecond : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - // return a scale per seconds - return double(scale)/timer.get_time(); - } -}; - -// operate with single tester -class TestRunner { - friend class TestProcessor; - friend struct RunArgsBody; - TestRunner(const TestRunner &); // don't copy - - const char *tester_name; - StatisticsCollector *stat; - std::vector<std::vector<StatisticsCollector::TestCase> > keys; - -public: - TesterBase &tester; - - template<typename Test> - TestRunner(const char *name, Test *test) - : tester_name(name), tester(*static_cast<TesterBase*>(test)) - { - test->tester_name = name; - } - - ~TestRunner() { delete &tester; } - - void init(arg_t value, int threads, Harness::SpinBarrier &barrier, StatisticsCollector *s) { - tester.base_init(value, threads, barrier); - stat = s; - keys.resize(tester.tests_count); - for(int testn = 0; testn < tester.tests_count; testn++) { - keys[testn].resize(threads); - std::string test_name(tester.get_name(testn)); - for(int threadn = 0; threadn < threads; threadn++) - keys[testn][threadn] = stat->SetTestCase(tester_name, test_name.c_str(), threadn); - 
} - } - - void run_test(int threadn) { - for(int testn = 0; testn < tester.tests_count; testn++) { - tester.test_prefix(testn, threadn); - tester.barrier->wait(); // <<<<<<<<<<<<<<<<< Barrier before running test mode - value_t result = tester.test(testn, threadn); - stat->AddRoundResult(keys[testn][threadn], result); - } - } - - void post_process(StatisticsCollector &report) { - const int threads = tester.threads_count; - for(int testn = 0; testn < tester.tests_count; testn++) { - size_t coln = keys[testn][0].getResults().size()-1; - value_t rsum = keys[testn][0].getResults()[coln]; - value_t rmin = rsum, rmax = rsum; - for(int threadn = 1; threadn < threads; threadn++) { - value_t result = keys[testn][threadn].getResults()[coln]; - rsum += result; // for both SUM or AVG - if(rmin > result) rmin = result; - if(rmax < result) rmax = result; - } - std::string test_name(tester.get_name(testn)); - const char *rname = tester.get_result_type(testn, TesterBase::SUM); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rsum); - } - rname = tester.get_result_type(testn, TesterBase::MIN); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rmin); - } - rname = tester.get_result_type(testn, TesterBase::AVG); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rsum / threads); - } - rname = tester.get_result_type(testn, TesterBase::MAX); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rmax); - } - } - } -}; - -struct RunArgsBody { - const vector<TestRunner*> &run_list; - RunArgsBody(const vector<TestRunner*> &a) : run_list(a) { } -#ifndef __TBB_parallel_for_H - void operator()(int thread) const { -#else - void operator()(const tbb::blocked_range<int> &r) const { - ASSERT( r.begin() + 1 == r.end(), 0); - int thread = r.begin(); -#endif - for(size_t i = 0; i < run_list.size(); i++) - run_list[i]->run_test(thread); - } -}; - -//! Main test processor. -/** Override or use like this: - class MyTestCollection : public TestProcessor { - void factory(arg_t value, int threads) { - process( value, threads, - run("my1", new tester<my1>() ), - run("my2", new tester<my2>() ), - end ); - if(value == threads) - stat->Print(); - } -}; -*/ - -class TestProcessor { - friend class TesterBase; - - // <threads, collector> - typedef std::map<int, StatisticsCollector *> statistics_collection; - statistics_collection stat_by_threads; - -protected: - // Members - const char *collection_name; - // current stat - StatisticsCollector *stat; - // token - size_t end; - -public: - StatisticsCollector report; - - // token of tests list - template<typename Test> - TestRunner *run(const char *name, Test *test) { - return new TestRunner(name, test); - } - - // iteration processing - void process(arg_t value, int threads, ...) 
{ - // prepare items - stat = stat_by_threads[threads]; - if(!stat) { - stat_by_threads[threads] = stat = new StatisticsCollector((collection_name + Format("@%d", threads)).c_str(), StatisticsCollector::ByAlg); - stat->SetTitle("Detailed log of %s running with %d threads.", collection_name, threads); - } - Harness::SpinBarrier barrier(threads); - // init args - va_list args; va_start(args, threads); - vector<TestRunner*> run_list; run_list.reserve(16); - while(true) { - TestRunner *item = va_arg(args, TestRunner*); - if( !item ) break; - item->init(value, threads, barrier, stat); - run_list.push_back(item); - } - va_end(args); - std::ostringstream buf; - buf << value; - const size_t round_number = stat->GetRoundsCount(); - stat->SetRoundTitle(round_number, buf.str().c_str()); - report.SetRoundTitle(round_number, buf.str().c_str()); - // run them -#ifndef __TBB_parallel_for_H - NativeParallelFor(threads, RunArgsBody(run_list)); -#else - tbb::parallel_for(tbb::blocked_range<int>(0,threads,1), RunArgsBody(run_list)); -#endif - // destroy args - for(size_t i = 0; i < run_list.size(); i++) { - run_list[i]->post_process(report); - delete run_list[i]; - } - } - -public: - TestProcessor(const char *name, StatisticsCollector::Sorting sort_by = StatisticsCollector::ByAlg) - : collection_name(name), stat(NULL), end(0), report(collection_name, sort_by) - { } - - ~TestProcessor() { - for(statistics_collection::iterator i = stat_by_threads.begin(); i != stat_by_threads.end(); i++) - delete i->second; - } -}; - -#endif// __TIME_FRAMEWORK_H__ diff --git a/src/tbb/src/perf/time_hash_map.cpp b/src/tbb/src/perf/time_hash_map.cpp deleted file mode 100644 index 341349b88..000000000 --- a/src/tbb/src/perf/time_hash_map.cpp +++ /dev/null @@ -1,261 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// configuration: -#define TBB_USE_THREADING_TOOLS 0 - -//! enable/disable std::map tests -#define STDTABLE 0 - -//! enable/disable old implementation tests (correct include file also) -#define OLDTABLE 0 -#define OLDTABLEHEADER "tbb/concurrent_hash_map-5468.h"//-4329 - -//! enable/disable experimental implementation tests (correct include file also) -#define TESTTABLE 0 -#define TESTTABLEHEADER "tbb/concurrent_unordered_map.h" - -//! 
avoid erase() -#define TEST_ERASE 1 - -////////////////////////////////////////////////////////////////////////////////// - -#include <cstdlib> -#include <math.h> -#include "tbb/tbb_stddef.h" -#include <vector> -#include <map> -// needed by hash_maps -#include <stdexcept> -#include <iterator> -#include <algorithm> // std::swap -#include <utility> // Need std::pair from here -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/aligned_space.h" -#include "tbb/atomic.h" -#define __TBB_concurrent_unordered_set_H -#include "tbb/internal/_concurrent_unordered_impl.h" -#undef __TBB_concurrent_unordered_set_H -// for test -#include "tbb/spin_mutex.h" -#include "time_framework.h" - - -using namespace tbb; -using namespace tbb::internal; - -struct IntHashCompare { - size_t operator() ( int x ) const { return x; } - bool operator() ( int x, int y ) const { return x==y; } - static long hash( int x ) { return x; } - bool equal( int x, int y ) const { return x==y; } -}; - -namespace version_current { - namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } - namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - #include "tbb/concurrent_hash_map.h" -} -typedef version_current::tbb::concurrent_hash_map<int,int> IntTable; - -#if OLDTABLE -#undef __TBB_concurrent_hash_map_H -namespace version_base { - namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } - namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - #include OLDTABLEHEADER -} -typedef version_base::tbb::concurrent_hash_map<int,int> OldTable; -#endif - -#if TESTTABLE -#undef __TBB_concurrent_hash_map_H -namespace version_new { - namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } - namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - #include TESTTABLEHEADER -} -typedef version_new::tbb::concurrent_unordered_map<int,int> TestTable; -#define TESTTABLE 1 -#endif - -/////////////////////////////////////// - -static const char *map_testnames[] = { - "1.insert", "2.count1st", "3.count2nd", "4.insert-exists", "5.erase " -}; - -template<typename TableType> -struct TestTBBMap : TesterBase { - TableType Table; - int n_items; - - TestTBBMap() : TesterBase(4+TEST_ERASE), Table(MaxThread*4) {} - void init() { n_items = value/threads_count; } - - std::string get_name(int testn) { - return std::string(map_testnames[testn]); - } - - double test(int test, int t) - { - switch(test) { - case 0: // fill - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - Table.insert( std::make_pair(i,i) ); - } - break; - case 1: // work1 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - size_t c = Table.count( i ); - ASSERT( c == 1, NULL); - } - break; - case 2: // work2 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - Table.count( i ); - } - break; - case 3: // work3 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - Table.insert( std::make_pair(i,i) ); - } - break; -#if TEST_ERASE - case 4: // clean - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - ASSERT( Table.erase( i ), NULL); - } -#endif - } - return 0; - } -}; - -template<typename M> -struct TestSTLMap : 
TesterBase { - std::map<int, int> Table; - M mutex; - - int n_items; - TestSTLMap() : TesterBase(4+TEST_ERASE) {} - void init() { n_items = value/threads_count; } - - std::string get_name(int testn) { - return std::string(map_testnames[testn]); - } - - double test(int test, int t) - { - switch(test) { - case 0: // fill - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table[i] = 0; - } - break; - case 1: // work1 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - size_t c = Table.count(i); - ASSERT( c == 1, NULL); - } - break; - case 2: // work2 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table.count(i); - } - break; - case 3: // work3 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table.insert(std::make_pair(i,i)); - } - break; - case 4: // clean - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table.erase(i); - } - } - return 0; - } -}; - -class fake_mutex { -public: - class scoped_lock { - fake_mutex *p; - - public: - scoped_lock() {} - scoped_lock( fake_mutex &m ) { p = &m; } - ~scoped_lock() { } - void acquire( fake_mutex &m ) { p = &m; } - void release() { } - }; -}; - -class test_hash_map : public TestProcessor { -public: - test_hash_map() : TestProcessor("time_hash_map") {} - void factory(int value, int threads) { - if(Verbose) printf("Processing with %d threads: %d...\n", threads, value); - process( value, threads, -#if STDTABLE - run("std::map ", new NanosecPerValue<TestSTLMap<spin_mutex> >() ), -#endif -#if OLDTABLE - run("old::hmap", new NanosecPerValue<TestTBBMap<OldTable> >() ), -#endif - run("tbb::hmap", new NanosecPerValue<TestTBBMap<IntTable> >() ), -#if TESTTABLE - run("new::hmap", new NanosecPerValue<TestTBBMap<TestTable> >() ), -#endif - end ); - //stat->Print(StatisticsCollector::Stdout); - //if(value >= 2097152) stat->Print(StatisticsCollector::HTMLFile); - } -}; - -///////////////////////////////////////////////////////////////////////////////////////// - -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - //if(argc>2) ExtraVerbose = true; - MinThread = 1; MaxThread = task_scheduler_init::default_num_threads(); - ParseCommandLine( argc, argv ); - - ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded. Please build it by:\n\t\tmake tbbmalloc"); - - { - test_hash_map the_test; - for( int t=MinThread; t <= MaxThread; t++) - for( int o=/*2048*/(1<<8)*8; o<2200000; o*=2 ) - the_test.factory(o, t); - the_test.report.SetTitle("Nanoseconds per operation of (Mode) for N items in container (Name)"); - the_test.report.SetStatisticFormula("1AVG per size", "=AVERAGE(ROUNDS)"); - the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); - } - return 0; -} diff --git a/src/tbb/src/perf/time_hash_map_fill.cpp b/src/tbb/src/perf/time_hash_map_fill.cpp deleted file mode 100644 index b63502940..000000000 --- a/src/tbb/src/perf/time_hash_map_fill.cpp +++ /dev/null @@ -1,165 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// configuration: - -// Size of final table (must be multiple of STEP_*) -int MAX_TABLE_SIZE = 2000000; - -// Specify list of unique percents (5-30,100) to test against. Max 10 values -#define UNIQUE_PERCENTS PERCENT(5); PERCENT(10); PERCENT(20); PERCENT(30); PERCENT(100) - -// enable/disable tests for: -#define BOX1 "CHMap" -#define BOX1TEST ValuePerSecond<Uniques<tbb::concurrent_hash_map<int,int> >, 1000000/*ns*/> -#define BOX1HEADER "tbb/concurrent_hash_map.h" - -// enable/disable tests for: -#define BOX2 "CUMap" -#define BOX2TEST ValuePerSecond<Uniques<tbb::concurrent_unordered_map<int,int> >, 1000000/*ns*/> -#define BOX2HEADER "tbb/concurrent_unordered_map.h" - -// enable/disable tests for: -//#define BOX3 "OLD" -#define BOX3TEST ValuePerSecond<Uniques<tbb::concurrent_hash_map<int,int> >, 1000000/*ns*/> -#define BOX3HEADER "tbb/concurrent_hash_map-5468.h" - -#define TBB_USE_THREADING_TOOLS 0 -////////////////////////////////////////////////////////////////////////////////// - -#include <cstdlib> -#include <math.h> -#include "tbb/tbb_stddef.h" -#include <vector> -#include <map> -// needed by hash_maps -#include <stdexcept> -#include <iterator> -#include <algorithm> // std::swap -#include <utility> // Need std::pair -#include <cstring> // Need std::memset -#include <typeinfo> -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/aligned_space.h" -#include "tbb/atomic.h" -#define __TBB_concurrent_unordered_set_H -#include "tbb/internal/_concurrent_unordered_impl.h" -#undef __TBB_concurrent_unordered_set_H -// for test -#include "tbb/spin_mutex.h" -#include "time_framework.h" - - -using namespace tbb; -using namespace tbb::internal; - -///////////////////////////////////////////////////////////////////////////////////////// -// Input data built for test -int *Data; - -// Main test class used to run the timing tests. 
All overridden methods are called by the framework -template<typename TableType> -struct Uniques : TesterBase { - TableType Table; - int n_items; - - // Initializes base class with number of test modes - Uniques() : TesterBase(2), Table(MaxThread*16) { - //Table->max_load_factor(1); // add stub into hash_map to uncomment it - } - ~Uniques() {} - - // Returns name of test mode specified by number - /*override*/ std::string get_name(int testn) { - if(testn == 1) return "find"; - return "insert"; - } - - // Informs the class that value and threads number become known - /*override*/ void init() { - n_items = value/threads_count; // operations - } - - // Informs the class that the test mode for specified thread is about to start - /*override*/ void test_prefix(int testn, int t) { - barrier->wait(); - if(Verbose && !t && testn) printf("%s: inserted %u, %g%% of operations\n", tester_name, unsigned(Table.size()), 100.0*Table.size()/(value*testn)); - } - - // Executes test mode for a given thread. Return value is ignored when used with timing wrappers. - /*override*/ double test(int testn, int t) - { - if( testn != 1 ) { // do insertions - for(int i = testn*value+t*n_items, e = testn*value+(t+1)*n_items; i < e; i++) { - Table.insert( std::make_pair(Data[i],t) ); - } - } else { // do last finds - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - size_t c = - Table.count( Data[i] ); - ASSERT( c == 1, NULL ); // must exist - } - } - return 0; - } -}; - -///////////////////////////////////////////////////////////////////////////////////////// -#undef max -#include <limits> - -// Using BOX declarations from configuration -#include "time_sandbox.h" - -int rounds = 0; -// Prepares the input data for given unique percent -void execute_percent(test_sandbox &the_test, int p) { - int input_size = MAX_TABLE_SIZE*100/p; - Data = new int[input_size]; - int uniques = p==100?std::numeric_limits<int>::max() : MAX_TABLE_SIZE; - ASSERT(p==100 || p <= 30, "Function is broken for %% > 30 except for 100%%"); - for(int i = 0; i < input_size; i++) - Data[i] = rand()%uniques; - for(int t = MinThread; t <= MaxThread; t++) - the_test.factory(input_size, t); // executes the tests specified in BOX-es for given 'value' and threads - the_test.report.SetRoundTitle(rounds++, "%d%%", p); -} -#define PERCENT(x) execute_percent(the_test, x) - -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - //if(argc>2) ExtraVerbose = true; - MinThread = 1; MaxThread = task_scheduler_init::default_num_threads(); - ParseCommandLine( argc, argv ); - if(getenv("TABLE_SIZE")) - MAX_TABLE_SIZE = atoi(getenv("TABLE_SIZE")); - - ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded. 
Please build it by:\n\t\tmake tbbmalloc"); - // Declares test processor - test_sandbox the_test("time_hash_map_fill"/*, StatisticsCollector::ByThreads*/); - srand(10101); - UNIQUE_PERCENTS; // test the percents - the_test.report.SetTitle("Operations per nanosecond"); - the_test.report.SetRunInfo("Items", MAX_TABLE_SIZE); - the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); // Write files - return 0; -} diff --git a/src/tbb/src/perf/time_hash_map_fill.html b/src/tbb/src/perf/time_hash_map_fill.html deleted file mode 100644 index 954a20be5..000000000 --- a/src/tbb/src/perf/time_hash_map_fill.html +++ /dev/null @@ -1,120 +0,0 @@ -<HTML><BODY> -<H2>time_hash_map_fill</H2> -<P><a href=time_hash_map_fill.cpp>time_hash_map_fill.cpp</a> is a micro-benchmark specifically designed to highlight aspects of the concurrent resizing algorithm of the hash tables. -It was derived from the Count Strings example that counts the number of unique words. But to exclude synchronization on the counters from the picture, -it was simplified to build just a set of unique numbers from an input array. The array is filled evenly by using a pseudo-random number generator from the standard C library for various proportions of unique numbers. -For example, for 5% of unique numbers, the same number is repeated 20 times on average. Together, this gives 5% actual insertions while 95% are just lookups. However, more new keys occur at the beginning than at the end. -In addition, the size of the source array is scaled with the input rate in order to produce the same number of unique keys at the end, and so to exclude cache effects from the equation. -<H2>Diagram</H2><img src="time_hash_map_fill.gif"> -<H3>Prepare results</H3> -<P>This benchmark outputs results in Excel* and html file formats by default. To generate a text (CSV) file instead, set the STAT_FORMAT=pivot-csv environment variable. To change the default table size, set TABLE_SIZE. -<code><b><pre>src$ make time_hash_map_fill args=-v STAT_FORMAT=pivot-csv TABLE_SIZE=250000</pre></b></code>Or to get statistics from different runs: -<code><b><pre>src$ make time_hash_map_fill TABLE_SIZE=50000 run_cmd="bash ../../src/perf/<a href=run_statistics.sh>run_statistics.sh</a>"</pre></b></code> -<H3>Build diagram</H3>You can use <a href="http://ploticus.sourceforge.net/">Ploticus</a> to build a diagram from the prepared data using this html file as a script. But first, the input data file should be sorted to join lines from different runs together, e.g.: -<code><b><pre>src$ sort -t , -k 1dr,2 -k 3n,4 -k 7n,7 ../build/<i>{scrambled_path}</i>/time_hash_map_fill.csv -o perf/time_hash_map_fill.csv</pre></b></code>Here, field 7 is the "Column" field that contains the input rates, because run_statistics.sh adds the hostname and the run number as fields 5 and 6. 
Now, to build gif diagram, run: -<code><b><pre>perf$ pl -maxrows 200000 -maxfields 1500000 -maxvector 1200000 -gif -scale 1.8 time_hash_map_fill.html</pre></b></code> -<H3>Script body</H3> -<hr><pre> - -#setifnotgiven NAMES = $makelist("1.CHMap 2.CUMap 3.OLD") -#setifnotgiven LABLESIZE = 0.06 - -#proc settings - encodenames: yes - units: cm - -#proc getdata - file: time_hash_map_fill.csv - fieldnameheader: yes - delim: comma - showdata: no - select: @@Mode = insert - pf_fieldnames: Name Mode Threads Value - filter: - ##print @@Name,"@@Items on @@Column",@@3,@@Value - -#endproc - -#proc page - pagesize: 70 50 - tightcrop: yes -#endproc - -#proc processdata - action: summary - fields: Name Mode Threads - valfield: Value - fieldnames: Name Mode Threads Average sd sem n_obs Min Max - showdata: no - -#proc categories - axis: x - datafield: Mode - -#proc areadef - title: Throughput on Insert operation - titledetails: size=14 align=C - areaname: slide - xscaletype: categories - xautorange: datafield=Mode - xaxis.stubs: usecategories - xaxis.label: Threads across table sizes and % of input rates -// yrange: 0 70 - yautorange: datafield=Max,Min - yaxis.stubs: inc - yaxis.label: ops/ns -// yaxis.stubformat: %3.1f - autowidth: 1.1 - autoheight: 0.07 - frame: yes - -#for LABEL in @NAMES -#set NLABEL = $arithl(@NLABEL+1) -#set COLOR = $icolor( @NLABEL ) -#proc legendentry - label: @LABEL - sampletype: color - details: @COLOR - -#procdef catlines - select: @Name = @LABEL - catfield: Mode - subcatfield: Threads - subcats: auto - plotwidth: 0.8 - #saveas C - -#proc catlines - #clone C - dpsymbol: shape=square radius=@LABLESIZE style=solid color=@COLOR - valfield: Average - errfield: sd - -#proc catlines - #clone C - valfield: Max - dpsymbol: shape=triangle radius=@LABLESIZE style=solid color=@COLOR - -#proc catlines - #clone C - valfield: Min - dpsymbol: shape=downtriangle radius=@LABLESIZE style=solid color=@COLOR - -#endloop - -#proc legend - location: 3.2 max - seglen: 0.2 -#endproc -</pre> -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2014 Intel Corporation. All Rights Reserved. -<P></P> -Intel is a registered trademark or trademark of Intel Corporation -or its subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> diff --git a/src/tbb/src/perf/time_locked_work.cpp b/src/tbb/src/perf/time_locked_work.cpp deleted file mode 100644 index 4fa9f0bfb..000000000 --- a/src/tbb/src/perf/time_locked_work.cpp +++ /dev/null @@ -1,166 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -////// Test configuration //////////////////////////////////////////////////// -#define SECONDS_RATIO 1000000 // microseconds - -#ifndef REPEAT_K -#define REPEAT_K 50 // repeat coefficient -#endif - -int outer_work[] = {/*256,*/ 64, 16, 4, 0}; -int inner_work[] = {32, 8, 0 }; - -// keep it to calibrate the time of work without synchronization -#define BOX1 "baseline" -#define BOX1TEST TimeTest< TBB_Mutex<tbb::null_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -#define BOX2 "spin_mutex" -#define BOX2TEST TimeTest< TBB_Mutex<tbb::spin_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -#define BOX3 "spin_rw_mutex" -#define BOX3TEST TimeTest< TBB_Mutex<tbb::spin_rw_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -#define BOX4 "queuing_mutex" -#define BOX4TEST TimeTest< TBB_Mutex<tbb::queuing_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -//#define BOX5 "queuing_rw_mutex" -#define BOX5TEST TimeTest< TBB_Mutex<tbb::queuing_rw_mutex>, SECONDS_RATIO > - -////////////////////////////////////////////////////////////////////////////// - -#include <cstdlib> -#include <math.h> -#include <algorithm> // std::swap -#include <utility> // Need std::pair from here -#include <sstream> -#include "tbb/tbb_stddef.h" -#include "tbb/null_mutex.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/spin_mutex.h" -#include "tbb/queuing_mutex.h" -#include "tbb/queuing_rw_mutex.h" -#include "tbb/mutex.h" - -#if INTEL_TRIAL==2 -#include "tbb/parallel_for.h" // enable threading by TBB scheduler -#include "tbb/task_scheduler_init.h" -#include "tbb/blocked_range.h" -#endif -// for test -#include "time_framework.h" - -using namespace tbb; -using namespace tbb::internal; - -///////////////////////////////////////////////////////////////////////////////////////// - -//! base class for the test family -struct TestLocks : TesterBase { - // Inherits "value", "threads_count", and other variables - TestLocks() : TesterBase(/*number of modes*/sizeof(outer_work)/sizeof(int)) {} - //! returns name of test part/mode - /*override*/std::string get_name(int testn) { - std::ostringstream buf; - buf.width(4); buf.fill('0'); - buf << outer_work[testn]; // mode number - return buf.str(); - } - //! enables result types and returns their suffixes - /*override*/const char *get_result_type(int, result_t type) const { - switch(type) { - case MIN: return " min"; - case MAX: return " max"; - default: return 0; - } - } - //! repeat count - int repeat_until(int /*test_n*/) const { - return REPEAT_K*100;//TODO: suggest better? - } - //! fake work - void do_work(int work) volatile { - for(int i = 0; i < work; i++) { - volatile int x = i; - __TBB_Pause(0); // just to call inline assembler - x *= work/threads_count; - } - } -}; - -//! 
template test unit for any of TBB mutexes -template<typename M> -struct TBB_Mutex : TestLocks { - M mutex; - - double test(int testn, int /*threadn*/) - { - for(int r = 0; r < repeat_until(testn); ++r) { - do_work(outer_work[testn]); - { - typename M::scoped_lock with(mutex); - do_work(/*inner work*/value); - } - } - return 0; - } -}; - -///////////////////////////////////////////////////////////////////////////////////////// - -//Using BOX declarations -#include "time_sandbox.h" - -// run tests for each of inner work value -void RunLoops(test_sandbox &the_test, int thread) { - for( unsigned i=0; i<sizeof(inner_work)/sizeof(int); ++i ) - the_test.factory(inner_work[i], thread); -} - -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - int DefThread = task_scheduler_init::default_num_threads(); - MinThread = 1; MaxThread = DefThread+1; - ParseCommandLine( argc, argv ); - ASSERT(MinThread <= MaxThread, 0); -#if INTEL_TRIAL && defined(__TBB_parallel_for_H) - task_scheduler_init me(MaxThread); -#endif - { - test_sandbox the_test("time_locked_work", StatisticsCollector::ByThreads); - //TODO: refactor this out as RunThreads(test&) - for( int t = MinThread; t < DefThread && t <= MaxThread; t *= 2) - RunLoops( the_test, t ); // execute undersubscribed threads - if( DefThread > MinThread && DefThread <= MaxThread ) - RunLoops( the_test, DefThread ); // execute on all hw threads - if( DefThread < MaxThread) - RunLoops( the_test, MaxThread ); // execute requested oversubscribed threads - - the_test.report.SetTitle("Time of lock/unlock for mutex Name with Outer and Inner work"); - //the_test.report.SetStatisticFormula("1AVG per size", "=AVERAGE(ROUNDS)"); - the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML, /*ModeName*/ "Outer work"); - } - return 0; -} - diff --git a/src/tbb/src/perf/time_lru_cache_throughput.cpp b/src/tbb/src/perf/time_lru_cache_throughput.cpp deleted file mode 100644 index b12fbb125..000000000 --- a/src/tbb/src/perf/time_lru_cache_throughput.cpp +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "../examples/common/utility/utility.h" -#include "tbb/tick_count.h" -//#include <tbb/parallel_for.h> -#include "tbb/task_scheduler_init.h" //for number of threads -#include <functional> - -#include "coarse_grained_raii_lru_cache.h" -#define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1 -#include "tbb/concurrent_lru_cache.h" - -#define HARNESS_CUSTOM_MAIN 1 -#define HARNESS_NO_PARSE_COMMAND_LINE 1 - -#include "../src/test/harness.h" -#include "../src/test/harness_barrier.h" - -#include <vector> -#include <algorithm> -#include "tbb/mutex.h" - -//TODO: probably move this to a separate header utility file -namespace micro_benchmarking{ -namespace utils{ - template <typename type> - void disable_elimination(type const& v){ - volatile type dummy = v; - (void) dummy; - } - //Busy work and calibration helpers - unsigned int one_us_iters = 345; // default value - - //TODO: add a CLI parameter for calibration run - // if the user wants to calibrate to microseconds on a particular machine, call - // this at the beginning of the program; sets one_us_iters to the number of iterations to - // busy_wait for approx. 1 us - void calibrate_busy_wait() { - tbb::tick_count t0 = tbb::tick_count::now(); - for (volatile unsigned int i=0; i<1000000; ++i) continue; - tbb::tick_count t1 = tbb::tick_count::now(); - - one_us_iters = (unsigned int)((1000000.0/(t1-t0).seconds())*0.000001); - } - - void busy_wait(int us) - { - unsigned int iter = us*one_us_iters; - for (volatile unsigned int i=0; i<iter; ++i) continue; - } -} -} - -struct parameter_pack{ - size_t time_window_sec; - size_t time_check_granularity_ops; - size_t cache_lru_history_size; - size_t time_of_item_use_usec; - size_t cache_miss_percent; - int threads_number; - size_t weight_of_initiation_call_usec; - bool use_serial_initiation_function; - parameter_pack( - size_t a_time_window_sec - ,size_t a_time_check_granularity_ops - ,size_t a_cache_lru_history_size - ,size_t a_time_of_item_use_usec, size_t a_cache_miss_percent - , int a_threads_number ,size_t a_weight_of_initiation_call_usec - , bool a_use_serial_initiation_function - ) : - time_window_sec(a_time_window_sec) - ,time_check_granularity_ops(a_time_check_granularity_ops) - ,cache_lru_history_size(a_cache_lru_history_size) - ,time_of_item_use_usec(a_time_of_item_use_usec) - ,cache_miss_percent(a_cache_miss_percent) - ,threads_number(a_threads_number) - ,weight_of_initiation_call_usec(a_weight_of_initiation_call_usec) - ,use_serial_initiation_function(a_use_serial_initiation_function) - {} -}; - -struct return_size_t { - size_t m_weight_of_initiation_call_usec; - bool use_serial_initiation_function; - return_size_t(size_t a_weight_of_initiation_call_usec, bool a_use_serial_initiation_function) - :m_weight_of_initiation_call_usec(a_weight_of_initiation_call_usec), use_serial_initiation_function(a_use_serial_initiation_function) - {} - size_t operator()(size_t key){ - static tbb::mutex mtx; - if (use_serial_initiation_function){ - mtx.lock(); - } - micro_benchmarking::utils::busy_wait(m_weight_of_initiation_call_usec); - if (use_serial_initiation_function){ - mtx.unlock(); - } - - return key; - } -}; - -template< typename a_cache_type> -struct throughput { - typedef throughput self_type; - typedef a_cache_type cache_type; - - parameter_pack m_parameter_pack; - - - const size_t per_thread_sample_size ; - typedef std::vector<size_t> access_sequence_type; - access_sequence_type m_access_sequence; - cache_type m_cache; - Harness::SpinBarrier m_barrier; - tbb::atomic<size_t> loops_count; - - throughput(parameter_pack 
a_parameter_pack) - :m_parameter_pack(a_parameter_pack) - ,per_thread_sample_size(m_parameter_pack.cache_lru_history_size *(1 + m_parameter_pack.cache_miss_percent/100)) - ,m_access_sequence(m_parameter_pack.threads_number * per_thread_sample_size ) - ,m_cache(return_size_t(m_parameter_pack.weight_of_initiation_call_usec,m_parameter_pack.use_serial_initiation_function),m_parameter_pack.cache_lru_history_size) - - { - loops_count=0; - //TODO: check if changing from generating longer sequence to generating indexes in a specified range (i.e. making per_thread_sample_size fixed) give any change - std::generate(m_access_sequence.begin(),m_access_sequence.end(),std::rand); - } - - size_t operator()(){ - struct _{ static void retrieve_from_cache(self_type* _this, size_t thread_index){ - parameter_pack& p = _this->m_parameter_pack; - access_sequence_type::iterator const begin_it =_this->m_access_sequence.begin()+ thread_index * _this->per_thread_sample_size; - access_sequence_type::iterator const end_it = begin_it + _this->per_thread_sample_size; - - _this->m_barrier.wait(); - tbb::tick_count start = tbb::tick_count::now(); - - size_t local_loops_count =0; - do { - size_t part_of_the_sample_so_far = (local_loops_count * p.time_check_granularity_ops) % _this->per_thread_sample_size; - access_sequence_type::iterator const iteration_begin_it = begin_it + part_of_the_sample_so_far; - access_sequence_type::iterator const iteration_end_it = iteration_begin_it + - (std::min)(p.time_check_granularity_ops, _this->per_thread_sample_size - part_of_the_sample_so_far); - - for (access_sequence_type::iterator it = iteration_begin_it; it < iteration_end_it; ++it){ - typename cache_type::handle h = _this->m_cache(*it); - micro_benchmarking::utils::busy_wait(p.time_of_item_use_usec); - micro_benchmarking::utils::disable_elimination(h.value()); - } - ++local_loops_count; - }while((tbb::tick_count::now()-start).seconds() < p.time_window_sec); - _this->loops_count+=local_loops_count; - }}; - m_barrier.initialize(m_parameter_pack.threads_number); - - NativeParallelFor(m_parameter_pack.threads_number,std::bind1st(std::ptr_fun(&_::retrieve_from_cache),this)); - - return loops_count * m_parameter_pack.time_check_granularity_ops; - } -}; - -int main(int argc,const char** args ){ - - size_t time_window_sec = 10; - size_t cache_lru_history_size = 1000; - size_t time_check_granularity_ops = 200; - size_t time_of_item_use_usec = 100; - size_t cache_miss_percent = 5; - int threads_number =tbb::task_scheduler_init::default_num_threads(); - size_t weight_of_initiation_call_usec =1000; - bool use_serial_initiation_function = false; - bool use_coarse_grained_locked_cache = false; - - parameter_pack p(time_window_sec, time_check_granularity_ops, cache_lru_history_size,time_of_item_use_usec,cache_miss_percent,threads_number,weight_of_initiation_call_usec,use_serial_initiation_function); - - utility::parse_cli_arguments(argc,args,utility::cli_argument_pack() - .arg(p.cache_lru_history_size,"cache-lru-history-size","") - .arg(p.time_window_sec,"time-window","time frame for measuring, in seconds") - .arg(p.threads_number,"n-of-threads","number of threads to run on") - .arg(p.time_of_item_use_usec,"time-of-item-use","time between consequent requests to the cache, in microseconds") - .arg(p.cache_miss_percent,"cache-miss-percent","cache miss percent ") - .arg(p.weight_of_initiation_call_usec,"initiation-call-weight","time occupied by a single call to initiation function, in microseconds") - 
.arg(p.use_serial_initiation_function,"use-serial-initiation-function","limit lock-based serial initiation function") - .arg(use_coarse_grained_locked_cache,"use-locked-version","use stl coarse grained lock based version") - ); - - typedef tbb::concurrent_lru_cache<size_t,size_t,return_size_t> tbb_cache; - typedef coarse_grained_raii_lru_cache<size_t,size_t,return_size_t> coarse_grained_locked_cache; - - size_t operations =0; - if (!use_coarse_grained_locked_cache){ - operations = throughput<tbb_cache>(p)(); - }else{ - operations = throughput<coarse_grained_locked_cache>(p)(); - } - std::cout<<"operations: "<<operations<<std::endl; - return 0; -} diff --git a/src/tbb/src/perf/time_sandbox.h b/src/tbb/src/perf/time_sandbox.h deleted file mode 100644 index f79f2aa50..000000000 --- a/src/tbb/src/perf/time_sandbox.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TIME_FRAMEWORK_H__ -#error time_framework.h must be included -#endif - -#define INJECT_TBB namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } -#define INJECT_TBB5 namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - -#ifndef INJECT_BOX_NAMES -#if defined(__TBB_task_H) || defined(__TBB_concurrent_unordered_internal_H) || defined(__TBB_reader_writer_lock_H) || defined(__TBB__concurrent_unordered_impl_H) -#define INJECT_BOX_NAMES INJECT_TBB INJECT_TBB5 -#else -#define INJECT_BOX_NAMES INJECT_TBB -#endif -#endif - -#ifdef BOX1 -namespace sandbox1 { - INJECT_BOX_NAMES -# ifdef BOX1HEADER -# include BOX1HEADER -# endif - typedef ::BOX1TEST testbox; -} -#endif -#ifdef BOX2 -namespace sandbox2 { - INJECT_BOX_NAMES -# ifdef BOX2HEADER -# include BOX2HEADER -# endif - typedef ::BOX2TEST testbox; -} -#endif -#ifdef BOX3 -namespace sandbox3 { - INJECT_BOX_NAMES -# ifdef BOX3HEADER -# include BOX3HEADER -# endif - typedef ::BOX3TEST testbox; -} -#endif -#ifdef BOX4 -namespace sandbox4 { - INJECT_BOX_NAMES -# ifdef BOX4HEADER -# include BOX4HEADER -# endif - typedef ::BOX4TEST testbox; -} -#endif -#ifdef BOX5 -namespace sandbox5 { - INJECT_BOX_NAMES -# ifdef BOX5HEADER -# include BOX5HEADER -# endif - typedef ::BOX5TEST testbox; -} -#endif -#ifdef BOX6 -namespace sandbox6 { - INJECT_BOX_NAMES -# ifdef BOX6HEADER -# include BOX6HEADER -# endif - typedef ::BOX6TEST testbox; -} -#endif -#ifdef BOX7 -namespace sandbox7 { - INJECT_BOX_NAMES -# ifdef BOX7HEADER -# include BOX7HEADER -# endif - typedef ::BOX7TEST testbox; -} -#endif -#ifdef BOX8 -namespace sandbox8 { - INJECT_BOX_NAMES -# ifdef BOX8HEADER -# include BOX8HEADER -# endif - typedef ::BOX8TEST testbox; -} -#endif -#ifdef BOX9 -namespace sandbox9 { - INJECT_BOX_NAMES -# ifdef BOX9HEADER -# include BOX9HEADER -# endif - typedef ::BOX9TEST testbox; -} -#endif - -//if harness.h included -#if defined(ASSERT) && !HARNESS_NO_PARSE_COMMAND_LINE -#ifndef TEST_PREFIX -#define TEST_PREFIX if(Verbose) printf("Processing with %d threads: %ld...\n", threads, long(value)); -#endif -#endif//harness included - -#ifndef TEST_PROCESSOR_NAME -#define TEST_PROCESSOR_NAME test_sandbox -#endif - -class TEST_PROCESSOR_NAME : public TestProcessor { -public: - TEST_PROCESSOR_NAME(const char *name, StatisticsCollector::Sorting sort_by = StatisticsCollector::ByAlg) - : TestProcessor(name, sort_by) {} - void factory(arg_t value, int threads) { -#ifdef TEST_PREFIX - TEST_PREFIX -#endif - process( value, threads, -#define RUNBOX(n) run(#n"."BOX##n, new sandbox##n::testbox() ) -#ifdef BOX1 - RUNBOX(1), -#endif -#ifdef BOX2 - RUNBOX(2), -#endif -#ifdef BOX3 - RUNBOX(3), -#endif -#ifdef BOX4 - RUNBOX(4), -#endif -#ifdef BOX5 - RUNBOX(5), -#endif -#ifdef BOX6 - RUNBOX(6), -#endif -#ifdef BOX7 - RUNBOX(7), -#endif -#ifdef BOX8 - RUNBOX(8), -#endif -#ifdef BOX9 - RUNBOX(9), -#endif - end ); -#ifdef TEST_POSTFIX - TEST_POSTFIX -#endif - } -}; diff --git a/src/tbb/src/perf/time_vector.cpp b/src/tbb/src/perf/time_vector.cpp deleted file mode 100644 index 0c5b5e7bf..000000000 --- a/src/tbb/src/perf/time_vector.cpp +++ /dev/null @@ -1,249 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -//#define DO_SCALABLEALLOC - -#include <cstdlib> -#include <cmath> -#include <vector> -#include <algorithm> -#include <functional> -#include <numeric> -#include "tbb/tbb_stddef.h" -#include "tbb/spin_mutex.h" -#ifdef DO_SCALABLEALLOC -#include "tbb/scalable_allocator.h" -#endif -#include "tbb/concurrent_vector.h" -#include "tbb/tbb_allocator.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/tick_count.h" -#include "tbb/blocked_range.h" -#define HARNESS_CUSTOM_MAIN 1 -#include "../test/harness.h" -//#include "harness_barrier.h" -#include "../test/harness_allocator.h" -#define STATISTICS_INLINE -#include "statistics.h" - -using namespace tbb; -bool ExtraVerbose = false; - -class Timer { - tbb::tick_count tick; -public: - Timer() { tick = tbb::tick_count::now(); } - double get_time() { return (tbb::tick_count::now() - tick).seconds(); } - double diff_time(const Timer &newer) { return (newer.tick - tick).seconds(); } - double mark_time() { tick_count t1(tbb::tick_count::now()), t2(tick); tick = t1; return (t1 - t2).seconds(); } - double mark_time(const Timer &newer) { tick_count t(tick); tick = newer.tick; return (tick - t).seconds(); } -}; - -/************************************************************************/ -/* TEST1 */ -/************************************************************************/ -#define mk_vector_test1(v, a) vector_test1<v<Timer, static_counting_allocator<a<Timer> > >, v<double, static_counting_allocator<a<double> > > > -template<class timers_vector_t, class values_vector_t> -class vector_test1 { - const char *mode; - StatisticsCollector &stat; - StatisticsCollector::TestCase key[16]; - -public: - vector_test1(const char *m, StatisticsCollector &s) : mode(m), stat(s) {} - - vector_test1 &operator()(size_t len) { - if(Verbose) printf("test1<%s>(%u): collecting timing statistics\n", mode, unsigned(len)); - __TBB_ASSERT(sizeof(Timer) == sizeof(double), NULL); - static const char *test_names[] = { - "b)creation wholly", - "a)creation by push", - "c)operation time per item", - 0 }; - for(int i = 0; test_names[i]; ++i) key[i] = stat.SetTestCase(test_names[i], mode, len); - - Timer timer0; timers_vector_t::allocator_type::init_counters(); - timers_vector_t tv(len); - Timer timer1; 
values_vector_t::allocator_type::init_counters(); - values_vector_t dv; - for (size_t i = 0; i < len; ++i) - dv.push_back( i ); - Timer timer2; - for (size_t i = 0; i < len; ++i) - { - dv[len-i-1] = timer0.diff_time(tv[i]); - tv[i].mark_time(); - } - stat.AddStatisticValue( key[2], "1total, ms", "%.3f", timer2.get_time()*1000.0 ); - stat.AddStatisticValue( key[1], "1total, ms", "%.3f", timer1.diff_time(timer2)*1000.0 ); - stat.AddStatisticValue( key[0], "1total, ms", "%.3f", timer0.diff_time(timer1)*1000.0 ); - //allocator statistics - stat.AddStatisticValue( key[0], "2total allocations", "%d", int(timers_vector_t::allocator_type::allocations) ); - stat.AddStatisticValue( key[1], "2total allocations", "%d", int(values_vector_t::allocator_type::allocations) ); - stat.AddStatisticValue( key[2], "2total allocations", "%d", 0); - stat.AddStatisticValue( key[0], "3total alloc#items", "%d", int(timers_vector_t::allocator_type::items_allocated) ); - stat.AddStatisticValue( key[1], "3total alloc#items", "%d", int(values_vector_t::allocator_type::items_allocated) ); - stat.AddStatisticValue( key[2], "3total alloc#items", "%d", 0); - //remarks - stat.AddStatisticValue( key[0], "9note", "segment creation time, ns:"); - stat.AddStatisticValue( key[2], "9note", "average op-time per item, ns:"); - Timer last_timer(timer2); double last_value = 0; - for (size_t j = 0, i = 2; i < len; i *= 2, j++) { - stat.AddRoundResult( key[0], (dv[len-i-1]-last_value)*1000000.0 ); - last_value = dv[len-i-1]; - stat.AddRoundResult( key[2], last_timer.diff_time(tv[i])/double(i)*1000000.0 ); - last_timer = tv[i]; - stat.SetRoundTitle(j, i); - } - tv.clear(); dv.clear(); - //__TBB_ASSERT(timers_vector_t::allocator_type::items_allocated == timers_vector_t::allocator_type::items_freed, NULL); - //__TBB_ASSERT(values_vector_t::allocator_type::items_allocated == values_vector_t::allocator_type::items_freed, NULL); - return *this; - } -}; - -/************************************************************************/ -/* TEST2 */ -/************************************************************************/ -#define mk_vector_test2(v, a) vector_test2<v<size_t, a<size_t> > > -template<class vector_t> -class vector_test2 { - const char *mode; - static const int ntrial = 10; - StatisticsCollector &stat; - -public: - vector_test2(const char *m, StatisticsCollector &s) : mode(m), stat(s) {} - - vector_test2 &operator()(size_t len) { - if(Verbose) printf("test2<%s>(%u): performing standard transformation sequence on vector\n", mode, unsigned(len)); - StatisticsCollector::TestCase init_key = stat.SetTestCase("allocate", mode, len); - StatisticsCollector::TestCase fill_key = stat.SetTestCase("fill", mode, len); - StatisticsCollector::TestCase proc_key = stat.SetTestCase("process", mode, len); - StatisticsCollector::TestCase full_key = stat.SetTestCase("total time", mode, len); - for (int i = 0; i < ntrial; i++) { - Timer timer0; - vector_t v1(len); - vector_t v2(len); - Timer timer1; - std::generate(v1.begin(), v1.end(), values(0)); - std::generate(v2.begin(), v2.end(), values(size_t(-len))); - Timer timer2; - std::reverse(v1.rbegin(), v1.rend()); - std::inner_product(v1.begin(), v1.end(), v2.rbegin(), 1); - std::sort(v1.rbegin(), v1.rend()); - std::sort(v2.rbegin(), v2.rend()); - std::set_intersection(v1.begin(), v1.end(), v2.rbegin(), v2.rend(), v1.begin()); - Timer timer3; - stat.AddRoundResult( proc_key, timer2.diff_time(timer3)*1000.0 ); - stat.AddRoundResult( fill_key, timer1.diff_time(timer2)*1000.0 ); - stat.AddRoundResult( init_key, 
timer0.diff_time(timer1)*1000.0 ); - stat.AddRoundResult( full_key, timer0.diff_time(timer3)*1000.0 ); - } - stat.SetStatisticFormula("1Average", "=AVERAGE(ROUNDS)"); - stat.SetStatisticFormula("2+/-", "=(MAX(ROUNDS)-MIN(ROUNDS))/2"); - return *this; - } - - class values - { - size_t value; - public: - values(size_t i) : value(i) {} - size_t operator()() { - return value++%(1|(value^55)); - } - }; -}; - -/************************************************************************/ -/* TEST3 */ -/************************************************************************/ -#define mk_vector_test3(v, a) vector_test3<v<char, local_counting_allocator<a<char>, size_t > > > -template<class vector_t> -class vector_test3 { - const char *mode; - StatisticsCollector &stat; - -public: - vector_test3(const char *m, StatisticsCollector &s) : mode(m), stat(s) {} - - vector_test3 &operator()(size_t len) { - if(Verbose) printf("test3<%s>(%u): collecting allocator statistics\n", mode, unsigned(len)); - static const size_t sz = 1024; - vector_t V[sz]; - StatisticsCollector::TestCase vinst_key = stat.SetTestCase("instances number", mode, len); - StatisticsCollector::TestCase count_key = stat.SetTestCase("allocations count", mode, len); - StatisticsCollector::TestCase items_key = stat.SetTestCase("allocated items", mode, len); - //stat.ReserveRounds(sz-1); - for (size_t c = 0, i = 0, s = sz/2; s >= 1 && i < sz; s /= 2, c++) - { - const size_t count = c? 1<<(c-1) : 0; - for (size_t e = i+s; i < e; i++) { - //if(count >= 16) V[i].reserve(count); - for (size_t j = 0; j < count; j++) - V[i].push_back(j); - } - stat.SetRoundTitle ( c, count ); - stat.AddRoundResult( vinst_key, s ); - stat.AddRoundResult( count_key, V[i-1].get_allocator().allocations ); - stat.AddRoundResult( items_key, V[i-1].get_allocator().items_allocated ); - } - return *this; - } -}; - -/************************************************************************/ -/* TYPES SET FOR TESTS */ -/************************************************************************/ -#define types_set(n, title, op) { StatisticsCollector Collector("time_vector"#n); Collector.SetTitle title; \ - {mk_vector_test##n(tbb::concurrent_vector, tbb::cache_aligned_allocator) ("TBB:NFS", Collector)op;} \ - {mk_vector_test##n(tbb::concurrent_vector, tbb::tbb_allocator) ("TBB:TBB", Collector)op;} \ - {mk_vector_test##n(tbb::concurrent_vector, std::allocator) ("TBB:STD", Collector)op;} \ - {mk_vector_test##n(std::vector, tbb::cache_aligned_allocator) ("STL:NFS", Collector)op;} \ - {mk_vector_test##n(std::vector, tbb::tbb_allocator) ("STL:TBB", Collector)op;} \ - {mk_vector_test##n(std::vector, std::allocator) ("STL:STD", Collector)op;} \ - Collector.Print(StatisticsCollector::Stdout|StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); } - - -/************************************************************************/ -/* MAIN DRIVER */ -/************************************************************************/ -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - if(argc>2) ExtraVerbose = true; - MinThread = 0; MaxThread = 500000; // use in another meaning - test#:problem size - ParseCommandLine( argc, argv ); - - ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded"); - - if(!MinThread || MinThread == 1) - types_set(1, ("Vectors performance test #1 for %d", MaxThread), (MaxThread) ) - if(!MinThread || MinThread == 2) - types_set(2, ("Vectors performance test #2 for %d", MaxThread), 
(MaxThread) ) - if(!MinThread || MinThread == 3) - types_set(3, ("Vectors performance test #3 for %d", MaxThread), (MaxThread) ) - - if(!Verbose) printf("done\n"); - return 0; -} - diff --git a/src/tbb/src/rml/client/index.html b/src/tbb/src/rml/client/index.html deleted file mode 100644 index c147cd7d8..000000000 --- a/src/tbb/src/rml/client/index.html +++ /dev/null @@ -1,42 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -This directory has source code that must be statically linked into an RML client. - -<H2>Files</H2> - -<DL> -<DT><P><A HREF="rml_factory.h">rml_factory.h</A> -<DD>Text shared by <A HREF="rml_omp.cpp">rml_omp.cpp</A> and <A HREF="rml_tbb.cpp">rml_tbb.cpp</A>. - This is not an ordinary include file, so it does not have an #ifndef guard.</P> -</DL> - -<H3> Specific to client=OpenMP</H3> -<DL> -<DT><P><A HREF="rml_omp.cpp">rml_omp.cpp</A> -<DD>Source file for OpenMP client.</P> -<DT><P><A HREF="omp_dynamic_link.h">omp_dynamic_link.h</A> -<DT><A HREF="omp_dynamic_link.cpp">omp_dynamic_link.cpp</A> -<DD>Source files for dynamic linking support. - The code is the code from the TBB source directory, but adjusted so that it - appears in namespace <TT>__kmp</TT> instead of namespace <TT>tbb::internal</TT>. -</DL> -<H3> Specific to client=TBB</H3> -<DL> -<DT><P><A HREF="rml_tbb.cpp">rml_tbb.cpp</A> -<DD>Source file for TBB client. It uses the dynamic linking support from the TBB source directory. -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2014 Intel Corporation. All Rights Reserved. -<P></P> -Intel is a registered trademark or trademark of Intel Corporation -or its subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/src/tbb/src/rml/client/library_assert.h b/src/tbb/src/rml/client/library_assert.h deleted file mode 100644 index 301fa116a..000000000 --- a/src/tbb/src/rml/client/library_assert.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef LIBRARY_ASSERT_H -#define LIBRARY_ASSERT_H - -#ifndef LIBRARY_ASSERT -#ifdef KMP_ASSERT2 -#define LIBRARY_ASSERT(x,y) KMP_ASSERT2((x),(y)) -#else -#include <assert.h> -#define LIBRARY_ASSERT(x,y) assert(x) -#define __TBB_DYNAMIC_LOAD_ENABLED 1 -#endif -#endif /* LIBRARY_ASSERT */ - -#endif /* LIBRARY_ASSERT_H */ diff --git a/src/tbb/src/rml/client/omp_dynamic_link.cpp b/src/tbb/src/rml/client/omp_dynamic_link.cpp deleted file mode 100644 index 155e172ff..000000000 --- a/src/tbb/src/rml/client/omp_dynamic_link.cpp +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "omp_dynamic_link.h" -#include "library_assert.h" -#include "tbb/dynamic_link.cpp" // Refers to src/tbb, not include/tbb - diff --git a/src/tbb/src/rml/client/omp_dynamic_link.h b/src/tbb/src/rml/client/omp_dynamic_link.h deleted file mode 100644 index 3d3a298fe..000000000 --- a/src/tbb/src/rml/client/omp_dynamic_link.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __KMP_omp_dynamic_link_H -#define __KMP_omp_dynamic_link_H - -#define OPEN_INTERNAL_NAMESPACE namespace __kmp { -#define CLOSE_INTERNAL_NAMESPACE } - -#include "library_assert.h" -#include "tbb/dynamic_link.h" // Refers to src/tbb, not include/tbb - -#endif /* __KMP_omp_dynamic_link_H */ diff --git a/src/tbb/src/rml/client/rml_factory.h b/src/tbb/src/rml/client/rml_factory.h deleted file mode 100644 index 471f0f608..000000000 --- a/src/tbb/src/rml/client/rml_factory.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// No ifndef guard because this file is not a normal include file. - -#if TBB_USE_DEBUG -#define DEBUG_SUFFIX "_debug" -#else -#define DEBUG_SUFFIX -#endif /* TBB_USE_DEBUG */ - -// RML_SERVER_NAME is the name of the RML server library. -#if _WIN32||_WIN64 -#define RML_SERVER_NAME "irml" DEBUG_SUFFIX ".dll" -#elif __APPLE__ -#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".dylib" -#elif __linux__ -#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so.1" -#elif __FreeBSD__ || __NetBSD__ || __sun || _AIX -#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so" -#else -#error Unknown OS -#endif - -const ::rml::versioned_object::version_type CLIENT_VERSION = 2; - -#if __TBB_WEAK_SYMBOLS_PRESENT - #pragma weak __RML_open_factory - #pragma weak __RML_close_factory - extern "C" { - ::rml::factory::status_type __RML_open_factory ( ::rml::factory&, ::rml::versioned_object::version_type&, ::rml::versioned_object::version_type ); - void __RML_close_factory( ::rml::factory& f ); - } -#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - -::rml::factory::status_type FACTORY::open() { - // Failure of following assertion indicates that factory is already open, or not zero-inited. 
- LIBRARY_ASSERT( !library_handle, NULL ); - status_type (*open_factory_routine)( factory&, version_type&, version_type ); - dynamic_link_descriptor server_link_table[4] = { - DLD(__RML_open_factory,open_factory_routine), - MAKE_SERVER(my_make_server_routine), - DLD(__RML_close_factory,my_wait_to_close_routine), - GET_INFO(my_call_with_server_info_routine), - }; - status_type result; - if( dynamic_link( RML_SERVER_NAME, server_link_table, 4, &library_handle ) ) { - version_type server_version; - result = (*open_factory_routine)( *this, server_version, CLIENT_VERSION ); - // server_version can be checked here for incompatibility if necessary. - } else { - library_handle = NULL; - result = st_not_found; - } - return result; -} - -void FACTORY::close() { - if( library_handle ) - (*my_wait_to_close_routine)(*this); - if( (size_t)library_handle>FACTORY::c_dont_unload ) { - dynamic_unlink(library_handle); - library_handle = NULL; - } -} - -::rml::factory::status_type FACTORY::make_server( SERVER*& s, CLIENT& c) { - // Failure of following assertion means that factory was not successfully opened. - LIBRARY_ASSERT( my_make_server_routine, NULL ); - return (*my_make_server_routine)(*this,s,c); -} - -void FACTORY::call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const { - // Failure of following assertion means that factory was not successfully opened. - LIBRARY_ASSERT( my_call_with_server_info_routine, NULL ); - (*my_call_with_server_info_routine)( cb, arg ); -} diff --git a/src/tbb/src/rml/client/rml_omp.cpp b/src/tbb/src/rml/client/rml_omp.cpp deleted file mode 100644 index b83f2af3e..000000000 --- a/src/tbb/src/rml/client/rml_omp.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "rml_omp.h" -#include "omp_dynamic_link.h" -#include <assert.h> - -namespace __kmp { -namespace rml { - -#define MAKE_SERVER(x) DLD(__KMP_make_rml_server,x) -#define GET_INFO(x) DLD(__KMP_call_with_my_server_info,x) -#define SERVER omp_server -#define CLIENT omp_client -#define FACTORY omp_factory - -#if __TBB_WEAK_SYMBOLS_PRESENT - #pragma weak __KMP_make_rml_server - #pragma weak __KMP_call_with_my_server_info - extern "C" { - omp_factory::status_type __KMP_make_rml_server( omp_factory& f, omp_server*& server, omp_client& client ); - void __KMP_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ); - } -#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - -#include "rml_factory.h" - -} // rml -} // __kmp diff --git a/src/tbb/src/rml/client/rml_tbb.cpp b/src/tbb/src/rml/client/rml_tbb.cpp deleted file mode 100644 index 5091f5e66..000000000 --- a/src/tbb/src/rml/client/rml_tbb.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "../include/rml_tbb.h" -#include "tbb/dynamic_link.h" -#include <assert.h> - -namespace tbb { -namespace internal { -namespace rml { - -#define MAKE_SERVER(x) DLD(__TBB_make_rml_server,x) -#define GET_INFO(x) DLD(__TBB_call_with_my_server_info,x) -#define SERVER tbb_server -#define CLIENT tbb_client -#define FACTORY tbb_factory - -#if __TBB_WEAK_SYMBOLS_PRESENT - #pragma weak __TBB_make_rml_server - #pragma weak __TBB_call_with_my_server_info - extern "C" { - ::rml::factory::status_type __TBB_make_rml_server( tbb::internal::rml::tbb_factory& f, tbb::internal::rml::tbb_server*& server, tbb::internal::rml::tbb_client& client ); - void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ); - } -#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - -#include "rml_factory.h" - -} // rml -} // internal -} // tbb diff --git a/src/tbb/src/rml/include/index.html b/src/tbb/src/rml/include/index.html deleted file mode 100644 index 3b6716de7..000000000 --- a/src/tbb/src/rml/include/index.html +++ /dev/null @@ -1,29 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -This directory has the include files for the Resource Management Layer (RML). 
- -<H2>Files</H2> - -<DL> -<DT><P><A HREF="rml_base.h">rml_base.h</A> -<DD>Interfaces shared by TBB and OpenMP.</P> -<DT><P><A HREF="rml_omp.h">rml_omp.h</A> -<DD>Interface exclusive to OpenMP.</P> -<DT><P><A HREF="rml_tbb.h">rml_tbb.h</A> -<DD>Interface exclusive to TBB.</P> -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2014 Intel Corporation. All Rights Reserved. -<P></P> -Intel is a registered trademark or trademark of Intel Corporation -or its subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/src/tbb/src/rml/include/rml_omp.h b/src/tbb/src/rml/include/rml_omp.h deleted file mode 100644 index 641ab3ccb..000000000 --- a/src/tbb/src/rml/include/rml_omp.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Header guard and namespace names follow OpenMP runtime conventions. - -#ifndef KMP_RML_OMP_H -#define KMP_RML_OMP_H - -#include "rml_base.h" - -namespace __kmp { -namespace rml { - -class omp_client; - -//------------------------------------------------------------------------ -// Classes instantiated by the server -//------------------------------------------------------------------------ - -//! Represents a set of omp worker threads provided by the server. -class omp_server: public ::rml::server { -public: - //! A number of coins (i.e., threads) - typedef unsigned size_type; - - //! Return the number of coins in the bank. (negative if machine is oversubscribed). - virtual int current_balance() const = 0; - - //! Request n coins. Returns number of coins granted. Oversubscription amount if negative. - /** Always granted if is_strict is true. - - Positive or zero result indicates that the number of coins was taken from the bank. - - Negative result indicates that no coins were taken, and that the bank has deficit - by that amount and the caller (if being a good citizen) should return that many coins. - */ - virtual int try_increase_load( size_type /*n*/, bool /*strict*/ ) = 0; - - //! Return n coins into the bank. - virtual void decrease_load( size_type /*n*/ ) = 0; - - //! Convert n coins into n threads. 
- /** When a thread returns, it is converted back into a coin and the coin is returned to the bank. */ - virtual void get_threads( size_type /*m*/, void* /*cookie*/, job* /*array*/[] ) = 0; - - /** Putting a thread to sleep - convert a thread into a coin - Waking up a thread - convert a coin into a thread - - Note: conversion between a coin and a thread does not affect the accounting. - */ -#if _WIN32||_WIN64 - //! Inform server of a tbb master thread. - virtual void register_master( execution_resource_t& /*v*/ ) = 0; - - //! Inform server that the tbb master thread is done with its work. - virtual void unregister_master( execution_resource_t /*v*/ ) = 0; - - //! deactivate - /** give control to ConcRT RM */ - virtual void deactivate( job* ) = 0; - - //! reactivate - virtual void reactivate( job* ) = 0; -#endif /* _WIN32||_WIN64 */ -}; - - -//------------------------------------------------------------------------ -// Classes (or base classes thereof) instantiated by the client -//------------------------------------------------------------------------ - -class omp_client: public ::rml::client { -public: - //! Called by server thread when it delivers a thread to client - /** The index argument is a 0-origin index of the job for this thread within the array - returned by method get_threads. Server decreases the load by 1 (i.e., returning the coin - back to the bank) after this method returns. */ - virtual void process( job&, void* /*cookie*/, size_type /*index*/ ) RML_PURE(void) -}; - -/** Client must ensure that instance is zero-inited, typically by being a file-scope object. */ -class omp_factory: public ::rml::factory { - - //! Pointer to routine that creates an RML server. - status_type (*my_make_server_routine)( omp_factory&, omp_server*&, omp_client& ); - - //! Pointer to routine that calls callback function with server version info. - void (*my_call_with_server_info_routine)( ::rml::server_info_callback_t cb, void* arg ); - -public: - typedef ::rml::versioned_object::version_type version_type; - typedef omp_client client_type; - typedef omp_server server_type; - - //! Open factory. - /** Dynamically links against RML library. - Returns st_success, st_incompatible, or st_not_found. */ - status_type open(); - - //! Factory method to be called by client to create a server object. - /** Factory must be open. - Returns st_success or st_incompatible . */ - status_type make_server( server_type*&, client_type& ); - - //! Close factory. - void close(); - - //! Call the callback with the server build info. - void call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const; -}; - -} // namespace rml -} // namespace __kmp - -#endif /* KMP_RML_OMP_H */ diff --git a/src/tbb/src/rml/index.html b/src/tbb/src/rml/index.html deleted file mode 100644 index 18d9699d1..000000000 --- a/src/tbb/src/rml/index.html +++ /dev/null @@ -1,31 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -The subdirectories pertain to the Resource Management Layer (RML). - -<H2>Directories</H2> - -<DL> -<DT><P><A HREF="include/index.html">include/</A> -<DD>Include files used by clients of RML.</P> -<DT><P><A HREF="client/index.html">client/</A> -<DD>Source files for code that must be statically linked with a client.</P> -<DT><P><A HREF="server/index.html">server/</A> -<DD>Source files for the RML server.</P> -<DT><P><A HREF="test">test/</A> -<DD>Unit tests for RML server and its components.</P> -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2014 Intel Corporation. 
All Rights Reserved. -<P></P> -Intel is a registered trademark or trademark of Intel Corporation -or its subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/src/tbb/src/rml/perfor/omp_nested.cpp b/src/tbb/src/rml/perfor/omp_nested.cpp deleted file mode 100644 index a6e0eb50d..000000000 --- a/src/tbb/src/rml/perfor/omp_nested.cpp +++ /dev/null @@ -1,144 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -// Algorithm parameters -const int Max_OMP_Outer_Threads = 8; - -// Global variables -int max_outer_threads = Max_OMP_Outer_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -o<num> : max # of threads OMP should use at outer level\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_outer_t) { - (*max_outer_t) = omp_get_max_threads(); - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 'o': // set max_outer_threads - if (sscanf(&argv[i][2], "%d", max_outer_t) != 1 || *max_outer_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -int main(int argc, char *argv[]) { - process_args(argc, argv, &max_outer_threads); -#ifdef LOG_THREADS - TotalThreadLevel.init(); -#endif - - double start, end; - start = omp_get_wtime( ); - -#pragma omp parallel num_threads(max_outer_threads) - { - int omp_thread = omp_get_thread_num(); -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_outer); -#endif - if (omp_thread == 0) { - MilliSleep(3000); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, omp_outer); -#endif -#pragma omp parallel - { - int my_omp_thread = omp_get_thread_num(); -#ifdef LOG_THREADS - if (my_omp_thread == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_inner); -#endif - printf("Inner thread %d nested inside outer thread %d\n", my_omp_thread, omp_thread); -#ifdef LOG_THREADS - if (my_omp_thread == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_inner); -#endif - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, omp_outer); -#endif - } - else { - MilliSleep(6000); - } -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_outer); -#endif - } - end = omp_get_wtime( ); - printf("Simple test of nested OMP (%d outer threads max) took: %6.6f\n", - max_outer_threads, end-start); -#ifdef LOG_THREADS - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/src/tbb/src/rml/perfor/omp_simple.cpp b/src/tbb/src/rml/perfor/omp_simple.cpp deleted file mode 100644 index 38513a06f..000000000 --- a/src/tbb/src/rml/perfor/omp_simple.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#include "tbb/task.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -using namespace std; -using namespace tbb; - -// Algorithm parameters -const int Max_TBB_Threads = 16; -const int Max_OMP_Threads = 16; - -// Global variables -int max_tbb_threads = Max_TBB_Threads; -int max_omp_threads = Max_OMP_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -t<num> : max # of threads TBB should use\n" - " -o<num> : max # of threads OMP should use\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) { - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 't': // set max_tbb_threads - if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) { - fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'o': // set max_omp_threads - if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -int main(int argc, char *argv[]) { - process_args(argc, argv, &max_tbb_threads, &max_omp_threads); - TotalThreadLevel.init(); - - double start, end; - start = 
omp_get_wtime(); - -#pragma omp parallel num_threads(max_omp_threads) - { - int omp_thread = omp_get_thread_num(); -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_outer); -#endif - task_scheduler_init phase(max_tbb_threads); - if (omp_thread == 0) { - MilliSleep(3000); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, omp_outer); -#endif - parallel_for(blocked_range<size_t>(0, 1000), - [=](const blocked_range<size_t>& range) { -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_inner); -#endif -#pragma ivdep - for (size_t i=range.begin(); i!=range.end(); ++i) { - if (i==range.begin()) - printf("TBB range starting at %d on OMP thread %d\n", (int)i, omp_thread); - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_inner); -#endif - }, auto_partitioner()); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, omp_outer); -#endif - } - else { - MilliSleep(6000); - } -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_outer); -#endif - } - end = omp_get_wtime(); - printf("Simple test of OMP (%d threads max) with TBB (%d threads max) inside took: %6.6f\n", - max_omp_threads, max_tbb_threads, end-start); -#ifdef LOG_THREADS - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/src/tbb/src/rml/perfor/tbb_multi_omp.cpp b/src/tbb/src/rml/perfor/tbb_multi_omp.cpp deleted file mode 100644 index 3d9c390c2..000000000 --- a/src/tbb/src/rml/perfor/tbb_multi_omp.cpp +++ /dev/null @@ -1,186 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#include "tbb/task.h" -#include "tbb/tick_count.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/scalable_allocator.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -using namespace std; -using namespace tbb; - -// Algorithm parameters -const int Max_TBB_Threads = 16; -const int Max_OMP_Threads = 16; - -// Global variables -int max_tbb_threads = Max_TBB_Threads; -int max_omp_threads = Max_OMP_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -t<num> : max # of threads TBB should use\n" - " -o<num> : max # of threads OMP should use\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) { - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 't': // set max_tbb_threads - if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) { - fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'o': // set max_omp_threads - if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -class SimpleTask : public task { - bool isLeaf; - int myId; -public: - SimpleTask(bool isLeaf_, int myId_) : isLeaf(isLeaf_), myId(myId_) {} - task* execute() { -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - omp_set_num_threads(max_omp_threads); - if (!isLeaf) { - set_ref_count(65); - for (int i=0; i<64; ++i) { - SimpleTask& st = *new(allocate_child()) SimpleTask(true, i); - spawn(st); - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - wait_for_all(); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - } - else { - if (myId%2 == 0) { - MilliSleep(3000); -#pragma omp parallel - { -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(omp_get_num_threads()-1, omp_inner); -#endif - //printf("In OMP parallel region on TBB task with myId=0: thread %d of %d\n", omp_get_thread_num(), omp_get_num_threads()); -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(-(omp_get_num_threads()-1), omp_inner); -#endif - } - } - else { - MilliSleep(6000); - } - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - return NULL; - } -}; - - -int main(int argc, char *argv[]) { -#ifdef LOG_THREADS - TotalThreadLevel.init(); - TotalThreadLevel.change_level(1, tbb_outer); -#endif - process_args(argc, 
argv, &max_tbb_threads, &max_omp_threads); - - task_scheduler_init phase(max_tbb_threads); - tick_count start, end; - start = tick_count::now(); - SimpleTask& st = *new(task::allocate_root()) SimpleTask(false, -1); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - task::spawn_root_and_wait(st); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - end = tick_count::now(); - printf("Simple Test of TBB (%d threads max) with OMP (%d threads max) inside took: %6.6f\n", - max_tbb_threads, max_omp_threads, (end-start).seconds()); - -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/src/tbb/src/rml/perfor/tbb_simple.cpp b/src/tbb/src/rml/perfor/tbb_simple.cpp deleted file mode 100644 index 671d182b6..000000000 --- a/src/tbb/src/rml/perfor/tbb_simple.cpp +++ /dev/null @@ -1,191 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#include "tbb/task.h" -#include "tbb/tick_count.h" -#include "tbb/task_scheduler_init.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -using namespace std; -using namespace tbb; - -// Algorithm parameters -const int Max_TBB_Threads = 16; -const int Max_OMP_Threads = 16; - -// Global variables -int max_tbb_threads = Max_TBB_Threads; -int max_omp_threads = Max_OMP_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -t<num> : max # of threads TBB should use\n" - " -o<num> : max # of threads OMP should use\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) { - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 't': // set max_tbb_threads - if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) { - fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'o': // set max_omp_threads - if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -class SimpleTask : public task { - bool isLeaf; - int myId; -public: - SimpleTask(bool isLeaf_, int myId_) : isLeaf(isLeaf_), myId(myId_) {} - task* execute() { -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - omp_set_num_threads(max_omp_threads); - if (!isLeaf) { - set_ref_count(17); - for (int i=0; i<16; ++i) { - SimpleTask& st = *new(allocate_child()) SimpleTask(true, i); - spawn(st); - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - wait_for_all(); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - } - else { - if (myId == 0) { - MilliSleep(3000); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif -#pragma omp parallel - { -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_inner); -#endif - printf("In OMP parallel region on TBB task with myId=0: thread %d of %d\n", - omp_get_thread_num(), omp_get_num_threads()); -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_inner); -#endif - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - } - else { - MilliSleep(6000); - } - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - return NULL; - } -}; - - -int main(int argc, char *argv[]) { -#ifdef 
LOG_THREADS - TotalThreadLevel.init(); - TotalThreadLevel.change_level(1, tbb_outer); -#endif - process_args(argc, argv, &max_tbb_threads, &max_omp_threads); - - task_scheduler_init phase(max_tbb_threads); - tick_count start, end; - start = tick_count::now(); - SimpleTask& st = *new(task::allocate_root()) SimpleTask(false, -1); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - task::spawn_root_and_wait(st); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - end = tick_count::now(); - printf("Simple Test of TBB (%d threads max) with OMP (%d threads max) inside took: %6.6f\n", - max_tbb_threads, max_omp_threads, (end-start).seconds()); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/src/tbb/src/rml/perfor/thread_level.h b/src/tbb/src/rml/perfor/thread_level.h deleted file mode 100644 index 3926680ec..000000000 --- a/src/tbb/src/rml/perfor/thread_level.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// Thread level recorder -#ifndef __THREAD_LEVEL_H -#define __THREAD_LEVEL_H -#include <cstdio> -#include <omp.h> -#include <assert.h> -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -//#define LOG_THREADS // use this to ifdef out calls to this class -//#define NO_BAIL_OUT // continue execution after detecting oversubscription - -using namespace tbb; - -typedef enum {tbb_outer, tbb_inner, omp_outer, omp_inner} client_t; - -class ThreadLevelRecorder { - tbb::atomic<int> tbb_outer_level; - tbb::atomic<int> tbb_inner_level; - tbb::atomic<int> omp_outer_level; - tbb::atomic<int> omp_inner_level; - struct record { - tbb::tick_count time; - int n_tbb_outer_thread; - int n_tbb_inner_thread; - int n_omp_outer_thread; - int n_omp_inner_thread; - }; - tbb::atomic<unsigned> next; - /** Must be power of two */ - static const unsigned max_record_count = 1<<20; - record array[max_record_count]; - int max_threads; - bool fail; - public: - void change_level(int delta, client_t whichClient); - void dump(); - void init(); -}; - -void ThreadLevelRecorder::change_level(int delta, client_t whichClient) { - int tox=tbb_outer_level, tix=tbb_inner_level, oox=omp_outer_level, oix=omp_inner_level; - if (whichClient == tbb_outer) { - tox = tbb_outer_level+=delta; - } else if (whichClient == tbb_inner) { - tix = tbb_inner_level+=delta; - } else if (whichClient == omp_outer) { - oox = omp_outer_level+=delta; - } else if (whichClient == omp_inner) { - oix = omp_inner_level+=delta; - } else { - printf("WARNING: Bad client type; ignoring.\n"); - return; - } - // log non-negative entries - tbb::tick_count t = tbb::tick_count::now(); - unsigned k = next++; - if (k<max_record_count) { - record& r = array[k]; - r.time = t; - r.n_tbb_outer_thread = tox>=0?tox:0; - r.n_omp_outer_thread = oox>=0?oox:0; - r.n_tbb_inner_thread = tix>=0?tix:0; - r.n_omp_inner_thread = oix>=0?oix:0; - } - char errStr[100]; - int tot_threads; - tot_threads = tox+tix+oox+oix; - sprintf(errStr, "ERROR: Number of threads (%d+%d+%d+%d=%d) in use exceeds maximum (%d).\n", - tox, tix, oox, oix, tot_threads, max_threads); - if (tot_threads > max_threads) { -#ifdef NO_BAIL_OUT - if (!fail) { - printf("%sContinuing...\n", errStr); - fail = true; - } -#else - dump(); - printf("%s\n", errStr); - assert(tot_threads <= max_threads); -#endif - } -} - -void ThreadLevelRecorder::dump() { - FILE* f = fopen("time.txt","w"); - if (!f) { - perror("fopen(time.txt)\n"); - exit(1); - } - unsigned limit = next; - if (limit>max_record_count) { // Clip - limit = max_record_count; - } - for (unsigned i=0; i<limit; ++i) { - fprintf(f,"%f\t%d\t%d\t%d\t%d\n",(array[i].time-array[0].time).seconds(), array[i].n_tbb_outer_thread, - array[i].n_tbb_inner_thread, array[i].n_omp_outer_thread, array[i].n_omp_inner_thread); - } - fclose(f); - int tox=tbb_outer_level, tix=tbb_inner_level, oox=omp_outer_level, oix=omp_inner_level; - int tot_threads; - tot_threads = tox+tix+oox+oix; - if (!fail) printf("INFO: Passed.\n"); - else printf("INFO: Failed.\n"); -} - -void ThreadLevelRecorder::init() { - fail = false; - max_threads = omp_get_max_threads(); - printf("INFO: Getting maximum hardware threads... %d.\n", max_threads); -} - -ThreadLevelRecorder TotalThreadLevel; -#endif diff --git a/src/tbb/src/rml/server/index.html b/src/tbb/src/rml/server/index.html deleted file mode 100644 index 3f32a7fd5..000000000 --- a/src/tbb/src/rml/server/index.html +++ /dev/null @@ -1,18 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -This directory has source code internal to the server. 
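
`thread_level.h`, deleted above, logs concurrency samples into a fixed array indexed by an atomic counter, so concurrent writers never contend on a lock: each caller claims a unique slot and writes to it privately. The same pattern in condensed form with `std::atomic` (a sketch, not the deleted class; like the original it is meant to live as a single file-scope object, since the buffer is large):

```cpp
#include <atomic>
#include <cstdio>

struct sample { int tbb_outer, tbb_inner, omp_outer, omp_inner; };

class level_log {
    static const unsigned capacity = 1u << 20;   // fixed-size event buffer
    sample ring[capacity];
    std::atomic<unsigned> next{0};
public:
    // fetch_add hands every caller a unique slot, so concurrent writers
    // never touch the same record; events past capacity are dropped.
    void record(const sample& s) {
        unsigned k = next.fetch_add(1, std::memory_order_relaxed);
        if (k < capacity)
            ring[k] = s;
    }

    void dump(FILE* f) {
        unsigned n = next.load();
        if (n > capacity) n = capacity;          // clip, as the deleted code does
        for (unsigned i = 0; i < n; ++i)
            std::fprintf(f, "%d\t%d\t%d\t%d\n",
                         ring[i].tbb_outer, ring[i].tbb_inner,
                         ring[i].omp_outer, ring[i].omp_inner);
    }
};
```
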
- -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2014 Intel Corporation. All Rights Reserved. -<P></P> -Intel is a registered trademark or trademark of Intel Corporation -or its subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/src/tbb/src/rml/server/irml.rc b/src/tbb/src/rml/server/irml.rc deleted file mode 100644 index 8b68e2922..000000000 --- a/src/tbb/src/rml/server/irml.rc +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - -// Microsoft Visual C++ generated resource script. -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NO_MFC 1 -#define _APS_NEXT_RESOURCE_VALUE 102 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1001 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource. 
-// -#include <winresrc.h> -#define ENDL "\r\n" -#include "tbb/tbb_version.h" - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// Neutral resources - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU) -#ifdef _WIN32 -LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL -#pragma code_page(1252) -#endif //_WIN32 - -///////////////////////////////////////////////////////////////////////////// -// manifest integration -#ifdef TBB_MANIFEST -#include "winuser.h" -2 RT_MANIFEST tbbmanifest.exe.manifest -#endif - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION TBB_VERNUMBERS - PRODUCTVERSION TBB_VERNUMBERS - FILEFLAGSMASK 0x17L -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x40004L - FILETYPE 0x2L - FILESUBTYPE 0x0L -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004b0" - BEGIN - VALUE "CompanyName", "Intel Corporation\0" - VALUE "FileDescription", "Intel(R) Threading Building Blocks resource manager library\0" - VALUE "FileVersion", TBB_VERSION "\0" - VALUE "LegalCopyright", "Copyright 2005-2014 Intel Corporation. All Rights Reserved.\0" - VALUE "LegalTrademarks", "\0" -#ifndef TBB_USE_DEBUG - VALUE "OriginalFilename", "irml.dll\0" -#else - VALUE "OriginalFilename", "irml_debug.dll\0" -#endif - VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0" - VALUE "ProductVersion", TBB_VERSION "\0" - VALUE "PrivateBuild", "\0" - VALUE "SpecialBuild", "\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0, 1200 - END -END - -#endif // Neutral resources -///////////////////////////////////////////////////////////////////////////// - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. -// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/src/tbb/src/rml/server/job_automaton.h b/src/tbb/src/rml/server/job_automaton.h deleted file mode 100644 index f38d075fc..000000000 --- a/src/tbb/src/rml/server/job_automaton.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __RML_job_automaton_H -#define __RML_job_automaton_H - -#include "rml_base.h" -#include "tbb/atomic.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244) -#endif - -namespace rml { - -namespace internal { - -//! Finite state machine. -/** /--------------\ - / V - 0 --> 1--> ptr --> -1 - ^ - | - | - V - ptr|1 - -"owner" = corresponding server_thread. -Odd states (except -1) indicate that someone is executing code on the job. -Most transitions driven only by owner. -Transition 0-->-1 is driven by non-owner. -Transition ptr->-1 is driven by owner or non-owner. -*/ -class job_automaton: no_copy { -private: - tbb::atomic<intptr_t> my_job; -public: - /** Created by non-owner */ - job_automaton() { - my_job = 0; - } - - ~job_automaton() { - __TBB_ASSERT( my_job==-1, "must plug before destroying" ); - } - - //! Try to transition 0-->1 or ptr-->ptr|1. - /** Should only be called by owner. */ - bool try_acquire() { - intptr_t snapshot = my_job; - if( snapshot==-1 ) { - return false; - } else { - __TBB_ASSERT( (snapshot&1)==0, "already marked that way" ); - intptr_t old = my_job.compare_and_swap( snapshot|1, snapshot ); - __TBB_ASSERT( old==snapshot || old==-1, "unexpected interference" ); - return old==snapshot; - } - } - //! Transition ptr|1-->ptr - /** Should only be called by owner. */ - void release() { - intptr_t snapshot = my_job; - __TBB_ASSERT( snapshot&1, NULL ); - // Atomic store suffices here. - my_job = snapshot&~1; - } - - //! Transition 1-->ptr - /** Should only be called by owner. */ - void set_and_release( rml::job& job ) { - intptr_t value = reinterpret_cast<intptr_t>(&job); - __TBB_ASSERT( (value&1)==0, "job misaligned" ); - __TBB_ASSERT( value!=0, "null job" ); - __TBB_ASSERT( my_job==1, "already set, or not marked busy?" ); - // Atomic store suffices here. - my_job = value; - } - - //! Transition 0-->-1 - /** If successful, return true. called by non-owner (for TBB and the likes) */ - bool try_plug_null() { - return my_job.compare_and_swap( -1, 0 )==0; - } - - //! Try to transition to -1. If successful, set j to contents and return true. - /** Called by owner or non-owner. (for OpenMP and the likes) */ - bool try_plug( rml::job*&j ) { - for(;;) { - intptr_t snapshot = my_job; - if( snapshot&1 ) { - j = NULL; - return false; - } - // Not busy - if( my_job.compare_and_swap( -1, snapshot )==snapshot ) { - j = reinterpret_cast<rml::job*>(snapshot); - return true; - } - // Need to retry, because current thread may be non-owner that read a 0, and owner might have - // caused transition 0->1->ptr after we took our snapshot. - } - } - - /** Called by non-owner to wait for transition to ptr. 
*/ - rml::job& wait_for_job() const { - intptr_t snapshot; - for(;;) { - snapshot = my_job; - if( snapshot&~1 ) break; - __TBB_Yield(); - } - __TBB_ASSERT( snapshot!=-1, "wait on plugged job_automaton" ); - return *reinterpret_cast<rml::job*>(snapshot&~1); - } -}; - -} // namespace internal -} // namespace rml - - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4244 are back - -#endif /* __RML_job_automaton_H */ diff --git a/src/tbb/src/rml/server/lin-rml-export.def b/src/tbb/src/rml/server/lin-rml-export.def deleted file mode 100644 index 3758f42c9..000000000 --- a/src/tbb/src/rml/server/lin-rml-export.def +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -{ -global: -__RML_open_factory; -__RML_close_factory; -__TBB_make_rml_server; -__KMP_make_rml_server; -__TBB_call_with_my_server_info; -__KMP_call_with_my_server_info; -local:*; -}; diff --git a/src/tbb/src/rml/server/rml_server.cpp b/src/tbb/src/rml/server/rml_server.cpp deleted file mode 100644 index 075e4643f..000000000 --- a/src/tbb/src/rml/server/rml_server.cpp +++ /dev/null @@ -1,3309 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "rml_tbb.h" -#define private public /* Sleazy trick to avoid publishing internal names in public header. */ -#include "rml_omp.h" -#undef private - -#include "tbb/tbb_allocator.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/aligned_space.h" -#include "tbb/atomic.h" -#include "tbb/spin_mutex.h" -#include "tbb/tbb_misc.h" // Get AvailableHwConcurrency() from here. -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -// VS2008/VC9 seems to have an issue; -#pragma warning( push ) -#pragma warning( disable: 4985 ) -#endif -#include "tbb/concurrent_vector.h" -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif -#if _MSC_VER && defined(_Wp64) -// Workaround for overzealous compiler warnings -#pragma warning (push) -#pragma warning (disable: 4244) -#endif - -#include "job_automaton.h" -#include "wait_counter.h" -#include "thread_monitor.h" - -#if RML_USE_WCRM -#include <concrt.h> -#include <concrtrm.h> -using namespace Concurrency; -#include <vector> -#include <hash_map> -#define __RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED 0 -#endif /* RML_USE_WCRM */ - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) - -namespace rml { -namespace internal { - -using tbb::internal::rml::tbb_client; -using tbb::internal::rml::tbb_server; - -using __kmp::rml::omp_client; -using __kmp::rml::omp_server; - -typedef versioned_object::version_type version_type; - -#define SERVER_VERSION 2 -#define EARLIEST_COMPATIBLE_CLIENT_VERSION 2 - -static const size_t cache_line_size = tbb::internal::NFS_MaxLineSize; - -template<typename Server, typename Client> class generic_connection; -class tbb_connection_v2; -class omp_connection_v2; - -#if RML_USE_WCRM -//! State of a server_thread -/** Below are diagrams of legal state transitions. - - ts_busy - ^ ^ - / \ - / V - ts_done <----- ts_asleep <------> ts_idle -*/ - -enum thread_state_t { - ts_idle, - ts_asleep, - ts_busy, - ts_done -}; - -//! Extra state of an omp server thread -enum thread_extra_state_t { - ts_none, - ts_removed, - ts_lent -}; - -//! Results from try_grab_for() -enum thread_grab_t { - wk_failed, - wk_from_asleep, - wk_from_idle -}; - -#else /* !RML_USE_WCRM */ - -//! State of a server_thread -/** Below are diagrams of legal state transitions. - - OMP - ts_omp_busy - ^ ^ - / \ - / V - ts_asleep <-----------> ts_idle - - - ts_deactivated - ^ ^ - / \ - V \ - ts_none <--------------> ts_reactivated - - TBB - ts_tbb_busy - ^ ^ - / \ - / V - ts_asleep <-----------> ts_idle --> ts_done - - For TBB only. Extra state transition. - - ts_created -> ts_started -> ts_visited - */ -enum thread_state_t { - //! Thread not doing anything useful, but running and looking for work. - ts_idle, - //! Thread not doing anything useful and is asleep */ - ts_asleep, - //! Thread is enlisted into OpenMP team - ts_omp_busy, - //! Thread is busy doing TBB work. - ts_tbb_busy, - //! For tbb threads only - ts_done, - ts_created, - ts_started, - ts_visited, - //! 
For omp threads only - ts_none, - ts_deactivated, - ts_reactivated -}; -#endif /* RML_USE_WCRM */ - -#if TBB_USE_ASSERT -#define PRODUCE_ARG(x) ,x -#else -#define PRODUCE_ARG(x) -#endif /* TBB_USE_ASSERT */ - -//! Synchronizes dispatch of OpenMP work. -class omp_dispatch_type { - typedef ::rml::job job_type; - omp_client* client; - void* cookie; - omp_client::size_type index; - tbb::atomic<job_type*> job; -#if TBB_USE_ASSERT - omp_connection_v2* server; -#endif /* TBB_USE_ASSERT */ -public: - omp_dispatch_type() {job=NULL;} - void consume(); - void produce( omp_client& c, job_type& j, void* cookie_, omp_client::size_type index_ PRODUCE_ARG( omp_connection_v2& s )) { - __TBB_ASSERT( &j, NULL ); - __TBB_ASSERT( !job, "job already set" ); - client = &c; -#if TBB_USE_ASSERT - server = &s; -#endif /* TBB_USE_ASSERT */ - cookie = cookie_; - index = index_; - // Must be last - job = &j; - } -}; - -//! A reference count. -/** No default constructor, because users of ref_count must be very careful about whether the - initial reference count is 0 or 1. */ -class ref_count: no_copy { - friend class thread_map; - tbb::atomic<int> my_ref_count; -public: - ref_count(int k ) {my_ref_count=k;} - ~ref_count() {__TBB_ASSERT( !my_ref_count, "premature destruction of refcounted object" );} - //! Add one and return new value. - int add_ref() { - int k = ++my_ref_count; - __TBB_ASSERT(k>=1,"reference count underflowed before add_ref"); - return k; - } - //! Subtract one and return new value. - int remove_ref() { - int k = --my_ref_count; - __TBB_ASSERT(k>=0,"reference count underflow"); - return k; - } -}; - -#if RML_USE_WCRM - -#if USE_UMS_THREAD -#define RML_THREAD_KIND UmsThreadDefault -#define RML_THREAD_KIND_STRING "UmsThread" -#else -#define RML_THREAD_KIND ThreadScheduler -#define RML_THREAD_KIND_STRING "WinThread" -#endif - -// Forward declaration -class thread_map; - -static const IExecutionResource* c_remove_prepare = (IExecutionResource*)0; -static const IExecutionResource* c_remove_returned = (IExecutionResource*)1; - -//! Server thread representation -class server_thread_rep : no_copy { - friend class thread_map; - friend class omp_connection_v2; - friend class server_thread; - friend class tbb_server_thread; - friend class omp_server_thread; - template<typename Connection> friend void make_job( Connection& c, typename Connection::server_thread_type& t ); - typedef int thread_state_rep_t; -public: - //! Ctor - server_thread_rep( bool assigned, IScheduler* s, IExecutionResource* r, thread_map& map, rml::client& cl ) : - uid( GetExecutionContextId() ), my_scheduler(s), my_proxy(NULL), - my_thread_map(map), my_client(cl), my_job(NULL) - { - my_state = assigned ? ts_busy : ts_idle; - my_extra_state = ts_none; - terminate = false; - my_execution_resource = r; - } - //! Dtor - ~server_thread_rep() {} - - //! 
Synchronization routine - inline rml::job* wait_for_job() { - if( !my_job ) my_job = &my_job_automaton.wait_for_job(); - return my_job; - } - - // Getters and setters - inline thread_state_t read_state() const { thread_state_rep_t s = my_state; return static_cast<thread_state_t>(s); } - inline void set_state( thread_state_t to ) {my_state = to;} - inline void set_removed() { __TBB_ASSERT( my_extra_state==ts_none, NULL ); my_extra_state = ts_removed; } - inline bool is_removed() const { return my_extra_state==ts_removed; } - inline bool is_lent() const {return my_extra_state==ts_lent;} - inline void set_lent() { my_extra_state=ts_lent; } - inline void set_returned() { my_extra_state=ts_none; } - inline IExecutionResource* get_execution_resource() { return my_execution_resource; } - inline IVirtualProcessorRoot* get_virtual_processor() { return (IVirtualProcessorRoot*)get_execution_resource(); } - - //! Enlist the thread for work - inline bool wakeup( thread_state_t to, thread_state_t from ) { - __TBB_ASSERT( from==ts_asleep && (to==ts_idle||to==ts_busy||to==ts_done), NULL ); - return my_state.compare_and_swap( to, from )==from; - } - - //! Enlist the thread for. - thread_grab_t try_grab_for(); - - //! Destroy the client job associated with the thread - template<typename Connection> bool destroy_job( Connection* c ); - - //! Try to re-use the thread - void revive( IScheduler* s, IExecutionResource* r, rml::client& c ) { - // the variables may not have been set before a thread was told to quit - __TBB_ASSERT( my_scheduler==s, "my_scheduler has been altered?\n" ); - my_scheduler = s; - __TBB_ASSERT( &my_client==&c, "my_client has been altered?\n" ); - if( r ) my_execution_resource = r; - my_client = c; - my_state = ts_idle; - __TBB_ASSERT( my_extra_state==ts_removed, NULL ); - my_extra_state = ts_none; - } - -protected: - const int uid; - IScheduler* my_scheduler; - IThreadProxy* my_proxy; - tbb::atomic<IExecutionResource*> my_execution_resource; /* for non-masters, it is IVirtualProcessorRoot */ - thread_map& my_thread_map; - rml::client& my_client; - job* my_job; - job_automaton my_job_automaton; - tbb::atomic<bool> terminate; - tbb::atomic<thread_state_rep_t> my_state; - tbb::atomic<thread_extra_state_t> my_extra_state; -}; - -//! Class that implements IExecutionContext -class server_thread : public IExecutionContext, public server_thread_rep { - friend class tbb_connection_v2; - friend class omp_connection_v2; - friend class tbb_server_thread; - friend class omp_server_thread; - friend class thread_map; - template<typename Connection> friend void make_job( Connection& c, typename Connection::server_thread_type& t ); -protected: - server_thread( bool is_tbb, bool assigned, IScheduler* s, IExecutionResource* r, thread_map& map, rml::client& cl ) : server_thread_rep(assigned,s,r,map,cl), tbb_thread(is_tbb) {} - ~server_thread() {} - /*override*/ unsigned int GetId() const { return uid; } - /*override*/ IScheduler* GetScheduler() { return my_scheduler; } - /*override*/ IThreadProxy* GetProxy() { return my_proxy; } - /*override*/ void SetProxy( IThreadProxy* thr_proxy ) { my_proxy = thr_proxy; } - -private: - bool tbb_thread; -}; - -// Forward declaration -class tbb_connection_v2; -class omp_connection_v2; - -//! 
TBB server thread -class tbb_server_thread : public server_thread { - friend class tbb_connection_v2; -public: - tbb_server_thread( bool assigned, IScheduler* s, IExecutionResource* r, tbb_connection_v2* con, thread_map& map, rml::client& cl ) : server_thread(true,assigned,s,r,map,cl), my_conn(con) { - activation_count = 0; - } - ~tbb_server_thread() {} - /*override*/ void Dispatch( DispatchState* ); - inline bool initiate_termination(); - bool sleep_perhaps(); - //! Switch out this thread - bool switch_out(); -private: - tbb_connection_v2* my_conn; -public: - tbb::atomic<int> activation_count; -}; - -//! OMP server thread -class omp_server_thread : public server_thread { - friend class omp_connection_v2; -public: - omp_server_thread( bool assigned, IScheduler* s, IExecutionResource* r, omp_connection_v2* con, thread_map& map, rml::client& cl ) : - server_thread(false,assigned,s,r,map,cl), my_conn(con), my_cookie(NULL), my_index(UINT_MAX) {} - ~omp_server_thread() {} - /*override*/ void Dispatch( DispatchState* ); - inline void* get_cookie() {return my_cookie;} - inline ::__kmp::rml::omp_client::size_type get_index() {return my_index;} - - inline IExecutionResource* get_execution_resource() { return get_execution_resource(); } - inline bool initiate_termination() { return destroy_job( (omp_connection_v2*) my_conn ); } - void sleep_perhaps(); -private: - omp_connection_v2* my_conn; - void* my_cookie; - ::__kmp::rml::omp_client::size_type my_index; - omp_dispatch_type omp_data; -}; - -//! Class that implements IScheduler -template<typename Connection> -class scheduler : no_copy, public IScheduler { -public: - /*override*/ unsigned int GetId() const {return uid;} - /*override*/ void Statistics( unsigned int* /*pTaskCompletionRate*/, unsigned int* /*pTaskArrivalRate*/, unsigned int* /*pNumberOfTaskEnqueued*/) {} - /*override*/ SchedulerPolicy GetPolicy() const { __TBB_ASSERT(my_policy,NULL); return *my_policy; } - /*override*/ void AddVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ) { if( !my_conn.is_closing() ) my_conn.add_virtual_processors( vproots, count); } - /*override*/ void RemoveVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ); - /*override*/ void NotifyResourcesExternallyIdle( IVirtualProcessorRoot** vproots, unsigned int count ) { __TBB_ASSERT( false, "This call is not allowed for TBB" ); } - /*override*/ void NotifyResourcesExternallyBusy( IVirtualProcessorRoot** vproots, unsigned int count ) { __TBB_ASSERT( false, "This call is not allowed for TBB" ); } -protected: - scheduler( Connection& conn ); - virtual ~scheduler() { __TBB_ASSERT( my_policy, NULL ); delete my_policy; } - -public: - static scheduler* create( Connection& conn ) {return new scheduler( conn );} - -private: - const int uid; - Connection& my_conn; - SchedulerPolicy* my_policy; -}; - - -/* - * --> ts_busy --> ts_done - */ -class thread_scavenger_thread : public IExecutionContext, no_copy { -public: - thread_scavenger_thread( IScheduler* s, IVirtualProcessorRoot* r, thread_map& map ) : - uid( GetExecutionContextId() ), my_scheduler(s), my_virtual_processor_root(r), my_proxy(NULL), my_thread_map(map) - { - my_state = ts_busy; -#if TBB_USE_ASSERT - activation_count = 0; -#endif - } - ~thread_scavenger_thread() {} - /*override*/ unsigned int GetId() const { return uid; } - /*override*/ IScheduler* GetScheduler() { return my_scheduler; } - /*override*/ IThreadProxy* GetProxy() { return my_proxy; } - /*override*/ void SetProxy( IThreadProxy* thr_proxy ) { my_proxy = 
thr_proxy; } - /*override*/ void Dispatch( DispatchState* ); - inline thread_state_t read_state() { return my_state; } - inline void set_state( thread_state_t s ) { my_state = s; } - inline IVirtualProcessorRoot* get_virtual_processor() { return my_virtual_processor_root; } -private: - const int uid; - IScheduler* my_scheduler; - IVirtualProcessorRoot* my_virtual_processor_root; - IThreadProxy* my_proxy; - thread_map& my_thread_map; - tbb::atomic<thread_state_t> my_state; -#if TBB_USE_ASSERT -public: - tbb::atomic<int> activation_count; -#endif -}; - -static const thread_scavenger_thread* c_claimed = reinterpret_cast<thread_scavenger_thread*>(1); - -struct garbage_connection_queue { - tbb::atomic<uintptr_t> head; - tbb::atomic<uintptr_t> tail; - static const uintptr_t empty = 0; // connection scavenger thread empty list - static const uintptr_t plugged = 1; // end of use of the list - static const uintptr_t plugged_acked = 2; // connection scavenger saw the plugged flag, and it freed all connections -}; - -//! Connection scavenger -/** It collects closed connection objects, wait for worker threads belonging to the connection to return to ConcRT RM - * then return the object to the memory manager. - */ -class connection_scavenger_thread { - friend void assist_cleanup_connections(); - /* - * connection_scavenger_thread's state - * ts_busy <----> ts_asleep <-- - */ - tbb::atomic<thread_state_t> state; - - /* We steal two bits from a connection pointer to encode - * whether the connection is for TBB or for OMP. - * - * ---------------------------------- - * | | | | - * ---------------------------------- - * ^ ^ - * / | - * 1 : tbb, 0 : omp | - * if set, terminate - */ - // FIXME: pad these? - thread_monitor monitor; - HANDLE thr_handle; -#if TBB_USE_ASSERT - tbb::atomic<int> n_scavenger_threads; -#endif - -public: - connection_scavenger_thread() : thr_handle(NULL) { - state = ts_asleep; -#if TBB_USE_ASSERT - n_scavenger_threads = 0; -#endif - } - - ~connection_scavenger_thread() {} - - void wakeup() { - if( state.compare_and_swap( ts_busy, ts_asleep )==ts_asleep ) - monitor.notify(); - } - - void sleep_perhaps(); - - void process_requests( uintptr_t conn_ex ); - - static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); - - void launch() { - thread_monitor::launch( connection_scavenger_thread::thread_routine, this, NULL ); - } - - template<typename Server, typename Client> - void add_request( generic_connection<Server,Client>* conn_to_close ); - - template<typename Server, typename Client> - uintptr_t grab_and_prepend( generic_connection<Server,Client>* last_conn_to_close ); -}; - -void free_all_connections( uintptr_t ); - -#endif /* RML_USE_WCRM */ - -#if !RML_USE_WCRM -class server_thread; - -//! thread_map_base; we need to make the iterator type available to server_thread -struct thread_map_base { - //! 
A value in the map - class value_type { - public: - server_thread& thread() { - __TBB_ASSERT( my_thread, "thread_map::value_type::thread() called when !my_thread" ); - return *my_thread; - } - rml::job& job() { - __TBB_ASSERT( my_job, "thread_map::value_type::job() called when !my_job" ); - return *my_job; - } - value_type() : my_thread(NULL), my_job(NULL) {} - server_thread& wait_for_thread() const { - for(;;) { - server_thread* ptr=const_cast<server_thread*volatile&>(my_thread); - if( ptr ) - return *ptr; - __TBB_Yield(); - } - } - /** Shortly after when a connection is established, it is possible for the server - to grab a server_thread that has not yet created a job object for that server. */ - rml::job& wait_for_job() const { - if( !my_job ) { - my_job = &my_automaton.wait_for_job(); - } - return *my_job; - } - private: - server_thread* my_thread; - /** Marked mutable because though it is physically modified, conceptually it is a duplicate of - the job held by job_automaton. */ - mutable rml::job* my_job; - job_automaton my_automaton; - // FIXME - pad out to cache line, because my_automaton is hit hard by thread() - friend class thread_map; - }; - typedef tbb::concurrent_vector<value_type,tbb::zero_allocator<value_type,tbb::cache_aligned_allocator> > array_type; -}; -#endif /* !RML_USE_WCRM */ - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warnings about uninstantiable class - #pragma warning(push) - #pragma warning(disable:4510 4610) -#endif - -template<typename T> -class padded: public T { - char pad[cache_line_size - sizeof(T)%cache_line_size]; -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif - -// FIXME - should we pad out memory to avoid false sharing of our global variables? -static unsigned the_default_concurrency; -static tbb::atomic<int> the_balance; -static tbb::atomic<tbb::internal::do_once_state> rml_module_state; - -#if !RML_USE_WCRM -//! Per thread information -/** ref_count holds number of clients that are using this, - plus 1 if a host thread owns this instance. */ -class server_thread: public ref_count { - friend class thread_map; - template<typename Server, typename Client> friend class generic_connection; - friend class tbb_connection_v2; - friend class omp_connection_v2; - //! Integral type that can hold a thread_state_t - typedef int thread_state_rep_t; - tbb::atomic<thread_state_rep_t> state; -public: - thread_monitor monitor; -private: - bool is_omp_thread; - tbb::atomic<thread_state_rep_t> my_extra_state; - server_thread* link; - thread_map_base::array_type::iterator my_map_pos; - rml::server *my_conn; - rml::job* my_job; - job_automaton* my_ja; - size_t my_index; - tbb::atomic<bool> terminate; - omp_dispatch_type omp_dispatch; - -#if TBB_USE_ASSERT - //! Flag used to check if thread is still using *this. - bool has_active_thread; -#endif /* TBB_USE_ASSERT */ - - //! Volunteer to sleep. - void sleep_perhaps( thread_state_t asleep ); - - //! Destroy job corresponding to given client - /** Return true if thread must quit. */ - template<typename Connection> - bool destroy_job( Connection& c ); - - //! Do terminate the thread - /** Return true if thread must quit. */ - bool do_termination(); - - void loop(); - static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); - -public: - server_thread(); - - ~server_thread(); - - //! Read the thread state - thread_state_t read_state() const { - thread_state_rep_t s = state; - __TBB_ASSERT( unsigned(s)<=unsigned(ts_done), "corrupted server thread?" 
); - return thread_state_t(s); - } - - //! Read the tbb-specific extra thread state - thread_state_t read_extra_state() const { - thread_state_rep_t s = my_extra_state; - return thread_state_t(s); - } - - //! Launch a thread that is bound to *this. - void launch( size_t stack_size ); - - //! Attempt to wakeup a thread - /** The value "to" is the new state for the thread, if it was woken up. - Returns true if thread was woken up, false otherwise. */ - bool wakeup( thread_state_t to, thread_state_t from ); - - //! Attempt to enslave a thread for OpenMP/TBB. - /** Returns true if state is successfully changed. 's' takes either ts_omp_busy or ts_tbb_busy */ - bool try_grab_for( thread_state_t s ); - -#if _WIN32||_WIN64 - //! Send the worker thread to sleep temporarily - void deactivate(); - - //! Wake the worker thread up - void reactivate(); -#endif /* _WIN32||_WIN64 */ -}; - -//! Bag of threads that are private to a client. -class private_thread_bag { - struct list_thread: server_thread { - list_thread* next; - }; - //! Root of atomic linked list of list_thread - /** ABA problem is avoided because items are only atomically pushed, never popped. */ - tbb::atomic<list_thread*> my_root; - tbb::cache_aligned_allocator<padded<list_thread> > my_allocator; -public: - //! Construct empty bag - private_thread_bag() {my_root=NULL;} - - //! Create a fresh server_thread object. - server_thread& add_one_thread() { - list_thread* t = my_allocator.allocate(1); - new( t ) list_thread; - // Atomically add to list - list_thread* old_root; - do { - old_root = my_root; - t->next = old_root; - } while( my_root.compare_and_swap( t, old_root )!=old_root ); - return *t; - } - - //! Destroy the bag and threads in it. - ~private_thread_bag() { - while( my_root ) { - // Unlink thread from list. - list_thread* t = my_root; - my_root = t->next; - // Destroy and deallocate the thread. - t->~list_thread(); - my_allocator.deallocate(static_cast<padded<list_thread>*>(t),1); - } - } -}; - -//! Forward declaration -void wakeup_some_tbb_threads(); - -//! Type-independent part of class generic_connection. -/** One to one map from server threads to jobs, and associated reference counting. */ -class thread_map : public thread_map_base { -public: - typedef rml::client::size_type size_type; - //! ctor - thread_map( wait_counter& fc, ::rml::client& client ) : - all_visited_at_least_once(false), my_min_stack_size(0), my_server_ref_count(1), - my_client_ref_count(1), my_client(client), my_factory_counter(fc) - { my_unrealized_threads = 0; } - //! dtor - ~thread_map() {} - typedef array_type::iterator iterator; - iterator begin() {return my_array.begin();} - iterator end() {return my_array.end();} - void bind(); - void unbind(); - void assist_cleanup( bool assist_null_only ); - - /** Returns number of unrealized threads to create. */ - size_type wakeup_tbb_threads( size_type n ); - bool wakeup_next_thread( iterator i, tbb_connection_v2& conn ); - void release_tbb_threads( server_thread* t ); - void adjust_balance( int delta ); - - //! Add a server_thread object to the map, but do not bind it. - /** Return NULL if out of unrealized threads. 
*/ - value_type* add_one_thread( bool is_omp_thread_ ); - - void bind_one_thread( rml::server& server, value_type& x ); - - void remove_client_ref(); - int add_server_ref() {return my_server_ref_count.add_ref();} - int remove_server_ref() {return my_server_ref_count.remove_ref();} - - ::rml::client& client() const {return my_client;} - - size_type get_unrealized_threads() { return my_unrealized_threads; } - -private: - private_thread_bag my_private_threads; - bool all_visited_at_least_once; - array_type my_array; - size_t my_min_stack_size; - tbb::atomic<size_type> my_unrealized_threads; - - //! Number of threads referencing *this, plus one extra. - /** When it becomes zero, the containing server object can be safely deleted. */ - ref_count my_server_ref_count; - - //! Number of jobs that need cleanup, plus one extra. - /** When it becomes zero, acknowledge_close_connection is called. */ - ref_count my_client_ref_count; - - ::rml::client& my_client; - //! Counter owned by factory that produced this thread_map. - wait_counter& my_factory_counter; -}; - -void thread_map::bind_one_thread( rml::server& server, value_type& x ) { - // Add one to account for the thread referencing this map hereforth. - server_thread& t = x.thread(); - my_server_ref_count.add_ref(); - my_client_ref_count.add_ref(); -#if TBB_USE_ASSERT - __TBB_ASSERT( t.add_ref()==1, NULL ); -#else - t.add_ref(); -#endif - // Have responsibility to start the thread. - t.my_conn = &server; - t.my_ja = &x.my_automaton; - t.launch( my_min_stack_size ); - /* Must wake thread up so it can fill in its "my_job" field in *this. - Otherwise deadlock can occur where wait_for_job spins on thread that is sleeping. */ - __TBB_ASSERT( t.state!=ts_tbb_busy, NULL ); - t.wakeup( ts_idle, ts_asleep ); -} - -thread_map::value_type* thread_map::add_one_thread( bool is_omp_thread_ ) { - size_type u; - do { - u = my_unrealized_threads; - if( !u ) return NULL; - } while( my_unrealized_threads.compare_and_swap(u-1,u)!=u ); - server_thread& t = my_private_threads.add_one_thread(); - t.is_omp_thread = is_omp_thread_; - __TBB_ASSERT( u>=1, NULL ); - t.my_index = u - 1; - __TBB_ASSERT( t.state!=ts_tbb_busy, NULL ); - t.my_extra_state = t.is_omp_thread ? ts_none : ts_created; - - iterator i = t.my_map_pos = my_array.grow_by(1); - value_type& v = *i; - v.my_thread = &t; - return &v; -} - -void thread_map::bind() { - ++my_factory_counter; - my_min_stack_size = my_client.min_stack_size(); - __TBB_ASSERT( my_unrealized_threads==0, "already called bind?" ); - my_unrealized_threads = my_client.max_job_count(); -} - -void thread_map::unbind() { - // Ask each server_thread to cleanup its job for this server. - for( iterator i=begin(); i!=end(); ++i ) { - server_thread& t = i->thread(); - t.terminate = true; - t.wakeup( ts_idle, ts_asleep ); - } - // Remove extra ref to client. - remove_client_ref(); -} - -void thread_map::assist_cleanup( bool assist_null_only ) { - // To avoid deadlock, the current thread *must* help out with cleanups that have not started, - // because the thread that created the job may be busy for a long time. - for( iterator i = begin(); i!=end(); ++i ) { - rml::job* j=0; - job_automaton& ja = i->my_automaton; - if( assist_null_only ? ja.try_plug_null() : ja.try_plug(j) ) { - if( j ) { - my_client.cleanup(*j); - } else { - // server thread did not get a chance to create a job. 
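// [Editorial note -- the cleanup handshake above hinges on job_automaton
//  (defined in job_automaton.h): either the server thread publishes its job,
//  or a cleanup thread "plugs" the automaton and takes ownership. The sketch
//  below is a simplified std::atomic model of that idea, not the actual
//  implementation; the names and the 0/1 encoding are illustrative only, and
//  it assumes job pointers are at least 2-byte aligned so the value 1 is free.]
#if 0
#include <atomic>
#include <cstdint>

class job_automaton_model {
    // 0: empty; 1: plugged (no job will ever appear); otherwise: the job pointer.
    std::atomic<std::intptr_t> state { 0 };
public:
    // Server thread: publish its job unless cleanup plugged the automaton first.
    bool try_set( void* job ) {
        std::intptr_t expected = 0;
        return state.compare_exchange_strong( expected, reinterpret_cast<std::intptr_t>(job) );
    }
    // Cleanup path: close the automaton exactly once. On success the caller
    // owns cleanup of *job, which is null if no job was ever published.
    bool try_plug( void*& job ) {
        std::intptr_t s = state.exchange( 1 );
        if( s==1 ) return false;            // another thread already plugged it
        job = reinterpret_cast<void*>( s );
        return true;
    }
};
#endif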
-            }
-            remove_client_ref();
-        }
-    }
-}
-
-thread_map::size_type thread_map::wakeup_tbb_threads( size_type n ) {
-    __TBB_ASSERT(n>0,"must specify positive number of threads to wake up");
-    iterator e = end();
-    for( iterator k=begin(); k!=e; ++k ) {
-        // If another thread added *k, there is a tiny timing window where thread() is invalid.
-        server_thread& t = k->wait_for_thread();
-        thread_state_t thr_s = t.read_state();
-        if( t.read_extra_state()==ts_created || thr_s==ts_tbb_busy || thr_s==ts_done )
-            continue;
-        if( --the_balance>=0 ) { // try to withdraw a coin from the deposit
-            while( !t.try_grab_for( ts_tbb_busy ) ) {
-                thr_s = t.read_state();
-                if( thr_s==ts_tbb_busy || thr_s==ts_done ) {
-                    // We lost; move on to the next thread.
-                    ++the_balance;
-                    goto skip;
-                }
-            }
-            if( --n==0 )
-                return 0;
-        } else {
-            // Overdraft.
-            ++the_balance;
-            break;
-        }
-skip:
-        ;
-    }
-    return n<my_unrealized_threads ? n : my_unrealized_threads;
-}
-#else /* RML_USE_WCRM */
-
-class thread_map : no_copy {
-    friend class omp_connection_v2;
-    typedef ::std::hash_map<uintptr_t,server_thread*> hash_map_type;
-    size_t my_min_stack_size;
-    size_t my_unrealized_threads;
-    ::rml::client& my_client;
-    //! Counter owned by factory that produced this thread_map.
-    wait_counter& my_factory_counter;
-    //! Ref counters
-    ref_count my_server_ref_count;
-    ref_count my_client_ref_count;
-    // FIXME: pad this?
-    hash_map_type my_map;
-    bool shutdown_in_progress;
-    std::vector<IExecutionResource*> original_exec_resources;
-    tbb::cache_aligned_allocator<padded<tbb_server_thread> > my_tbb_allocator;
-    tbb::cache_aligned_allocator<padded<omp_server_thread> > my_omp_allocator;
-    tbb::cache_aligned_allocator<padded<thread_scavenger_thread> > my_scavenger_allocator;
-    IResourceManager* my_concrt_resource_manager;
-    IScheduler* my_scheduler;
-    ISchedulerProxy* my_scheduler_proxy;
-    tbb::atomic<thread_scavenger_thread*> my_thread_scavenger_thread;
-#if TBB_USE_ASSERT
-    tbb::atomic<int> n_add_vp_requests;
-    tbb::atomic<int> n_thread_scavengers_created;
-#endif
-public:
-    thread_map( wait_counter& fc, ::rml::client& client ) :
-        my_min_stack_size(0), my_client(client), my_factory_counter(fc),
-        my_server_ref_count(1), my_client_ref_count(1), shutdown_in_progress(false),
-        my_concrt_resource_manager(NULL), my_scheduler(NULL), my_scheduler_proxy(NULL)
-    {
-        my_thread_scavenger_thread = NULL;
-#if TBB_USE_ASSERT
-        n_add_vp_requests = 0;
-        n_thread_scavengers_created = 0;
-#endif
-    }
-
-    ~thread_map() {
-        __TBB_ASSERT( n_thread_scavengers_created<=1, "too many scavenger threads created" );
-        // If the thread scavenger thread was launched, wait for it to complete.
-        if( my_thread_scavenger_thread ) {
-            __TBB_ASSERT( my_thread_scavenger_thread!=c_claimed, NULL );
-            while( my_thread_scavenger_thread->read_state()==ts_busy )
-                __TBB_Yield();
-            thread_scavenger_thread* tst = my_thread_scavenger_thread;
-            my_scavenger_allocator.deallocate(static_cast<padded<thread_scavenger_thread>*>(tst),1);
-        }
-        // Deallocate thread contexts.
-        for( hash_map_type::const_iterator hi=my_map.begin(); hi!=my_map.end(); ++hi ) {
-            server_thread* thr = hi->second;
-            if( thr->tbb_thread ) {
-                while( ((tbb_server_thread*)thr)->activation_count>1 )
-                    __TBB_Yield();
-                ((tbb_server_thread*)thr)->~tbb_server_thread();
-                my_tbb_allocator.deallocate(static_cast<padded<tbb_server_thread>*>(thr),1);
-            } else {
-                ((omp_server_thread*)thr)->~omp_server_thread();
-                my_omp_allocator.deallocate(static_cast<padded<omp_server_thread>*>(thr),1);
-            }
-        }
-        if( my_scheduler_proxy ) {
-
my_scheduler_proxy->Shutdown(); - my_concrt_resource_manager->Release(); - __TBB_ASSERT( my_scheduler, NULL ); - delete my_scheduler; - } else { - __TBB_ASSERT( !my_scheduler, NULL ); - } - } - typedef hash_map_type::key_type key_type; - typedef hash_map_type::value_type value_type; - typedef hash_map_type::iterator iterator; - iterator begin() {return my_map.begin();} - iterator end() {return my_map.end();} - iterator find( key_type k ) {return my_map.find( k );} - iterator insert( key_type k, server_thread* v ) { - std::pair<iterator,bool> res = my_map.insert( value_type(k,v) ); - return res.first; - } - void bind( IScheduler* s ) { - ++my_factory_counter; - if( s ) { - my_unrealized_threads = s->GetPolicy().GetPolicyValue( MaxConcurrency ); - __TBB_ASSERT( my_unrealized_threads>0, NULL ); - my_scheduler = s; - my_concrt_resource_manager = CreateResourceManager(); // reference count==3 when first created. - my_scheduler_proxy = my_concrt_resource_manager->RegisterScheduler( s, CONCRT_RM_VERSION_1 ); - my_scheduler_proxy->RequestInitialVirtualProcessors( false ); - } - } - bool is_closing() { return shutdown_in_progress; } - void unbind( rml::server& server, ::tbb::spin_mutex& mtx ); - void add_client_ref() { my_server_ref_count.add_ref(); } - void remove_client_ref(); - void add_server_ref() {my_server_ref_count.add_ref();} - int remove_server_ref() {return my_server_ref_count.remove_ref();} - int get_server_ref_count() { int k = my_server_ref_count.my_ref_count; return k; } - void assist_cleanup( bool assist_null_only ); - void adjust_balance( int delta ); - int current_balance() const {int k = the_balance; return k;} - ::rml::client& client() const {return my_client;} - void register_as_master( server::execution_resource_t& v ) const { (IExecutionResource*&)v = my_scheduler_proxy ? my_scheduler_proxy->SubscribeCurrentThread() : NULL; } - // Rremove() should be called from the same thread that subscribed the current h/w thread (i.e., the one that - // called register_as_master() ). - void unregister( server::execution_resource_t v ) const {if( v ) ((IExecutionResource*)v)->Remove( my_scheduler );} - void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count, tbb_connection_v2& conn, ::tbb::spin_mutex& mtx ); - void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ); - void remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ); - void mark_virtual_processors_as_lent( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ); - void create_oversubscribers( unsigned n, std::vector<server_thread*>& thr_vec, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ); - void wakeup_tbb_threads( int c, ::tbb::spin_mutex& mtx ); - void mark_virtual_processors_as_returned( IVirtualProcessorRoot** vprocs, unsigned int count, tbb::spin_mutex& mtx ); - inline void addto_original_exec_resources( IExecutionResource* r, ::tbb::spin_mutex& mtx ) { - ::tbb::spin_mutex::scoped_lock lck(mtx); - __TBB_ASSERT( !is_closing(), "trying to register master while connection is being shutdown?" 
); - original_exec_resources.push_back( r ); - } -#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED - void allocate_thread_scavenger( IExecutionResource* v ); -#endif - inline thread_scavenger_thread* get_thread_scavenger() { return my_thread_scavenger_thread; } -}; - -garbage_connection_queue connections_to_reclaim; -connection_scavenger_thread connection_scavenger; - -#endif /* !RML_USE_WCRM */ - -//------------------------------------------------------------------------ -// generic_connection -//------------------------------------------------------------------------ - -template<typename Server, typename Client> -struct connection_traits {}; - -// head of the active tbb connections -static tbb::atomic<uintptr_t> active_tbb_connections; -static tbb::atomic<int> current_tbb_conn_readers; -static size_t current_tbb_conn_reader_epoch; -static tbb::atomic<size_t> close_tbb_connection_event_count; - -#if RML_USE_WCRM -template<typename Connection> -void make_job( Connection& c, server_thread& t ); -#endif - -template<typename Server, typename Client> -class generic_connection: public Server, no_copy { - /*override*/ version_type version() const {return SERVER_VERSION;} - /*override*/ void yield() {thread_monitor::yield();} - /*override*/ void independent_thread_number_changed( int delta ) { my_thread_map.adjust_balance( -delta ); } - /*override*/ unsigned default_concurrency() const { return the_default_concurrency; } - friend void wakeup_some_tbb_threads(); - friend class connection_scavenger_thread; - -protected: - thread_map my_thread_map; - generic_connection* next_conn; - size_t my_ec; -#if RML_USE_WCRM - // FIXME: pad it? - tbb::spin_mutex map_mtx; - IScheduler* my_scheduler; - void do_open( IScheduler* s ) { - my_scheduler = s; - my_thread_map.bind( s ); - } - bool is_closing() { return my_thread_map.is_closing(); } - void request_close_connection( bool existing ); -#else - void do_open() {my_thread_map.bind();} - void request_close_connection( bool ); -#endif /* RML_USE_WCRM */ - //! 
Make destructor virtual - virtual ~generic_connection() {} -#if !RML_USE_WCRM - generic_connection( wait_counter& fc, Client& c ) : my_thread_map(fc,c), next_conn(NULL), my_ec(0) {} -#else - generic_connection( wait_counter& fc, Client& c ) : - my_thread_map(fc,c), next_conn(NULL), my_ec(0), map_mtx(), my_scheduler(NULL) {} - void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count ); - void remove_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count ); - void notify_resources_externally_busy( IVirtualProcessorRoot** vprocs, unsigned int count ) { my_thread_map.mark_virtual_processors_as_lent( vprocs, count, map_mtx ); } - void notify_resources_externally_idle( IVirtualProcessorRoot** vprocs, unsigned int count ) { - my_thread_map.mark_virtual_processors_as_returned( vprocs, count, map_mtx ); - } -#endif /* !RML_USE_WCRM */ - -public: - typedef Server server_type; - typedef Client client_type; - Client& client() const {return static_cast<Client&>(my_thread_map.client());} - void set_scratch_ptr( job& j, void* ptr ) { ::rml::server::scratch_ptr(j) = ptr; } -#if RML_USE_WCRM - template<typename Connection> - friend void make_job( Connection& c, server_thread& t ); - void add_server_ref () {my_thread_map.add_server_ref();} - void remove_server_ref() {if( my_thread_map.remove_server_ref()==0 ) delete this;} - void add_client_ref () {my_thread_map.add_client_ref();} - void remove_client_ref() {my_thread_map.remove_client_ref();} -#else /* !RML_USE_WCRM */ - int add_server_ref () {return my_thread_map.add_server_ref();} - void remove_server_ref() {if( my_thread_map.remove_server_ref()==0 ) delete this;} - void remove_client_ref() {my_thread_map.remove_client_ref();} - void make_job( server_thread& t, job_automaton& ja ); -#endif /* RML_USE_WCRM */ - static generic_connection* get_addr( uintptr_t addr_ex ) { - return reinterpret_cast<generic_connection*>( addr_ex&~(uintptr_t)3 ); - } -}; - -//------------------------------------------------------------------------ -// TBB server -//------------------------------------------------------------------------ - -template<> -struct connection_traits<tbb_server,tbb_client> { - static const bool assist_null_only = true; - static const bool is_tbb = true; -}; - -//! Represents a server/client binding. -/** The internal representation uses inheritance for the server part and a pointer for the client part. 
*/ -class tbb_connection_v2: public generic_connection<tbb_server,tbb_client> { - /*override*/ void adjust_job_count_estimate( int delta ); -#if !RML_USE_WCRM -#if _WIN32||_WIN64 - /*override*/ void register_master ( rml::server::execution_resource_t& /*v*/ ) {} - /*override*/ void unregister_master ( rml::server::execution_resource_t /*v*/ ) {} -#endif -#else - /*override*/ void register_master ( rml::server::execution_resource_t& v ) { - my_thread_map.register_as_master(v); - if( v ) ++nesting; - } - /*override*/ void unregister_master ( rml::server::execution_resource_t v ) { - if( v ) { - __TBB_ASSERT( nesting>0, NULL ); - if( --nesting==0 ) { -#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED - my_thread_map.allocate_thread_scavenger( (IExecutionResource*)v ); -#endif - } - } - my_thread_map.unregister(v); - } - IScheduler* create_scheduler() {return( scheduler<tbb_connection_v2>::create( *this ) );} - friend void free_all_connections( uintptr_t ); - friend class scheduler<tbb_connection_v2>; - friend class execution_context; - friend class connection_scavenger_thread; -#endif /* RML_USE_WCRM */ - friend void wakeup_some_tbb_threads(); - //! Estimate on number of jobs without threads working on them. - tbb::atomic<int> my_slack; - friend class dummy_class_to_shut_up_gratuitous_warning_from_gcc_3_2_3; -#if TBB_USE_ASSERT - tbb::atomic<int> my_job_count_estimate; -#endif /* TBB_USE_ASSERT */ - - tbb::atomic<int> n_adjust_job_count_requests; -#if RML_USE_WCRM - tbb::atomic<int> nesting; -#endif - - // dtor - ~tbb_connection_v2(); - -public: -#if RML_USE_WCRM - typedef tbb_server_thread server_thread_type; -#endif - //! True if there is slack that try_process can use. - bool has_slack() const {return my_slack>0;} - -#if RML_USE_WCRM - bool try_process( job& job ) -#else - bool try_process( server_thread& t, job& job ) -#endif - { - bool visited = false; - // No check for my_slack>0 here because caller is expected to do that check. - int k = --my_slack; - if( k>=0 ) { -#if !RML_USE_WCRM - t.my_extra_state = ts_visited; // remember the thread paid a trip to process() at least once -#endif - client().process(job); - visited = true; - } - ++my_slack; - return visited; - } - - tbb_connection_v2( wait_counter& fc, tbb_client& client ) : generic_connection<tbb_server,tbb_client>(fc,client) - { - my_slack = 0; -#if RML_USE_WCRM - nesting = 0; -#endif -#if TBB_USE_ASSERT - my_job_count_estimate = 0; -#endif /* TBB_USE_ASSERT */ - __TBB_ASSERT( !my_slack, NULL ); - -#if RML_USE_WCRM - do_open( client.max_job_count()>0 ? 
create_scheduler() : NULL ); -#else - do_open(); -#endif /* !RML_USE_WCRM */ - n_adjust_job_count_requests = 0; - - // Acquire head of active_tbb_connections & push the connection into the list - uintptr_t conn; - do { - for( ; (conn=active_tbb_connections)&1; ) - __TBB_Yield(); - } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn ); - - this->next_conn = generic_connection<tbb_server,tbb_client>::get_addr(conn); - // Update and release head of active_tbb_connections - active_tbb_connections = (uintptr_t) this; // set and release - } - inline void wakeup_tbb_threads( unsigned n ) { - my_thread_map.wakeup_tbb_threads( n -#if RML_USE_WCRM - , map_mtx -#endif - ); - } -#if RML_USE_WCRM - inline int get_nesting_level() { return nesting; } -#else - inline bool wakeup_next_thread( thread_map::iterator i ) {return my_thread_map.wakeup_next_thread( i, *this );} - inline thread_map::size_type get_unrealized_threads () {return my_thread_map.get_unrealized_threads();} -#endif /* !RML_USE_WCRM */ -}; - -//------------------------------------------------------------------------ -// OpenMP server -//------------------------------------------------------------------------ - -template<> -struct connection_traits<omp_server,omp_client> { - static const bool assist_null_only = false; - static const bool is_tbb = false; -}; - -class omp_connection_v2: public generic_connection<omp_server,omp_client> { -#if !RML_USE_WCRM - /*override*/ int current_balance() const {return the_balance;} -#else - friend void free_all_connections( uintptr_t ); - friend class scheduler<omp_connection_v2>; - /*override*/ int current_balance() const {return my_thread_map.current_balance();} -#endif /* !RML_USE_WCRM */ - /*override*/ int try_increase_load( size_type n, bool strict ); - /*override*/ void decrease_load( size_type n ); - /*override*/ void get_threads( size_type request_size, void* cookie, job* array[] ); -#if !RML_USE_WCRM -#if _WIN32||_WIN64 - /*override*/ void register_master ( rml::server::execution_resource_t& /*v*/ ) {} - /*override*/ void unregister_master ( rml::server::execution_resource_t /*v*/ ) {} -#endif -#else - /*override*/ void register_master ( rml::server::execution_resource_t& v ) { - my_thread_map.register_as_master( v ); - my_thread_map.addto_original_exec_resources( (IExecutionResource*)v, map_mtx ); - } - /*override*/ void unregister_master ( rml::server::execution_resource_t v ) { my_thread_map.unregister(v); } -#endif /* !RML_USE_WCRM */ -#if _WIN32||_WIN64 - /*override*/ void deactivate( rml::job* j ); - /*override*/ void reactivate( rml::job* j ); -#endif /* _WIN32||_WIN64 */ -#if RML_USE_WCRM -public: - typedef omp_server_thread server_thread_type; -private: - IScheduler* create_scheduler() {return( scheduler<omp_connection_v2>::create( *this ) );} -#endif /* RML_USE_WCRM */ -public: -#if TBB_USE_ASSERT - //! Net change in delta caused by this connection. 
- /** Should be zero when connection is broken */ - tbb::atomic<int> net_delta; -#endif /* TBB_USE_ASSERT */ - - omp_connection_v2( wait_counter& fc, omp_client& client ) : generic_connection<omp_server,omp_client>(fc,client) { -#if TBB_USE_ASSERT - net_delta = 0; -#endif /* TBB_USE_ASSERT */ -#if RML_USE_WCRM - do_open( create_scheduler() ); -#else - do_open(); -#endif /* RML_USE_WCRM */ - } - ~omp_connection_v2() {__TBB_ASSERT( net_delta==0, "net increase/decrease of load is nonzero" );} -}; - -#if !RML_USE_WCRM -/* to deal with cases where the machine is oversubscribed; we want each thread to trip to try_process() at least once */ -/* this should not involve computing the_balance */ -bool thread_map::wakeup_next_thread( thread_map::iterator this_thr, tbb_connection_v2& conn ) { - if( all_visited_at_least_once ) - return false; - - iterator e = end(); -retry: - bool exist = false; - iterator k=this_thr; - for( ++k; k!=e; ++k ) { - // If another thread added *k, there is a tiny timing window where thread() is invalid. - server_thread& t = k->wait_for_thread(); - if( t.my_extra_state!=ts_visited ) - exist = true; - if( t.read_state()!=ts_tbb_busy && t.my_extra_state==ts_started ) - if( t.try_grab_for( ts_tbb_busy ) ) - return true; - } - for( k=begin(); k!=this_thr; ++k ) { - server_thread& t = k->wait_for_thread(); - if( t.my_extra_state!=ts_visited ) - exist = true; - if( t.read_state()!=ts_tbb_busy && t.my_extra_state==ts_started ) - if( t.try_grab_for( ts_tbb_busy ) ) - return true; - } - - if( exist ) - if( conn.has_slack() ) - goto retry; - else - all_visited_at_least_once = true; - return false; -} - -void thread_map::release_tbb_threads( server_thread* t ) { - for( ; t; t = t->link ) { - while( t->read_state()!=ts_asleep ) - __TBB_Yield(); - t->my_extra_state = ts_started; - } -} -#endif /* !RML_USE_WCRM */ - -void thread_map::adjust_balance( int delta ) { - int new_balance = the_balance += delta; - if( new_balance>0 && 0>=new_balance-delta /*== old the_balance*/ ) - wakeup_some_tbb_threads(); -} - -void thread_map::remove_client_ref() { - int k = my_client_ref_count.remove_ref(); - if( k==0 ) { - // Notify factory that thread has crossed back into RML. - --my_factory_counter; - // Notify client that RML is done with the client object. - my_client.acknowledge_close_connection(); - } -} - -#if RML_USE_WCRM -/** Not a member of generic_connection because we need Connection to be the derived class. */ -template<typename Connection> -void make_job( Connection& c, typename Connection::server_thread_type& t ) { - if( t.my_job_automaton.try_acquire() ) { - rml::job& j = *t.my_client.create_one_job(); - __TBB_ASSERT( &j!=NULL, "client:::create_one_job returned NULL" ); - __TBB_ASSERT( (intptr_t(&j)&1)==0, "client::create_one_job returned misaligned job" ); - t.my_job_automaton.set_and_release( j ); - c.set_scratch_ptr( j, (void*) &t ); - } -} -#endif /* RML_USE_WCRM */ - -#if _MSC_VER && !defined(__INTEL_COMPILER) -// Suppress "conditional expression is constant" warning. 
-#pragma warning( push ) -#pragma warning( disable: 4127 ) -#endif -#if RML_USE_WCRM -template<typename Server, typename Client> -void generic_connection<Server,Client>::request_close_connection( bool exiting ) { - // for TBB connections, exiting should always be false - if( connection_traits<Server,Client>::is_tbb ) - __TBB_ASSERT( !exiting, NULL); -#if TBB_USE_ASSERT - else if( exiting ) - reinterpret_cast<omp_connection_v2*>(this)->net_delta = 0; -#endif - if( exiting ) { - uintptr_t tail = connections_to_reclaim.tail; - while( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::plugged, tail )!=tail ) - __TBB_Yield(); - my_thread_map.unbind( *this, map_mtx ); - my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only ); - // It is assumed that the client waits for all other threads to terminate before - // calling request_close_connection with true. Thus, it is safe to return all - // outstanding connection objects that are reachable. It is possible that there may - // be some unreachable connection objects lying somewhere. - free_all_connections( connection_scavenger.grab_and_prepend( this ) ); - return; - } -#else /* !RML_USE_WCRM */ -template<typename Server, typename Client> -void generic_connection<Server,Client>::request_close_connection( bool ) { -#endif /* RML_USE_WCRM */ - if( connection_traits<Server,Client>::is_tbb ) { - // acquire the head of active tbb connections - uintptr_t conn; - do { - for( ; (conn=active_tbb_connections)&1; ) - __TBB_Yield(); - } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn ); - - // Locate the current connection - generic_connection* pred_conn = NULL; - generic_connection* curr_conn = (generic_connection*) conn; - for( ; curr_conn && curr_conn!=this; curr_conn=curr_conn->next_conn ) - pred_conn = curr_conn; - __TBB_ASSERT( curr_conn==this, "the current connection is not in the list?" 
); - - // Remove this from the list - if( pred_conn ) { - pred_conn->next_conn = curr_conn->next_conn; - active_tbb_connections = reinterpret_cast<uintptr_t>(generic_connection<tbb_server,tbb_client>::get_addr(active_tbb_connections)); // release it - } else - active_tbb_connections = (uintptr_t) curr_conn->next_conn; // update & release it - curr_conn->next_conn = NULL; - // Increment the tbb connection close event count - my_ec = ++close_tbb_connection_event_count; - // Wait happens in tbb_connection_v2::~tbb_connection_v2() - } -#if RML_USE_WCRM - my_thread_map.unbind( *this, map_mtx ); - my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only ); - connection_scavenger.add_request( this ); -#else - my_thread_map.unbind(); - my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only ); - // Remove extra reference - remove_server_ref(); -#endif -} -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif - -#if RML_USE_WCRM - -template<typename Server, typename Client> -void generic_connection<Server,Client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{} - -template<> -void generic_connection<tbb_server,tbb_client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - my_thread_map.add_virtual_processors( vproots, count, (tbb_connection_v2&)*this, map_mtx ); -} -template<> -void generic_connection<omp_server,omp_client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - // For OMP, since it uses ScheudlerPolicy of MinThreads==MaxThreads, this is called once when - // RequestInitialVirtualProcessors() is called. - my_thread_map.add_virtual_processors( vproots, count, (omp_connection_v2&)*this, map_mtx ); -} - -template<typename Server, typename Client> -void generic_connection<Server,Client>::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - __TBB_ASSERT( false, "should not be called" ); -} -/* For OMP, RemoveVirtualProcessors() will never be called. */ - -template<> -void generic_connection<tbb_server,tbb_client>::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - my_thread_map.remove_virtual_processors( vproots, count, map_mtx ); -} - -void tbb_connection_v2::adjust_job_count_estimate( int delta ) { -#if TBB_USE_ASSERT - my_job_count_estimate += delta; -#endif /* TBB_USE_ASSERT */ - // Atomically update slack. 
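// [Editorial note -- my_slack counts jobs that have no thread working on them;
//  adjust_job_count_estimate() updates it atomically and only a positive
//  result triggers wakeups. A standalone sketch of that pattern, assuming a
//  hypothetical wake_workers() callback:]
#if 0
#include <atomic>

std::atomic<int> slack { 0 };

void adjust( int delta, void (*wake_workers)(int) ) {
    int c = slack.fetch_add( delta ) + delta;   // same effect as my_slack += delta
    if( c>0 )
        wake_workers( c );                      // as wakeup_tbb_threads() does above
}
#endif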
- int c = my_slack+=delta; - if( c>0 ) { - ++n_adjust_job_count_requests; - my_thread_map.wakeup_tbb_threads( c, map_mtx ); - --n_adjust_job_count_requests; - } -} -#endif /* RML_USE_WCRM */ - -tbb_connection_v2::~tbb_connection_v2() { -#if TBB_USE_ASSERT - if( my_job_count_estimate!=0 ) { - fprintf(stderr, "TBB client tried to disconnect with non-zero net job count estimate of %d\n", int(my_job_count_estimate )); - abort(); - } - __TBB_ASSERT( !my_slack, "attempt to destroy tbb_server with nonzero slack" ); - __TBB_ASSERT( this!=static_cast<tbb_connection_v2*>(generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)), "request_close_connection() must be called" ); -#endif /* TBB_USE_ASSERT */ -#if !RML_USE_WCRM - // If there are other threads ready for work, give them coins - if( the_balance>0 ) - wakeup_some_tbb_threads(); -#endif - // Someone might be accessing my data members - while( current_tbb_conn_readers>0 && (ptrdiff_t)(my_ec-current_tbb_conn_reader_epoch)>0 ) - __TBB_Yield(); -} - -#if !RML_USE_WCRM -template<typename Server, typename Client> -void generic_connection<Server,Client>::make_job( server_thread& t, job_automaton& ja ) { - if( ja.try_acquire() ) { - rml::job& j = *client().create_one_job(); - __TBB_ASSERT( &j!=NULL, "client:::create_one_job returned NULL" ); - __TBB_ASSERT( (intptr_t(&j)&1)==0, "client::create_one_job returned misaligned job" ); - ja.set_and_release( j ); - __TBB_ASSERT( t.my_conn && t.my_ja && t.my_job==NULL, NULL ); - t.my_job = &j; - set_scratch_ptr( j, (void*) &t ); - } -} - -void tbb_connection_v2::adjust_job_count_estimate( int delta ) { -#if TBB_USE_ASSERT - my_job_count_estimate += delta; -#endif /* TBB_USE_ASSERT */ - // Atomically update slack. - int c = my_slack+=delta; - if( c>0 ) { - ++n_adjust_job_count_requests; - // The client has work to do and there are threads available - thread_map::size_type n = my_thread_map.wakeup_tbb_threads(c); - - server_thread* new_threads_anchor = NULL; - thread_map::size_type i; - { - tbb::internal::affinity_helper fpa; - for( i=0; i<n; ++i ) { - // Obtain unrealized threads - thread_map::value_type* k = my_thread_map.add_one_thread( false ); - if( !k ) - // No unrealized threads left. - break; - // Eagerly start the thread off. - fpa.protect_affinity_mask(); - my_thread_map.bind_one_thread( *this, *k ); - server_thread& t = k->thread(); - __TBB_ASSERT( !t.link, NULL ); - t.link = new_threads_anchor; - new_threads_anchor = &t; - } - // Implicit destruction of fpa resets original affinity mask. - } - - thread_map::size_type j=0; - for( ; the_balance>0 && j<i; ++j ) { - if( --the_balance>=0 ) { - // Withdraw a coin from the bank - __TBB_ASSERT( new_threads_anchor, NULL ); - - server_thread* t = new_threads_anchor; - new_threads_anchor = t->link; - while( !t->try_grab_for( ts_tbb_busy ) ) - __TBB_Yield(); - t->my_extra_state = ts_started; - } else { - // Overdraft. return it to the bank - ++the_balance; - break; - } - } - __TBB_ASSERT( i-j!=0||new_threads_anchor==NULL, NULL ); - // Mark the ones that did not get started as eligible for being snatched. 
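// [Editorial note -- the "coin bank" used throughout this file: the_balance
//  holds spare hardware threads, a waker optimistically withdraws with a
//  fetch-and-decrement and returns the coin on overdraft. A minimal standalone
//  rendition, illustrative only:]
#if 0
#include <atomic>

std::atomic<int> balance { 0 };           // stand-in for the_balance

// Returns true if a coin was withdrawn and a worker may be activated.
bool try_withdraw() {
    if( balance.fetch_sub(1) - 1 >= 0 )   // same as "--the_balance>=0" above
        return true;
    balance.fetch_add(1);                 // overdraft: return the coin
    return false;
}

void deposit() {                          // e.g. a worker going back to sleep
    balance.fetch_add(1);
}
#endif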
-        if( new_threads_anchor )
-            my_thread_map.release_tbb_threads( new_threads_anchor );
-
-        --n_adjust_job_count_requests;
-    }
-}
-#endif /* !RML_USE_WCRM */
-
-#if RML_USE_WCRM
-int omp_connection_v2::try_increase_load( size_type n, bool strict ) {
-    __TBB_ASSERT(int(n)>=0,NULL);
-    if( strict ) {
-        the_balance -= int(n);
-    } else {
-        int avail, old;
-        do {
-            avail = the_balance;
-            if( avail<=0 ) {
-                // No atomic read-modify-write operation necessary.
-                return avail;
-            }
-            // Don't re-read the_balance; if it changes, compare_and_swap will fail anyway.
-            old = the_balance.compare_and_swap( int(n)<avail ? avail-n : 0, avail );
-        } while( old!=avail );
-        if( int(n)>avail )
-            n=avail;
-    }
-#if TBB_USE_ASSERT
-    net_delta += n;
-#endif /* TBB_USE_ASSERT */
-    return n;
-}
-
-void omp_connection_v2::decrease_load( size_type /*n*/ ) {}
-
-void omp_connection_v2::get_threads( size_type request_size, void* cookie, job* array[] ) {
-    unsigned index = 0;
-    std::vector<omp_server_thread*> enlisted(request_size);
-    std::vector<thread_grab_t> to_activate(request_size);
-
-    if( request_size==0 ) return;
-
-    {
-        tbb::spin_mutex::scoped_lock lock(map_mtx);
-
-        __TBB_ASSERT( !is_closing(), "trying to get threads while connection is being shut down?" );
-
-        for( int scan=0; scan<2; ++scan ) {
-            for( thread_map::iterator i=my_thread_map.begin(); i!=my_thread_map.end(); ++i ) {
-                omp_server_thread* thr = (omp_server_thread*) (*i).second;
-                // In the first scan, skip VPs that are lent.
-                if( scan==0 && thr->is_lent() ) continue;
-                thread_grab_t res = thr->try_grab_for();
-                if( res!=wk_failed ) { // i.e., the thread is not busy with some other scheduler
-                    to_activate[index] = res;
-                    enlisted[index] = thr;
-                    if( ++index==request_size )
-                        goto activate_threads;
-                }
-            }
-        }
-    }
-
-activate_threads:
-
-    for( unsigned i=0; i<index; ++i ) {
-        omp_server_thread* thr = enlisted[i];
-        if( to_activate[i]==wk_from_asleep )
-            thr->get_virtual_processor()->Activate( thr );
-        job* j = thr->wait_for_job();
-        array[i] = j;
-        thr->omp_data.produce( client(), *j, cookie, i PRODUCE_ARG(*this) );
-    }
-
-    if( index==request_size )
-        return;
-
-    // If we get to this point, it must be because dynamic==false.
-    // Create oversubscribers.
-
-    // Note that our policy is such that MinConcurrency==MaxConcurrency.
-    // RM will deliver MaxConcurrency of VirtualProcessors and no more.
-    __TBB_ASSERT( request_size>index, NULL );
-    unsigned n = request_size - index;
-    std::vector<server_thread*> thr_vec(n);
-    typedef std::vector<server_thread*>::iterator iterator_thr;
-    my_thread_map.create_oversubscribers( n, thr_vec, *this, map_mtx );
-    for( iterator_thr ti=thr_vec.begin(); ti!=thr_vec.end(); ++ti ) {
-        omp_server_thread* thr = (omp_server_thread*) *ti;
-        __TBB_ASSERT( thr, "thread not created?" );
-        // Thread is already grabbed; since it is newly created, we need to activate it.
-        thr->get_virtual_processor()->Activate( thr );
-        job* j = thr->wait_for_job();
-        array[index] = j;
-        thr->omp_data.produce( client(), *j, cookie, index PRODUCE_ARG(*this) );
-        ++index;
-    }
-}
-
-#if _WIN32||_WIN64
-void omp_connection_v2::deactivate( rml::job* j )
-{
-    my_thread_map.adjust_balance(1);
-#if TBB_USE_ASSERT
-    net_delta -= 1;
-#endif
-    omp_server_thread* thr = (omp_server_thread*) scratch_ptr( *j );
-    (thr->get_virtual_processor())->Deactivate( thr );
-}
-
-void omp_connection_v2::reactivate( rml::job* j )
-{
-    // Should not adjust the_balance because the OMP client is supposed to
-    // do try_increase_load() to reserve the threads to use.
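// [Editorial note -- the non-strict branch of try_increase_load() above is a
//  "claim at most n, never drive below zero" CAS loop. A standalone sketch
//  with std::atomic; the helper name is invented for illustration:]
#if 0
#include <algorithm>
#include <atomic>

// Claim up to n units from counter; returns the amount actually claimed,
// or the non-positive balance when nothing is available (as above).
int try_claim( std::atomic<int>& counter, int n ) {
    int avail = counter.load();
    for(;;) {
        if( avail<=0 )
            return avail;
        int take = std::min( n, avail );
        if( counter.compare_exchange_weak( avail, avail-take ) )
            return take;        // on failure, avail is refreshed and we retry
    }
}
#endif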
- omp_server_thread* thr = (omp_server_thread*) scratch_ptr( *j ); - (thr->get_virtual_processor())->Activate( thr ); -} -#endif /* !_WIN32||_WIN64 */ - -#endif /* RML_USE_WCRM */ - -//! Wake up some available tbb threads -void wakeup_some_tbb_threads() -{ - /* First, atomically grab the connection, then increase the server ref count to keep - it from being released prematurely. Second, check if the balance is available for TBB - and the tbb conneciton has slack to exploit. If the answer is true, go ahead and - try to wake some up. */ - if( generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)==0 ) - // the next connection will see the change; return. - return; - -start_it_over: - int n_curr_readers = ++current_tbb_conn_readers; - if( n_curr_readers>1 ) // I lost - return; - // if n_curr_readers==1, i am the first one, so I will take responsibility for waking tbb threads up. - - // update the current epoch - current_tbb_conn_reader_epoch = close_tbb_connection_event_count; - - // read and clear - // Newly added connection will not invalidate the pointer, and it will - // compete with the current one to claim coins. - // One that is about to close the connection increments the event count - // after it removes the connection from the list. But it will keep around - // the connection until all readers including this one catch up. So, reading - // the head and clearing the lock bit should be o.k. - generic_connection<tbb_server,tbb_client>* next_conn_wake_up = generic_connection<tbb_server,tbb_client>::get_addr( active_tbb_connections ); - - for( ; next_conn_wake_up; ) { - /* some threads are creating tbb server threads; they may not see my changes made to the_balance */ - /* When a thread is in adjust_job_count_estimate() to increase the slack - RML tries to activate worker threads on behalf of the requesting thread - by repeatedly drawing a coin from the bank optimistically and grabbing a - thread. If it finds the bank overdrafted, it returns the coin back to - the bank and returns the control to the thread (return from the method). - There lies a tiny timing hole. - - When the overdraft occurs (note that multiple masters may be in - adjust_job_count_estimate() so the_balance can be any negative value) and - a worker returns from the TBB work at that moment, its returning the coin - does not bump up the_balance over 0, so it happily returns from - wakeup_some_tbb_threads() without attempting to give coins to worker threads - that are ready. - */ - while( ((tbb_connection_v2*)next_conn_wake_up)->n_adjust_job_count_requests>0 ) - __TBB_Yield(); - - int bal = the_balance; - n_curr_readers = current_tbb_conn_readers; // get the snapshot - if( bal<=0 ) break; - // if the connection is deleted, the following will immediately return because its slack would be 0 or less. - - tbb_connection_v2* tbb_conn = (tbb_connection_v2*)next_conn_wake_up; - int my_slack = tbb_conn->my_slack; - if( my_slack>0 ) tbb_conn->wakeup_tbb_threads( my_slack ); - next_conn_wake_up = next_conn_wake_up->next_conn; - } - - int delta = current_tbb_conn_readers -= n_curr_readers; - //if delta>0, more threads entered the routine since this one took the snapshot - if( delta>0 ) { - current_tbb_conn_readers = 0; - if( the_balance>0 && generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)!=0 ) - goto start_it_over; - } - - // Signal any connection that is waiting for me to complete my access that I am done. 
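// [Editorial note -- wakeup_some_tbb_threads() lets only the first of several
//  concurrent callers walk the connection list; later callers just register
//  and leave, and the scanner re-scans if anyone arrived mid-walk. A greatly
//  simplified model of that reader-counting idea (the epoch bookkeeping that
//  guards connection teardown is omitted):]
#if 0
#include <atomic>

std::atomic<int> readers { 0 };

void scan_all( void (*do_scan)() ) {
    for(;;) {
        if( readers.fetch_add(1) + 1 > 1 )
            return;                         // an in-flight scan covers us
        do_scan();
        // Drop our registration; a positive remainder means callers arrived
        // mid-scan, so reset and scan again on their behalf.
        if( readers.fetch_sub(1) - 1 <= 0 )
            return;
        readers.store(0);
    }
}
#endif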
- current_tbb_conn_reader_epoch = close_tbb_connection_event_count; -} - -#if !RML_USE_WCRM -int omp_connection_v2::try_increase_load( size_type n, bool strict ) { - __TBB_ASSERT(int(n)>=0,NULL); - if( strict ) { - the_balance -= int(n); - } else { - int avail, old; - do { - avail = the_balance; - if( avail<=0 ) { - // No atomic read-write-modify operation necessary. - return avail; - } - // don't read the_balance; if it changes, compare_and_swap will fail anyway. - old = the_balance.compare_and_swap( int(n)<avail ? avail-n : 0, avail ); - } while( old!=avail ); - if( int(n)>avail ) - n=avail; - } -#if TBB_USE_ASSERT - net_delta += n; -#endif /* TBB_USE_ASSERT */ - return n; -} - -void omp_connection_v2::decrease_load( size_type n ) { - __TBB_ASSERT(int(n)>=0,NULL); - my_thread_map.adjust_balance(int(n)); -#if TBB_USE_ASSERT - net_delta -= n; -#endif /* TBB_USE_ASSERT */ -} - -void omp_connection_v2::get_threads( size_type request_size, void* cookie, job* array[] ) { - - if( !request_size ) - return; - - unsigned index = 0; - for(;;) { // don't return until all request_size threads are grabbed. - // Need to grab some threads - thread_map::iterator k_end=my_thread_map.end(); - for( thread_map::iterator k=my_thread_map.begin(); k!=k_end; ++k ) { - // If another thread added *k, there is a tiny timing window where thread() is invalid. - server_thread& t = k->wait_for_thread(); - if( t.try_grab_for( ts_omp_busy ) ) { - // The preincrement instead of post-increment of index is deliberate. - job& j = k->wait_for_job(); - array[index] = &j; - t.omp_dispatch.produce( client(), j, cookie, index PRODUCE_ARG(*this) ); - if( ++index==request_size ) - return; - } - } - // Need to allocate more threads - for( unsigned i=index; i<request_size; ++i ) { - __TBB_ASSERT( index<request_size, NULL ); - thread_map::value_type* k = my_thread_map.add_one_thread( true ); -#if TBB_USE_ASSERT - if( !k ) { - // Client erred - __TBB_ASSERT(false, "server::get_threads: exceeded job_count\n"); - } -#endif - my_thread_map.bind_one_thread( *this, *k ); - server_thread& t = k->thread(); - if( t.try_grab_for( ts_omp_busy ) ) { - job& j = k->wait_for_job(); - array[index] = &j; - // The preincrement instead of post-increment of index is deliberate. - t.omp_dispatch.produce( client(), j, cookie, index PRODUCE_ARG(*this) ); - if( ++index==request_size ) - return; - } // else someone else snatched it. - } - } -} -#endif /* !RML_USE_WCRM */ - -//------------------------------------------------------------------------ -// Methods of omp_dispatch_type -//------------------------------------------------------------------------ -void omp_dispatch_type::consume() { - // Wait for short window between when master sets state of this thread to ts_omp_busy - // and master thread calls produce. - job_type* j; - tbb::internal::atomic_backoff backoff; - while( (j = job)==NULL ) backoff.pause(); - job = static_cast<job_type*>(NULL); - client->process(*j,cookie,index); -#if TBB_USE_ASSERT - // Return of method process implies "decrease_load" from client's viewpoint, even though - // the actual adjustment of the_balance only happens when this thread really goes to sleep. 
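// [Editorial note -- omp_dispatch_type above writes all payload fields first
//  and publishes the job pointer last ("Must be last"); the consumer spins
//  with a backoff until the pointer appears. A minimal release/acquire
//  version of that handshake, illustrative only:]
#if 0
#include <atomic>

struct dispatch_model {
    void*              cookie = nullptr;   // payload, written before publication
    unsigned           index  = 0;
    std::atomic<void*> job { nullptr };    // publication flag, written last

    void produce( void* j, void* c, unsigned i ) {
        cookie = c;
        index  = i;
        job.store( j, std::memory_order_release );   // must be last
    }
    void* consume() {
        void* j;
        while( (j = job.load( std::memory_order_acquire ))==nullptr )
            ;   // the real code pauses with an exponential backoff here
        job.store( nullptr, std::memory_order_relaxed );
        return j;   // cookie and index are now safe to read
    }
};
#endif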
- --server->net_delta; -#endif /* TBB_USE_ASSERT */ -} - -#if !RML_USE_WCRM -#if _WIN32||_WIN64 -void omp_connection_v2::deactivate( rml::job* j ) -{ -#if TBB_USE_ASSERT - net_delta -= 1; -#endif - __TBB_ASSERT( j, NULL ); - server_thread* thr = (server_thread*) scratch_ptr( *j ); - thr->deactivate(); -} - -void omp_connection_v2::reactivate( rml::job* j ) -{ - // Should not adjust the_balance because OMP client is supposed to - // do try_increase_load() to reserve the threads to use. - __TBB_ASSERT( j, NULL ); - server_thread* thr = (server_thread*) scratch_ptr( *j ); - thr->reactivate(); -} -#endif /* _WIN32||_WIN64 */ - -//------------------------------------------------------------------------ -// Methods of server_thread -//------------------------------------------------------------------------ - -server_thread::server_thread() : - ref_count(0), - link(NULL), - my_map_pos(), - my_conn(NULL), my_job(NULL), my_ja(NULL) -{ - state = ts_idle; - terminate = false; -#if TBB_USE_ASSERT - has_active_thread = false; -#endif /* TBB_USE_ASSERT */ -} - -server_thread::~server_thread() { - __TBB_ASSERT( !has_active_thread, NULL ); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warnings about an initialized variable 'sink_for_alloca' not referenced - #pragma warning(push) - #pragma warning(disable:4189) -#endif -__RML_DECL_THREAD_ROUTINE server_thread::thread_routine( void* arg ) { - server_thread* self = static_cast<server_thread*>(arg); - AVOID_64K_ALIASING( self->my_index ); -#if TBB_USE_ASSERT - __TBB_ASSERT( !self->has_active_thread, NULL ); - self->has_active_thread = true; -#endif /* TBB_USE_ASSERT */ - self->loop(); - return 0; -} -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif - -void server_thread::launch( size_t stack_size ) { -#if USE_WINTHREAD - thread_monitor::launch( thread_routine, this, stack_size, &this->my_index ); -#else - thread_monitor::launch( thread_routine, this, stack_size ); -#endif /* USE_PTHREAD */ -} - -void server_thread::sleep_perhaps( thread_state_t asleep ) { - if( terminate ) return; - __TBB_ASSERT( asleep==ts_asleep, NULL ); - thread_monitor::cookie c; - monitor.prepare_wait(c); - if( state.compare_and_swap( asleep, ts_idle )==ts_idle ) { - if( !terminate ) { - monitor.commit_wait(c); - // Someone else woke me up. The compare_and_swap further below deals with spurious wakeups. - } else { - monitor.cancel_wait(); - } - thread_state_t s = read_state(); - if( s==ts_asleep ) { - state.compare_and_swap( ts_idle, ts_asleep ); - // I woke myself up, either because I cancelled the wait or suffered a spurious wakeup. - } else { - // Someone else woke me up; there the_balance is decremented by 1. -- tbb only - if( !is_omp_thread ) { - __TBB_ASSERT( s==ts_tbb_busy||s==ts_idle, NULL ); - } - } - } else { - // someone else made it busy ; see try_grab_for when state==ts_idle. 
- __TBB_ASSERT( state==ts_omp_busy||state==ts_tbb_busy, NULL );
- monitor.cancel_wait();
- }
- __TBB_ASSERT( read_state()!=asleep, "a thread can only put itself to sleep" );
-}
-
-bool server_thread::wakeup( thread_state_t to, thread_state_t from ) {
- bool success = false;
- __TBB_ASSERT( from==ts_asleep && (to==ts_idle||to==ts_omp_busy||to==ts_tbb_busy), NULL );
- if( state.compare_and_swap( to, from )==from ) {
- if( !is_omp_thread ) __TBB_ASSERT( to==ts_idle||to==ts_tbb_busy, NULL );
- // There is a small timing window that permits balance to become negative,
- // but such occurrences are probably rare enough to not worry about, since
- // at worst the result is slight temporary oversubscription.
- monitor.notify();
- success = true;
- }
- return success;
-}
-
-//! Attempt to change a thread's state to ts_omp_busy, and wake it up if necessary.
-bool server_thread::try_grab_for( thread_state_t target_state ) {
- bool success = false;
- switch( read_state() ) {
- case ts_asleep:
- success = wakeup( target_state, ts_asleep );
- break;
- case ts_idle:
- success = state.compare_and_swap( target_state, ts_idle )==ts_idle;
- break;
- default:
- // Thread is not available to be part of an OpenMP thread team.
- break;
- }
- return success;
-}
-
-#if _WIN32||_WIN64
-void server_thread::deactivate() {
- thread_state_t es = (thread_state_t) my_extra_state.fetch_and_store( ts_deactivated );
- __TBB_ASSERT( my_extra_state==ts_deactivated, "someone else tampered with my_extra_state?" );
- if( es==ts_none )
- state = ts_idle;
- else
- __TBB_ASSERT( es==ts_reactivated, "Cannot call deactivate() while in ts_deactivated" );
- // only the thread can transition itself from ts_deactivated to ts_none
- __TBB_ASSERT( my_extra_state==ts_deactivated, "someone else tampered with my_extra_state?" );
- my_extra_state = ts_none; // release the critical section
- int bal = ++the_balance;
- if( bal>0 )
- wakeup_some_tbb_threads();
- if( es==ts_none )
- sleep_perhaps( ts_asleep );
-}
-
-void server_thread::reactivate() {
- thread_state_t es;
- do {
- while( (es=read_extra_state())==ts_deactivated )
- __TBB_Yield();
- if( es==ts_reactivated ) {
- __TBB_ASSERT( false, "two Reactivate() calls in a row. Should not happen" );
- return;
- }
- __TBB_ASSERT( es==ts_none, NULL );
- } while( (thread_state_t)my_extra_state.compare_and_swap( ts_reactivated, ts_none )!=ts_none );
- if( state!=ts_omp_busy ) {
- my_extra_state = ts_none;
- while( !try_grab_for( ts_omp_busy ) )
- __TBB_Yield();
- }
-}
-#endif /* _WIN32||_WIN64 */
-
-
-template<typename Connection>
-bool server_thread::destroy_job( Connection& c ) {
- __TBB_ASSERT( !is_omp_thread||(state==ts_idle||state==ts_omp_busy), NULL );
- __TBB_ASSERT( is_omp_thread||(state==ts_idle||state==ts_tbb_busy), NULL );
- if( !is_omp_thread ) {
- __TBB_ASSERT( state==ts_idle||state==ts_tbb_busy, NULL );
- if( state==ts_idle )
- state.compare_and_swap( ts_done, ts_idle );
- // 'state' may be set to ts_tbb_busy by another thread.
-
- if( state==ts_tbb_busy ) { // return the coin to the deposit
- // need to deposit first to let the next connection see the change
- ++the_balance;
- state = ts_done; // no other thread changes the state when it is ts_*_busy
- }
- }
- if( job_automaton* ja = my_ja ) {
- rml::job* j;
- if( ja->try_plug(j) ) {
- __TBB_ASSERT( j, NULL );
- c.client().cleanup(*j);
- c.remove_client_ref();
- } else {
- // Some other thread took responsibility for cleaning up the job.
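// [Editor's note] sleep_perhaps() above is a client of the two-phase wait
// protocol (prepare_wait / commit_wait / cancel_wait) implemented by
// rml::internal::thread_monitor, whose full definition appears later in this
// diff. Below is a minimal, hypothetical sketch of that protocol using
// standard C++ primitives, for illustration only; all names are invented.
#include <condition_variable>
#include <mutex>

class toy_monitor {
    std::mutex mtx_;
    std::condition_variable cv_;
    unsigned long epoch_ = 0;              // bumped by every notify()
public:
    typedef unsigned long cookie;
    // Phase 1: snapshot the epoch before publishing the intent to sleep.
    cookie prepare_wait() {
        std::lock_guard<std::mutex> lk(mtx_);
        return epoch_;
    }
    // Phase 2: sleep only if no notify() has happened since prepare_wait();
    // a stale cookie means the wakeup already arrived, so return at once.
    void commit_wait(cookie c) {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [&] { return epoch_ != c; });
    }
    void cancel_wait() { /* nothing to undo in this toy version */ }
    void notify() {
        { std::lock_guard<std::mutex> lk(mtx_); ++epoch_; }
        cv_.notify_one();
    }
};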
- }
- }
- // Must remove the client reference first, because execution of
- // c.remove_ref() can cause *this to be destroyed.
- int k = remove_ref();
- __TBB_ASSERT_EX( k==0, "more than one reference?" );
-#if TBB_USE_ASSERT
- has_active_thread = false;
-#endif /* TBB_USE_ASSERT */
- c.remove_server_ref();
- return true;
-}
-
-bool server_thread::do_termination() {
- if( is_omp_thread )
- return destroy_job( *static_cast<omp_connection_v2*>(my_conn) );
- else
- return destroy_job( *static_cast<tbb_connection_v2*>(my_conn) );
-}
-
-//! Loop that each thread executes
-void server_thread::loop() {
- if( is_omp_thread )
- static_cast<omp_connection_v2*>(my_conn)->make_job( *this, *my_ja );
- else
- static_cast<tbb_connection_v2*>(my_conn)->make_job( *this, *my_ja );
- for(;;) {
- __TBB_Yield();
- if( state==ts_idle )
- sleep_perhaps( ts_asleep );
-
- // Check whether I should quit.
- if( terminate )
- if( do_termination() )
- return;
-
- // read the state
- thread_state_t s = read_state();
- __TBB_ASSERT( s==ts_idle||s==ts_omp_busy||s==ts_tbb_busy, NULL );
-
- if( s==ts_omp_busy ) {
- // Enslaved by OpenMP team.
- omp_dispatch.consume();
- /* here wake tbb threads up if feasible */
- if( ++the_balance>0 )
- wakeup_some_tbb_threads();
- state = ts_idle;
- } else if( s==ts_tbb_busy ) {
- // do some TBB work.
- __TBB_ASSERT( my_conn && my_job, NULL );
- tbb_connection_v2& conn = *static_cast<tbb_connection_v2*>(my_conn);
- // give openmp higher priority
- bool has_coin = true;
- if( conn.has_slack() ) {
- // it has the coin; it should trip to the scheduler at least once as long as its slack is positive
- do {
- if( conn.try_process( *this, *my_job ) )
- if( conn.has_slack() && the_balance>=0 )
- has_coin = !conn.wakeup_next_thread( my_map_pos );
- } while( has_coin && conn.has_slack() && the_balance>=0 );
- }
- state = ts_idle;
- if( has_coin ) {
- ++the_balance; // return the coin back to the deposit
- if( conn.has_slack() ) { // a new adjust_job_count_estimate() is in progress
- // it may have missed my changes to state and/or the_balance
- if( --the_balance>=0 ) { // try to grab the coin back
- // I got the coin
- if( state.compare_and_swap( ts_tbb_busy, ts_idle )!=ts_idle )
- ++the_balance; // someone else enlisted me.
- } else {
- // overdraft. return the coin
- ++the_balance;
- }
- } // else the new request will see my changes to state & the_balance.
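// [Editor's note] The "coin" dance above is easier to see in isolation:
// the_balance acts as a global pool of run permissions, initialized to
// AvailableHwConcurrency() - 1 (see init_rml_module() later in this diff).
// A hypothetical sketch of the draw/return discipline with std::atomic:
#include <atomic>

static std::atomic<int> toy_balance(3);    // stand-in for the_balance

// Optimistically draw a coin; on overdraft (negative balance), pay it back.
static bool draw_coin() {
    if (toy_balance.fetch_sub(1) > 0)      // old value > 0 means we got one
        return true;                       // allowed to run TBB work
    toy_balance.fetch_add(1);              // overdraft: return the coin
    return false;
}

// A thread going idle returns its coin so someone else may run.
static void return_coin() { toy_balance.fetch_add(1); }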
- } - /* here wake tbb threads up if feasible */ - if( the_balance>0 ) - wakeup_some_tbb_threads(); - } - } -} -#endif /* !RML_USE_WCRM */ - -#if RML_USE_WCRM - -class tbb_connection_v2; -class omp_connection_v2; - -#define CREATE_SCHEDULER_POLICY(policy,min_thrs,max_thrs,stack_size) \ - try { \ - policy = new SchedulerPolicy (7, \ - SchedulerKind, RML_THREAD_KIND, /*defined in _rml_serer_msrt.h*/ \ - MinConcurrency, min_thrs, \ - MaxConcurrency, max_thrs, \ - TargetOversubscriptionFactor, 1, \ - ContextStackSize, stack_size/1000, /*ConcRT:kB, iRML:bytes*/ \ - ContextPriority, THREAD_PRIORITY_NORMAL, \ - DynamicProgressFeedback, ProgressFeedbackDisabled ); \ - } catch ( invalid_scheduler_policy_key & ) { \ - __TBB_ASSERT( false, "invalid scheduler policy key exception caught" );\ - } catch ( invalid_scheduler_policy_value & ) { \ - __TBB_ASSERT( false, "invalid scheduler policy value exception caught" );\ - } - -static unsigned int core_count; -static tbb::atomic<int> core_count_inited; - - -static unsigned int get_processor_count() -{ - if( core_count_inited!=2 ) { - if( core_count_inited.compare_and_swap( 1, 0 )==0 ) { - core_count = GetProcessorCount(); - core_count_inited = 2; - } else { - tbb::internal::spin_wait_until_eq( core_count_inited, 2 ); - } - } - return core_count; -} - -template<typename Connection> -scheduler<Connection>::scheduler( Connection& conn ) : uid(GetSchedulerId()), my_conn(conn) {} - -template<> -scheduler<tbb_connection_v2>::scheduler( tbb_connection_v2& conn ) : uid(GetSchedulerId()), my_conn(conn) -{ - rml::client& cl = my_conn.client(); - unsigned max_job_count = cl.max_job_count(); - unsigned count = get_processor_count(); - __TBB_ASSERT( max_job_count>0, "max job count must be positive" ); - __TBB_ASSERT( count>1, "The processor count must be greater than 1" ); - if( max_job_count>count-1) max_job_count = count-1; - CREATE_SCHEDULER_POLICY( my_policy, 0, max_job_count, cl.min_stack_size() ); -} - -#if __RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED -template<> -void scheduler<tbb_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot**, unsigned int) -{ -} -#else -template<> -void scheduler<tbb_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - if( !my_conn.is_closing() ) - my_conn.remove_virtual_processors( vproots, count ); -} -#endif - -template<> -void scheduler<tbb_connection_v2>::NotifyResourcesExternallyIdle( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/) -{ - __TBB_ASSERT( false, "NotifyResourcesExternallyIdle() is not allowed for TBB" ); -} - -template<> -void scheduler<tbb_connection_v2>::NotifyResourcesExternallyBusy( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/ ) -{ - __TBB_ASSERT( false, "NotifyResourcesExternallyBusy() is not allowed for TBB" ); -} - -template<> -scheduler<omp_connection_v2>::scheduler( omp_connection_v2& conn ) : uid(GetSchedulerId()), my_conn(conn) -{ - unsigned count = get_processor_count(); - rml::client& cl = my_conn.client(); - __TBB_ASSERT( count>1, "The processor count must be greater than 1" ); - CREATE_SCHEDULER_POLICY( my_policy, count-1, count-1, cl.min_stack_size() ); -} - -template<> -void scheduler<omp_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/ ) { - __TBB_ASSERT( false, "RemoveVirtualProcessors() is not allowed for OMP" ); -} - -template<> -void scheduler<omp_connection_v2>::NotifyResourcesExternallyIdle( IVirtualProcessorRoot** vproots, unsigned int count ){ - if( 
!my_conn.is_closing() )
- my_conn.notify_resources_externally_idle( vproots, count );
-}
-
-template<>
-void scheduler<omp_connection_v2>::NotifyResourcesExternallyBusy( IVirtualProcessorRoot** vproots, unsigned int count ){
- if( !my_conn.is_closing() )
- my_conn.notify_resources_externally_busy( vproots, count );
-}
-
-/* ts_idle, ts_asleep, ts_busy */
-void tbb_server_thread::Dispatch( DispatchState* ) {
- // Activate() will resume a thread right after Deactivate() as if it returns from the call
- tbb_connection_v2* tbb_conn = static_cast<tbb_connection_v2*>(my_conn);
- make_job( *tbb_conn, *this );
-
- for( ;; ) {
- // Try to wake some tbb threads if the balance is positive.
- // When a thread is added by ConcRT and enters here for the first time,
- // the thread may wake itself up (i.e., atomically change its state to ts_busy).
- if( the_balance>0 )
- wakeup_some_tbb_threads();
- if( read_state()!=ts_busy )
- if( sleep_perhaps() )
- return;
- if( terminate )
- if( initiate_termination() )
- return;
- if( read_state()==ts_busy ) {
- // this thread has a coin (i.e., state=ts_busy); it should trip to the scheduler at least once
- if ( tbb_conn->has_slack() ) {
- do {
- tbb_conn->try_process( *wait_for_job() );
- } while( tbb_conn->has_slack() && the_balance>=0 && !is_removed() );
- }
- __TBB_ASSERT( read_state()==ts_busy, "thread is not in busy state after returning from process()" );
- // see remove_virtual_processors()
- if( my_state.compare_and_swap( ts_idle, ts_busy )==ts_busy ) {
- int bal = ++the_balance;
- if( tbb_conn->has_slack() ) {
- // slack is positive, volunteer to help
- bal = --the_balance; // try to grab the coin back
- if( bal>=0 ) { // got the coin back
- if( my_state.compare_and_swap( ts_busy, ts_idle )!=ts_idle )
- ++the_balance; // someone else enlisted me.
- // else my_state is ts_busy, I will come back to tbb_conn->try_process().
- } else {
- // overdraft. return the coin
- ++the_balance;
- }
- } // else the new request will see my changes to state & the_balance.
- } else {
- __TBB_ASSERT( false, "someone tampered with my state" );
- }
- } // someone else might set the state to something other than ts_idle
- }
-}
-
-void omp_server_thread::Dispatch( DispatchState* ) {
- // Activate() will resume a thread right after Deactivate() as if it returns from the call
- make_job( *static_cast<omp_connection_v2*>(my_conn), *this );
-
- for( ;; ) {
- if( read_state()!=ts_busy )
- sleep_perhaps();
- if( terminate ) {
- if( initiate_termination() )
- return;
- }
- if( read_state()==ts_busy ) {
- omp_data.consume();
- __TBB_ASSERT( read_state()==ts_busy, "thread is not in busy state after returning from process()" );
- my_thread_map.adjust_balance( 1 );
- set_state( ts_idle );
- }
- // someone else might set the state to something other than ts_idle
- }
-}
-
-//! Attempt to change a thread's state to ts_omp_busy, and wake it up if necessary.
-thread_grab_t server_thread_rep::try_grab_for() {
- thread_grab_t res = wk_failed;
- thread_state_t s = read_state();
- switch( s ) {
- case ts_asleep:
- if( wakeup( ts_busy, ts_asleep ) )
- res = wk_from_asleep;
- __TBB_ASSERT( res==wk_failed||read_state()==ts_busy, NULL );
- break;
- case ts_idle:
- if( my_state.compare_and_swap( ts_busy, ts_idle )==ts_idle )
- res = wk_from_idle;
- // At this point a thread is grabbed (i.e., its state has changed to ts_busy).
- // It is possible that the thread 1) processes the job, returns from process() and
- // sets its state to ts_idle again. In some cases, it even sets its state to ts_asleep.
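// [Editor's note] try_grab_for() above is the usual lock-free "claim by
// compare-and-swap" idiom: exactly one contender moves the state from
// ts_idle (or ts_asleep) to ts_busy, and every other contender observes a
// failed CAS. A hypothetical stand-alone sketch with std::atomic:
#include <atomic>

enum toy_state { st_idle, st_asleep, st_busy };

static bool try_claim(std::atomic<toy_state>& s) {
    toy_state expected = st_idle;
    // compare_exchange_strong succeeds for exactly one claimant.
    return s.compare_exchange_strong(expected, st_busy);
}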
- break;
- default:
- break;
- }
- return res;
-}
-
-bool tbb_server_thread::switch_out() {
- thread_state_t s = read_state();
- __TBB_ASSERT( s==ts_asleep||s==ts_busy, NULL );
- // This thread comes back from the TBB scheduler, and changed its state to ts_asleep successfully.
- // The master enlisted it and woke it up by Activate()'ing it; now it is emerging from Deactivated().
- // ConcRT requested the removal of the vp associated with the thread, and RML marks it removed.
- // Now it is ts_busy and marked removed -- we should remove it.
- IExecutionResource* old_vp = my_execution_resource;
- if( s==ts_busy ) {
- ++the_balance;
- my_state = ts_asleep;
- }
- IThreadProxy* proxy = my_proxy;
- __TBB_ASSERT( proxy, NULL );
- my_execution_resource = (IExecutionResource*) c_remove_prepare;
- old_vp->Remove( my_scheduler );
- my_execution_resource = (IExecutionResource*) c_remove_returned;
- int cnt = --activation_count;
- __TBB_ASSERT_EX( cnt==0||cnt==1, "too many activations?" );
- proxy->SwitchOut();
- if( terminate ) {
- bool activated = activation_count==1;
-#if TBB_USE_ASSERT
- /* In a rare sequence of events, a thread comes out of SwitchOut with activation_count==1.
- * 1) The thread is SwitchOut'ed.
- * 2) AddVirtualProcessors() arrived and the thread is Activated.
- * 3) The thread is coming out of SwitchOut().
- * 4) request_close_connection arrives and informs the thread that it is time to terminate.
- * 5) The thread hits the check and falls into the path with 'activated==true'.
- * In that case, do the clean-up but do not switch to the thread scavenger; rather simply return to RM.
- */
- if( activated ) {
- // thread is 'revived' in add_virtual_processors after being Activated().
- // so, if the thread extra state is still marked 'removed', it will shortly change to 'none'
- // i.e., !is_removed(). The thread state is changed to ts_idle before the extra state, so
- // the thread's state should be either ts_idle or ts_done.
- while( is_removed() )
- __TBB_Yield();
- thread_state_t s = read_state();
- __TBB_ASSERT( s==ts_idle || s==ts_done, NULL );
- }
-#endif
- __TBB_ASSERT( my_state==ts_asleep||my_state==ts_idle, NULL );
- // it is possible that in make_job() the thread may not have a chance to create a job.
- // my_job may not be set if the thread did not get a chance to process client's job (i.e., call try_process())
- rml::job* j;
- if( my_job_automaton.try_plug(j) ) {
- __TBB_ASSERT( j, NULL );
- my_client.cleanup(*j);
- my_conn->remove_client_ref();
- }
- // Must remove the client reference first, because execution of
- // c.remove_ref() can cause *this to be destroyed.
- if( !activated )
- proxy->SwitchTo( my_thread_map.get_thread_scavenger(), Idle );
- my_conn->remove_server_ref();
- return true;
- }
- // We revive a thread in add_virtual_processors() after we Activate the thread on a new virtual processor.
- // So briefly wait until the thread's my_execution_resource gets set.
- while( get_virtual_processor()==c_remove_returned )
- __TBB_Yield();
- return false;
-}
-
-bool tbb_server_thread::sleep_perhaps () {
- if( terminate ) return false;
- thread_state_t s = read_state();
- if( s==ts_idle ) {
- if( my_state.compare_and_swap( ts_asleep, ts_idle )==ts_idle ) {
- // If a thread is between read_state() and compare_and_swap(), and the master tries to terminate,
- // the master's compare_and_swap() will fail because the thread's state is ts_idle.
- // We need to check if terminate is true or not before letting the thread go to sleep; otherwise
- // we will miss the terminate signal.
- if( !terminate ) {
- if( !is_removed() ) {
- --activation_count;
- get_virtual_processor()->Deactivate( this );
- }
- if( is_removed() ) {
- if( switch_out() )
- return true;
- __TBB_ASSERT( my_execution_resource>c_remove_returned, NULL );
- }
- // in add_virtual_processors(), when we revive a thread, we change its state after Activating the thread;
- // in that case the state may be ts_asleep for a short period
- while( read_state()==ts_asleep )
- __TBB_Yield();
- } else {
- if( my_state.compare_and_swap( ts_done, ts_asleep )!=ts_asleep ) {
- --activation_count;
- // unbind() changed my state. It will call Activate(). So issue a matching Deactivate()
- get_virtual_processor()->Deactivate( this );
- }
- }
- }
- } else {
- __TBB_ASSERT( s==ts_busy, NULL );
- }
- return false;
-}
-
-void omp_server_thread::sleep_perhaps () {
- if( terminate ) return;
- thread_state_t s = read_state();
- if( s==ts_idle ) {
- if( my_state.compare_and_swap( ts_asleep, ts_idle )==ts_idle ) {
- // If a thread is between read_state() and compare_and_swap(), and the master tries to terminate,
- // the master's compare_and_swap() will fail because the thread's state is ts_idle.
- // We need to check if terminate is true or not before letting the thread go to sleep; otherwise
- // we will miss the terminate signal.
- if( !terminate ) {
- get_virtual_processor()->Deactivate( this );
- __TBB_ASSERT( !is_removed(), "OMP threads should not be deprived of a virtual processor" );
- __TBB_ASSERT( read_state()!=ts_asleep, NULL );
- } else {
- if( my_state.compare_and_swap( ts_done, ts_asleep )!=ts_asleep )
- // unbind() changed my state. It will call Activate(). So issue a matching Deactivate()
- get_virtual_processor()->Deactivate( this );
- }
- }
- } else {
- __TBB_ASSERT( s==ts_busy, NULL );
- }
-}
-
-bool tbb_server_thread::initiate_termination() {
- if( read_state()==ts_busy ) {
- int bal = ++the_balance;
- if( bal>0 ) wakeup_some_tbb_threads();
- }
- return destroy_job( (tbb_connection_v2*) my_conn );
-}
-
-template<typename Connection>
-bool server_thread_rep::destroy_job( Connection* c ) {
- __TBB_ASSERT( my_state!=ts_asleep, NULL );
- rml::job* j;
- if( my_job_automaton.try_plug(j) ) {
- __TBB_ASSERT( j, NULL );
- my_client.cleanup(*j);
- c->remove_client_ref();
- }
- // Must remove the client reference first, because execution of
- // c.remove_ref() can cause *this to be destroyed.
- c->remove_server_ref();
- return true;
-}
-
-void thread_map::assist_cleanup( bool assist_null_only ) {
- // To avoid deadlock, the current thread *must* help out with cleanups that have not started,
- // because the thread that created the job may be busy for a long time.
- for( iterator i = begin(); i!=end(); ++i ) {
- rml::job* j=0;
- server_thread* thr = (*i).second;
- job_automaton& ja = thr->my_job_automaton;
- if( assist_null_only ? ja.try_plug_null() : ja.try_plug(j) ) {
- if( j ) {
- my_client.cleanup(*j);
- } else {
- // server thread did not get a chance to create a job.
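// [Editor's note] assist_cleanup() is safe because job_automaton::try_plug()
// hands each job to exactly one caller. A hypothetical sketch of that
// exactly-once hand-off, reduced to an atomic pointer exchange:
#include <atomic>

struct toy_job {};
static void toy_cleanup(toy_job*) { /* release job resources */ }

// Whoever swaps the pointer out first owns the cleanup; later callers
// (including the thread that created the job) see nullptr and do nothing.
static bool toy_try_plug(std::atomic<toy_job*>& slot) {
    if (toy_job* j = slot.exchange(nullptr)) {
        toy_cleanup(j);
        return true;
    }
    return false;
}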
- }
- remove_client_ref();
- }
- }
-}
-
-void thread_map::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count, tbb_connection_v2& conn, ::tbb::spin_mutex& mtx )
-{
-#if TBB_USE_ASSERT
- int req_cnt = ++n_add_vp_requests;
- __TBB_ASSERT( req_cnt==1, NULL );
-#endif
- std::vector<thread_map::iterator> vec(count);
- std::vector<tbb_server_thread*> tvec(count);
- iterator end;
-
- {
- tbb::spin_mutex::scoped_lock lck( mtx );
- __TBB_ASSERT( my_map.size()==0||count==1, NULL );
- end = my_map.end(); //remember 'end' at the time of 'find'
- // find entries in the map for those VPs that were previously added and then removed.
- for( size_t i=0; i<count; ++i ) {
- vec[i] = my_map.find( (key_type) vproots[i] );
-#if TBB_USE_DEBUG
- if( vec[i]!=end ) {
- tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
- IVirtualProcessorRoot* v = t->get_virtual_processor();
- __TBB_ASSERT( v==c_remove_prepare||v==c_remove_returned, NULL );
- }
-#endif
- }
-
- iterator nxt = my_map.begin();
- for( size_t i=0; i<count; ++i ) {
- if( vec[i]!=end ) {
-#if TBB_USE_ASSERT
- tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
- __TBB_ASSERT( t->read_state()==ts_asleep, NULL );
- IVirtualProcessorRoot* r = t->get_virtual_processor();
- __TBB_ASSERT( r==c_remove_prepare||r==c_remove_returned, NULL );
-#endif
- continue;
- }
-
- if( my_unrealized_threads>0 ) {
- --my_unrealized_threads;
- } else {
- __TBB_ASSERT( nxt!=end, "nxt should not be thread_map::iterator::end" );
- // find a removed thread context for i
- for( ; nxt!=end; ++nxt ) {
- tbb_server_thread* t = (tbb_server_thread*) (*nxt).second;
- if( t->is_removed() && t->read_state()==ts_asleep && t->get_virtual_processor()==c_remove_returned ) {
- vec[i] = nxt++;
- break;
- }
- }
- // break target
- if( vec[i]==end ) // ignore excessive VP.
- vproots[i] = NULL;
- }
- }
- }
-
- for( size_t i=0; i<count; ++i ) {
- __TBB_ASSERT( !tvec[i], NULL );
- if( vec[i]==end ) {
- if( vproots[i] ) {
- tvec[i] = my_tbb_allocator.allocate(1);
- new ( tvec[i] ) tbb_server_thread( false, my_scheduler, (IExecutionResource*)vproots[i], &conn, *this, my_client );
- }
-#if TBB_USE_ASSERT
- } else {
- tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
- __TBB_ASSERT( t->GetProxy(), "Proxy is cleared?" );
-#endif
- }
- }
-
- {
- tbb::spin_mutex::scoped_lock lck( mtx );
-
- bool closing = is_closing();
-
- for( size_t i=0; i<count; ++i ) {
- if( vec[i]==end ) {
- if( vproots[i] ) {
- thread_map::key_type key = (thread_map::key_type) vproots[i];
- vec[i] = insert( key, (server_thread*) tvec[i] );
- my_client_ref_count.add_ref();
- my_server_ref_count.add_ref();
- }
- } else if( !closing ) {
- tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
-
- if( (*vec[i]).first!=(thread_map::key_type)vproots[i] ) {
- my_map.erase( vec[i] );
- thread_map::key_type key = (thread_map::key_type) vproots[i];
- __TBB_ASSERT( key, NULL );
- vec[i] = insert( key, t );
- }
- __TBB_ASSERT( t->read_state()==ts_asleep, NULL );
- // We did not decrement server/client ref count when a thread is removed.
- // So, don't increment server/client ref count here.
- }
- }
-
- // we could check is_closing() earlier. That requires marking the newly allocated server_thread objects
- // that are not inserted into the thread_map, and deallocate them. Doing so seems more cumbersome
- // than simply adding these to the thread_map and let thread_map's destructor take care of reclamation.
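// [Editor's note] add_virtual_processors() above follows a common two-pass
// shape: do the slow work (allocation, construction) outside the mutex, then
// re-acquire it only to splice the results into the shared map, re-checking
// state that may have changed in between. A hypothetical distilled version:
#include <map>
#include <mutex>

static std::map<int, int*> toy_map;
static std::mutex toy_mutex;

static void toy_add(int key) {
    int* value = new int(0);               // slow work, done without the lock
    std::lock_guard<std::mutex> lk(toy_mutex);
    if (!toy_map.emplace(key, value).second)
        delete value;                      // lost the race; discard our copy
}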
- __TBB_ASSERT( closing==is_closing(), NULL ); - if( closing ) return; - } - - for( size_t i=0; i<count; ++i ) { - if( vproots[i] ) { - tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second; - __TBB_ASSERT( tvec[i]!=NULL||t->GetProxy(), "Proxy is cleared?" ); - if( t->is_removed() ) - __TBB_ASSERT( t->get_virtual_processor()==c_remove_returned, NULL ); - int cnt = ++t->activation_count; - __TBB_ASSERT_EX( cnt==0||cnt==1, NULL ); - vproots[i]->Activate( t ); - if( t->is_removed() ) - t->revive( my_scheduler, vproots[i], my_client ); - } - } -#if TBB_USE_ASSERT - req_cnt = --n_add_vp_requests; - __TBB_ASSERT( req_cnt==0, NULL ); -#endif -} - -void thread_map::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ) { - if( my_map.size()==0 ) - return; - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - - for( unsigned int c=0; c<count; ++c ) { - iterator i = my_map.find( (key_type) vproots[c] ); - if( i==my_map.end() ) { - thread_scavenger_thread* tst = my_thread_scavenger_thread; - if( !tst ) { - // Remove unknown vp from my scheduler; - vproots[c]->Remove( my_scheduler ); - } else { - while( (tst=my_thread_scavenger_thread)==c_claimed ) - __TBB_Yield(); - if( vproots[c]!=tst->get_virtual_processor() ) - vproots[c]->Remove( my_scheduler ); - } - continue; - } - tbb_server_thread* thr = (tbb_server_thread*) (*i).second; - __TBB_ASSERT( thr->tbb_thread, "incorrect type of server_thread" ); - thr->set_removed(); - if( thr->read_state()==ts_asleep ) { - while( thr->activation_count>0 ) { - if( thr->get_virtual_processor()<=c_remove_returned ) - break; - __TBB_Yield(); - } - if( thr->get_virtual_processor()>c_remove_returned ) { - // the thread is in Deactivated state - ++thr->activation_count; - // wake the thread up so that it Switches Out itself. - thr->get_virtual_processor()->Activate( thr ); - } // else, it is Switched Out - } // else the thread will see that it is removed and proceed to switch itself out without Deactivation - } -} - -void thread_map::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ) -{ - std::vector<thread_map::iterator> vec(count); - std::vector<server_thread*> tvec(count); - iterator end; - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - // read the map - end = my_map.end(); //remember 'end' at the time of 'find' - for( size_t i=0; i<count; ++i ) - vec[i] = my_map.find( (key_type) vproots[i] ); - } - - for( size_t i=0; i<count; ++i ) { - __TBB_ASSERT( !tvec[i], NULL ); - if( vec[i]==end ) { - tvec[i] = my_omp_allocator.allocate(1); - new ( tvec[i] ) omp_server_thread( false, my_scheduler, (IExecutionResource*)vproots[i], &conn, *this, my_client ); - } - } - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - for( size_t i=0; i<count; ++i ) { - if( vec[i]==my_map.end() ) { - thread_map::key_type key = (thread_map::key_type) vproots[i]; - vec[i] = insert( key, tvec[i] ); - my_client_ref_count.add_ref(); - my_server_ref_count.add_ref(); - } - } - - // we could check is_closing() earlier. That requires marking the newly allocated server_thread objects - // that are not inserted into the thread_map, and deallocate them. Doing so seems more cumbersome - // than simply adding these to the thread_map and let thread_map's destructor take care of reclamation. 
- if( is_closing() ) return; - } - - for( size_t i=0; i<count; ++i ) - vproots[i]->Activate( (*vec[i]).second ); - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - for( size_t i=0; i<count; ++i ) - original_exec_resources.push_back( vproots[i] ); - } -} - -void thread_map::mark_virtual_processors_as_lent( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ) { - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - - iterator end = my_map.end(); - for( unsigned int c=0; c<count; ++c ) { - iterator i = my_map.find( (key_type) vproots[c] ); - if( i==end ) { - // The vproc has not been added to the map in create_oversubscribers() - my_map.insert( hash_map_type::value_type( (key_type) vproots[c], (server_thread*)1 ) ); - } else { - server_thread* thr = (*i).second; - if( ((uintptr_t)thr)&~(uintptr_t)1 ) { - __TBB_ASSERT( !thr->is_removed(), "incorrectly removed" ); - ((omp_server_thread*)thr)->set_lent(); - } - } - } -} - -void thread_map::create_oversubscribers( unsigned n, std::vector<server_thread*>& thr_vec, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ) { - std::vector<IExecutionResource*> curr_exec_rsc; - { - tbb::spin_mutex::scoped_lock lck( mtx ); - curr_exec_rsc = original_exec_resources; // copy construct - } - typedef std::vector<IExecutionResource*>::iterator iterator_er; - typedef ::std::vector<std::pair<hash_map_type::key_type, hash_map_type::mapped_type> > hash_val_vector_t; - hash_val_vector_t v_vec(n); - iterator_er begin = curr_exec_rsc.begin(); - iterator_er end = curr_exec_rsc.end(); - iterator_er i = begin; - for( unsigned c=0; c<n; ++c ) { - IVirtualProcessorRoot* vpr = my_scheduler_proxy->CreateOversubscriber( *i ); - omp_server_thread* t = new ( my_omp_allocator.allocate(1) ) omp_server_thread( true, my_scheduler, (IExecutionResource*)vpr, &conn, *this, my_client ); - thr_vec[c] = t; - v_vec[c] = hash_map_type::value_type( (key_type) vpr, t ); - if( ++i==end ) i = begin; - } - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - - iterator end = my_map.end(); - unsigned c = 0; - for( hash_val_vector_t::iterator vi=v_vec.begin(); vi!=v_vec.end(); ++vi, ++c ) { - iterator i = my_map.find( (key_type) (*vi).first ); - if( i==end ) { - my_map.insert( *vi ); - } else { - // the vproc has not been added to the map in mark_virtual_processors_as_returned(); - uintptr_t lent = (uintptr_t) (*i).second; - __TBB_ASSERT( lent<=1, "vproc map entry added incorrectly?"); - (*i).second = thr_vec[c]; - if( lent ) - ((omp_server_thread*)thr_vec[c])->set_lent(); - else - ((omp_server_thread*)thr_vec[c])->set_returned(); - } - my_client_ref_count.add_ref(); - my_server_ref_count.add_ref(); - } - } -} - -void thread_map::wakeup_tbb_threads( int c, ::tbb::spin_mutex& mtx ) { - std::vector<tbb_server_thread*> vec(c); - - size_t idx = 0; - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - // only one RML thread is in here to wake worker threads up. - - int bal = the_balance; - int cnt = c<bal ? 
c : bal;
-
- if( cnt<=0 ) { return; }
-
- for( iterator i=begin(); i!=end(); ++i ) {
- tbb_server_thread* thr = (tbb_server_thread*) (*i).second;
- // ConcRT RM should take threads away from TBB scheduler instead of lending them to another scheduler
- if( thr->is_removed() )
- continue;
-
- if( --the_balance>=0 ) {
- thread_grab_t res;
- while( (res=thr->try_grab_for())!=wk_from_idle ) {
- if( res==wk_from_asleep ) {
- vec[idx++] = thr;
- break;
- } else {
- thread_state_t s = thr->read_state();
- if( s==ts_busy ) {// failed because already assigned. move on.
- ++the_balance;
- goto skip;
- }
- }
- }
- thread_state_t s = thr->read_state();
- __TBB_ASSERT_EX( s==ts_busy, "should have set the state to ts_busy" );
- if( --cnt==0 )
- break;
- } else {
- // overdraft
- ++the_balance;
- break;
- }
-skip:
- ;
- }
- }
-
- for( size_t i=0; i<idx; ++i ) {
- tbb_server_thread* thr = vec[i];
- __TBB_ASSERT( thr, NULL );
- thread_state_t s = thr->read_state();
- __TBB_ASSERT_EX( s==ts_busy, "should have set the state to ts_busy" );
- ++thr->activation_count;
- thr->get_virtual_processor()->Activate( thr );
- }
-
-}
-
-void thread_map::mark_virtual_processors_as_returned( IVirtualProcessorRoot** vprocs, unsigned int count, tbb::spin_mutex& mtx ) {
- {
- tbb::spin_mutex::scoped_lock lck( mtx );
-
- if( is_closing() ) return;
-
- iterator end = my_map.end();
- for(unsigned c=0; c<count; ++c ) {
- iterator i = my_map.find( (key_type) vprocs[c] );
- if( i==end ) {
- // the vproc has not been added to the map in create_oversubscribers()
- my_map.insert( hash_map_type::value_type( (key_type) vprocs[c], static_cast<server_thread*>(0) ) );
- } else {
- omp_server_thread* thr = (omp_server_thread*) (*i).second;
- if( ((uintptr_t)thr)&~(uintptr_t)1 ) {
- __TBB_ASSERT( !thr->is_removed(), "incorrectly removed" );
- // we should not make any assumptions about the initial state of an added vproc.
- thr->set_returned();
- }
- }
- }
- }
-}
-
-
-void thread_map::unbind( rml::server& /*server*/, tbb::spin_mutex& mtx ) {
- {
- tbb::spin_mutex::scoped_lock lck( mtx );
- shutdown_in_progress = true; // ignore any callbacks from ConcRT RM
-
- // Ask each server_thread to cleanup its job for this server.
- for( iterator i = begin(); i!=end(); ++i ) {
- server_thread* t = (*i).second;
- t->terminate = true;
- if( t->is_removed() ) {
- // This is for TBB only as ConcRT RM does not request OMP schedulers to remove virtual processors
- if( t->read_state()==ts_asleep ) {
- __TBB_ASSERT( my_thread_scavenger_thread, "this is TBB connection; thread_scavenger_thread must be allocated" );
- // thread is on its way to switch_out; see remove_virtual_processors() where
- // the thread is Activated() to bring it back from 'Deactivated' in sleep_perhaps()
- // now assume that the thread will go to SwitchOut()
-#if TBB_USE_ASSERT
- while( t->get_virtual_processor()>c_remove_returned )
- __TBB_Yield();
-#endif
- // A removed thread is supposed to proceed to SwitchOut.
- // There, we remove client&server references.
- }
- } else {
- if( t->wakeup( ts_done, ts_asleep ) ) {
- if( t->tbb_thread )
- ++((tbb_server_thread*)t)->activation_count;
- t->get_virtual_processor()->Activate( t );
- // We mark the thread_map so that once the termination sequence has started,
- // we ignore all notifications from ConcRT RM.
- }
- }
- }
- }
- // Remove extra ref to client.
- remove_client_ref(); - - if( my_thread_scavenger_thread ) { - thread_scavenger_thread* tst; - while( (tst=my_thread_scavenger_thread)==c_claimed ) - __TBB_Yield(); -#if TBB_USE_ASSERT - ++my_thread_scavenger_thread->activation_count; -#endif - tst->get_virtual_processor()->Activate( tst ); - } -} - -#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED -void thread_map::allocate_thread_scavenger( IExecutionResource* v ) -{ - if( my_thread_scavenger_thread>c_claimed ) return; - thread_scavenger_thread* c = my_thread_scavenger_thread.fetch_and_store((thread_scavenger_thread*)c_claimed); - if( c==NULL ) { // successfully claimed - add_server_ref(); -#if TBB_USE_ASSERT - ++n_thread_scavengers_created; -#endif - __TBB_ASSERT( v, NULL ); - IVirtualProcessorRoot* vpr = my_scheduler_proxy->CreateOversubscriber( v ); - my_thread_scavenger_thread = c = new ( my_scavenger_allocator.allocate(1) ) thread_scavenger_thread( my_scheduler, vpr, *this ); -#if TBB_USE_ASSERT - ++c->activation_count; -#endif - vpr->Activate( c ); - } else if( c>c_claimed ) { - my_thread_scavenger_thread = c; - } -} -#endif - -void thread_scavenger_thread::Dispatch( DispatchState* ) -{ - __TBB_ASSERT( my_proxy, NULL ); -#if TBB_USE_ASSERT - --activation_count; -#endif - get_virtual_processor()->Deactivate( this ); - for( thread_map::iterator i=my_thread_map.begin(); i!=my_thread_map.end(); ++i ) { - tbb_server_thread* t = (tbb_server_thread*) (*i).second; - if( t->read_state()==ts_asleep && t->is_removed() ) { - while( t->get_execution_resource()!=c_remove_returned ) - __TBB_Yield(); - my_proxy->SwitchTo( t, Blocking ); - } - } - get_virtual_processor()->Remove( my_scheduler ); - my_thread_map.remove_server_ref(); - // signal to the connection scavenger that i am done with the map. - __TBB_ASSERT( activation_count==1, NULL ); - set_state( ts_done ); -} - -//! Windows "DllMain" that handles startup and shutdown of dynamic library. 
-extern "C" bool WINAPI DllMain( HINSTANCE /*hinstDLL*/, DWORD fwdReason, LPVOID lpvReserved ) { - void assist_cleanup_connections(); - if( fwdReason==DLL_PROCESS_DETACH ) { - // dll is being unloaded - if( !lpvReserved ) // if FreeLibrary has been called - assist_cleanup_connections(); - } - return true; -} - -void free_all_connections( uintptr_t conn_ex ) { - while( conn_ex ) { - bool is_tbb = (conn_ex&2)>0; - //clear extra bits - uintptr_t curr_conn = conn_ex & ~(uintptr_t)3; - __TBB_ASSERT( curr_conn, NULL ); - - // Wait for worker threads to return - if( is_tbb ) { - tbb_connection_v2* tbb_conn = reinterpret_cast<tbb_connection_v2*>(curr_conn); - conn_ex = reinterpret_cast<uintptr_t>(tbb_conn->next_conn); - while( tbb_conn->my_thread_map.remove_server_ref()>0 ) - __TBB_Yield(); - delete tbb_conn; - } else { - omp_connection_v2* omp_conn = reinterpret_cast<omp_connection_v2*>(curr_conn); - conn_ex = reinterpret_cast<uintptr_t>(omp_conn->next_conn); - while( omp_conn->my_thread_map.remove_server_ref()>0 ) - __TBB_Yield(); - delete omp_conn; - } - } -} - -void assist_cleanup_connections() -{ - //signal to connection_scavenger_thread to terminate - uintptr_t tail = connections_to_reclaim.tail; - while( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::plugged, tail )!=tail ) { - __TBB_Yield(); - tail = connections_to_reclaim.tail; - } - - __TBB_ASSERT( connection_scavenger.state==ts_busy || connection_scavenger.state==ts_asleep, NULL ); - // Scavenger thread may be busy freeing connections - DWORD thr_exit_code = STILL_ACTIVE; - while( connection_scavenger.state==ts_busy ) { - if( GetExitCodeThread( connection_scavenger.thr_handle, &thr_exit_code )>0 ) - if( thr_exit_code!=STILL_ACTIVE ) - break; - __TBB_Yield(); - thr_exit_code = STILL_ACTIVE; - } - if( connection_scavenger.state==ts_asleep && thr_exit_code==STILL_ACTIVE ) - connection_scavenger.wakeup(); // wake the connection scavenger thread up - - // it is possible that the connection scavenger thread already exited. Take over its responsibility. - if( tail && connections_to_reclaim.tail!=garbage_connection_queue::plugged_acked ) { - // atomically claim the head of the list. - uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty ); - if( head==garbage_connection_queue::empty ) - head = tail; - connection_scavenger.process_requests( head ); - } - __TBB_ASSERT( connections_to_reclaim.tail==garbage_connection_queue::plugged||connections_to_reclaim.tail==garbage_connection_queue::plugged_acked, "someone else added a request after termination has initiated" ); - __TBB_ASSERT( (unsigned)the_balance==the_default_concurrency, NULL ); -} - -void connection_scavenger_thread::sleep_perhaps() { - uintptr_t tail = connections_to_reclaim.tail; - // connections_to_reclaim.tail==garbage_connection_queue::plugged --> terminate, - // connections_to_reclaim.tail>garbage_connection_queue::plugged : we got work to do - if( tail>=garbage_connection_queue::plugged ) return; - __TBB_ASSERT( !tail, NULL ); - thread_monitor::cookie c; - monitor.prepare_wait(c); - if( state.compare_and_swap( ts_asleep, ts_busy )==ts_busy ) { - if( connections_to_reclaim.tail!=garbage_connection_queue::plugged ) { - monitor.commit_wait(c); - // Someone else woke me up. The compare_and_swap further below deals with spurious wakeups. - } else { - monitor.cancel_wait(); - } - thread_state_t s = state; - if( s==ts_asleep ) // if spurious wakeup. 
- state.compare_and_swap( ts_busy, ts_asleep ); - // I woke myself up, either because I cancelled the wait or suffered a spurious wakeup. - } else { - __TBB_ASSERT( false, "someone else tampered with my state" ); - } - __TBB_ASSERT( state==ts_busy, "a thread can only put itself to sleep" ); -} - -void connection_scavenger_thread::process_requests( uintptr_t conn_ex ) -{ - __TBB_ASSERT( conn_ex>1, NULL ); - __TBB_ASSERT( n_scavenger_threads==1||connections_to_reclaim.tail==garbage_connection_queue::plugged, "more than one connection_scavenger_thread being active?" ); - - bool done = false; - while( !done ) { - bool is_tbb = (conn_ex&2)>0; - //clear extra bits - uintptr_t curr_conn = conn_ex & ~(uintptr_t)3; - - // no contention. there is only one connection_scavenger_thread!! - uintptr_t next_conn; - tbb_connection_v2* tbb_conn = NULL; - omp_connection_v2* omp_conn = NULL; - // Wait for worker threads to return - if( is_tbb ) { - tbb_conn = reinterpret_cast<tbb_connection_v2*>(curr_conn); - next_conn = reinterpret_cast<uintptr_t>(tbb_conn->next_conn); - while( tbb_conn->my_thread_map.get_server_ref_count()>1 ) - __TBB_Yield(); - } else { - omp_conn = reinterpret_cast<omp_connection_v2*>(curr_conn); - next_conn = reinterpret_cast<uintptr_t>(omp_conn->next_conn); - while( omp_conn->my_thread_map.get_server_ref_count()>1 ) - __TBB_Yield(); - } - - //someone else may try to write into this connection object. - //So access next_conn field first before remove the extra server ref count. - - if( next_conn==0 ) { - uintptr_t tail = connections_to_reclaim.tail; - if( tail==garbage_connection_queue::plugged ) { - tail = garbage_connection_queue::plugged_acked; // connection scavenger saw the flag, and it freed all connections. - done = true; - } else if( tail==conn_ex ) { - if( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::empty, tail )==tail ) { - __TBB_ASSERT( !connections_to_reclaim.head, NULL ); - done = true; - } - } - - if( !done ) { - // A new connection to close is added to connections_to_reclaim.tail; - // Wait for curr_conn->next_conn to be set. 
- if( is_tbb ) {
- while( !tbb_conn->next_conn )
- __TBB_Yield();
- conn_ex = reinterpret_cast<uintptr_t>(tbb_conn->next_conn);
- } else {
- while( !omp_conn->next_conn )
- __TBB_Yield();
- conn_ex = reinterpret_cast<uintptr_t>(omp_conn->next_conn);
- }
- }
- } else {
- conn_ex = next_conn;
- }
- __TBB_ASSERT( conn_ex, NULL );
- if( is_tbb )
- // remove extra server ref count; this will trigger Shutdown/Release of ConcRT RM
- tbb_conn->remove_server_ref();
- else
- // remove extra server ref count; this will trigger Shutdown/Release of ConcRT RM
- omp_conn->remove_server_ref();
- }
-}
-
-__RML_DECL_THREAD_ROUTINE connection_scavenger_thread::thread_routine( void* arg ) {
- connection_scavenger_thread* thr = (connection_scavenger_thread*) arg;
- thr->state = ts_busy;
- thr->thr_handle = GetCurrentThread();
-#if TBB_USE_ASSERT
- ++thr->n_scavenger_threads;
-#endif
- for(;;) {
- __TBB_Yield();
- thr->sleep_perhaps();
- if( connections_to_reclaim.tail==garbage_connection_queue::plugged || connections_to_reclaim.tail==garbage_connection_queue::plugged_acked ) {
- thr->state = ts_asleep;
- return 0;
- }
-
- __TBB_ASSERT( connections_to_reclaim.tail!=garbage_connection_queue::plugged_acked, NULL );
- __TBB_ASSERT( connections_to_reclaim.tail>garbage_connection_queue::plugged && (connections_to_reclaim.tail&garbage_connection_queue::plugged)==0 , NULL );
- while( connections_to_reclaim.head==garbage_connection_queue::empty )
- __TBB_Yield();
- uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty );
- thr->process_requests( head );
- wakeup_some_tbb_threads();
- }
-}
-
-template<typename Server, typename Client>
-void connection_scavenger_thread::add_request( generic_connection<Server,Client>* conn_to_close )
-{
- uintptr_t conn_ex = (uintptr_t)conn_to_close | (connection_traits<Server,Client>::is_tbb<<1);
- __TBB_ASSERT( !conn_to_close->next_conn, NULL );
- const uintptr_t old_tail_ex = connections_to_reclaim.tail.fetch_and_store(conn_ex);
- __TBB_ASSERT( old_tail_ex==0||old_tail_ex>garbage_connection_queue::plugged_acked, "Unloading DLL called while this connection is being closed?" );
-
- if( old_tail_ex==garbage_connection_queue::empty )
- connections_to_reclaim.head = conn_ex;
- else {
- bool is_tbb = (old_tail_ex&2)>0;
- uintptr_t old_tail = old_tail_ex & ~(uintptr_t)3;
- if( is_tbb )
- reinterpret_cast<tbb_connection_v2*>(old_tail)->next_conn = reinterpret_cast<tbb_connection_v2*>(conn_ex);
- else
- reinterpret_cast<omp_connection_v2*>(old_tail)->next_conn = reinterpret_cast<omp_connection_v2*>(conn_ex);
- }
-
- if( state==ts_asleep )
- wakeup();
-}
-
-template<>
-uintptr_t connection_scavenger_thread::grab_and_prepend( generic_connection<tbb_server,tbb_client>* /*last_conn_to_close*/ ) { return 0;}
-
-template<>
-uintptr_t connection_scavenger_thread::grab_and_prepend( generic_connection<omp_server,omp_client>* last_conn_to_close )
-{
- uintptr_t conn_ex = (uintptr_t)last_conn_to_close;
- uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty );
- reinterpret_cast<omp_connection_v2*>(last_conn_to_close)->next_conn = reinterpret_cast<omp_connection_v2*>(head);
- return conn_ex;
-}
-
-extern "C" ULONGLONG NTAPI VerSetConditionMask( ULONGLONG, DWORD, BYTE);
-
-bool is_windows7_or_later ()
-{
- try {
- return GetOSVersion()>=IResourceManager::Win7OrLater;
- } catch( ...
) { - return false; - } -} - -#endif /* RML_USE_WCRM */ - -template<typename Connection, typename Server, typename Client> -static factory::status_type connect( factory& f, Server*& server, Client& client ) { - server = new Connection(*static_cast<wait_counter*>(f.scratch_ptr),client); - return factory::st_success; -} - -void init_rml_module () { - the_balance = the_default_concurrency = tbb::internal::AvailableHwConcurrency() - 1; -#if RML_USE_WCRM - connection_scavenger.launch(); -#endif -} - -extern "C" factory::status_type __RML_open_factory( factory& f, version_type& server_version, version_type client_version ) { - // Hack to keep this library from being closed by causing the first client's dlopen to not have a corresponding dlclose. - // This code will be removed once we figure out how to do shutdown of the RML perfectly. - static tbb::atomic<bool> one_time_flag; - if( one_time_flag.compare_and_swap(true,false)==false) { - __TBB_ASSERT( (size_t)f.library_handle!=factory::c_dont_unload, NULL ); -#if _WIN32||_WIN64 - f.library_handle = reinterpret_cast<HMODULE>(factory::c_dont_unload); -#else - f.library_handle = reinterpret_cast<void*>(factory::c_dont_unload); -#endif - } - // End of hack - - // Initialize the_balance only once - tbb::internal::atomic_do_once ( &init_rml_module, rml_module_state ); - - server_version = SERVER_VERSION; - f.scratch_ptr = 0; - if( client_version==0 ) { - return factory::st_incompatible; -#if RML_USE_WCRM - } else if ( !is_windows7_or_later() ) { -#if TBB_USE_DEBUG - fprintf(stderr, "This version of the RML library requires Windows 7 to run on.\nConnection request denied.\n"); -#endif - return factory::st_incompatible; -#endif - } else { -#if TBB_USE_DEBUG - if( client_version<EARLIEST_COMPATIBLE_CLIENT_VERSION ) - fprintf(stderr, "This client library is too old for the current RML server.\nThe connection request is granted but oversubscription/undersubscription may occur.\n"); -#endif - f.scratch_ptr = new wait_counter; - return factory::st_success; - } -} - -extern "C" void __RML_close_factory( factory& f ) { - if( wait_counter* fc = static_cast<wait_counter*>(f.scratch_ptr) ) { - f.scratch_ptr = 0; - fc->wait(); - size_t bal = the_balance; - f.scratch_ptr = (void*)bal; - delete fc; - } -} - -void call_with_build_date_str( ::rml::server_info_callback_t cb, void* arg ); - -}} // rml::internal - -namespace tbb { -namespace internal { -namespace rml { - -extern "C" tbb_factory::status_type __TBB_make_rml_server( tbb_factory& f, tbb_server*& server, tbb_client& client ) { - return ::rml::internal::connect< ::rml::internal::tbb_connection_v2>(f,server,client); -} - -extern "C" void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ) { - return ::rml::internal::call_with_build_date_str( cb, arg ); -} - -}}} - -namespace __kmp { -namespace rml { - -extern "C" omp_factory::status_type __KMP_make_rml_server( omp_factory& f, omp_server*& server, omp_client& client ) { - return ::rml::internal::connect< ::rml::internal::omp_connection_v2>(f,server,client); -} - -extern "C" void __KMP_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ) { - return ::rml::internal::call_with_build_date_str( cb, arg ); -} - -}} - -/* - * RML server info - */ -#include "version_string.ver" - -#ifndef __TBB_VERSION_STRINGS -#pragma message("Warning: version_string.ver isn't generated properly by version_info.sh script!") -#endif - -// We use the build time as the RML server info. 
TBB is required to build RML, so we make it the same as the TBB build time.
-#ifndef __TBB_DATETIME
-#define __TBB_DATETIME __DATE__ " " __TIME__
-#endif
-
-#if !RML_USE_WCRM
-#define RML_SERVER_BUILD_TIME "Intel(R) RML library built: " __TBB_DATETIME
-#define RML_SERVER_VERSION_ST "Intel(R) RML library version: v" TOSTRING(SERVER_VERSION)
-#else
-#define RML_SERVER_BUILD_TIME "Intel(R) RML library built: " __TBB_DATETIME
-#define RML_SERVER_VERSION_ST "Intel(R) RML library version: v" TOSTRING(SERVER_VERSION) " on ConcRT RM with " RML_THREAD_KIND_STRING
-#endif
-
-namespace rml {
-namespace internal {
-
-void call_with_build_date_str( ::rml::server_info_callback_t cb, void* arg )
-{
- (*cb)( arg, RML_SERVER_BUILD_TIME );
- (*cb)( arg, RML_SERVER_VERSION_ST );
-}
-}} // rml::internal
diff --git a/src/tbb/src/rml/server/thread_monitor.h b/src/tbb/src/rml/server/thread_monitor.h
deleted file mode 100644
index 9da5741ab..000000000
--- a/src/tbb/src/rml/server/thread_monitor.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
- This file is part of Threading Building Blocks. Threading Building Blocks is free software;
- you can redistribute it and/or modify it under the terms of the GNU General Public License
- version 2 as published by the Free Software Foundation. Threading Building Blocks is
- distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details. You should have received a copy of
- the GNU General Public License along with Threading Building Blocks; if not, write to the
- Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
- As a special exception, you may use this file as part of a free software library without
- restriction. Specifically, if other files instantiate templates or use macros or inline
- functions from this file, or you compile this file and link it with other files to produce
- an executable, this file does not by itself cause the resulting executable to be covered
- by the GNU General Public License. This exception does not however invalidate any other
- reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-// All platform-specific threading support is encapsulated here.
-
-#ifndef __RML_thread_monitor_H
-#define __RML_thread_monitor_H
-
-#if USE_WINTHREAD
-#include <windows.h>
-#include <process.h>
-#include <malloc.h> //_alloca
-#include "tbb/tbb_misc.h" // support for processor groups
-#if __TBB_WIN8UI_SUPPORT
-#include <thread>
-#endif
-#elif USE_PTHREAD
-#include <pthread.h>
-#include <string.h>
-#include <stdlib.h>
-#else
-#error Unsupported platform
-#endif
-#include <stdio.h>
-#include "tbb/itt_notify.h"
-#include "tbb/atomic.h"
-#include "tbb/semaphore.h"
-
-// All platform-specific threading support is in this header.
-
-#if (_WIN32||_WIN64)&&!__TBB_ipf
-// Deal with 64K aliasing. The formula for "offset" is a Fibonacci hash function,
-// which has the desirable feature of spreading out the offsets fairly evenly
-// without knowing the total number of offsets, and is furthermore unlikely to
-// accidentally cancel out other 64K aliasing schemes that Microsoft might implement later.
-// See Knuth Vol 3. "Theorem S" for details on Fibonacci hashing.
-// The second statement really does need "volatile", otherwise the compiler might remove the _alloca.
-#define AVOID_64K_ALIASING(idx) \ - size_t offset = (idx+1) * 40503U % (1U<<16); \ - void* volatile sink_for_alloca = _alloca(offset); \ - __TBB_ASSERT_EX(sink_for_alloca, "_alloca failed"); -#else -// Linux thread allocators avoid 64K aliasing. -#define AVOID_64K_ALIASING(idx) tbb::internal::suppress_unused_warning(idx) -#endif /* _WIN32||_WIN64 */ - -namespace rml { - -namespace internal { - -#if DO_ITT_NOTIFY -static const ::tbb::tchar *SyncType_RML = _T("%Constant"); -static const ::tbb::tchar *SyncObj_ThreadMonitor = _T("RML Thr Monitor"); -#endif /* DO_ITT_NOTIFY */ - -//! Monitor with limited two-phase commit form of wait. -/** At most one thread should wait on an instance at a time. */ -class thread_monitor { -public: - class cookie { - friend class thread_monitor; - tbb::atomic<size_t> my_epoch; - }; - thread_monitor() : spurious(false) { - my_cookie.my_epoch = 0; - ITT_SYNC_CREATE(&my_sema, SyncType_RML, SyncObj_ThreadMonitor); - in_wait = false; - } - ~thread_monitor() {} - - //! If a thread is waiting or started a two-phase wait, notify it. - /** Can be called by any thread. */ - void notify(); - - //! Begin two-phase wait. - /** Should only be called by thread that owns the monitor. - The caller must either complete the wait or cancel it. */ - void prepare_wait( cookie& c ); - - //! Complete a two-phase wait and wait until notification occurs after the earlier prepare_wait. - void commit_wait( cookie& c ); - - //! Cancel a two-phase wait. - void cancel_wait(); - -#if USE_WINTHREAD - typedef HANDLE handle_type; - - #define __RML_DECL_THREAD_ROUTINE unsigned WINAPI - typedef unsigned (WINAPI *thread_routine_type)(void*); - - //! Launch a thread - static handle_type launch( thread_routine_type thread_routine, void* arg, size_t stack_size, const size_t* worker_index = NULL ); - -#elif USE_PTHREAD - typedef pthread_t handle_type; - - #define __RML_DECL_THREAD_ROUTINE void* - typedef void*(*thread_routine_type)(void*); - - //! Launch a thread - static handle_type launch( thread_routine_type thread_routine, void* arg, size_t stack_size ); -#endif /* USE_PTHREAD */ - - //! Yield control to OS - /** Affects the calling thread. **/ - static void yield(); - - //! Join thread - static void join(handle_type handle); - - //! Detach thread - static void detach_thread(handle_type handle); -private: - cookie my_cookie; - tbb::atomic<bool> in_wait; - bool spurious; - tbb::internal::binary_semaphore my_sema; -#if USE_PTHREAD - static void check( int error_code, const char* routine ); -#endif -}; - -#if USE_WINTHREAD - -#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION -#define STACK_SIZE_PARAM_IS_A_RESERVATION 0x00010000 -#endif - -#if __TBB_WIN8UI_SUPPORT -inline thread_monitor::handle_type thread_monitor::launch( thread_routine_type thread_function, void* arg, size_t, const size_t*) { -//TODO: check that exception thrown from std::thread is not swallowed silently - std::thread* thread_tmp=new std::thread(thread_function, arg); - return thread_tmp->native_handle(); -} -#else //__TBB_WIN8UI_SUPPORT -inline thread_monitor::handle_type thread_monitor::launch( thread_routine_type thread_routine, void* arg, size_t stack_size, const size_t* worker_index ) { - unsigned thread_id; - int number_of_processor_groups = ( worker_index ) ? tbb::internal::NumberOfProcessorGroups() : 0; - unsigned create_flags = ( number_of_processor_groups > 1 ) ? 
CREATE_SUSPENDED : 0; - HANDLE h = (HANDLE)_beginthreadex( NULL, unsigned(stack_size), thread_routine, arg, STACK_SIZE_PARAM_IS_A_RESERVATION | create_flags, &thread_id ); - if( !h ) { - fprintf(stderr,"thread_monitor::launch: _beginthreadex failed\n"); - exit(1); - } - if ( number_of_processor_groups > 1 ) { - tbb::internal::MoveThreadIntoProcessorGroup( h, - tbb::internal::FindProcessorGroupIndex( static_cast<int>(*worker_index) ) ); - ResumeThread( h ); - } - return h; -} -#endif //__TBB_WIN8UI_SUPPORT - -void thread_monitor::join(handle_type handle) { -#if TBB_USE_ASSERT - DWORD res = -#endif - WaitForSingleObjectEx(handle, INFINITE, FALSE); - __TBB_ASSERT( res==WAIT_OBJECT_0, NULL ); -#if TBB_USE_ASSERT - BOOL val = -#endif - CloseHandle(handle); - __TBB_ASSERT( val, NULL ); -} - -void thread_monitor::detach_thread(handle_type handle) { -#if TBB_USE_ASSERT - BOOL val = -#endif - CloseHandle(handle); - __TBB_ASSERT( val, NULL ); -} - -inline void thread_monitor::yield() { -// TODO: consider unification via __TBB_Yield or tbb::this_tbb_thread::yield -#if !__TBB_WIN8UI_SUPPORT - SwitchToThread(); -#else - std::this_thread::yield(); -#endif -} -#endif /* USE_WINTHREAD */ - -#if USE_PTHREAD -// TODO: can we throw exceptions instead of termination? -inline void thread_monitor::check( int error_code, const char* routine ) { - if( error_code ) { - fprintf(stderr,"thread_monitor %s in %s\n", strerror(error_code), routine ); - exit(1); - } -} - -inline thread_monitor::handle_type thread_monitor::launch( void* (*thread_routine)(void*), void* arg, size_t stack_size ) { - // FIXME - consider more graceful recovery than just exiting if a thread cannot be launched. - // Note that there are some tricky situations to deal with, such that the thread is already - // grabbed as part of an OpenMP team. - pthread_attr_t s; - check(pthread_attr_init( &s ), "pthread_attr_init"); - if( stack_size>0 ) - check(pthread_attr_setstacksize( &s, stack_size ), "pthread_attr_setstack_size" ); - pthread_t handle; - check( pthread_create( &handle, &s, thread_routine, arg ), "pthread_create" ); - check( pthread_attr_destroy( &s ), "pthread_attr_destroy" ); - return handle; -} - -void thread_monitor::join(handle_type handle) { - check(pthread_join(handle, NULL), "pthread_join"); -} - -void thread_monitor::detach_thread(handle_type handle) { - check(pthread_detach(handle), "pthread_detach"); -} - -inline void thread_monitor::yield() { - sched_yield(); -} -#endif /* USE_PTHREAD */ - -inline void thread_monitor::notify() { - my_cookie.my_epoch = my_cookie.my_epoch + 1; - bool do_signal = in_wait.fetch_and_store( false ); - if( do_signal ) - my_sema.V(); -} - -inline void thread_monitor::prepare_wait( cookie& c ) { - if( spurious ) { - spurious = false; - // consumes a spurious posted signal. don't wait on my_sema. - my_sema.P(); - } - c = my_cookie; - in_wait = true; - __TBB_full_memory_fence(); -} - -inline void thread_monitor::commit_wait( cookie& c ) { - bool do_it = ( c.my_epoch == my_cookie.my_epoch); - if( do_it ) my_sema.P(); - else cancel_wait(); -} - -inline void thread_monitor::cancel_wait() { - spurious = ! in_wait.fetch_and_store( false ); -} - -} // namespace internal -} // namespace rml - -#endif /* __RML_thread_monitor_H */ diff --git a/src/tbb/src/rml/server/wait_counter.h b/src/tbb/src/rml/server/wait_counter.h deleted file mode 100644 index 8beca6dc9..000000000 --- a/src/tbb/src/rml/server/wait_counter.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. 
- - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __RML_wait_counter_H -#define __RML_wait_counter_H - -#include "thread_monitor.h" -#include "tbb/atomic.h" - -namespace rml { -namespace internal { - -class wait_counter { - thread_monitor my_monitor; - tbb::atomic<int> my_count; - tbb::atomic<int> n_transients; -public: - wait_counter() { - // The "1" here is subtracted by the call to "wait". - my_count=1; - n_transients=0; - } - - //! Wait for number of operator-- invocations to match number of operator++ invocations. - /** Exactly one thread should call this method. */ - void wait() { - int k = --my_count; - __TBB_ASSERT( k>=0, "counter underflow" ); - if( k>0 ) { - thread_monitor::cookie c; - my_monitor.prepare_wait(c); - if( my_count ) - my_monitor.commit_wait(c); - else - my_monitor.cancel_wait(); - } - while( n_transients>0 ) - __TBB_Yield(); - } - void operator++() { - ++my_count; - } - void operator--() { - ++n_transients; - int k = --my_count; - __TBB_ASSERT( k>=0, "counter underflow" ); - if( k==0 ) - my_monitor.notify(); - --n_transients; - } -}; - -} // namespace internal -} // namespace rml - -#endif /* __RML_wait_counter_H */ diff --git a/src/tbb/src/rml/server/win32-rml-export.def b/src/tbb/src/rml/server/win32-rml-export.def deleted file mode 100644 index a1f688bad..000000000 --- a/src/tbb/src/rml/server/win32-rml-export.def +++ /dev/null @@ -1,27 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. 
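The wait_counter above leans on thread_monitor's two-phase wait, so the protocol bears summarizing: prepare_wait() snapshots an epoch counter and registers the waiter, notify() bumps the epoch and posts the semaphore only if a waiter is registered, and commit_wait() blocks only if no notification arrived in between. A condensed C++20 re-sketch of the same epoch-plus-semaphore idea (an illustration, not the deleted code; std::binary_semaphore stands in for tbb::internal::binary_semaphore):

// Condensed sketch of the prepare/commit/cancel protocol from thread_monitor.h.
#include <atomic>
#include <cstddef>
#include <semaphore>

class two_phase_monitor {
    std::atomic<std::size_t> epoch{0};
    std::atomic<bool> in_wait{false};
    bool spurious = false;              // touched only by the owning thread
    std::binary_semaphore sema{0};
public:
    void notify() {                     // any thread may call
        epoch.fetch_add(1);
        if (in_wait.exchange(false))
            sema.release();
    }
    std::size_t prepare_wait() {        // owner thread only
        if (spurious) {                 // drain a release() left by a cancelled wait
            spurious = false;
            sema.acquire();
        }
        std::size_t snapshot = epoch.load();
        in_wait.store(true);
        return snapshot;
    }
    void commit_wait(std::size_t snapshot) {
        if (snapshot == epoch.load()) sema.acquire();   // no notify since prepare
        else cancel_wait();
    }
    void cancel_wait() {
        // If notify() already cleared in_wait, its release() is in flight;
        // remember to absorb it on the next prepare_wait().
        spurious = !in_wait.exchange(false);
    }
};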
Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -EXPORTS - -__RML_open_factory -__RML_close_factory -__TBB_make_rml_server -__KMP_make_rml_server -__TBB_call_with_my_server_info -__KMP_call_with_my_server_info - diff --git a/src/tbb/src/rml/server/win64-rml-export.def b/src/tbb/src/rml/server/win64-rml-export.def deleted file mode 100644 index a1f688bad..000000000 --- a/src/tbb/src/rml/server/win64-rml-export.def +++ /dev/null @@ -1,27 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -EXPORTS - -__RML_open_factory -__RML_close_factory -__TBB_make_rml_server -__KMP_make_rml_server -__TBB_call_with_my_server_info -__KMP_call_with_my_server_info - diff --git a/src/tbb/src/rml/test/rml_omp_stub.cpp b/src/tbb/src/rml/test/rml_omp_stub.cpp deleted file mode 100644 index 0573d502d..000000000 --- a/src/tbb/src/rml/test/rml_omp_stub.cpp +++ /dev/null @@ -1,71 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// This file is compiled with C++, but linked with a program written in C. -// The intent is to find dependencies on the C++ run-time. - -#include <stdlib.h> -#include "harness_defs.h" -#define RML_PURE_VIRTUAL_HANDLER abort - -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -// VS2008/VC9 seems to have an issue; -#pragma warning( push ) -#pragma warning( disable: 4100 ) -#elif __TBB_MSVC_UNREACHABLE_CODE_IGNORED -// VS2012-2013 issues "warning C4702: unreachable code" for the code which really -// shouldn't be reached according to the test logic: rml::client has the -// implementation for the "pure" virtual methods to be aborted if they are -// called. -#pragma warning( push ) -#pragma warning( disable: 4702 ) -#endif -#include "rml_omp.h" -#if ( _MSC_VER==1500 && !defined(__INTEL_COMPILER)) || __TBB_MSVC_UNREACHABLE_CODE_IGNORED -#pragma warning( pop ) -#endif - -rml::versioned_object::version_type Version; - -class MyClient: public __kmp::rml::omp_client { -public: - /*override*/rml::versioned_object::version_type version() const {return 0;} - /*override*/size_type max_job_count() const {return 1024;} - /*override*/size_t min_stack_size() const {return 1<<20;} - /*override*/rml::job* create_one_job() {return NULL;} - /*override*/void acknowledge_close_connection() {} - /*override*/void cleanup(job&) {} - /*override*/policy_type policy() const {return throughput;} - /*override*/void process( job&, void*, __kmp::rml::omp_client::size_type ) {} - -}; - -//! Never actually set, because point of test is to find linkage issues. -__kmp::rml::omp_server* MyServerPtr; - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#define HARNESS_CUSTOM_MAIN 1 -#include "harness.h" - -extern "C" void Cplusplus() { - MyClient client; - Version = client.version(); - REPORT("done\n"); -} diff --git a/src/tbb/src/rml/test/test_job_automaton.cpp b/src/tbb/src/rml/test/test_job_automaton.cpp deleted file mode 100644 index 157a9e9e3..000000000 --- a/src/tbb/src/rml/test/test_job_automaton.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
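The rml_omp_stub.cpp file deleted above implements a simple but effective linkage check: compile the stub as C++, link it into a plain C program, and any hidden dependency on the C++ run-time surfaces as an unresolved symbol at link time. The pattern, reduced to a hypothetical two-file sketch (the C driver shown as a comment):

// bridge.cpp -- compiled as C++; extern "C" gives the symbol C linkage so a
// plain C driver can call it without name mangling. The matching driver.c is:
//     void Cplusplus(void);
//     int main(void) { Cplusplus(); return 0; }
extern "C" void Cplusplus() {
    // Exercise C++ features here; linking the mixed program with the C
    // toolchain flags any C++ run-time symbols that get pulled in.
}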
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" -#if __TBB_MIC_OFFLOAD -int TestMain () { - return Harness::Skipped; -} -#else -#include "job_automaton.h" -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness_barrier.h" - -class State { - Harness::SpinBarrier barrier; - rml::internal::job_automaton ja; - rml::job job; - tbb::atomic<int> job_created; - tbb::atomic<int> job_destroyed; - tbb::atomic<bool> job_received; -public: - State() : barrier(2) { - job_created = 0; - job_destroyed = 0; - job_received = false; - } - void exercise( bool is_owner ); - ~State() { - ASSERT( job_created==job_destroyed, "accounting error" ); - ASSERT( job_destroyed<=1, "destroyed job twice" ); - } -}; - -int DelayMask; -const int N = 14; -tbb::atomic<int> Coverage[N]; - -//! Mark kth interval as covered and insert delay if kth bit of DelayMask is set. -/** An interval is the code between two operations on the job_automaton that we are testing. */ -void Cover( int k ) { - ASSERT( k<N, NULL ); - ++Coverage[k]; - if( DelayMask>>k&1 ) { - // Introduce delay (and possibly a thread context switch) - __TBB_Yield(); - } -} - -void State::exercise( bool is_owner ) { - barrier.wait(); - if( is_owner ) { - Cover(0); - if( ja.try_acquire() ) { - Cover(1); - ++job_created; - ja.set_and_release(job); - Cover(2); - if( ja.try_acquire() ) { - Cover(3); - ja.release(); - Cover(4); - if( ja.try_acquire() ) { - Cover(5); - ja.release(); - } - } - Cover(6); - } else { - Cover(7); - } - if( DelayMask&1<<N ) { - while( !job_received ) - __TBB_Yield(); - } - } else { - // Using extra bit of DelayMask for choosing whether to run wait_for_job or not. - if( DelayMask&1<<N ) { - rml::job* j= &ja.wait_for_job(); - if( j!=&job ) REPORT("%p\n",j); - ASSERT( j==&job, NULL ); - job_received = true; - } - Cover(8); - } - rml::job* j; - if( ja.try_plug(j) ) { - ASSERT( j==&job || !j, NULL ); - if( j ) { - Cover(9+is_owner); - ++job_destroyed; - } else { - __TBB_ASSERT( !is_owner, "owner failed to create job but plugged self" ); - Cover(11); - } - } else { - Cover(12+is_owner); - } -} - -class Loop: NoAssign { - State& s; -public: - Loop(State& s_) : s(s_) {} - void operator()( int i ) const {s.exercise(i==0);} -}; - -/** Return true if coverage is acceptable. - If report==true, issue message if it is unacceptable. 
*/ -bool CheckCoverage( bool report ) { - bool okay = true; - for( int i=0; i<N; ++i ) { - const int min_coverage = 4; - if( Coverage[i]<min_coverage ) { - okay = false; - if( report ) - REPORT("Warning: Coverage[%d]=%d is less than acceptable minimum of %d\n", i, int(Coverage[i]),min_coverage); - } - } - return okay; -} - -int TestMain () { - for( DelayMask=0; DelayMask<8<<N; ++DelayMask ) { - State s; - NativeParallelFor( 2, Loop(s) ); - if( CheckCoverage(false) ) { - // Reached acceptable code coverage level - break; - } - } - CheckCoverage(true); - return Harness::Done; -} - -#endif /* __TBB_MIC_OFFLOAD */ diff --git a/src/tbb/src/rml/test/test_rml_mixed.cpp b/src/tbb/src/rml/test/test_rml_mixed.cpp deleted file mode 100644 index 6037495db..000000000 --- a/src/tbb/src/rml/test/test_rml_mixed.cpp +++ /dev/null @@ -1,319 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <tbb/tbb_config.h> -#if __TBB_WIN8UI_SUPPORT || __TBB_MIC_OFFLOAD -#include "harness.h" -int TestMain () { - return Harness::Skipped; -} -#else -#include "rml_tbb.h" -#include "rml_omp.h" -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -#define HARNESS_DEFAULT_MIN_THREADS 4 -#include "harness.h" - -// dynamic_link initializes its data structures in a static constructor. But -// the initialization order of static constructors in different modules is -// non-deterministic. Thus dynamic_link fails on some systems when the -// application changes its current directory after the library (TBB/OpenMP/...) -// is loaded but before the static constructors in the library are executed.
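A side note on test_job_automaton.cpp, whose DelayMask loop closes above: each Cover(k) marks checkpoint k as reached and, when bit k of the mask is set, yields to perturb the thread interleaving; the driver then sweeps masks until every checkpoint has been hit often enough. A stripped-down sketch of that idea (names hypothetical):

// Coverage-driven interleaving: bit k of delay_mask decides whether to yield
// at checkpoint k; a driving loop iterates masks until all checkpoints are
// covered a minimum number of times.
#include <atomic>
#include <thread>

constexpr int kCheckpoints = 14;        // corresponds to N in the deleted test
std::atomic<int> coverage[kCheckpoints];
int delay_mask;                          // set by the driving loop before each run

void checkpoint(int k) {
    ++coverage[k];
    if (delay_mask >> k & 1)
        std::this_thread::yield();       // nudge the scheduler at this point
}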
-#define CHDIR_SUPPORT_BROKEN ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && __GNUC_MINOR__ <= 8 ) - -const int OMP_ParallelRegionSize = 16; -int TBB_MaxThread = 4; // Includes master -int OMP_MaxThread = int(~0u>>1); // Includes master - -template<typename Client> -class ClientBase: public Client { -protected: - typedef typename Client::version_type version_type; - typedef typename Client::job job; - typedef typename Client::policy_type policy_type; - -private: - /*override*/version_type version() const { - return 0; - } - /*override*/size_t min_stack_size() const { - return 1<<20; - } - /*override*/job* create_one_job() { - return new rml::job; - } - /*override*/policy_type policy() const { - return Client::turnaround; - } - /*override*/void acknowledge_close_connection() { - delete this; - } - /*override*/void cleanup( job& j ) {delete &j;} - -public: - virtual ~ClientBase() {} -}; - -#if _WIN32 -#include <direct.h> -#define PATH_LEN MAX_PATH+1 -#define SLASH '\\' -#define ROOT_DIR "\\" -// ROOT_DIR_REST means how many symbols before first slash in the path -#define ROOT_DIR_REST 2 -#else -#include <unistd.h> -#include <limits.h> -#define PATH_LEN PATH_MAX+1 -#define SLASH '/' -#define ROOT_DIR "/" -// ROOT_DIR_REST means how many symbols before first slash in the path -#define ROOT_DIR_REST 0 -#define _getcwd getcwd -#define _chdir chdir -#endif - -#if !CHDIR_SUPPORT_BROKEN -class ChangeCurrentDir { - char dir[PATH_LEN+1]; - char *last_slash; -public: - ChangeCurrentDir() { - if ( !_getcwd( dir, PATH_LEN ) ) { - REPORT_FATAL_ERROR("ERROR: Couldn't get current working directory\n"); - } - - last_slash = strrchr( dir, SLASH ); - ASSERT( last_slash, "The current directory doesn't contain slashes" ); - *last_slash = 0; - - if ( _chdir( last_slash-dir == ROOT_DIR_REST ? ROOT_DIR : dir ) ) { - REPORT_FATAL_ERROR("ERROR: Couldn't change current working directory (%s)\n", dir ); - } - } - - // Restore current dir - ~ChangeCurrentDir() { - *last_slash = SLASH; - if ( _chdir(dir) ) { - REPORT_FATAL_ERROR("ERROR: Couldn't change current working directory\n"); - } - } -}; -#endif - -//! Represents a TBB or OpenMP run-time that uses RML. -template<typename Factory, typename Client> -class RunTime { -public: - //! Factory that run-time uses to make servers. - Factory factory; - Client* client; - typename Factory::server_type* server; -#if _WIN32||_WIN64 - ::rml::server::execution_resource_t me; -#endif - RunTime() { - factory.open(); - } - ~RunTime() { - factory.close(); - } - //! Create server for this run-time - void create_connection(); - - //! 
Destroy server for this run-time - void destroy_connection(); -}; - -class ThreadLevelRecorder { - tbb::atomic<int> level; - struct record { - tbb::tick_count time; - int nthread; - }; - tbb::atomic<unsigned> next; - /** Must be power of two */ - static const unsigned max_record_count = 1<<20; - record array[max_record_count]; -public: - void change_level( int delta ); - void dump(); -}; - -void ThreadLevelRecorder::change_level( int delta ) { - int x = level+=delta; - tbb::tick_count t = tbb::tick_count::now(); - unsigned k = next++; - if( k<max_record_count ) { - record& r = array[k]; - r.time = t; - r.nthread = x; - } -} - -void ThreadLevelRecorder::dump() { - FILE* f = fopen("time.txt","w"); - if( !f ) { - perror("fopen(time.txt)\n"); - exit(1); - } - unsigned limit = next; - if( limit>max_record_count ) { - // Clip - limit = max_record_count; - } - for( unsigned i=0; i<limit; ++i ) { - fprintf(f,"%f\t%d\n",(array[i].time-array[0].time).seconds(),array[i].nthread); - } - fclose(f); -} - -ThreadLevelRecorder TotalThreadLevel; - -class TBB_Client: public ClientBase<tbb::internal::rml::tbb_client> { - /*override*/void process( job& j ); - /*override*/size_type max_job_count() const { - return TBB_MaxThread-1; - } -}; - -class OMP_Client: public ClientBase<__kmp::rml::omp_client> { - /*override*/void process( job&, void* cookie, omp_client::size_type ); - /*override*/size_type max_job_count() const { - return OMP_MaxThread-1; - } -}; - -#if !CHDIR_SUPPORT_BROKEN -// A global instance of ChangeCurrentDir should be declared before TBB_RunTime and OMP_RunTime -// since we want to change current directory before opening factory -ChangeCurrentDir Changer; -#endif -RunTime<tbb::internal::rml::tbb_factory, TBB_Client> TBB_RunTime; -RunTime<__kmp::rml::omp_factory, OMP_Client> OMP_RunTime; - -template<typename Factory, typename Client> -void RunTime<Factory,Client>::create_connection() { - client = new Client; - typename Factory::status_type status = factory.make_server( server, *client ); - ASSERT( status==Factory::st_success, NULL ); -#if _WIN32||_WIN64 - server->register_master( me ); -#endif /* _WIN32||_WIN64 */ -} - -template<typename Factory, typename Client> -void RunTime<Factory,Client>::destroy_connection() { -#if _WIN32||_WIN64 - server->unregister_master( me ); -#endif /* _WIN32||_WIN64 */ - server->request_close_connection(); - server = NULL; -} - -class OMP_Team { -public: - OMP_Team( __kmp::rml::omp_server& ) {} - tbb::atomic<unsigned> barrier; -}; - -tbb::atomic<int> AvailWork; -tbb::atomic<int> CompletionCount; - -void OMPWork() { - tbb::atomic<int> x; - for( x=0; x<2000000; ++x ) { - continue; - } -} - -void TBBWork() { - if( AvailWork>=0 ) { - int k = --AvailWork; - if( k==-1 ) { - TBB_RunTime.server->adjust_job_count_estimate(-(TBB_MaxThread-1)); - ++CompletionCount; - } else if( k>=0 ) { - for( int k=0; k<4; ++k ) { - OMP_Team team( *OMP_RunTime.server ); - int n = OMP_RunTime.server->try_increase_load( OMP_ParallelRegionSize-1, /*strict=*/false ); - team.barrier = 0; - ::rml::job* array[OMP_ParallelRegionSize-1]; - if( n>0) - OMP_RunTime.server->get_threads( n, &team, array ); - // Master does work inside parallel region too.
- OMPWork(); - // Master waits for workers to finish - if( n>0 ) - while( team.barrier!=unsigned(n) ) { - __TBB_Yield(); - } - } - ++CompletionCount; - } - } -} - -/*override*/void TBB_Client::process( job& ) { - TotalThreadLevel.change_level(1); - TBBWork(); - TotalThreadLevel.change_level(-1); -} - -/*override*/void OMP_Client::process( job& /* j */, void* cookie, omp_client::size_type ) { - TotalThreadLevel.change_level(1); - ASSERT( OMP_RunTime.server, NULL ); - OMPWork(); - ASSERT( OMP_RunTime.server, NULL ); - static_cast<OMP_Team*>(cookie)->barrier+=1; - TotalThreadLevel.change_level(-1); -} - -void TBBOutSideOpenMPInside() { - TotalThreadLevel.change_level(1); - CompletionCount = 0; - int tbbtasks = 32; - AvailWork = tbbtasks; - TBB_RunTime.server->adjust_job_count_estimate(TBB_MaxThread-1); - while( CompletionCount!=tbbtasks+1 ) { - TBBWork(); - } - TotalThreadLevel.change_level(-1); -} - -int TestMain () { -#if CHDIR_SUPPORT_BROKEN - REPORT("Known issue: dynamic_link does not support current directory changing before its initialization.\n"); -#endif - for( int TBB_MaxThread=MinThread; TBB_MaxThread<=MaxThread; ++TBB_MaxThread ) { - REMARK("Testing with TBB_MaxThread=%d\n", TBB_MaxThread); - TBB_RunTime.create_connection(); - OMP_RunTime.create_connection(); - TBBOutSideOpenMPInside(); - OMP_RunTime.destroy_connection(); - TBB_RunTime.destroy_connection(); - } - TotalThreadLevel.dump(); - return Harness::Done; -} -#endif /* __TBB_WIN8UI_SUPPORT || __TBB_MIC_OFFLOAD */ diff --git a/src/tbb/src/rml/test/test_rml_omp.cpp b/src/tbb/src/rml/test/test_rml_omp.cpp deleted file mode 100644 index b29f69ae9..000000000 --- a/src/tbb/src/rml/test/test_rml_omp.cpp +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include <tbb/tbb_config.h> -#if __TBB_WIN8UI_SUPPORT || __TBB_MIC_OFFLOAD -#include "harness.h" -int TestMain () { - return Harness::Skipped; -} -#else -#include "rml_omp.h" - -typedef __kmp::rml::omp_server MyServer; -typedef __kmp::rml::omp_factory MyFactory; - -// Forward declaration for the function used in test_server.h -void DoClientSpecificVerification( MyServer& , int ); - -#define HARNESS_DEFAULT_MIN_THREADS 0 -#include "test_server.h" -#include "tbb/tbb_misc.h" - -static bool StrictTeam; - -class MyTeam { - MyTeam& operator=( const MyTeam& ) ; -public: - struct info_type { - rml::job* job; - bool ran; - info_type() : job(NULL), ran(false) {} - }; - MyTeam( MyServer& /* server */, size_t max_thread_ ) : - max_thread(max_thread_) - { - self_ptr = this; - info = new info_type[max_thread]; - } - ~MyTeam() { - delete[] info; - } - const size_t max_thread; - size_t n_thread; - tbb::atomic<int> barrier; - /** Indexed with 1-origin index */ - info_type* info; - int iteration; - MyTeam* self_ptr; -}; - -class MyClient: public ClientBase<__kmp::rml::omp_client> { -public: - MyServer* server; - /*override*/void process( job& j, void* cookie, size_type index ) { - MyTeam& t = *static_cast<MyTeam*>(cookie); - ASSERT( t.self_ptr==&t, "trashed cookie" ); - ASSERT( index<t.max_thread, NULL ); - ASSERT( !t.info[index].ran, "duplicate index?" ); - t.info[index].job = &j; - t.info[index].ran = true; - do_process(j); - if( index==1 && nesting.level<nesting.limit ) { - DoOneConnection<MyFactory,MyClient> doc(MaxThread,Nesting(nesting.level+1,nesting.limit),0,false); - doc(0); - } -#if _WIN32||_WIN64 - // test activate/deactivate - if( t.n_thread>1 && t.n_thread%2==0 ) { - if( nesting.level==0 ) { - if( index&1 ) { - size_type target = index-1; - ASSERT( target<t.max_thread, NULL ); - // wait until t.info[target].job is defined - tbb::internal::spin_wait_until_eq( t.info[target].ran, true ); - server->try_increase_load( 1, true ); - server->reactivate( t.info[target].job ); - } else { - server->deactivate( &j ); - } - } - } -#endif /* _WIN32||_WIN64 */ - ++t.barrier; - } - static const bool is_omp = true; - bool is_strict() const {return StrictTeam;} -}; - -void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) { - ASSERT( max_thread>=0, NULL ); -#if _WIN32||_WIN64 - ::rml::server::execution_resource_t me; - server.register_master( me ); -#endif /* _WIN32||_WIN64 */ - client.server = &server; - MyTeam team(server,size_t(max_thread)); - MyServer::size_type n_thread = 0; - for( int iteration=0; iteration<4; ++iteration ) { - for( size_t i=0; i<team.max_thread; ++i ) - team.info[i].ran = false; - switch( iteration ) { - default: - n_thread = int(max_thread); - break; - case 1: - // No change in number of threads - break; - case 2: - // Decrease number of threads. - n_thread = int(max_thread)/2; - break; - // Case 3 is same code as the default, but has effect of increasing the number of threads. - } - team.barrier = 0; - REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) ); - server.independent_thread_number_changed( n_extra ); - if( checker ) { - // Give RML time to respond to change in number of threads. 
- Harness::Sleep(1); - } - int n_delivered = server.try_increase_load( n_thread, StrictTeam ); - ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" ); - if( n_delivered<0 ) { - REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered ); - server.independent_thread_number_changed( -n_extra ); - n_delivered = 0; - } else { - team.n_thread = n_delivered; - ::rml::job* job_array[JobArraySize]; - job_array[n_delivered] = (::rml::job*)intptr_t(-1); - server.get_threads( n_delivered, &team, job_array ); - __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL ); - for( int i=0; i<n_delivered; ++i ) { - MyJob* j = static_cast<MyJob*>(job_array[i]); - int s = j->state; - ASSERT( s==MyJob::idle||s==MyJob::busy, NULL ); - } - server.independent_thread_number_changed( -n_extra ); - REMARK("client %d: team size is %d\n", client.client_id(), n_delivered); - if( checker ) { - checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra ); - } - // Protocol requires that master wait until workers have called "done_processing" - while( team.barrier!=n_delivered ) { - ASSERT( team.barrier>=0, NULL ); - ASSERT( team.barrier<=n_delivered, NULL ); - __TBB_Yield(); - } - REMARK("client %d: team completed\n", client.client_id() ); - for( int i=0; i<n_delivered; ++i ) { - ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" ); - } - } - for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) { - ASSERT( !team.info[i].ran, "thread on team ran with illegal index" ); - } - } -#if _WIN32||_WIN64 - server.unregister_master( me ); -#endif -} - -void DoClientSpecificVerification( MyServer& server, int /*n_thread*/ ) -{ - ASSERT( server.current_balance()==int(tbb::internal::AvailableHwConcurrency())-1, NULL ); -} - -int TestMain () { -#if _MSC_VER == 1600 && RML_USE_WCRM - REPORT("Known issue: RML resets the process mask when Concurrency Runtime is used.\n"); - // AvailableHwConcurrency reads process mask when the first call. That's why it should - // be called before RML initialization. - tbb::internal::AvailableHwConcurrency(); -#endif - - StrictTeam = true; - VerifyInitialization<MyFactory,MyClient>( MaxThread ); - SimpleTest<MyFactory,MyClient>(); - - StrictTeam = false; - VerifyInitialization<MyFactory,MyClient>( MaxThread ); - SimpleTest<MyFactory,MyClient>(); - - return Harness::Done; -} -#endif /* __TBB_WIN8UI_SUPPORT || __TBB_MIC_OFFLOAD */ diff --git a/src/tbb/src/rml/test/test_rml_omp_c_linkage.c b/src/tbb/src/rml/test/test_rml_omp_c_linkage.c deleted file mode 100644 index 21697d3ff..000000000 --- a/src/tbb/src/rml/test/test_rml_omp_c_linkage.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -void Cplusplus(); - -int main() { - Cplusplus(); - return 0; -} diff --git a/src/tbb/src/rml/test/test_rml_tbb.cpp b/src/tbb/src/rml/test/test_rml_tbb.cpp deleted file mode 100644 index f311282e6..000000000 --- a/src/tbb/src/rml/test/test_rml_tbb.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <tbb/tbb_config.h> -#if __TBB_WIN8UI_SUPPORT || __TBB_MIC_OFFLOAD -#include "harness.h" -int TestMain () { - return Harness::Skipped; -} -#else -#include "rml_tbb.h" - -typedef tbb::internal::rml::tbb_server MyServer; -typedef tbb::internal::rml::tbb_factory MyFactory; - -// Forward declaration of the function used in test_server.h -void DoClientSpecificVerification( MyServer&, int ); - -#define HARNESS_DEFAULT_MIN_THREADS 0 -#include "test_server.h" - -tbb::atomic<int> n_available_hw_threads; - -class MyClient: public ClientBase<tbb::internal::rml::tbb_client> { - tbb::atomic<int> counter; - tbb::atomic<int> gate; - /*override*/void process( job& j ) { - do_process(j); - //wait until the gate is open. 
- while( gate==0 ) - Harness::Sleep(1); - - __TBB_ASSERT( nesting.limit<=2, NULL ); - if( nesting.level>=nesting.limit ) - return; - - size_type max_outstanding_connections = max_job_count(); // if nesting.level==0 - if( nesting.level==1 ) - max_outstanding_connections *= (1+max_outstanding_connections); - - if( default_concurrency()<=max_outstanding_connections+2 ) - // i.e., if it is not guaranteed that at least two connections may be made without depleting the_balance - return; - - // at this point, ( nesting.level<nesting.limit ) && ( my_server->default_concurrency()-max_outstanding_connections>2 ) - for( ;; ) { - while( n_available_hw_threads<=1 ) - Harness::Sleep(1); - - int n = --n_available_hw_threads; - if( n>0 ) break; - // else I lost - ++n_available_hw_threads; - } - - DoOneConnection<MyFactory,MyClient> doc(max_job_count(),Nesting(nesting.level+1,nesting.limit),0,false); - doc(0); - - ++n_available_hw_threads; - } -public: - MyClient() {counter=1;} - static const bool is_omp = false; - bool is_strict() const {return false;} - void open_the_gate() { gate = 1; } - void close_the_gate() { gate = 0; } -}; - -void FireUpJobs( MyServer& server, MyClient& client, int n_thread, int n_extra, Checker* checker ) { - REMARK("client %d: calling adjust_job_count_estimate(%d)\n", client.client_id(),n_thread); - // Exercise independent_thread_number_changed, even for zero values. - server.independent_thread_number_changed( n_extra ); -#if _WIN32||_WIN64 - ::rml::server::execution_resource_t me; - server.register_master( me ); -#endif /* _WIN32||_WIN64 */ - // Experiments indicate that when oversubscribing, the main thread should wait a little - // while for the RML worker threads to do some work. - if( checker ) { - // Give RML time to respond to change in number of threads. - Harness::Sleep(1); - for( int k=0; k<n_thread; ++k ) - client.job_array[k].processing_count = 0; - } - //close the gate to keep worker threads from returning to RML until a snapshot is taken - client.close_the_gate(); - server.adjust_job_count_estimate( n_thread ); - int n_used = 0; - if( checker ) { - Harness::Sleep(100); - for( int k=0; k<n_thread; ++k ) - if( client.job_array[k].processing_count ) - ++n_used; - } - // open the gate - client.open_the_gate(); - // Logic further below presumes that jobs never starve, so undo previous call - // to independent_thread_number_changed before waiting on those jobs. - server.independent_thread_number_changed( -n_extra ); - REMARK("client %d: wait for each job to be processed at least once\n",client.client_id()); - // Calculate the number of jobs that are expected to get threads. - int expected = n_thread; - // Wait for expected number of jobs to be processed. 
-#if RML_USE_WCRM - int default_concurrency = server.default_concurrency(); - if( N_TestConnections>0 ) { - if( default_concurrency+1>=8 && n_thread<=3 && N_TestConnections<=3 && (default_concurrency/int(N_TestConnections)-1)>=n_thread ) { -#endif /* RML_USE_WCRM */ - for(;;) { - int n = 0; - for( int k=0; k<n_thread; ++k ) - if( client.job_array[k].processing_count!=0 ) - ++n; - if( n>=expected ) break; - server.yield(); - } -#if RML_USE_WCRM - } else if( n_thread>0 ) { - for( int m=0; m<20; ++m ) { - int n = 0; - for( int k=0; k<n_thread; ++k ) - if( client.job_array[k].processing_count!=0 ) - ++n; - if( n>=expected ) break; - Harness::Sleep(1); - } - } - } -#endif /* RML_USE_WCRM */ - server.adjust_job_count_estimate(-n_thread); -#if _WIN32||_WIN64 - server.unregister_master( me ); -#endif - // Give RML some time to respond - if( checker ) { - Harness::Sleep(1); - checker->check_number_of_threads_delivered( n_used, n_thread, n_extra ); - } -} - -void DoClientSpecificVerification( MyServer&, int n_thread ) -{ - MyClient* client = new MyClient; - client->initialize( n_thread, Nesting(), ClientStackSize[0] ); - MyFactory factory; - memset( &factory, 0, sizeof(factory) ); - MyFactory::status_type status = factory.open(); - ASSERT( status!=MyFactory::st_not_found, "could not find RML library" ); - ASSERT( status!=MyFactory::st_incompatible, NULL ); - ASSERT( status==MyFactory::st_success, NULL ); - MyFactory::server_type* server; - status = factory.make_server( server, *client ); - ASSERT( status==MyFactory::st_success, NULL ); - client->set_server( server ); - client->expect_close_connection = true; - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -} - -void Initialize() -{ - MyClient* client = new MyClient; - client->initialize( 1, Nesting(), ClientStackSize[0] ); - MyFactory factory; - memset( &factory, 0, sizeof(factory) ); - factory.open(); - MyFactory::server_type* server; - factory.make_server( server, *client ); - client->set_server( server ); - n_available_hw_threads = server->default_concurrency(); - client->expect_close_connection = true; - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -} - -int TestMain () { - VerifyInitialization<MyFactory,MyClient>( MaxThread ); - if ( default_concurrency<1 ) { - REPORT("The test is not intended to run on 1 thread\n"); - return Harness::Skipped; - } - Initialize(); - SimpleTest<MyFactory,MyClient>(); - return Harness::Done; -} -#endif /* __TBB_WIN8UI_SUPPORT || __TBB_MIC_OFFLOAD */ diff --git a/src/tbb/src/rml/test/test_server.h b/src/tbb/src/rml/test/test_server.h deleted file mode 100644 index 9b6f0398c..000000000 --- a/src/tbb/src/rml/test/test_server.h +++ /dev/null @@ -1,433 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* This header contains code shared by test_omp_server.cpp and test_tbb_server.cpp - There is no ifndef guard - test is supposed to include this file exactly once. - The test is also expected to have #include of rml_omp.h or rml_tbb.h before - including this header. - - This header should not use any parts of TBB that require linking in the TBB run-time. - It uses a few instances of tbb::atomic<T>, all of which are completely inlined. */ - -#include "tbb/atomic.h" -#include "tbb/tbb_thread.h" -#include "harness.h" -#include "harness_memory.h" -#include "harness_concurrency_tracker.h" - -//! Define TRIVIAL as 1 to test only a single client, no nesting, no extra threads. -#define TRIVIAL 0 - -//! Maximum number of clients -#if TRIVIAL -const size_t MaxClient = 1; -#else -const size_t MaxClient = 4; -#endif - -const size_t ClientStackSize[MaxClient] = { - 1000000 -#if !TRIVIAL - ,2000000 - ,1000000 - ,4000000 -#endif /* TRIVIAL */ -}; - -const size_t OverheadStackSize = 500000; - -const size_t JobArraySize = 1000; - -static bool TestSingleConnection; - -static size_t N_TestConnections; - -static int default_concurrency; - -class MyJob: public ::rml::job { -public: - //! Enumeration for tracking states of a job. - enum state_t { - //! Job has not yet been allocated. - unallocated, - //! Is idle. - idle, - //! Has a thread working on it. - busy, - //! After call to client::cleanup - clean - }; - tbb::atomic<int> state; - tbb::atomic<int> processing_count; - void update( state_t new_state, state_t old_state ) { - int o = state.compare_and_swap(new_state,old_state); - ASSERT( o==old_state, "illegal transition" ); - } - void update_from_either( state_t new_state, state_t old_state1, state_t old_state2 ) { - int snapshot; - do { - snapshot = state; - ASSERT( snapshot==old_state1||snapshot==old_state2, "illegal transition" ); - } while( state.compare_and_swap(new_state,snapshot)!=snapshot ); - } - MyJob() { - state=unallocated; - processing_count=0; - } - ~MyJob() { - // Overwrite so that accidental use after destruction can be detected.
- memset(this,-1,sizeof(*this)); - } -}; - -static tbb::atomic<int> ClientConstructions; -static tbb::atomic<int> ClientDestructions; - -struct Nesting { - int level; - int limit; - Nesting() : level(0), limit(0) {} - Nesting( int level_, int limit_ ) : level(level_), limit(limit_) {} -}; - -template<typename Client> -class ClientBase: public Client { -protected: - typedef typename Client::size_type size_type; - typedef typename Client::version_type version_type; - typedef typename Client::policy_type policy_type; - typedef typename Client::job job; -private: - size_type my_max_job_count; - size_t my_stack_size; - tbb::atomic<size_t> next_job_index; - int my_client_id; - rml::server* my_server; - -public: - enum state_t { - //! Treat *this as constructed. - live=0x1234, - //! Treat *this as destroyed. - destroyed=0xDEAD - }; - - tbb::atomic<int> state; - void update( state_t new_state, state_t old_state ) { - int o = state.compare_and_swap(new_state,old_state); - ASSERT( o==old_state, NULL ); - } - - tbb::atomic<bool> expect_close_connection; - - MyJob *job_array; - - /*override*/version_type version() const { - ASSERT( state==live, NULL ); - return 1; - } - - /*override*/size_type max_job_count() const { - ASSERT( state==live, NULL ); - return my_max_job_count; - } - - /*override*/size_t min_stack_size() const { - ASSERT( state==live, NULL ); - return my_stack_size; - } - - /*override*/policy_type policy() const {return Client::throughput;} - - /*override*/void acknowledge_close_connection() { - ASSERT( expect_close_connection, NULL ); - for( size_t k=next_job_index; k>0; ) { - --k; - ASSERT( job_array[k].state==MyJob::clean, NULL ); - } - delete[] job_array; - job_array = NULL; - ASSERT( my_server, NULL ); - update( destroyed, live ); - delete this; - } - - /*override*/void cleanup( job& j_ ) { - REMARK("client %d: cleanup(%p) called\n",client_id(),&j_); - ASSERT( state==live, NULL ); - MyJob& j = static_cast<MyJob&>(j_); - while( j.state==MyJob::busy ) - my_server->yield(); - j.update(MyJob::clean,MyJob::idle); - REMARK("client %d: cleanup(%p) returns\n",client_id(),&j_); - } - - job* create_one_job(); - -protected: - void do_process( job& j_ ) { - ASSERT( state==live, NULL ); - MyJob& j = static_cast<MyJob&>(j_); - ASSERT( &j, NULL ); - j.update(MyJob::busy,MyJob::idle); - // use of the plain addition (not the atomic increment) is intentional - j.processing_count = j.processing_count + 1; - ASSERT( my_stack_size>OverheadStackSize, NULL ); -#ifdef __ia64__ - // Half of the stack is reserved for RSE, so test only remaining half.
- UseStackSpace( (my_stack_size-OverheadStackSize)/2 ); -#else - UseStackSpace( my_stack_size-OverheadStackSize ); -#endif - j.update(MyJob::idle,MyJob::busy); - my_server->yield(); - } -public: - ClientBase() : my_server(NULL) { - my_client_id = ClientConstructions++; - next_job_index = 0; - } - int client_id() const {return my_client_id;} - - Nesting nesting; - - void initialize( size_type max_job_count, Nesting nesting_, size_t stack_size ) { - ASSERT( stack_size>0, NULL ); - my_max_job_count = max_job_count; - nesting = nesting_; - my_stack_size = stack_size; - job_array = new MyJob[JobArraySize]; - expect_close_connection = false; - state = live; - } - - void set_server( rml::server* s ) {my_server=s;} - - unsigned default_concurrency() const { ASSERT( my_server, NULL); return my_server->default_concurrency(); } - - virtual ~ClientBase() { - ASSERT( state==destroyed, NULL ); - ++ClientDestructions; - } -}; - -template<typename Client> -typename Client::job* ClientBase<Client>::create_one_job() { - REMARK("client %d: create_one_job() called\n",client_id()); - size_t k = next_job_index++; - ASSERT( state==live, NULL ); - // Following assertion depends on assumption that implementation does not destroy jobs until - // the connection is closed. If the implementation is changed to destroy jobs sooner, the - // test logic in this header will have to be reworked. - ASSERT( k<my_max_job_count, "RML allocated more than max_job_count jobs simultaneously" ); - ASSERT( k<JobArraySize, "JobArraySize not big enough (problem is in test, not RML)" ); - MyJob& j = job_array[k]; - j.update(MyJob::idle,MyJob::unallocated); - REMARK("client %d: create_one_job() for k=%d returns %p\n",client_id(),int(k),&j); - return &j; -} - -struct warning_tracker { - tbb::atomic<int> n_more_than_available; - tbb::atomic<int> n_too_many_threads; - tbb::atomic<int> n_system_overload; - warning_tracker() { - n_more_than_available = 0; - n_too_many_threads = 0; - n_system_overload = 0; - } - bool all_set() { return n_more_than_available>0 && n_too_many_threads>0 && n_system_overload>0; } -} tracker; - -class Checker { -public: - int default_concurrency; - void check_number_of_threads_delivered( int n_delivered, int n_requested, int n_extra ) const; - Checker( rml::server& server ) : default_concurrency(int(server.default_concurrency())) {} -}; - -void Checker::check_number_of_threads_delivered( int n_delivered, int n_requested, int n_extra ) const { - ASSERT( default_concurrency>=0, NULL ); - if( tracker.all_set() ) return; - // Check that number of threads delivered is reasonable. 
- int n_avail = default_concurrency; - if( n_extra>0 ) - n_avail-=n_extra; - if( n_avail<0 ) - n_avail=0; - if( n_requested>default_concurrency ) - n_avail += n_requested-default_concurrency; - int n_expected = n_requested; - if( n_expected>n_avail ) - n_expected=n_avail; - const char* msg = NULL; - if( n_delivered>n_avail ) { - if( ++tracker.n_more_than_available>1 ) - return; - msg = "server delivered more threads than were theoretically available"; - } else if( n_delivered>n_expected ) { - if( ++tracker.n_too_many_threads>1 ) - return; - msg = "server delivered more threads than expected"; - } else if( n_delivered<n_expected ) { - if( ++tracker.n_system_overload>1 ) - return; - msg = "server delivered fewer threads than ideal; or, the system is overloaded?"; - } - if( msg ) { - REPORT("Warning: %s (n_delivered=%d n_avail=%d n_requested=%d n_extra=%d default_concurrency=%d)\n", - msg, n_delivered, n_avail, n_requested, n_extra, default_concurrency ); - } -} - -template<typename Factory,typename Client> -class DoOneConnection: NoAssign { - //! Number of threads to request - const int n_thread; - //! Nesting - const Nesting nesting; - //! Number of extra threads to pretend having outside the RML - const int n_extra; - //! If true, check number of threads actually delivered. - const bool check_delivered; -public: - DoOneConnection( int n_thread_, Nesting nesting_, int n_extra_, bool check_delivered_ ) : - n_thread(n_thread_), - nesting(nesting_), - n_extra(n_extra_), - check_delivered(check_delivered_) - { - } - - //! Test ith connection - void operator()( size_t i ) const; -}; - -template<typename Factory,typename Client> -void DoOneConnection<Factory,Client>::operator()( size_t i ) const { - ASSERT( i<MaxClient, NULL ); - Client* client = new Client; - client->initialize( Client::is_omp ? JobArraySize : n_thread, nesting, ClientStackSize[i] ); - Factory factory; - memset( &factory, 0, sizeof(factory) ); - typename Factory::status_type status = factory.open(); - ASSERT( status==Factory::st_success, NULL ); - - typename Factory::server_type* server; - status = factory.make_server( server, *client ); - ASSERT( status==Factory::st_success, NULL ); - Harness::ConcurrencyTracker ct; - REMARK("client %d: opened server n_thread=%d nesting=(%d,%d)\n", - client->client_id(), n_thread, nesting.level, nesting.limit); - client->set_server( server ); - Checker checker( *server ); - - FireUpJobs( *server, *client, n_thread, n_extra, check_delivered && !client->is_strict() ? &checker : NULL ); - - // Close the connection - client->expect_close_connection = true; - REMARK("client %d: calling request_close_connection\n", client->client_id()); -#if !RML_USE_WCRM - int default_concurrency = server->default_concurrency(); -#endif - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -#if !RML_USE_WCRM - if( TestSingleConnection ) - __TBB_ASSERT_EX( uintptr_t(factory.scratch_ptr)==uintptr_t(default_concurrency), "under/over subscription?" ); -#endif -} - -//! Test with n_threads threads and n_client clients. 
-template<typename Factory, typename Client> -void SimpleTest() { - Harness::ConcurrencyTracker::Reset(); - TestSingleConnection = true; - N_TestConnections = 1; - for( int n_thread=MinThread; n_thread<=MaxThread; ++n_thread ) { - // Test a single connection, no nesting, no extra threads - DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,0),0,false); - doc(0); - } -#if !TRIVIAL - TestSingleConnection = false; - for( int n_thread=MinThread; n_thread<=MaxThread; ++n_thread ) { - // Test parallel connections - for( int n_client=1; n_client<=int(MaxClient); ++n_client ) { - N_TestConnections = n_client; - REMARK("SimpleTest: n_thread=%d n_client=%d\n",n_thread,n_client); - NativeParallelFor( n_client, DoOneConnection<Factory,Client>(n_thread,Nesting(0,0),0,false) ); - } - // Test server::independent_thread_number_changed - N_TestConnections = 1; - for( int n_extra=-4; n_extra<=32; n_extra=n_extra+1+n_extra/5 ) { - DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,0),n_extra,true); - doc(0); - } -#if !RML_USE_WCRM - // Test nested connections - DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,2),0,false); - doc(0); -#endif - } - ASSERT( Harness::ConcurrencyTracker::PeakParallelism()>1 || default_concurrency==0, "No multiple connections exercised?" ); -#endif /* !TRIVIAL */ - // Let RML catch up. - while( ClientConstructions!=ClientDestructions ) - Harness::Sleep(1); -} - -static void check_server_info( void* arg, const char* server_info ) -{ - ASSERT( strstr(server_info, (char*)arg), NULL ); -} - -template<typename Factory, typename Client> -void VerifyInitialization( int n_thread ) { - Client* client = new Client; - client->initialize( Client::is_omp ? JobArraySize : n_thread, Nesting(), ClientStackSize[0] ); - Factory factory; - memset( &factory, 0, sizeof(factory) ); - typename Factory::status_type status = factory.open(); - ASSERT( status!=Factory::st_not_found, "could not find RML library" ); - ASSERT( status!=Factory::st_incompatible, NULL ); - ASSERT( status==Factory::st_success, NULL ); - factory.call_with_server_info( check_server_info, (void*)"Intel(R) RML library" ); - typename Factory::server_type* server; - status = factory.make_server( server, *client ); - ASSERT( status!=Factory::st_incompatible, NULL ); - ASSERT( status!=Factory::st_not_found, NULL ); - ASSERT( status==Factory::st_success, NULL ); - REMARK("client %d: opened server n_thread=%d nesting=(%d,%d)\n", - client->client_id(), n_thread, 0, 0); - ASSERT( server, NULL ); - client->set_server( server ); - default_concurrency = server->default_concurrency(); - - DoClientSpecificVerification( *server, n_thread ); - - // Close the connection - client->expect_close_connection = true; - REMARK("client %d: calling request_close_connection\n", client->client_id()); - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -} diff --git a/src/tbb/src/rml/test/test_thread_monitor.cpp b/src/tbb/src/rml/test/test_thread_monitor.cpp deleted file mode 100644 index 75db81881..000000000 --- a/src/tbb/src/rml/test/test_thread_monitor.cpp +++ /dev/null @@ -1,117 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" -#if __TBB_MIC_OFFLOAD -int TestMain () { - return Harness::Skipped; -} -#else -#include "thread_monitor.h" -#include "harness_memory.h" -#include "tbb/semaphore.cpp" - -class ThreadState { - void loop(); -public: - static __RML_DECL_THREAD_ROUTINE routine( void* arg ) { - static_cast<ThreadState*>(arg)->loop(); - return 0; - } - typedef rml::internal::thread_monitor thread_monitor; - thread_monitor monitor; - volatile int request; - volatile int ack; - volatile unsigned clock; - volatile unsigned stamp; - ThreadState() : request(-1), ack(-1), clock(0) {} -}; - -void ThreadState::loop() { - for(;;) { - ++clock; - if( ack==request ) { - thread_monitor::cookie c; - monitor.prepare_wait(c); - if( ack==request ) { - REMARK("%p: request=%d ack=%d\n", this, request, ack ); - monitor.commit_wait(c); - } else - monitor.cancel_wait(); - } else { - // Throw in delay occasionally - switch( request%8 ) { - case 0: - case 1: - case 5: - rml::internal::thread_monitor::yield(); - } - int r = request; - ack = request; - if( !r ) return; - } - } -} - -// Linux on IA-64 architecture seems to require at least 1<<18 bytes per stack. -const size_t MinStackSize = 1<<18; -const size_t MaxStackSize = 1<<22; - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - ThreadState* t = new ThreadState[p]; - for( size_t stack_size = MinStackSize; stack_size<=MaxStackSize; stack_size*=2 ) { - REMARK("launching %d threads\n",p); - for( int i=0; i<p; ++i ) - rml::internal::thread_monitor::launch( ThreadState::routine, t+i, stack_size ); - for( int k=1000; k>=0; --k ) { - if( k%8==0 ) { - // Wait for threads to wait. 
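The deleted ThreadState::loop above exercises thread_monitor's two-phase wait: snapshot state with prepare_wait(), re-check the wake condition, then either commit_wait() or cancel_wait(), which makes a notification between the check and the sleep impossible to lose. A minimal sketch of the same protocol on standard C++ primitives; the class and member names are invented and do not mirror rml::internal::thread_monitor:

    #include <condition_variable>
    #include <mutex>

    class toy_monitor {
        std::mutex mtx_;
        std::condition_variable cv_;
        unsigned epoch_ = 0;                 // bumped by every notify()
    public:
        using cookie = unsigned;

        void prepare_wait(cookie& c) {       // phase 1: snapshot the epoch
            std::lock_guard<std::mutex> lk(mtx_);
            c = epoch_;
        }
        void commit_wait(cookie c) {         // phase 2a: sleep unless notified since phase 1
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [&] { return epoch_ != c; });
        }
        void cancel_wait(cookie) {}          // phase 2b: work appeared after all; nothing to undo here
        void notify() {
            { std::lock_guard<std::mutex> lk(mtx_); ++epoch_; }
            cv_.notify_all();
        }
    };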
- for( int i=0; i<p; ++i ) { - unsigned count = 0; - do { - t[i].stamp = t[i].clock; - rml::internal::thread_monitor::yield(); - if( ++count>=1000 ) { - REPORT("Warning: thread %d not waiting\n",i); - break; - } - } while( t[i].stamp!=t[i].clock ); - } - } - REMARK("notifying threads\n"); - for( int i=0; i<p; ++i ) { - // Change state visible to launched thread - t[i].request = k; - t[i].monitor.notify(); - } - REMARK("waiting for threads to respond\n"); - for( int i=0; i<p; ++i ) - // Wait for thread to respond - while( t[i].ack!=k ) - rml::internal::thread_monitor::yield(); - } - } - delete[] t; - } - - return Harness::Done; -} -#endif /* __TBB_MIC_OFFLOAD */ diff --git a/src/tbb/src/tbb/CMakeLists.txt b/src/tbb/src/tbb/CMakeLists.txt new file mode 100644 index 000000000..63fd4c73f --- /dev/null +++ b/src/tbb/src/tbb/CMakeLists.txt @@ -0,0 +1,219 @@ +# Copyright (c) 2020-2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +add_library(tbb + address_waiter.cpp + allocator.cpp + arena.cpp + arena_slot.cpp + concurrent_bounded_queue.cpp + dynamic_link.cpp + exception.cpp + governor.cpp + global_control.cpp + itt_notify.cpp + main.cpp + market.cpp + tcm_adaptor.cpp + misc.cpp + misc_ex.cpp + observer_proxy.cpp + parallel_pipeline.cpp + private_server.cpp + profiling.cpp + rml_tbb.cpp + rtm_mutex.cpp + rtm_rw_mutex.cpp + semaphore.cpp + small_object_pool.cpp + task.cpp + task_dispatcher.cpp + task_group_context.cpp + thread_dispatcher.cpp + thread_request_serializer.cpp + threading_control.cpp + version.cpp + queuing_rw_mutex.cpp) + +add_library(TBB::tbb ALIAS tbb) + +if (WIN32) + target_sources(tbb PRIVATE tbb.rc) + set_target_properties(tbb PROPERTIES OUTPUT_NAME "tbb${TBB_BINARY_VERSION}") +endif() + +# TODO: Add statistics.cpp + +target_compile_definitions(tbb + PUBLIC + $<$<CONFIG:DEBUG>:TBB_USE_DEBUG> + PRIVATE + __TBB_BUILD + ${TBB_RESUMABLE_TASKS_USE_THREADS} + $<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:__TBB_DYNAMIC_LOAD_ENABLED=0> + $<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:__TBB_SOURCE_DIRECTLY_INCLUDED=1>) + +if (NOT ("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "(armv7-a|aarch64|mips|arm64|riscv)" OR + "${CMAKE_OSX_ARCHITECTURES}" MATCHES "arm64" OR + WINDOWS_STORE OR + TBB_WINDOWS_DRIVER)) + target_compile_definitions(tbb PRIVATE __TBB_USE_ITT_NOTIFY) +endif() + +target_include_directories(tbb + PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../../include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>) + +target_compile_options(tbb + PRIVATE + ${TBB_CXX_STD_FLAG} # TODO: consider making it PUBLIC. + ${TBB_MMD_FLAG} + ${TBB_DSE_FLAG} + ${TBB_WARNING_LEVEL} + ${TBB_WARNING_SUPPRESS} + ${TBB_LIB_COMPILE_FLAGS} + ${TBB_COMMON_COMPILE_FLAGS} +) + +# Avoid use of target_link_libraries here as it changes /DEF option to \DEF on Windows. 
+set_target_properties(tbb PROPERTIES + DEFINE_SYMBOL "" +) + +tbb_handle_ipo(tbb) + +if (TBB_DEF_FILE_PREFIX) # If there's no prefix, assume we're using export directives + set_target_properties(tbb PROPERTIES + LINK_FLAGS "${TBB_LINK_DEF_FILE_FLAG}\"${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbb.def\"" + LINK_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbb.def" + ) +endif() + +# Prefer using target_link_options instead of target_link_libraries to specify link options because +# target_link_libraries may incorrectly handle some options (on Windows, for example). +if (COMMAND target_link_options) + target_link_options(tbb + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) +else() + target_link_libraries(tbb + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) +endif() + +target_link_libraries(tbb + PRIVATE + Threads::Threads + ${TBB_LIB_LINK_LIBS} + ${TBB_COMMON_LINK_LIBS} +) + +# Strip debug symbols into a separate .dbg file +if(TBB_LINUX_SEPARATE_DBG) + if(NOT CMAKE_BUILD_TYPE STREQUAL "release") + find_program(OBJCOPY_COMMAND objcopy) + if(NOT OBJCOPY_COMMAND) + message(WARNING "objcopy command not found in the system") + else() + add_custom_command(TARGET tbb POST_BUILD + COMMAND objcopy --only-keep-debug $<TARGET_FILE:tbb> $<TARGET_FILE:tbb>.dbg + COMMAND objcopy --strip-debug $<TARGET_FILE:tbb> + COMMAND objcopy --add-gnu-debuglink=$<TARGET_FILE:tbb>.dbg $<TARGET_FILE:tbb> + COMMENT "Creating and associating .dbg file with tbb" + ) + endif() + else() + message(WARNING " TBB_LINUX_SEPARATE_DBG flag is not used on release config") + endif() +endif() + +if(TBB_BUILD_APPLE_FRAMEWORKS) + set_target_properties(tbb PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION ${TBB_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION} + XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER com.intel.tbb + MACOSX_FRAMEWORK_IDENTIFIER com.intel.tbb + MACOSX_FRAMEWORK_BUNDLE_VERSION ${TBB_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION} + MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${TBB_BINARY_VERSION}) +endif() + +tbb_install_target(tbb) + +if (TBB_INSTALL) + if (MSVC) + # Create a copy of target linker file (tbb<ver>[_debug].lib) with legacy name (tbb[_debug].lib) + # to support previous user experience for linkage. 
+ install(FILES + $<TARGET_LINKER_FILE:tbb> + DESTINATION lib + CONFIGURATIONS RelWithDebInfo Release MinSizeRel + RENAME tbb.lib + COMPONENT devel + ) + + install(FILES + $<TARGET_LINKER_FILE:tbb> + DESTINATION lib + CONFIGURATIONS Debug + RENAME tbb_debug.lib + COMPONENT devel + ) + endif() + if(TBB_LINUX_SEPARATE_DBG) + install(FILES + $<TARGET_FILE:tbb>.dbg + DESTINATION lib + COMPONENT devel + ) + endif() + set(_tbb_pc_lib_name tbb) + + if (WIN32) + set(_tbb_pc_lib_name ${_tbb_pc_lib_name}${TBB_BINARY_VERSION}) + endif() + + if (CMAKE_SIZEOF_VOID_P EQUAL 8) + set(TBB_PC_NAME tbb) + else() + set(TBB_PC_NAME tbb32) + endif() + + set(_prefix_for_pc_file "${CMAKE_INSTALL_PREFIX}") + + if (IS_ABSOLUTE "${CMAKE_INSTALL_LIBDIR}") + set(_libdir_for_pc_file "${CMAKE_INSTALL_LIBDIR}") + else() + set(_libdir_for_pc_file "\${prefix}/${CMAKE_INSTALL_LIBDIR}") + endif() + + if (IS_ABSOLUTE "${CMAKE_INSTALL_INCLUDEDIR}") + set(_includedir_for_pc_file "${CMAKE_INSTALL_INCLUDEDIR}") + else() + set(_includedir_for_pc_file "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") + endif() + + configure_file(${PROJECT_SOURCE_DIR}/integration/pkg-config/tbb.pc.in ${CMAKE_CURRENT_BINARY_DIR}/${TBB_PC_NAME}.pc @ONLY) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${TBB_PC_NAME}.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig/ + COMPONENT devel) +endif() + +if (COMMAND tbb_gen_vars) + tbb_gen_vars(tbb) +endif() diff --git a/src/tbb/src/tbb/address_waiter.cpp b/src/tbb/src/tbb/address_waiter.cpp new file mode 100644 index 000000000..1fd3dea8e --- /dev/null +++ b/src/tbb/src/tbb/address_waiter.cpp @@ -0,0 +1,106 @@ +/* + Copyright (c) 2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_utils.h" +#include "governor.h" +#include "concurrent_monitor.h" +#include "oneapi/tbb/detail/_waitable_atomic.h" + +#include <type_traits> + +namespace tbb { +namespace detail { +namespace r1 { + +struct address_context { + address_context() = default; + + address_context(void* address, std::uintptr_t context) : + my_address(address), my_context(context) + {} + + void* my_address{nullptr}; + std::uintptr_t my_context{0}; +}; + +class address_waiter : public concurrent_monitor_base<address_context> { + using base_type = concurrent_monitor_base<address_context>; +public: + using base_type::base_type; + /** per-thread descriptor for concurrent_monitor */ + using thread_context = sleep_node<address_context>; +}; + +// 1024 is a rough estimate based on two assumptions: +// 1) there are no more than 1000 threads in the application; +// 2) the mutexes are optimized for short critical sections less than a couple of microseconds, +// which is less than 1/1000 of a time slice. +// In the worst case, we have single mutex that is locked and its thread is preempted. +// Therefore, the probability of a collision while taking unrelated mutex is about 1/size of a table. 
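The table sized by the estimate above is indexed by hashing the waited-on address. A minimal sketch of that mapping, assuming only the hash and table size visible in the definitions that follow (the function name is invented):

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t k_num_buckets = 2 << 10;   // matches the table size below

    std::size_t bucket_index(void* address) {
        std::uintptr_t tag = reinterpret_cast<std::uintptr_t>(address);
        // The low bits of an aligned object's address carry little entropy;
        // shifting by 5 and xor-ing spreads nearby addresses across buckets.
        return static_cast<std::size_t>(((tag >> 5) ^ tag) % k_num_buckets);
    }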
+static constexpr std::size_t num_address_waiters = 2 << 10; +static_assert(std::is_standard_layout<address_waiter>::value, + "address_waiter must be with standard layout"); +static address_waiter address_waiter_table[num_address_waiters]; + +void clear_address_waiter_table() { + for (std::size_t i = 0; i < num_address_waiters; ++i) { + address_waiter_table[i].destroy(); + } +} + +static address_waiter& get_address_waiter(void* address) { + std::uintptr_t tag = std::uintptr_t(address); + return address_waiter_table[((tag >> 5) ^ tag) % num_address_waiters]; +} + +void wait_on_address(void* address, d1::delegate_base& predicate, std::uintptr_t context) { + address_waiter& waiter = get_address_waiter(address); + waiter.wait<address_waiter::thread_context>(predicate, address_context{address, context}); +} + +void notify_by_address(void* address, std::uintptr_t target_context) { + address_waiter& waiter = get_address_waiter(address); + + auto predicate = [address, target_context] (address_context ctx) { + return ctx.my_address == address && ctx.my_context == target_context; + }; + + waiter.notify_relaxed(predicate); +} + +void notify_by_address_one(void* address) { + address_waiter& waiter = get_address_waiter(address); + + auto predicate = [address] (address_context ctx) { + return ctx.my_address == address; + }; + + waiter.notify_one_relaxed(predicate); +} + +void notify_by_address_all(void* address) { + address_waiter& waiter = get_address_waiter(address); + + auto predicate = [address] (address_context ctx) { + return ctx.my_address == address; + }; + + waiter.notify_relaxed(predicate); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/allocator.cpp b/src/tbb/src/tbb/allocator.cpp new file mode 100644 index 000000000..689c51255 --- /dev/null +++ b/src/tbb/src/tbb/allocator.cpp @@ -0,0 +1,288 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/version.h" + +#include "oneapi/tbb/detail/_exception.h" +#include "oneapi/tbb/detail/_assert.h" +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/tbb_allocator.h" // Is this OK? +#include "oneapi/tbb/cache_aligned_allocator.h" + +#include "dynamic_link.h" +#include "misc.h" + +#include <cstdlib> + +#ifdef _WIN32 +#include <windows.h> +#else +#include <dlfcn.h> +#endif + +#if (!defined(_WIN32) && !defined(_WIN64)) || defined(__CYGWIN__) +#include <stdlib.h> // posix_memalign, free +// With glibc, uClibc and musl on Linux and bionic on Android it is safe to use memalign(), as the allocated memory +// can be freed with free(). It is also better to use memalign() since posix_memalign() is just a wrapper on top of +// memalign() and it offers nothing but overhead due to inconvenient interface. This is likely the case with other +// standard libraries as well, and more libraries can be added to the preprocessor check below. Unfortunately, we +// can't detect musl, so we simply enable memalign() on Linux and Android in general. 
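When none of these platform allocators is available, the fallback later in this file over-allocates with plain malloc(), aligns the result, and stashes the original pointer in the word just before the returned block. A self-contained sketch of that technique, with invented names:

    #include <cstdint>
    #include <cstdlib>

    void* sketch_aligned_allocate(std::size_t bytes, std::size_t alignment) {
        // Assumes alignment is a power of two no smaller than malloc()'s own
        // fundamental alignment, so the header word always fits in the gap.
        std::size_t space = alignment + bytes;
        std::uintptr_t base = reinterpret_cast<std::uintptr_t>(std::malloc(space));
        if (!base) return nullptr;
        std::uintptr_t aligned = (base + alignment) & ~(std::uintptr_t(alignment) - 1);
        reinterpret_cast<std::uintptr_t*>(aligned)[-1] = base;   // remember the real start
        return reinterpret_cast<void*>(aligned);
    }

    void sketch_aligned_deallocate(void* p) {
        if (p) std::free(reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t*>(p)[-1]));
    }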
+#if defined(linux) || defined(__linux) || defined(__linux__) || defined(__ANDROID__) +#include <malloc.h> // memalign +#define __TBB_USE_MEMALIGN +#else +#define __TBB_USE_POSIX_MEMALIGN +#endif +#elif defined(_MSC_VER) || defined(__MINGW32__) +#include <malloc.h> // _aligned_malloc, _aligned_free +#define __TBB_USE_MSVC_ALIGNED_MALLOC +#endif + +#if __TBB_WEAK_SYMBOLS_PRESENT + +#pragma weak scalable_malloc +#pragma weak scalable_free +#pragma weak scalable_aligned_malloc +#pragma weak scalable_aligned_free + +extern "C" { + void* scalable_malloc(std::size_t); + void scalable_free(void*); + void* scalable_aligned_malloc(std::size_t, std::size_t); + void scalable_aligned_free(void*); +} + +#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ + +namespace tbb { +namespace detail { +namespace r1 { + +//! Initialization routine used for first indirect call via allocate_handler. +static void* initialize_allocate_handler(std::size_t size); + +//! Handler for memory allocation +using allocate_handler_type = void* (*)(std::size_t size); +static std::atomic<allocate_handler_type> allocate_handler{ &initialize_allocate_handler }; +allocate_handler_type allocate_handler_unsafe = nullptr; + +//! Handler for memory deallocation +static void (*deallocate_handler)(void* pointer) = nullptr; + +//! Initialization routine used for first indirect call via cache_aligned_allocate_handler. +static void* initialize_cache_aligned_allocate_handler(std::size_t n, std::size_t alignment); + +//! Allocates overaligned memory using standard memory allocator. It is used when scalable_allocator is not available. +static void* std_cache_aligned_allocate(std::size_t n, std::size_t alignment); + +//! Deallocates overaligned memory using standard memory allocator. It is used when scalable_allocator is not available. +static void std_cache_aligned_deallocate(void* p); + +//! Handler for padded memory allocation +using cache_aligned_allocate_handler_type = void* (*)(std::size_t n, std::size_t alignment); +static std::atomic<cache_aligned_allocate_handler_type> cache_aligned_allocate_handler{ &initialize_cache_aligned_allocate_handler }; +cache_aligned_allocate_handler_type cache_aligned_allocate_handler_unsafe = nullptr; + +//! Handler for padded memory deallocation +static void (*cache_aligned_deallocate_handler)(void* p) = nullptr; + +//! Table describing how to link the handlers. +static const dynamic_link_descriptor MallocLinkTable[] = { + DLD(scalable_malloc, allocate_handler_unsafe), + DLD(scalable_free, deallocate_handler), + DLD(scalable_aligned_malloc, cache_aligned_allocate_handler_unsafe), + DLD(scalable_aligned_free, cache_aligned_deallocate_handler), +}; + + +#if TBB_USE_DEBUG +#define DEBUG_SUFFIX "_debug" +#else +#define DEBUG_SUFFIX +#endif /* TBB_USE_DEBUG */ + +// MALLOCLIB_NAME is the name of the oneTBB memory allocator library. +#if _WIN32||_WIN64 +#define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll" +#elif __APPLE__ +#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".2.dylib" +#elif __FreeBSD__ || __NetBSD__ || __OpenBSD__ || __sun || _AIX || __ANDROID__ +#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so" +#elif __unix__ // Note that order of these #elif's is important! +#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so.2" +#else +#error Unknown OS +#endif + +//! Initialize the allocation/free handler pointers. +/** Caller is responsible for ensuring this routine is called exactly once. + The routine attempts to dynamically link with the TBB memory allocator. 
+ If that allocator is not found, it links to malloc and free. */ +void initialize_handler_pointers() { + __TBB_ASSERT(allocate_handler == &initialize_allocate_handler, nullptr); + bool success = dynamic_link(MALLOCLIB_NAME, MallocLinkTable, 4); + if(!success) { + // If unsuccessful, set the handlers to the default routines. + // This must be done now, and not before FillDynamicLinks runs, because if other + // threads call the handlers, we want them to go through the DoOneTimeInitializations logic, + // which forces them to wait. + allocate_handler_unsafe = &std::malloc; + deallocate_handler = &std::free; + cache_aligned_allocate_handler_unsafe = &std_cache_aligned_allocate; + cache_aligned_deallocate_handler = &std_cache_aligned_deallocate; + } + + allocate_handler.store(allocate_handler_unsafe, std::memory_order_release); + cache_aligned_allocate_handler.store(cache_aligned_allocate_handler_unsafe, std::memory_order_release); + + PrintExtraVersionInfo( "ALLOCATOR", success?"scalable_malloc":"malloc" ); +} + +static std::once_flag initialization_state; +void initialize_cache_aligned_allocator() { + std::call_once(initialization_state, &initialize_handler_pointers); +} + +//! Executed on very first call through allocate_handler +/** Only one of initialize_allocate_handler() and initialize_cache_aligned_allocate_handler() + is called, since each one of them also initializes the other. + + In the current implementation of oneTBB library initialization, cache_aligned_allocate() is + used, which in turn calls initialize_cache_aligned_allocate_handler(). As mentioned above, + that also initializes the regular allocate_handler. + + Therefore, initialize_allocate_handler() is not called in the current library implementation. */ +static void* initialize_allocate_handler(std::size_t size) { + initialize_cache_aligned_allocator(); + __TBB_ASSERT(allocate_handler != &initialize_allocate_handler, nullptr); + return (*allocate_handler)(size); +} + +//! 
Executed on very first call through cache_aligned_allocate_handler
+static void* initialize_cache_aligned_allocate_handler(std::size_t bytes, std::size_t alignment) {
+    initialize_cache_aligned_allocator();
+    __TBB_ASSERT(cache_aligned_allocate_handler != &initialize_cache_aligned_allocate_handler, nullptr);
+    return (*cache_aligned_allocate_handler)(bytes, alignment);
+}
+
+// TODO: use CPUID to find actual line size, though consider backward compatibility
+// nfs - no false sharing
+static constexpr std::size_t nfs_size = 128;
+
+std::size_t __TBB_EXPORTED_FUNC cache_line_size() {
+    return nfs_size;
+}
+
+void* __TBB_EXPORTED_FUNC cache_aligned_allocate(std::size_t size) {
+    const std::size_t cache_line_size = nfs_size;
+    __TBB_ASSERT(is_power_of_two(cache_line_size), "must be power of two");
+
+    // Check for overflow
+    if (size + cache_line_size < size) {
+        throw_exception(exception_id::bad_alloc);
+    }
+    // scalable_aligned_malloc considers a zero-size request an error, and returns nullptr
+    if (size == 0) size = 1;
+
+    void* result = cache_aligned_allocate_handler.load(std::memory_order_acquire)(size, cache_line_size);
+    if (!result) {
+        throw_exception(exception_id::bad_alloc);
+    }
+    __TBB_ASSERT(is_aligned(result, cache_line_size), "The returned address isn't aligned");
+    return result;
+}
+
+void __TBB_EXPORTED_FUNC cache_aligned_deallocate(void* p) {
+    __TBB_ASSERT(cache_aligned_deallocate_handler, "Initialization has not been performed yet.");
+    (*cache_aligned_deallocate_handler)(p);
+}
+
+static void* std_cache_aligned_allocate(std::size_t bytes, std::size_t alignment) {
+#if defined(__TBB_USE_MEMALIGN)
+    return memalign(alignment, bytes);
+#elif defined(__TBB_USE_POSIX_MEMALIGN)
+    void* p = nullptr;
+    int res = posix_memalign(&p, alignment, bytes);
+    if (res != 0)
+        p = nullptr;
+    return p;
+#elif defined(__TBB_USE_MSVC_ALIGNED_MALLOC)
+    return _aligned_malloc(bytes, alignment);
+#else
+    // TODO: make it common with cache_aligned_resource
+    std::size_t space = alignment + bytes;
+    std::uintptr_t base = reinterpret_cast<std::uintptr_t>(std::malloc(space));
+    if (!base) {
+        return nullptr;
+    }
+    std::uintptr_t result = (base + nfs_size) & ~(nfs_size - 1);
+    // Round up to the next cache line (align the base address)
+    __TBB_ASSERT((result - base) >= sizeof(std::uintptr_t), "Cannot store a base pointer to the header");
+    __TBB_ASSERT(space - (result - base) >= bytes, "Not enough space for the storage");
+
+    // Record where the block actually starts.
+ (reinterpret_cast<std::uintptr_t*>(result))[-1] = base; + return reinterpret_cast<void*>(result); +#endif +} + +static void std_cache_aligned_deallocate(void* p) { +#if defined(__TBB_USE_MEMALIGN) || defined(__TBB_USE_POSIX_MEMALIGN) + free(p); +#elif defined(__TBB_USE_MSVC_ALIGNED_MALLOC) + _aligned_free(p); +#else + if (p) { + __TBB_ASSERT(reinterpret_cast<std::uintptr_t>(p) >= 0x4096, "attempt to free block not obtained from cache_aligned_allocator"); + // Recover where block actually starts + std::uintptr_t base = (reinterpret_cast<std::uintptr_t*>(p))[-1]; + __TBB_ASSERT(((base + nfs_size) & ~(nfs_size - 1)) == reinterpret_cast<std::uintptr_t>(p), "Incorrect alignment or not allocated by std_cache_aligned_deallocate?"); + std::free(reinterpret_cast<void*>(base)); + } +#endif +} + +void* __TBB_EXPORTED_FUNC allocate_memory(std::size_t size) { + void* result = allocate_handler.load(std::memory_order_acquire)(size); + if (!result) { + throw_exception(exception_id::bad_alloc); + } + return result; +} + +void __TBB_EXPORTED_FUNC deallocate_memory(void* p) { + if (p) { + __TBB_ASSERT(deallocate_handler, "Initialization has not been yet."); + (*deallocate_handler)(p); + } +} + +bool __TBB_EXPORTED_FUNC is_tbbmalloc_used() { + auto handler_snapshot = allocate_handler.load(std::memory_order_acquire); + if (handler_snapshot == &initialize_allocate_handler) { + initialize_cache_aligned_allocator(); + } + handler_snapshot = allocate_handler.load(std::memory_order_relaxed); + __TBB_ASSERT(handler_snapshot != &initialize_allocate_handler && deallocate_handler != nullptr, nullptr); + // Cast to void avoids type mismatch errors on some compilers (e.g. __IBMCPP__) + __TBB_ASSERT((reinterpret_cast<void*>(handler_snapshot) == reinterpret_cast<void*>(&std::malloc)) == (reinterpret_cast<void*>(deallocate_handler) == reinterpret_cast<void*>(&std::free)), + "Both shim pointers must refer to routines from the same package (either TBB or CRT)"); + return reinterpret_cast<void*>(handler_snapshot) == reinterpret_cast<void*>(&std::malloc); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/arena.cpp b/src/tbb/src/tbb/arena.cpp index 47349ced8..6ca062d02 100644 --- a/src/tbb/src/tbb/arena.cpp +++ b/src/tbb/src/tbb/arena.cpp @@ -1,867 +1,899 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -#include "scheduler.h" +#include "task_dispatcher.h" #include "governor.h" +#include "threading_control.h" #include "arena.h" #include "itt_notify.h" #include "semaphore.h" +#include "waiters.h" +#include "oneapi/tbb/detail/_task.h" +#include "oneapi/tbb/info.h" +#include "oneapi/tbb/tbb_allocator.h" +#include <atomic> +#include <cstring> #include <functional> -#if __TBB_STATISTICS_STDOUT -#include <cstdio> -#endif - namespace tbb { -namespace internal { +namespace detail { +namespace r1 { -void arena::process( generic_scheduler& s ) { - __TBB_ASSERT( is_alive(my_guard), NULL ); - __TBB_ASSERT( governor::is_set(&s), NULL ); - __TBB_ASSERT( !s.my_innermost_running_task, NULL ); - __TBB_ASSERT( !s.my_dispatching_task, NULL ); +#if __TBB_ARENA_BINDING +class numa_binding_observer : public tbb::task_scheduler_observer { + binding_handler* my_binding_handler; +public: + numa_binding_observer( d1::task_arena* ta, int num_slots, int numa_id, core_type_id core_type, int max_threads_per_core ) + : task_scheduler_observer(*ta) + , my_binding_handler(construct_binding_handler(num_slots, numa_id, core_type, max_threads_per_core)) + {} - __TBB_ASSERT( my_num_slots != 1, NULL ); - // Start search for an empty slot from the one we occupied the last time - unsigned index = s.my_arena_index < my_num_slots ? 
s.my_arena_index : s.my_random.get() % (my_num_slots - 1) + 1, - end = index; - __TBB_ASSERT( index != 0, "A worker cannot occupy slot 0" ); - __TBB_ASSERT( index < my_num_slots, NULL ); - - // Find a vacant slot - for ( ;; ) { - if ( !my_slots[index].my_scheduler && as_atomic(my_slots[index].my_scheduler).compare_and_swap(&s, NULL ) == NULL ) - break; - if ( ++index == my_num_slots ) - index = 1; - if ( index == end ) { - // Likely this arena is already saturated - goto quit; - } + void on_scheduler_entry( bool ) override { + apply_affinity_mask(my_binding_handler, this_task_arena::current_thread_index()); } - ITT_NOTIFY(sync_acquired, my_slots + index); - s.my_arena = this; - s.my_arena_index = index; - s.my_arena_slot = my_slots + index; -#if __TBB_TASK_PRIORITY - s.my_local_reload_epoch = *s.my_ref_reload_epoch; - __TBB_ASSERT( !s.my_offloaded_tasks, NULL ); -#endif /* __TBB_TASK_PRIORITY */ - s.attach_mailbox( affinity_id(index+1) ); - s.my_arena_slot->hint_for_pop = index; // initial value for round-robin + void on_scheduler_exit( bool ) override { + restore_affinity_mask(my_binding_handler, this_task_arena::current_thread_index()); + } -#if !__TBB_FP_CONTEXT - my_cpu_ctl_env.set_env(); -#endif + ~numa_binding_observer() override{ + destroy_binding_handler(my_binding_handler); + } +}; -#if __TBB_SCHEDULER_OBSERVER - __TBB_ASSERT( !s.my_last_local_observer, "There cannot be notified local observers when entering arena" ); - my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - - atomic_update( my_limit, index + 1, std::less<unsigned>() ); - - for ( ;; ) { - // Try to steal a task. - // Passing reference count is technically unnecessary in this context, - // but omitting it here would add checks inside the function. - __TBB_ASSERT( is_alive(my_guard), NULL ); - task* t = s.receive_or_steal_task( s.my_dummy_task->prefix().ref_count, /*return_if_no_work=*/true ); - if (t) { - // A side effect of receive_or_steal_task is that my_innermost_running_task can be set. - // But for the outermost dispatch loop of a worker it has to be NULL. - s.my_innermost_running_task = NULL; - __TBB_ASSERT( !s.my_dispatching_task, NULL ); - s.local_wait_for_all(*s.my_dummy_task,t); +numa_binding_observer* construct_binding_observer( d1::task_arena* ta, int num_slots, int numa_id, core_type_id core_type, int max_threads_per_core ) { + numa_binding_observer* binding_observer = nullptr; + if ((core_type >= 0 && core_type_count() > 1) || (numa_id >= 0 && numa_node_count() > 1) || max_threads_per_core > 0) { + binding_observer = new(allocate_memory(sizeof(numa_binding_observer))) numa_binding_observer(ta, num_slots, numa_id, core_type, max_threads_per_core); + __TBB_ASSERT(binding_observer, "Failure during NUMA binding observer allocation and construction"); + } + return binding_observer; +} + +void destroy_binding_observer( numa_binding_observer* binding_observer ) { + __TBB_ASSERT(binding_observer, "Trying to deallocate nullptr pointer"); + binding_observer->observe(false); + binding_observer->~numa_binding_observer(); + deallocate_memory(binding_observer); +} +#endif /*!__TBB_ARENA_BINDING*/ + +void arena::on_thread_leaving(unsigned ref_param) { + // + // Implementation of arena destruction synchronization logic contained various + // bugs/flaws at the different stages of its evolution, so below is a detailed + // description of the issues taken into consideration in the framework of the + // current design. 
+    //
+    // In the case of fire-and-forget tasks (scheduled via task::enqueue()), an
+    // external thread is allowed to leave its arena before all of its work is executed,
+    // and the market may temporarily revoke all workers from this arena. Since revoked
+    // workers never attempt to reset the arena state to EMPTY and cancel its request
+    // to RML for threads, the arena object is destroyed only when both the last
+    // thread is leaving it and the arena's state is EMPTY (that is, its external
+    // thread left and it does not contain any work).
+    // Thus resetting the arena to the EMPTY state (as earlier TBB versions did) should not
+    // be done here (or anywhere else in the external thread, for that matter); doing so
+    // can result either in the arena's premature destruction (at least without
+    // additional costly checks in workers) or in unnecessary arena state changes
+    // (and the ensuing migration of workers).
+    //
+    // A worker that checks for work presence and transitions the arena to the EMPTY
+    // state (in the snapshot-taking procedure arena::out_of_work()) updates
+    // arena::my_pool_state first and only then arena::my_num_workers_requested.
+    // So the check for work absence must be done against the latter field.
+    //
+    // In the time window between decrementing the active threads count and checking
+    // for an outstanding request for workers, a new worker thread may arrive,
+    // finish the remaining work, set the arena state to empty, and leave, decrementing
+    // its refcount and destroying the arena. Then the current thread would destroy
+    // the arena a second time. To preclude this, a local copy of the outstanding
+    // request value can be stored before decrementing the active threads count.
+    //
+    // But this technique may cause two other problems. When the stored request is
+    // zero, it is possible that the arena still has threads and they can generate new
+    // tasks and thus re-establish non-zero requests. Then all the threads can be
+    // revoked (as described above), leaving this thread the last one and causing
+    // it to destroy a non-empty arena.
+    //
+    // The other problem takes place when the stored request is non-zero. Another
+    // thread may complete the work, set the arena state to empty, and leave without
+    // destroying the arena before this thread decrements the refcount. This thread
+    // cannot destroy the arena either. Thus the arena may be "orphaned".
+    //
+    // In both cases we cannot dereference the arena pointer after the refcount is
+    // decremented, as our arena may already be destroyed.
+    //
+    // If this is the external thread, the market is protected by the refcount it
+    // holds on it. In the case of workers, the market's liveness is ensured by the
+    // RML connection rundown protocol, according to which the client (i.e. the
+    // market) lives until the RML server notifies it about connection termination,
+    // and this notification is fired only after all workers return to RML.
+    //
+    // Thus, if we decremented the refcount to zero, we ask the market to check the
+    // arena state (including whether it is alive) under a lock.
+    //
+
+    __TBB_ASSERT(my_references.load(std::memory_order_relaxed) >= ref_param, "broken arena reference counter");
+
+    // When there are no workers, someone must free the arena;
+    // without workers, no one calls out_of_work().
+ if (ref_param == ref_external && !my_mandatory_concurrency.test()) { + out_of_work(); + } + + threading_control* tc = my_threading_control; + auto tc_client_snapshot = tc->prepare_client_destruction(my_tc_client); + // Release our reference to sync with destroy_client + unsigned remaining_ref = my_references.fetch_sub(ref_param, std::memory_order_release) - ref_param; + // do not access `this` it might be destroyed already + if (remaining_ref == 0) { + if (tc->try_destroy_client(tc_client_snapshot)) { + // We are requested to destroy ourself + free_arena(); } - __TBB_ASSERT ( __TBB_load_relaxed(s.my_arena_slot->head) == __TBB_load_relaxed(s.my_arena_slot->tail), - "Worker cannot leave arena while its task pool is not empty" ); - __TBB_ASSERT( s.my_arena_slot->task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" ); - // This check prevents relinquishing more than necessary workers because - // of the non-atomicity of the decision making procedure - if (num_workers_active() > my_num_workers_allotted) - break; - } -#if __TBB_SCHEDULER_OBSERVER - my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true ); - s.my_last_local_observer = NULL; -#endif /* __TBB_SCHEDULER_OBSERVER */ -#if __TBB_TASK_PRIORITY - if ( s.my_offloaded_tasks ) - orphan_offloaded_tasks( s ); -#endif /* __TBB_TASK_PRIORITY */ -#if __TBB_STATISTICS - ++s.my_counters.arena_roundtrips; - *my_slots[index].my_counters += s.my_counters; - s.my_counters.reset(); -#endif /* __TBB_STATISTICS */ - __TBB_store_with_release( my_slots[index].my_scheduler, (generic_scheduler*)NULL ); - s.my_arena_slot = 0; // detached from slot - s.my_inbox.detach(); - __TBB_ASSERT( s.my_inbox.is_idle_state(true), NULL ); - __TBB_ASSERT( !s.my_innermost_running_task, NULL ); - __TBB_ASSERT( !s.my_dispatching_task, NULL ); - __TBB_ASSERT( is_alive(my_guard), NULL ); -quit: + } +} + +std::size_t arena::occupy_free_slot_in_range( thread_data& tls, std::size_t lower, std::size_t upper ) { + if ( lower >= upper ) return out_of_arena; + // Start search for an empty slot from the one we occupied the last time + std::size_t index = tls.my_arena_index; + if ( index < lower || index >= upper ) index = tls.my_random.get() % (upper - lower) + lower; + __TBB_ASSERT( index >= lower && index < upper, nullptr); + // Find a free slot + for ( std::size_t i = index; i < upper; ++i ) + if (my_slots[i].try_occupy()) return i; + for ( std::size_t i = lower; i < index; ++i ) + if (my_slots[i].try_occupy()) return i; + return out_of_arena; +} + +template <bool as_worker> +std::size_t arena::occupy_free_slot(thread_data& tls) { + // Firstly, external threads try to occupy reserved slots + std::size_t index = as_worker ? out_of_arena : occupy_free_slot_in_range( tls, 0, my_num_reserved_slots ); + if ( index == out_of_arena ) { + // Secondly, all threads try to occupy all non-reserved slots + index = occupy_free_slot_in_range(tls, my_num_reserved_slots, my_num_slots ); + // Likely this arena is already saturated + if ( index == out_of_arena ) + return out_of_arena; + } + + atomic_update( my_limit, (unsigned)(index + 1), std::less<unsigned>() ); + return index; +} + +std::uintptr_t arena::calculate_stealing_threshold() { + stack_anchor_type anchor; + return r1::calculate_stealing_threshold(reinterpret_cast<std::uintptr_t>(&anchor), my_threading_control->worker_stack_size()); +} + +void arena::process(thread_data& tls) { + governor::set_thread_data(tls); // TODO: consider moving to create_one_job. 
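The slot search in occupy_free_slot_in_range() above starts from a hinted index inside [lower, upper), claims the first slot whose occupancy flag it can flip, and wraps around once. A standalone sketch of that pattern, with a plain atomic bool standing in for the slot state (names invented):

    #include <atomic>
    #include <cstddef>

    struct toy_slot {
        std::atomic<bool> occupied{false};
        bool try_occupy() {
            bool expected = false;
            return occupied.compare_exchange_strong(expected, true);
        }
    };

    constexpr std::size_t out_of_range = ~std::size_t(0);

    std::size_t occupy_in_range(toy_slot* slots, std::size_t lower, std::size_t upper,
                                std::size_t hint) {
        if (lower >= upper) return out_of_range;
        std::size_t start = (hint >= lower && hint < upper) ? hint : lower;
        for (std::size_t i = start; i < upper; ++i)   // from the hint to the end...
            if (slots[i].try_occupy()) return i;
        for (std::size_t i = lower; i < start; ++i)   // ...then wrap around
            if (slots[i].try_occupy()) return i;
        return out_of_range;                          // range saturated
    }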
+ __TBB_ASSERT( is_alive(my_guard), nullptr); + __TBB_ASSERT( my_num_slots >= 1, nullptr); + + std::size_t index = occupy_free_slot</*as_worker*/true>(tls); + if (index == out_of_arena) { + on_thread_leaving(ref_worker); + return; + } + + __TBB_ASSERT( index >= my_num_reserved_slots, "Workers cannot occupy reserved slots" ); + tls.attach_arena(*this, index); + // worker thread enters the dispatch loop to look for a work + tls.my_inbox.set_is_idle(true); + if (tls.my_arena_slot->is_task_pool_published()) { + tls.my_inbox.set_is_idle(false); + } + + task_dispatcher& task_disp = tls.my_arena_slot->default_task_dispatcher(); + tls.enter_task_dispatcher(task_disp, calculate_stealing_threshold()); + __TBB_ASSERT(task_disp.can_steal(), nullptr); + + __TBB_ASSERT( !tls.my_last_observer, "There cannot be notified local observers when entering arena" ); + my_observers.notify_entry_observers(tls.my_last_observer, tls.my_is_worker); + + // Waiting on special object tied to this arena + outermost_worker_waiter waiter(*this); + d1::task* t = tls.my_task_dispatcher->local_wait_for_all(nullptr, waiter); + // For purposes of affinity support, the slot's mailbox is considered idle while no thread is + // attached to it. + tls.my_inbox.set_is_idle(true); + + __TBB_ASSERT_EX(t == nullptr, "Outermost worker must not leave dispatch loop with a task"); + __TBB_ASSERT(governor::is_thread_data_set(&tls), nullptr); + __TBB_ASSERT(tls.my_task_dispatcher == &task_disp, nullptr); + + my_observers.notify_exit_observers(tls.my_last_observer, tls.my_is_worker); + tls.my_last_observer = nullptr; + + tls.leave_task_dispatcher(); + + // Arena slot detach (arena may be used in market::process) + // TODO: Consider moving several calls below into a new method(e.g.detach_arena). + tls.my_arena_slot->release(); + tls.my_arena_slot = nullptr; + tls.my_inbox.detach(); + __TBB_ASSERT(tls.my_inbox.is_idle_state(true), nullptr); + __TBB_ASSERT(is_alive(my_guard), nullptr); + // In contrast to earlier versions of TBB (before 3.0 U5) now it is possible // that arena may be temporarily left unpopulated by threads. See comments in // arena::on_thread_leaving() for more details. -#if !__TBB_TRACK_PRIORITY_LEVEL_SATURATION - on_thread_leaving</*is_master*/false>(); -#endif /* !__TBB_TRACK_PRIORITY_LEVEL_SATURATION */ + on_thread_leaving(ref_worker); + __TBB_ASSERT(tls.my_arena == this, "my_arena is used as a hint when searching the arena to join"); } -arena::arena ( market& m, unsigned max_num_workers ) { +arena::arena(threading_control* control, unsigned num_slots, unsigned num_reserved_slots, unsigned priority_level) { __TBB_ASSERT( !my_guard, "improperly allocated arena?" ); - __TBB_ASSERT( sizeof(my_slots[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" ); - __TBB_ASSERT( (uintptr_t)this % NFS_GetLineSize()==0, "arena misaligned" ); -#if __TBB_TASK_PRIORITY - __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority, "New arena object is not zeroed" ); -#endif /* __TBB_TASK_PRIORITY */ - my_market = &m; + __TBB_ASSERT( sizeof(my_slots[0]) % cache_line_size()==0, "arena::slot size not multiple of cache line size" ); + __TBB_ASSERT( is_aligned(this, cache_line_size()), "arena misaligned" ); + my_threading_control = control; my_limit = 1; - // Two slots are mandatory: for the master, and for 1 worker (required to support starvation resistant tasks). 
- my_num_slots = num_slots_to_reserve(max_num_workers); - my_max_num_workers = max_num_workers; - my_references = 1; // accounts for the master -#if __TBB_TASK_PRIORITY - my_bottom_priority = my_top_priority = normalized_normal_priority; -#endif /* __TBB_TASK_PRIORITY */ - my_aba_epoch = m.my_arenas_aba_epoch; -#if __TBB_SCHEDULER_OBSERVER + // Two slots are mandatory: for the external thread, and for 1 worker (required to support starvation resistant tasks). + my_num_slots = num_arena_slots(num_slots, num_reserved_slots); + my_num_reserved_slots = num_reserved_slots; + my_max_num_workers = num_slots-num_reserved_slots; + my_priority_level = priority_level; + my_references = ref_external; // accounts for the external thread my_observers.my_arena = this; -#endif /* __TBB_SCHEDULER_OBSERVER */ - __TBB_ASSERT ( my_max_num_workers < my_num_slots, NULL ); + my_co_cache.init(4 * num_slots); + __TBB_ASSERT ( my_max_num_workers <= my_num_slots, nullptr); + // Initialize the default context. It should be allocated before task_dispatch construction. + my_default_ctx = new (cache_aligned_allocate(sizeof(d1::task_group_context))) + d1::task_group_context{ d1::task_group_context::isolated, d1::task_group_context::fp_settings }; // Construct slots. Mark internal synchronization elements for the tools. + task_dispatcher* base_td_pointer = reinterpret_cast<task_dispatcher*>(my_slots + my_num_slots); for( unsigned i = 0; i < my_num_slots; ++i ) { - __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL ); - __TBB_ASSERT( !my_slots[i].task_pool_ptr, NULL ); - __TBB_ASSERT( !my_slots[i].my_task_pool_size, NULL ); - ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool); - mailbox(i+1).construct(); - ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox); - my_slots[i].hint_for_pop = i; -#if __TBB_STATISTICS - my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters; -#endif /* __TBB_STATISTICS */ - } -#if __TBB_TASK_PRIORITY - for ( intptr_t i = 0; i < num_priority_levels; ++i ) { - my_task_stream[i].initialize(my_num_slots); - ITT_SYNC_CREATE(my_task_stream + i, SyncType_Scheduler, SyncObj_TaskStream); - } -#else /* !__TBB_TASK_PRIORITY */ - my_task_stream.initialize(my_num_slots); - ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream); -#endif /* !__TBB_TASK_PRIORITY */ - my_mandatory_concurrency = false; -#if __TBB_TASK_GROUP_CONTEXT - // Context to be used by root tasks by default (if the user has not specified one). - // The arena's context should not capture fp settings for the sake of backward compatibility. 
- my_default_ctx = - new ( NFS_Allocate(1, sizeof(task_group_context), NULL) ) task_group_context(task_group_context::isolated, task_group_context::default_traits); -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_FP_CONTEXT - my_default_ctx->capture_fp_settings(); -#else - my_cpu_ctl_env.get_env(); + // __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, nullptr); + __TBB_ASSERT( !my_slots[i].task_pool_ptr, nullptr); + __TBB_ASSERT( !my_slots[i].my_task_pool_size, nullptr); + mailbox(i).construct(); + my_slots[i].init_task_streams(i); + my_slots[i].my_default_task_dispatcher = new(base_td_pointer + i) task_dispatcher(this); + my_slots[i].my_is_occupied.store(false, std::memory_order_relaxed); + } + my_fifo_task_stream.initialize(my_num_slots); + my_resume_task_stream.initialize(my_num_slots); +#if __TBB_PREVIEW_CRITICAL_TASKS + my_critical_task_stream.initialize(my_num_slots); #endif + my_mandatory_requests = 0; } -arena& arena::allocate_arena( market& m, unsigned max_num_workers ) { +arena& arena::allocate_arena(threading_control* control, unsigned num_slots, unsigned num_reserved_slots, + unsigned priority_level) +{ __TBB_ASSERT( sizeof(base_type) + sizeof(arena_slot) == sizeof(arena), "All arena data fields must go to arena_base" ); - __TBB_ASSERT( sizeof(base_type) % NFS_GetLineSize() == 0, "arena slots area misaligned: wrong padding" ); - __TBB_ASSERT( sizeof(mail_outbox) == NFS_MaxLineSize, "Mailbox padding is wrong" ); - size_t n = allocation_size(max_num_workers); - unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL ); + __TBB_ASSERT( sizeof(base_type) % cache_line_size() == 0, "arena slots area misaligned: wrong padding" ); + __TBB_ASSERT( sizeof(mail_outbox) == max_nfs_size, "Mailbox padding is wrong" ); + std::size_t n = allocation_size(num_arena_slots(num_slots, num_reserved_slots)); + unsigned char* storage = (unsigned char*)cache_aligned_allocate(n); // Zero all slots to indicate that they are empty - memset( storage, 0, n ); - return *new( storage + num_slots_to_reserve(max_num_workers) * sizeof(mail_outbox) ) arena(m, max_num_workers); + std::memset( storage, 0, n ); + + return *new( storage + num_arena_slots(num_slots, num_reserved_slots) * sizeof(mail_outbox) ) + arena(control, num_slots, num_reserved_slots, priority_level); } void arena::free_arena () { - __TBB_ASSERT( is_alive(my_guard), NULL ); - __TBB_ASSERT( !my_references, "There are threads in the dying arena" ); - __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" ); - __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, "Inconsistent state of a dying arena" ); -#if !__TBB_STATISTICS_EARLY_DUMP - GATHER_STATISTIC( dump_arena_statistics() ); -#endif + __TBB_ASSERT( is_alive(my_guard), nullptr); + __TBB_ASSERT( !my_references.load(std::memory_order_relaxed), "There are threads in the dying arena" ); + __TBB_ASSERT( !my_total_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" ); + __TBB_ASSERT( is_empty(), "Inconsistent state of a dying arena" ); +#if __TBB_ARENA_BINDING + if (my_numa_binding_observer != nullptr) { + destroy_binding_observer(my_numa_binding_observer); + my_numa_binding_observer = nullptr; + } +#endif /*__TBB_ARENA_BINDING*/ poison_value( my_guard ); - intptr_t drained = 0; for ( unsigned i = 0; i < my_num_slots; ++i ) { - __TBB_ASSERT( !my_slots[i].my_scheduler, "arena slot is not empty" ); -#if !__TBB_TASK_ARENA - __TBB_ASSERT( my_slots[i].task_pool == EmptyTaskPool, NULL 
);
-#else
-    //TODO: understand the assertion and modify
-#endif
-        __TBB_ASSERT( my_slots[i].head == my_slots[i].tail, NULL ); // TODO: replace by is_quiescent_local_task_pool_empty
+        // __TBB_ASSERT( !my_slots[i].my_scheduler, "arena slot is not empty" );
+        // TODO: understand the assertion and modify
+        // __TBB_ASSERT( my_slots[i].task_pool == EmptyTaskPool, nullptr);
+        __TBB_ASSERT( my_slots[i].head == my_slots[i].tail, nullptr); // TODO: replace by is_quiescent_local_task_pool_empty
         my_slots[i].free_task_pool();
-#if __TBB_STATISTICS
-        NFS_Free( my_slots[i].my_counters );
-#endif /* __TBB_STATISTICS */
-        drained += mailbox(i+1).drain();
-    }
-#if __TBB_TASK_PRIORITY && TBB_USE_ASSERT
-    for ( intptr_t i = 0; i < num_priority_levels; ++i )
-        __TBB_ASSERT(my_task_stream[i].empty() && my_task_stream[i].drain()==0, "Not all enqueued tasks were executed");
-#elif !__TBB_TASK_PRIORITY
-    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
-#endif /* !__TBB_TASK_PRIORITY */
-#if __TBB_COUNT_TASK_NODES
-    my_market->update_task_node_count( -drained );
-#endif /* __TBB_COUNT_TASK_NODES */
-    my_market->release();
-#if __TBB_TASK_GROUP_CONTEXT
-    __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
+        mailbox(i).drain();
+        my_slots[i].my_default_task_dispatcher->~task_dispatcher();
+    }
+    __TBB_ASSERT(my_fifo_task_stream.empty(), "Not all enqueued tasks were executed");
+    __TBB_ASSERT(my_resume_task_stream.empty(), "Not all enqueued tasks were executed");
+    // Clean up the coroutines/schedulers cache
+    my_co_cache.cleanup();
     my_default_ctx->~task_group_context();
-    NFS_Free(my_default_ctx);
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-#if __TBB_SCHEDULER_OBSERVER
-    if ( !my_observers.empty() )
-        my_observers.clear();
-#endif /* __TBB_SCHEDULER_OBSERVER */
-    void* storage = &mailbox(my_num_slots);
-    __TBB_ASSERT( my_references == 0, NULL );
-    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
+    cache_aligned_deallocate(my_default_ctx);
+#if __TBB_PREVIEW_CRITICAL_TASKS
+    __TBB_ASSERT( my_critical_task_stream.empty(), "Not all critical tasks were executed");
+#endif
+    // clear() enforces synchronization with observe(false)
+    my_observers.clear();
+
+    void* storage = &mailbox(my_num_slots-1);
+    __TBB_ASSERT( my_references.load(std::memory_order_relaxed) == 0, nullptr);
     this->~arena();
 #if TBB_USE_ASSERT > 1
-    memset( storage, 0, allocation_size(my_max_num_workers) );
+    std::memset( storage, 0, allocation_size(my_num_slots) );
 #endif /* TBB_USE_ASSERT */
-    NFS_Free( storage );
+    cache_aligned_deallocate( storage );
 }
-#if __TBB_STATISTICS
-void arena::dump_arena_statistics () {
-    statistics_counters total;
-    for( unsigned i = 0; i < my_num_slots; ++i ) {
-#if __TBB_STATISTICS_EARLY_DUMP
-        generic_scheduler* s = my_slots[i].my_scheduler;
-        if ( s )
-            *my_slots[i].my_counters += s->my_counters;
-#else
-        __TBB_ASSERT( !my_slots[i].my_scheduler, NULL );
+bool arena::has_enqueued_tasks() {
+    return !my_fifo_task_stream.empty();
+}
+
+void arena::request_workers(int mandatory_delta, int workers_delta, bool wakeup_threads) {
+    my_threading_control->adjust_demand(my_tc_client, mandatory_delta, workers_delta);
+
+    if (wakeup_threads) {
+        // Notify all sleeping threads that work has appeared in the arena.
+        get_waiting_threads_monitor().notify([&] (market_context context) {
+            return this == context.my_arena_addr;
+        });
+    }
+}
+
+bool arena::has_tasks() {
+    // TODO: rework it to return at least a hint about where a task was found; better if the task itself.
+    std::size_t n = my_limit.load(std::memory_order_acquire);
+    bool tasks_are_available = false;
+    for (std::size_t k = 0; k < n && !tasks_are_available; ++k) {
+        tasks_are_available = !my_slots[k].is_empty();
+    }
+    tasks_are_available = tasks_are_available || has_enqueued_tasks() || !my_resume_task_stream.empty();
+#if __TBB_PREVIEW_CRITICAL_TASKS
+    tasks_are_available = tasks_are_available || !my_critical_task_stream.empty();
#endif
-        if ( i != 0 ) {
-            total += *my_slots[i].my_counters;
-            dump_statistics( *my_slots[i].my_counters, i );
+    return tasks_are_available;
+}
+
+void arena::out_of_work() {
+    // We should try to unset my_pool_state first, in order to keep the arena invariants consistent.
+    // Otherwise, we might have my_pool_state = false and my_mandatory_concurrency = true, which is a broken invariant.
+    bool disable_mandatory = my_mandatory_concurrency.try_clear_if([this] { return !has_enqueued_tasks(); });
+    bool release_workers = my_pool_state.try_clear_if([this] { return !has_tasks(); });
+
+    if (disable_mandatory || release_workers) {
+        int mandatory_delta = disable_mandatory ? -1 : 0;
+        int workers_delta = release_workers ? -(int)my_max_num_workers : 0;
+
+        if (disable_mandatory && is_arena_workerless()) {
+            // We had set workers_delta to 1 when mandatory concurrency was enabled, so revert it now
+            workers_delta = -1;
        }
+        request_workers(mandatory_delta, workers_delta);
    }
-        dump_statistics( *my_slots[0].my_counters, 0 );
#if __TBB_STATISTICS_STDOUT
#if !__TBB_STATISTICS_TOTALS_ONLY
-    printf( "----------------------------------------------\n" );
#endif
-    dump_statistics( total, workers_counters_total );
-    total += *my_slots[0].my_counters;
-    dump_statistics( total, arena_counters_total );
#if !__TBB_STATISTICS_TOTALS_ONLY
-    printf( "==============================================\n" );
#endif
#endif /* __TBB_STATISTICS_STDOUT */
-}
-#endif /* __TBB_STATISTICS */
-
-#if __TBB_TASK_PRIORITY
-// The method inspects a scheduler to determine:
-// 1. if it has tasks that can be retrieved and executed (via the return value);
-// 2. if it has any tasks at all, including those of lower priority (via tasks_present);
-// 3. if it is able to work with enqueued tasks (via dequeuing_possible).
-inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
-    if ( !s
-#if __TBB_TASK_ARENA
-        || s->my_arena != this
-#endif
-        ) return false;
-    dequeuing_possible |= s->worker_outermost_level();
-    if ( s->my_pool_reshuffling_pending ) {
-        // This primary task pool is nonempty and may contain tasks at the current
-        // priority level. Its owner is winnowing lower priority tasks at the moment.
-        tasks_present = true;
+}
+
+void arena::set_top_priority(bool is_top_priority) {
+    my_is_top_priority.store(is_top_priority, std::memory_order_relaxed);
+}
+
+bool arena::is_top_priority() const {
+    return my_is_top_priority.load(std::memory_order_relaxed);
+}
+
+bool arena::try_join() {
+    if (is_joinable()) {
+        my_references += arena::ref_worker;
        return true;
    }
-    if ( s->my_offloaded_tasks ) {
-        tasks_present = true;
-        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
-            // This scheduler's offload area is nonempty and may contain tasks at the
-            // current priority level.
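out_of_work() above clears each state flag only while a predicate confirms there is really nothing to do, so a concurrent producer that re-publishes work keeps the flag set. A plausible sketch of such a try_clear_if() built on std::atomic; TBB's actual flag type differs in detail:

    #include <atomic>

    template <typename Pred>
    bool try_clear_if(std::atomic<bool>& flag, Pred pred) {
        bool observed = flag.load(std::memory_order_acquire);
        while (observed && pred()) {
            // Clear only if nobody re-published work since we loaded the flag;
            // on failure the CAS reloads 'observed' and the predicate is re-checked.
            if (flag.compare_exchange_weak(observed, false,
                                           std::memory_order_release,
                                           std::memory_order_acquire))
                return true;    // this caller turned the flag off
        }
        return false;
    }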
- return true; - } - } return false; } -void arena::orphan_offloaded_tasks(generic_scheduler& s) { - __TBB_ASSERT( s.my_offloaded_tasks, NULL ); - GATHER_STATISTIC( ++s.my_counters.prio_orphanings ); - ++my_abandonment_epoch; - __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL ); - task* orphans; - do { - orphans = const_cast<task*>(my_orphaned_tasks); - *s.my_offloaded_task_list_tail_link = orphans; - } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans ); - s.my_offloaded_tasks = NULL; -#if TBB_USE_ASSERT - s.my_offloaded_task_list_tail_link = NULL; -#endif /* TBB_USE_ASSERT */ +void arena::set_allotment(unsigned allotment) { + if (my_num_workers_allotted.load(std::memory_order_relaxed) != allotment) { + my_num_workers_allotted.store(allotment, std::memory_order_relaxed); + } } -#endif /* __TBB_TASK_PRIORITY */ -bool arena::is_out_of_work() { - // TODO: rework it to return at least a hint about where a task was found; better if the task itself. - for(;;) { - pool_state_t snapshot = my_pool_state; - switch( snapshot ) { - case SNAPSHOT_EMPTY: - return true; - case SNAPSHOT_FULL: { - // Use unique id for "busy" in order to avoid ABA problems. - const pool_state_t busy = pool_state_t(&busy); - // Request permission to take snapshot - if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) { - // Got permission. Take the snapshot. - // NOTE: This is not a lock, as the state can be set to FULL at - // any moment by a thread that spawns/enqueues new task. - size_t n = my_limit; - // Make local copies of volatile parameters. Their change during - // snapshot taking procedure invalidates the attempt, and returns - // this thread into the dispatch loop. -#if __TBB_TASK_PRIORITY - intptr_t top_priority = my_top_priority; - uintptr_t reload_epoch = my_reload_epoch; - // Inspect primary task pools first -#endif /* __TBB_TASK_PRIORITY */ - size_t k; - for( k=0; k<n; ++k ) { - if( my_slots[k].task_pool != EmptyTaskPool && - __TBB_load_relaxed(my_slots[k].head) < __TBB_load_relaxed(my_slots[k].tail) ) - { - // k-th primary task pool is nonempty and does contain tasks. - break; - } - if( my_pool_state!=busy ) - return false; // the work was published - } - __TBB_ASSERT( k <= n, NULL ); - bool work_absent = k == n; -#if __TBB_TASK_PRIORITY - // Variable tasks_present indicates presence of tasks at any priority - // level, while work_absent refers only to the current priority. - bool tasks_present = !work_absent || my_orphaned_tasks; - bool dequeuing_possible = false; - if ( work_absent ) { - // Check for the possibility that recent priority changes - // brought some tasks to the current priority level - - uintptr_t abandonment_epoch = my_abandonment_epoch; - // Master thread's scheduler needs special handling as it - // may be destroyed at any moment (workers' schedulers are - // guaranteed to be alive while at least one thread is in arena). - // Have to exclude concurrency with task group state change propagation too. 
- // TODO: check whether it is still necessary since some pools belong to slots now - my_market->my_arenas_list_mutex.lock(); - generic_scheduler *s = my_slots[0].my_scheduler; - if ( s && as_atomic(my_slots[0].my_scheduler).compare_and_swap(LockedMaster, s) == s ) { //TODO: remove need to lock - __TBB_ASSERT( my_slots[0].my_scheduler == LockedMaster && s != LockedMaster, NULL ); - work_absent = !may_have_tasks( s, tasks_present, dequeuing_possible ); - __TBB_store_with_release( my_slots[0].my_scheduler, s ); - } - my_market->my_arenas_list_mutex.unlock(); - // The following loop is subject to data races. While k-th slot's - // scheduler is being examined, corresponding worker can either - // leave to RML or migrate to another arena. - // But the races are not prevented because all of them are benign. - // First, the code relies on the fact that worker thread's scheduler - // object persists until the whole library is deinitialized. - // Second, in the worst case the races can only cause another - // round of stealing attempts to be undertaken. Introducing complex - // synchronization into this coldest part of the scheduler's control - // flow does not seem to make sense because it both is unlikely to - // ever have any observable performance effect, and will require - // additional synchronization code on the hotter paths. - for( k = 1; work_absent && k < n; ++k ) { - if( my_pool_state!=busy ) - return false; // the work was published - work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible ); - } - // Preclude premature switching arena off because of a race in the previous loop. - work_absent = work_absent - && !__TBB_load_with_acquire(my_orphaned_tasks) - && abandonment_epoch == my_abandonment_epoch; - } -#endif /* __TBB_TASK_PRIORITY */ - // Test and test-and-set. - if( my_pool_state==busy ) { -#if __TBB_TASK_PRIORITY - bool no_fifo_tasks = my_task_stream[top_priority].empty(); - work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks) - && top_priority == my_top_priority && reload_epoch == my_reload_epoch; -#else - bool no_fifo_tasks = my_task_stream.empty(); - work_absent = work_absent && no_fifo_tasks; -#endif /* __TBB_TASK_PRIORITY */ - if( work_absent ) { -#if __TBB_TASK_PRIORITY - if ( top_priority > my_bottom_priority ) { - if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch) - && !my_task_stream[top_priority].empty() ) - { - atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>()); - } - } - else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) { -#endif /* __TBB_TASK_PRIORITY */ - // save current demand value before setting SNAPSHOT_EMPTY, - // to avoid race with advertise_new_work. - int current_demand = (int)my_max_num_workers; - if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) { - // This thread transitioned pool to empty state, and thus is - // responsible for telling RML that there is no other work to do. - my_market->adjust_demand( *this, -current_demand ); -#if __TBB_TASK_PRIORITY - // Check for the presence of enqueued tasks "lost" on some of - // priority levels because updating arena priority and switching - // arena into "populated" (FULL) state happen non-atomically. - // Imposing atomicity would require task::enqueue() to use a lock, - // which is unacceptable. 
-                bool switch_back = false;
-                for ( int p = 0; p < num_priority_levels; ++p ) {
-                    if ( !my_task_stream[p].empty() ) {
-                        switch_back = true;
-                        if ( p < my_bottom_priority || p > my_top_priority )
-                            my_market->update_arena_priority(*this, p);
-                    }
-                }
-                if ( switch_back )
-                    advertise_new_work</*Spawned*/false>();
-#endif /* __TBB_TASK_PRIORITY */
-                    return true;
-                }
-                return false;
-#if __TBB_TASK_PRIORITY
-                }
-#endif /* __TBB_TASK_PRIORITY */
-            }
-            // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it.
-            my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
-            }
-        }
-        return false;
-    }
-    default:
-        // Another thread is taking a snapshot.
-        return false;
-    }
+int arena::update_concurrency(unsigned allotment) {
+    int delta = allotment - my_num_workers_allotted.load(std::memory_order_relaxed);
+    if (delta != 0) {
+        my_num_workers_allotted.store(allotment, std::memory_order_relaxed);
    }
+    return delta;
}
-#if __TBB_COUNT_TASK_NODES
-intptr_t arena::workers_task_node_count() {
-    intptr_t result = 0;
-    for( unsigned i = 1; i < my_num_slots; ++i ) {
-        generic_scheduler* s = my_slots[i].my_scheduler;
-        if( s )
-            result += s->my_task_node_count;
-    }
-    return result;
+std::pair<int, int> arena::update_request(int mandatory_delta, int workers_delta) {
+    __TBB_ASSERT(-1 <= mandatory_delta && mandatory_delta <= 1, nullptr);
+
+    int min_workers_request = 0;
+    int max_workers_request = 0;
+
+    // Calculate min request
+    my_mandatory_requests += mandatory_delta;
+    min_workers_request = my_mandatory_requests > 0 ? 1 : 0;
+
+    // Calculate max request
+    my_total_num_workers_requested += workers_delta;
+    // Clamp worker request into interval [0, my_max_num_workers]
+    max_workers_request = clamp(my_total_num_workers_requested, 0,
+        min_workers_request > 0 && is_arena_workerless() ? 1 : (int)my_max_num_workers);
+
+    return { min_workers_request, max_workers_request };
}
-#endif /* __TBB_COUNT_TASK_NODES */
-void arena::enqueue_task( task& t, intptr_t prio, FastRandom &random )
-{
-#if __TBB_RECYCLE_TO_ENQUEUE
-    __TBB_ASSERT( t.state()==task::allocated || t.state()==task::to_enqueue, "attempt to enqueue task with inappropriate state" );
+thread_control_monitor& arena::get_waiting_threads_monitor() {
+    return my_threading_control->get_waiting_threads_monitor();
+}
+
+void arena::enqueue_task(d1::task& t, d1::task_group_context& ctx, thread_data& td) {
+    task_group_context_impl::bind_to(ctx, &td);
+    task_accessor::context(t) = &ctx;
+    task_accessor::isolation(t) = no_isolation;
+    my_fifo_task_stream.push( &t, random_lane_selector(td.my_random) );
+    advertise_new_work<work_enqueued>();
+}
+
+arena& arena::create(threading_control* control, unsigned num_slots, unsigned num_reserved_slots, unsigned arena_priority_level, d1::constraints constraints) {
+    __TBB_ASSERT(num_slots > 0, NULL);
+    __TBB_ASSERT(num_reserved_slots <= num_slots, NULL);
+    // Add public market reference for an external thread/task_arena (that adds an internal reference in exchange).
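+    // (Creation happens in three steps: allocate the arena storage, register it as a client
+    //  of the threading control, and only then publish the client, so a concurrent thread can
+    //  never observe a partially initialized arena.)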
+ arena& a = arena::allocate_arena(control, num_slots, num_reserved_slots, arena_priority_level); + a.my_tc_client = control->create_client(a); + // We should not publish arena until all fields are initialized + control->publish_client(a.my_tc_client, constraints); + return a; +} + +} // namespace r1 +} // namespace detail +} // namespace tbb + +// Enable task_arena.h +#include "oneapi/tbb/task_arena.h" // task_arena_base + +namespace tbb { +namespace detail { +namespace r1 { + +#if TBB_USE_ASSERT +void assert_arena_priority_valid( tbb::task_arena::priority a_priority ) { + bool is_arena_priority_correct = + a_priority == tbb::task_arena::priority::high || + a_priority == tbb::task_arena::priority::normal || + a_priority == tbb::task_arena::priority::low; + __TBB_ASSERT( is_arena_priority_correct, + "Task arena priority should be equal to one of the predefined values." ); +} #else - __TBB_ASSERT( t.state()==task::allocated, "attempt to enqueue task that is not in 'allocated' state" ); +void assert_arena_priority_valid( tbb::task_arena::priority ) {} #endif - t.prefix().state = task::ready; - t.prefix().extra_state |= es_task_enqueued; // enqueued task marker -#if TBB_USE_ASSERT - if( task* parent = t.parent() ) { - internal::reference_count ref_count = parent->prefix().ref_count; - __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" ); - __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" ); - parent->prefix().extra_state |= es_ref_count_active; - } - __TBB_ASSERT(t.prefix().affinity==affinity_id(0), "affinity is ignored for enqueued tasks"); -#endif /* TBB_USE_ASSERT */ +unsigned arena_priority_level( tbb::task_arena::priority a_priority ) { + assert_arena_priority_valid( a_priority ); + return d1::num_priority_levels - unsigned(int(a_priority) / d1::priority_stride); +} -#if __TBB_TASK_PRIORITY - intptr_t p = prio ? 
normalize_priority(priority_t(prio)) : normalized_normal_priority; - assert_priority_valid(p); - task_stream &ts = my_task_stream[p]; -#else /* !__TBB_TASK_PRIORITY */ - __TBB_ASSERT_EX(prio == 0, "the library is not configured to respect the task priority"); - task_stream &ts = my_task_stream; -#endif /* !__TBB_TASK_PRIORITY */ - ITT_NOTIFY(sync_releasing, &ts); - ts.push( &t, random ); -#if __TBB_TASK_PRIORITY - if ( p != my_top_priority ) - my_market->update_arena_priority( *this, p ); -#endif /* __TBB_TASK_PRIORITY */ - advertise_new_work< /*Spawned=*/ false >(); -#if __TBB_TASK_PRIORITY - if ( p != my_top_priority ) - my_market->update_arena_priority( *this, p ); -#endif /* __TBB_TASK_PRIORITY */ -} - -#if __TBB_TASK_ARENA -struct nested_arena_context : no_copy { - generic_scheduler &my_scheduler; - scheduler_state const my_orig_state; - void *my_orig_ptr; - bool my_adjusting; - nested_arena_context(generic_scheduler *s, arena* a, bool needs_adjusting, bool as_worker = false) - : my_scheduler(*s), my_orig_state(*s), my_orig_ptr(NULL), my_adjusting(needs_adjusting) { - s->nested_arena_entry(a, *this, as_worker); - } - ~nested_arena_context() { - my_scheduler.nested_arena_exit(*this); - (scheduler_state&)my_scheduler = my_orig_state; // restore arena settings - } +tbb::task_arena::priority arena_priority( unsigned priority_level ) { + auto priority = tbb::task_arena::priority( + (d1::num_priority_levels - priority_level) * d1::priority_stride + ); + assert_arena_priority_valid( priority ); + return priority; +} + +struct task_arena_impl { + static void initialize(d1::task_arena_base&); + static void terminate(d1::task_arena_base&); + static bool attach(d1::task_arena_base&); + static void execute(d1::task_arena_base&, d1::delegate_base&); + static void wait(d1::task_arena_base&); + static int max_concurrency(const d1::task_arena_base*); + static void enqueue(d1::task&, d1::task_group_context*, d1::task_arena_base*); + static d1::slot_id execution_slot(const d1::task_arena_base&); }; -void generic_scheduler::nested_arena_entry(arena* a, nested_arena_context& c, bool as_worker) { - if( a == my_arena ) { -#if __TBB_TASK_GROUP_CONTEXT - c.my_orig_ptr = my_innermost_running_task = - new(&allocate_task(sizeof(empty_task), NULL, a->my_default_ctx)) empty_task; -#endif - return; - } - __TBB_ASSERT( is_alive(a->my_guard), NULL ); - // overwrite arena settings -#if __TBB_TASK_PRIORITY - if ( my_offloaded_tasks ) - my_arena->orphan_offloaded_tasks( *this ); - my_ref_top_priority = &a->my_top_priority; - my_ref_reload_epoch = &a->my_reload_epoch; - my_local_reload_epoch = a->my_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ - my_arena = a; - my_arena_index = 0; - my_arena_slot = my_arena->my_slots + my_arena_index; - my_inbox.detach(); // TODO: mailboxes were not designed for switching, add copy constructor? - attach_mailbox( affinity_id(my_arena_index+1) ); - my_innermost_running_task = my_dispatching_task = as_worker? NULL : my_dummy_task; -#if __TBB_TASK_GROUP_CONTEXT - // save dummy's context and replace it by arena's context - c.my_orig_ptr = my_dummy_task->prefix().context; - my_dummy_task->prefix().context = a->my_default_ctx; -#endif -#if __TBB_ARENA_OBSERVER - my_last_local_observer = 0; // TODO: try optimize number of calls - my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false ); -#endif - // TODO? 
ITT_NOTIFY(sync_acquired, a->my_slots + index); - // TODO: it requires market to have P workers (not P-1) - // TODO: it still allows temporary oversubscription by 1 worker (due to my_max_num_workers) - // TODO: a preempted worker should be excluded from assignment to other arenas e.g. my_slack-- - if( c.my_adjusting ) my_arena->my_market->adjust_demand(*my_arena, -1); +void __TBB_EXPORTED_FUNC initialize(d1::task_arena_base& ta) { + task_arena_impl::initialize(ta); +} +void __TBB_EXPORTED_FUNC terminate(d1::task_arena_base& ta) { + task_arena_impl::terminate(ta); +} +bool __TBB_EXPORTED_FUNC attach(d1::task_arena_base& ta) { + return task_arena_impl::attach(ta); +} +void __TBB_EXPORTED_FUNC execute(d1::task_arena_base& ta, d1::delegate_base& d) { + task_arena_impl::execute(ta, d); +} +void __TBB_EXPORTED_FUNC wait(d1::task_arena_base& ta) { + task_arena_impl::wait(ta); } -void generic_scheduler::nested_arena_exit(nested_arena_context& c) { - if( my_arena == c.my_orig_state.my_arena ) { -#if __TBB_TASK_GROUP_CONTEXT - free_task<small_local_task>(*(task*)c.my_orig_ptr); // TODO: use scoped_task instead? -#endif - return; - } - if( c.my_adjusting ) my_arena->my_market->adjust_demand(*my_arena, 1); -#if __TBB_ARENA_OBSERVER - my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_TASK_PRIORITY - if ( my_offloaded_tasks ) - my_arena->orphan_offloaded_tasks( *this ); - my_local_reload_epoch = *c.my_orig_state.my_ref_reload_epoch; - while ( as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap( NULL, this) != this ) - __TBB_Yield(); // TODO: task priority can use master slot for locking while accessing the scheduler -#else - // Free the master slot. TODO: support multiple masters - __TBB_store_with_release(my_arena->my_slots[0].my_scheduler, (generic_scheduler*)NULL); -#endif - my_arena->my_exit_monitors.notify_all_relaxed(); // TODO: fix concurrent monitor to use notify_one (test MultipleMastersPart4 fails) -#if __TBB_TASK_GROUP_CONTEXT - // restore context of dummy task - my_dummy_task->prefix().context = (task_group_context*)c.my_orig_ptr; -#endif +int __TBB_EXPORTED_FUNC max_concurrency(const d1::task_arena_base* ta) { + return task_arena_impl::max_concurrency(ta); } -void generic_scheduler::wait_until_empty() { - my_dummy_task->prefix().ref_count++; // prevents exit from local_wait_for_all when local work is done enforcing the stealing - while( my_arena->my_pool_state != arena::SNAPSHOT_EMPTY ) - local_wait_for_all(*my_dummy_task, NULL); - my_dummy_task->prefix().ref_count--; +void __TBB_EXPORTED_FUNC enqueue(d1::task& t, d1::task_arena_base* ta) { + task_arena_impl::enqueue(t, nullptr, ta); } -#endif /* __TBB_TASK_ARENA */ +void __TBB_EXPORTED_FUNC enqueue(d1::task& t, d1::task_group_context& ctx, d1::task_arena_base* ta) { + task_arena_impl::enqueue(t, &ctx, ta); +} -} // namespace internal -} // namespace tbb +d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::task_arena_base& arena) { + return task_arena_impl::execution_slot(arena); +} -#if __TBB_TASK_ARENA -#include "scheduler_utility.h" +void task_arena_impl::initialize(d1::task_arena_base& ta) { + // Enforce global market initialization to properly initialize soft limit + (void)governor::get_thread_data(); + d1::constraints arena_constraints; + +#if __TBB_ARENA_BINDING + arena_constraints = d1::constraints{} + .set_core_type(ta.core_type()) + .set_max_threads_per_core(ta.max_threads_per_core()) + .set_numa_id(ta.my_numa_id); +#endif 
/*__TBB_ARENA_BINDING*/ + + if (ta.my_max_concurrency < 1) { +#if __TBB_ARENA_BINDING + ta.my_max_concurrency = (int)default_concurrency(arena_constraints); +#else /*!__TBB_ARENA_BINDING*/ + ta.my_max_concurrency = (int)governor::default_num_threads(); +#endif /*!__TBB_ARENA_BINDING*/ + } -namespace tbb { -namespace interface7 { -namespace internal { - -void task_arena_base::internal_initialize( ) { - __TBB_ASSERT( my_master_slots <= 1, "Number of slots reserved for master can be only [0,1]"); - if( my_master_slots > 1 ) my_master_slots = 1; // TODO: make more masters - if( my_max_concurrency < 1 ) - my_max_concurrency = (int)governor::default_num_threads(); - // TODO: reimplement in an efficient way. We need a scheduler instance in this thread - // but the scheduler is only required for task allocation and fifo random seeds until - // master wants to join the arena. (Idea - to create a restricted specialization) - // It is excessive to create an implicit arena for master here anyway. But scheduler - // instance implies master thread to be always connected with arena. - // browse recursively into init_scheduler and arena::process for details - if( !governor::local_scheduler_if_initialized() ) - governor::init_scheduler( (unsigned)my_max_concurrency - my_master_slots + 1/*TODO: address in market instead*/, 0, true ); - // TODO: we will need to introduce a mechanism for global settings, including stack size, used by all arenas - arena* new_arena = &market::create_arena( my_max_concurrency - my_master_slots/*it's +1 slot for num_masters=0*/, ThreadStackSize ); - if(as_atomic(my_arena).compare_and_swap(new_arena, NULL) != NULL) { // there is a race possible on my_initialized - __TBB_ASSERT(my_arena, NULL); // other thread was the first - new_arena->on_thread_leaving</*is_master*/true>(); // deallocate new arena - } -#if __TBB_TASK_GROUP_CONTEXT - else { - my_context = new_arena->my_default_ctx; - my_context->my_version_and_traits |= my_version_and_traits & exact_exception_flag; +#if __TBB_CPUBIND_PRESENT + numa_binding_observer* observer = construct_binding_observer( + static_cast<d1::task_arena*>(&ta), arena::num_arena_slots(ta.my_max_concurrency, ta.my_num_reserved_slots), + ta.my_numa_id, ta.core_type(), ta.max_threads_per_core()); + if (observer) { + // TODO: Consider lazy initialization for internal arena so + // the direct calls to observer might be omitted until actual initialization. 
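+        // (Presumably this entry notification applies the NUMA binding to the initializing
+        //  thread up front; the matching on_scheduler_exit() and the observe(true) activation
+        //  are issued once the arena has been created below.)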
+        observer->on_scheduler_entry(true);
+    }
-#endif
+#endif /*__TBB_CPUBIND_PRESENT*/
+
+    __TBB_ASSERT(ta.my_arena.load(std::memory_order_relaxed) == nullptr, "Arena already initialized");
+    unsigned priority_level = arena_priority_level(ta.my_priority);
+    threading_control* thr_control = threading_control::register_public_reference();
+    arena& a = arena::create(thr_control, unsigned(ta.my_max_concurrency), ta.my_num_reserved_slots, priority_level, arena_constraints);
+
+    ta.my_arena.store(&a, std::memory_order_release);
+#if __TBB_CPUBIND_PRESENT
+    a.my_numa_binding_observer = observer;
+    if (observer) {
+        observer->on_scheduler_exit(true);
+        observer->observe(true);
+    }
+#endif /*__TBB_CPUBIND_PRESENT*/
}
-void task_arena_base::internal_terminate( ) {
-    if( my_arena ) {// task_arena was initialized
-#if __TBB_STATISTICS_EARLY_DUMP
-        GATHER_STATISTIC( my_arena->dump_arena_statistics() );
-#endif
-        my_arena->on_thread_leaving</*is_master*/true>();
-        my_arena = 0;
-#if __TBB_TASK_GROUP_CONTEXT
-        my_context = 0;
-#endif
+void task_arena_impl::terminate(d1::task_arena_base& ta) {
+    arena* a = ta.my_arena.load(std::memory_order_relaxed);
+    assert_pointer_valid(a);
+    threading_control::unregister_public_reference(/*blocking_terminate=*/false);
+    a->on_thread_leaving(arena::ref_external);
+    ta.my_arena.store(nullptr, std::memory_order_relaxed);
+}
+
+bool task_arena_impl::attach(d1::task_arena_base& ta) {
+    __TBB_ASSERT(!ta.my_arena.load(std::memory_order_relaxed), nullptr);
+    thread_data* td = governor::get_thread_data_if_initialized();
+    if( td && td->my_arena ) {
+        arena* a = td->my_arena;
+        // There is an active arena to attach to.
+        // It is still in use by this thread, so it won't be destroyed right away.
+        __TBB_ASSERT(a->my_references > 0, nullptr);
+        a->my_references += arena::ref_external;
+        ta.my_num_reserved_slots = a->my_num_reserved_slots;
+        ta.my_priority = arena_priority(a->my_priority_level);
+        ta.my_max_concurrency = ta.my_num_reserved_slots + a->my_max_num_workers;
+        __TBB_ASSERT(arena::num_arena_slots(ta.my_max_concurrency, ta.my_num_reserved_slots) == a->my_num_slots, nullptr);
+        ta.my_arena.store(a, std::memory_order_release);
+        // Increases threading_control's ref count on behalf of the task_arena.
+        threading_control::register_public_reference();
+        return true;
    }
+    return false;
}
-void task_arena_base::internal_enqueue( task& t, intptr_t prio ) const {
-    __TBB_ASSERT(my_arena, NULL);
-    generic_scheduler* s = governor::local_scheduler_if_initialized();
-    __TBB_ASSERT(s, "Scheduler is not initialized"); // we allocated a task so can expect the scheduler
-#if __TBB_TASK_GROUP_CONTEXT
-    __TBB_ASSERT(my_arena->my_default_ctx == t.prefix().context, NULL);
-    __TBB_ASSERT(!my_arena->my_default_ctx->is_group_execution_cancelled(), // TODO: any better idea?
-        "The task will not be executed because default task_group_context of task_arena is cancelled.
Has previously enqueued task thrown an exception?"); -#endif - my_arena->enqueue_task( t, prio, s->my_random ); -} - -class delegated_task : public task { - internal::delegate_base & my_delegate; - concurrent_monitor & my_monitor; - task * my_root; - /*override*/ task* execute() { - generic_scheduler& s = *(generic_scheduler*)prefix().owner; - __TBB_ASSERT(s.worker_outermost_level() || s.master_outermost_level(), "expected to be enqueued and received on the outermost level"); - // but this task can mimics outermost level, detect it - if( s.master_outermost_level() && s.my_dummy_task->state() == task::executing ) { -#if TBB_USE_EXCEPTIONS - // RTTI is available, check whether the cast is valid - __TBB_ASSERT(dynamic_cast<delegated_task*>(s.my_dummy_task), 0); -#endif - set_ref_count(1); // required by the semantics of recycle_to_enqueue() - recycle_to_enqueue(); - return NULL; - } - struct outermost_context : internal::no_copy { - delegated_task * t; - generic_scheduler & s; - task * orig_dummy; - task_group_context * orig_ctx; - outermost_context(delegated_task *_t, generic_scheduler &_s) : t(_t), s(_s) { - orig_dummy = s.my_dummy_task; -#if __TBB_TASK_GROUP_CONTEXT - orig_ctx = t->prefix().context; - t->prefix().context = s.my_arena->my_default_ctx; -#endif - s.my_dummy_task = t; // mimics outermost master - __TBB_ASSERT(s.my_innermost_running_task == t, NULL); +void task_arena_impl::enqueue(d1::task& t, d1::task_group_context* c, d1::task_arena_base* ta) { + thread_data* td = governor::get_thread_data(); // thread data is only needed for FastRandom instance + assert_pointer_valid(td, "thread_data pointer should not be null"); + arena* a = ta ? + ta->my_arena.load(std::memory_order_relaxed) + : td->my_arena + ; + assert_pointer_valid(a, "arena pointer should not be null"); + auto* ctx = c ? c : a->my_default_ctx; + assert_pointer_valid(ctx, "context pointer should not be null"); + // Is there a better place for checking the state of ctx? + __TBB_ASSERT(!a->my_default_ctx->is_group_execution_cancelled(), + "The task will not be executed because its task_group_context is cancelled."); + a->enqueue_task(t, *ctx, *td); +} + +d1::slot_id task_arena_impl::execution_slot(const d1::task_arena_base& ta) { + thread_data* td = governor::get_thread_data_if_initialized(); + if (td && (td->is_attached_to(ta.my_arena.load(std::memory_order_relaxed)))) { + return td->my_arena_index; + } + return d1::slot_id(-1); +} + +class nested_arena_context : no_copy { +public: + nested_arena_context(thread_data& td, arena& nested_arena, std::size_t slot_index) + : m_orig_execute_data_ext(td.my_task_dispatcher->m_execute_data_ext) + { + if (td.my_arena != &nested_arena) { + m_orig_arena = td.my_arena; + m_orig_slot_index = td.my_arena_index; + m_orig_last_observer = td.my_last_observer; + m_orig_is_thread_registered = td.my_is_registered; + + td.detach_task_dispatcher(); + td.attach_arena(nested_arena, slot_index); + td.my_is_registered = false; + if (td.my_inbox.is_idle_state(true)) + td.my_inbox.set_is_idle(false); + task_dispatcher& task_disp = td.my_arena_slot->default_task_dispatcher(); + td.enter_task_dispatcher(task_disp, m_orig_execute_data_ext.task_disp->m_stealing_threshold); + + // If the calling thread occupies the slots out of external thread reserve we need to notify the + // market that this arena requires one worker less. 
+            if (td.my_arena_index >= td.my_arena->my_num_reserved_slots) {
+                td.my_arena->request_workers(/* mandatory_delta = */ 0, /* workers_delta = */ -1);
            }
-        ~outermost_context() {
-            s.my_dummy_task = orig_dummy;
-#if TBB_USE_EXCEPTIONS
-            // restore context for sake of registering potential exception
-            t->prefix().context = orig_ctx;
-#endif
+
+            td.my_last_observer = nullptr;
+            // The task_arena::execute method considers each calling thread as an external thread.
+            td.my_arena->my_observers.notify_entry_observers(td.my_last_observer, /* worker*/false);
+        }
+
+        m_task_dispatcher = td.my_task_dispatcher;
+        m_orig_fifo_tasks_allowed = m_task_dispatcher->allow_fifo_task(true);
+        m_orig_critical_task_allowed = m_task_dispatcher->m_properties.critical_task_allowed;
+        m_task_dispatcher->m_properties.critical_task_allowed = true;
+
+        execution_data_ext& ed_ext = td.my_task_dispatcher->m_execute_data_ext;
+        ed_ext.context = td.my_arena->my_default_ctx;
+        ed_ext.original_slot = td.my_arena_index;
+        ed_ext.affinity_slot = d1::no_slot;
+        ed_ext.task_disp = td.my_task_dispatcher;
+        ed_ext.isolation = no_isolation;
+
+        __TBB_ASSERT(td.my_arena_slot, nullptr);
+        __TBB_ASSERT(td.my_arena_slot->is_occupied(), nullptr);
+        __TBB_ASSERT(td.my_task_dispatcher, nullptr);
+    }
+    ~nested_arena_context() {
+        thread_data& td = *m_task_dispatcher->m_thread_data;
+        __TBB_ASSERT(governor::is_thread_data_set(&td), nullptr);
+        m_task_dispatcher->allow_fifo_task(m_orig_fifo_tasks_allowed);
+        m_task_dispatcher->m_properties.critical_task_allowed = m_orig_critical_task_allowed;
+        if (m_orig_arena) {
+            td.my_arena->my_observers.notify_exit_observers(td.my_last_observer, /*worker*/ false);
+            td.my_last_observer = m_orig_last_observer;
+
+            // Notify the market that this thread is releasing one slot
+            // that can now be used by a worker thread.
+            if (td.my_arena_index >= td.my_arena->my_num_reserved_slots) {
+                td.my_arena->request_workers(/* mandatory_delta = */ 0, /* workers_delta = */ 1);
            }
-        } scope(this, s);
-        my_delegate();
-        return NULL;
+
+            td.leave_task_dispatcher();
+            td.my_arena_slot->release();
+            td.my_arena->my_exit_monitors.notify_one(); // do not relax!
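+
+            // Restore the thread's original state saved by the constructor: its registration
+            // flag, its arena and slot index, and the task dispatcher it was running with.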
+            td.my_is_registered = m_orig_is_thread_registered;
+            td.attach_arena(*m_orig_arena, m_orig_slot_index);
+            td.attach_task_dispatcher(*m_orig_execute_data_ext.task_disp);
+            __TBB_ASSERT(td.my_inbox.is_idle_state(false), nullptr);
+        }
+        td.my_task_dispatcher->m_execute_data_ext = m_orig_execute_data_ext;
+    }
+
+private:
+    execution_data_ext m_orig_execute_data_ext{};
+    arena* m_orig_arena{ nullptr };
+    observer_proxy* m_orig_last_observer{ nullptr };
+    task_dispatcher* m_task_dispatcher{ nullptr };
+    unsigned m_orig_slot_index{};
+    bool m_orig_fifo_tasks_allowed{};
+    bool m_orig_critical_task_allowed{};
+    bool m_orig_is_thread_registered{};
+};
+
+class delegated_task : public d1::task {
+    d1::delegate_base& m_delegate;
+    concurrent_monitor& m_monitor;
+    d1::wait_context& m_wait_ctx;
+    std::atomic<bool> m_completed;
+    d1::task* execute(d1::execution_data& ed) override {
+        const execution_data_ext& ed_ext = static_cast<const execution_data_ext&>(ed);
+        execution_data_ext orig_execute_data_ext = ed_ext.task_disp->m_execute_data_ext;
+        __TBB_ASSERT(&ed_ext.task_disp->m_execute_data_ext == &ed,
+            "The execute data shall point to the current task dispatcher execute data");
+        __TBB_ASSERT(ed_ext.task_disp->m_execute_data_ext.isolation == no_isolation, nullptr);
+
+        ed_ext.task_disp->m_execute_data_ext.context = ed_ext.task_disp->get_thread_data().my_arena->my_default_ctx;
+        bool fifo_task_allowed = ed_ext.task_disp->allow_fifo_task(true);
+        try_call([&] {
+            m_delegate();
+        }).on_completion([&] {
+            ed_ext.task_disp->m_execute_data_ext = orig_execute_data_ext;
+            ed_ext.task_disp->allow_fifo_task(fifo_task_allowed);
+        });
+
+        finalize();
+        return nullptr;
    }
-    ~delegated_task() {
-        // potential exception was already registered. It must happen before the notification
-        __TBB_ASSERT(my_root->ref_count()==2, NULL);
-        __TBB_store_with_release(my_root->prefix().ref_count, 1); // must precede the wakeup
-        my_monitor.notify_relaxed(*this);
+    d1::task* cancel(d1::execution_data&) override {
+        finalize();
+        return nullptr;
+    }
+    void finalize() {
+        m_wait_ctx.release(); // must precede the wakeup
+        m_monitor.notify([this] (std::uintptr_t ctx) {
+            return ctx == std::uintptr_t(&m_delegate);
+        }); // do not relax, it needs a fence!
+        m_completed.store(true, std::memory_order_release);
    }
public:
-    delegated_task( internal::delegate_base & d, concurrent_monitor & s, task * t )
-        : my_delegate(d), my_monitor(s), my_root(t) {}
-    // predicate for concurrent_monitor notification
-    bool operator()(uintptr_t ctx) const { return (void*)ctx == (void*)&my_delegate; }
+    delegated_task(d1::delegate_base& d, concurrent_monitor& s, d1::wait_context& wo)
+        : m_delegate(d), m_monitor(s), m_wait_ctx(wo), m_completed{ false } {}
+    ~delegated_task() override {
+        // The destructor can run before m_monitor has been notified, because the waiting
+        // thread may be released as soon as m_wait_ctx.release() is called.
+        // To close that race we wait for the m_completed signal.
+        spin_wait_until_eq(m_completed, true);
+    }
};
-void task_arena_base::internal_execute( internal::delegate_base& d) const {
-    __TBB_ASSERT(my_arena, NULL);
-    generic_scheduler* s = governor::local_scheduler();
-    __TBB_ASSERT(s, "Scheduler is not initialized");
-    // TODO: is it safe to assign slot to a scheduler which is not yet switched?
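+// Sketch of how delegated_task is used by task_arena_impl::execute() below: if the calling
+// thread cannot occupy a slot in the target arena, the delegate is wrapped in a delegated_task
+// and enqueued, and the caller blocks on my_exit_monitors until a slot frees up or the task
+// completes on another thread. A hypothetical caller's view:
+//
+//     tbb::task_arena ta(2);
+//     ta.execute([]{ /* may run here, or be enqueued as a delegated_task */ });
+//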
- // TODO TEMP: one master, make more masters - if( s->my_arena == my_arena || (!__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) - && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL ) == NULL) ) { - cpu_ctl_env_helper cpu_ctl_helper; - cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(my_context) ); -#if TBB_USE_EXCEPTIONS - try { -#endif - //TODO: replace dummy tasks for workers as well to avoid using of the_dummy_context - nested_arena_context scope(s, my_arena, !my_master_slots); - d(); -#if TBB_USE_EXCEPTIONS - } catch(...) { - cpu_ctl_helper.restore_default(); // TODO: is it needed on Windows? - if( my_version_and_traits & exact_exception_flag ) throw; - else { - task_group_context exception_container( task_group_context::isolated, - task_group_context::default_traits & ~task_group_context::exact_exception ); - exception_container.register_pending_exception(); - __TBB_ASSERT(exception_container.my_exception, NULL); - exception_container.my_exception->throw_self(); - } - } -#endif - } else { - concurrent_monitor::thread_context waiter; -#if __TBB_TASK_GROUP_CONTEXT - task_group_context exec_context( task_group_context::isolated, my_version_and_traits & exact_exception_flag ); -#if __TBB_FP_CONTEXT - exec_context.copy_fp_settings( *my_context ); -#endif -#endif - auto_empty_task root(__TBB_CONTEXT_ARG(s, &exec_context)); - root.prefix().ref_count = 2; - my_arena->enqueue_task( *new( task::allocate_root(__TBB_CONTEXT_ARG1(exec_context)) ) - delegated_task(d, my_arena->my_exit_monitors, &root), - 0, s->my_random ); // TODO: priority? - do { - my_arena->my_exit_monitors.prepare_wait(waiter, (uintptr_t)&d); - if( __TBB_load_with_acquire(root.prefix().ref_count) < 2 ) { - my_arena->my_exit_monitors.cancel_wait(waiter); - break; +void task_arena_impl::execute(d1::task_arena_base& ta, d1::delegate_base& d) { + arena* a = ta.my_arena.load(std::memory_order_relaxed); + __TBB_ASSERT(a != nullptr, nullptr); + thread_data* td = governor::get_thread_data(); + + bool same_arena = td->my_arena == a; + std::size_t index1 = td->my_arena_index; + if (!same_arena) { + index1 = a->occupy_free_slot</*as_worker */false>(*td); + if (index1 == arena::out_of_arena) { + concurrent_monitor::thread_context waiter((std::uintptr_t)&d); + d1::wait_context wo(1); + d1::task_group_context exec_context(d1::task_group_context::isolated); + task_group_context_impl::copy_fp_settings(exec_context, *a->my_default_ctx); + + delegated_task dt(d, a->my_exit_monitors, wo); + a->enqueue_task( dt, exec_context, *td); + size_t index2 = arena::out_of_arena; + do { + a->my_exit_monitors.prepare_wait(waiter); + if (!wo.continue_execution()) { + a->my_exit_monitors.cancel_wait(waiter); + break; + } + index2 = a->occupy_free_slot</*as_worker*/false>(*td); + if (index2 != arena::out_of_arena) { + a->my_exit_monitors.cancel_wait(waiter); + nested_arena_context scope(*td, *a, index2 ); + r1::wait(wo, exec_context); + __TBB_ASSERT(!exec_context.my_exception.load(std::memory_order_relaxed), nullptr); // exception can be thrown above, not deferred + break; + } + a->my_exit_monitors.commit_wait(waiter); + } while (wo.continue_execution()); + if (index2 == arena::out_of_arena) { + // notify a waiting thread even if this thread did not enter arena, + // in case it was woken by a leaving thread but did not need to enter + a->my_exit_monitors.notify_one(); // do not relax! } - else if( !__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) // TODO: refactor into a function? 
- && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL ) == NULL ) { - my_arena->my_exit_monitors.cancel_wait(waiter); - nested_arena_context scope(s, my_arena, !my_master_slots); - s->local_wait_for_all(root, NULL); -#if TBB_USE_EXCEPTIONS - __TBB_ASSERT( !exec_context.my_exception, NULL ); // exception can be thrown above, not deferred -#endif - __TBB_ASSERT( root.prefix().ref_count == 0, NULL ); - break; - } else { - my_arena->my_exit_monitors.commit_wait(waiter); + // process possible exception + auto exception = exec_context.my_exception.load(std::memory_order_acquire); + if (exception) { + __TBB_ASSERT(exec_context.is_group_execution_cancelled(), "The task group context with an exception should be canceled."); + exception->throw_self(); } - } while( __TBB_load_with_acquire(root.prefix().ref_count) == 2 ); -#if TBB_USE_EXCEPTIONS - // process possible exception - if( task_group_context::exception_container_type *pe = exec_context.my_exception ) - pe->throw_self(); + __TBB_ASSERT(governor::is_thread_data_set(td), nullptr); + return; + } // if (index1 == arena::out_of_arena) + } // if (!same_arena) + + context_guard_helper</*report_tasks=*/false> context_guard; + context_guard.set_ctx(a->my_default_ctx); + nested_arena_context scope(*td, *a, index1); +#if _WIN64 + try { #endif + d(); + __TBB_ASSERT(same_arena || governor::is_thread_data_set(td), nullptr); +#if _WIN64 + } catch (...) { + context_guard.restore_default(); + throw; } +#endif } -// this wait task is a temporary approach to wait for arena emptiness for masters without slots -// TODO: it will be rather reworked for one source of notification from is_out_of_work -class wait_task : public task { - binary_semaphore & my_signal; - /*override*/ task* execute() { - generic_scheduler* s = governor::local_scheduler_if_initialized(); - __TBB_ASSERT( s, NULL ); - if( s->my_arena_index && s->worker_outermost_level() ) {// on outermost level of workers only - s->local_wait_for_all( *s->my_dummy_task, NULL ); // run remaining tasks - } else s->my_arena->is_out_of_work(); // avoids starvation of internal_wait: issuing this task makes arena full - my_signal.V(); - return NULL; +void task_arena_impl::wait(d1::task_arena_base& ta) { + arena* a = ta.my_arena.load(std::memory_order_relaxed); + __TBB_ASSERT(a != nullptr, nullptr); + thread_data* td = governor::get_thread_data(); + __TBB_ASSERT_EX(td, "Scheduler is not initialized"); + __TBB_ASSERT(td->my_arena != a || td->my_arena_index == 0, "internal_wait is not supported within a worker context" ); + if (a->my_max_num_workers != 0) { + while (a->num_workers_active() || !a->is_empty()) { + yield(); + } } -public: - wait_task ( binary_semaphore & sema ) : my_signal(sema) {} -}; +} -void task_arena_base::internal_wait() const { - __TBB_ASSERT(my_arena, NULL); - generic_scheduler* s = governor::local_scheduler(); - __TBB_ASSERT(s, "Scheduler is not initialized"); - __TBB_ASSERT(s->my_arena != my_arena || s->my_arena_index == 0, "task_arena::wait_until_empty() is not supported within a worker context" ); - if( s->my_arena == my_arena ) { - //unsupported, but try do something for outermost master - __TBB_ASSERT(s->master_outermost_level(), "unsupported"); - if( !s->my_arena_index ) - while( my_arena->num_workers_active() ) - s->wait_until_empty(); - } else for(;;) { - while( my_arena->my_pool_state != arena::SNAPSHOT_EMPTY ) { - if( !__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) // TODO TEMP: one master, make more masters - && 
as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL) == NULL ) { - nested_arena_context a(s, my_arena, !my_master_slots, true); - s->wait_until_empty(); - } else { - binary_semaphore waiter; // TODO: replace by a single event notification from is_out_of_work - internal_enqueue( *new( task::allocate_root(__TBB_CONTEXT_ARG1(*my_context)) ) wait_task(waiter), 0 ); // TODO: priority? - waiter.P(); // TODO: concurrent_monitor - } +int task_arena_impl::max_concurrency(const d1::task_arena_base *ta) { + arena* a = nullptr; + if( ta ) // for special cases of ta->max_concurrency() + a = ta->my_arena.load(std::memory_order_relaxed); + else if( thread_data* td = governor::get_thread_data_if_initialized() ) + a = td->my_arena; // the current arena if any + + if( a ) { // Get parameters from the arena + __TBB_ASSERT( !ta || ta->my_max_concurrency==1, nullptr); + int mandatory_worker = 0; + if (a->is_arena_workerless() && a->my_num_reserved_slots == 1) { + mandatory_worker = a->my_mandatory_concurrency.test() ? 1 : 0; } - if( !my_arena->num_workers_active() && !my_arena->my_slots[0].my_scheduler) // no activity - break; // spin until workers active but avoid spinning in a worker - __TBB_Yield(); // wait until workers and master leave + return a->my_num_reserved_slots + a->my_max_num_workers + mandatory_worker; } -} -/*static*/ int task_arena_base::internal_current_slot() { - generic_scheduler* s = governor::local_scheduler_if_initialized(); - return s? int(s->my_arena_index) : -1; + if (ta && ta->my_max_concurrency == 1) { + return 1; + } + +#if __TBB_ARENA_BINDING + if (ta) { + d1::constraints arena_constraints = d1::constraints{} + .set_numa_id(ta->my_numa_id) + .set_core_type(ta->core_type()) + .set_max_threads_per_core(ta->max_threads_per_core()); + return (int)default_concurrency(arena_constraints); + } +#endif /*!__TBB_ARENA_BINDING*/ + + __TBB_ASSERT(!ta || ta->my_max_concurrency==d1::task_arena_base::automatic, nullptr); + return int(governor::default_num_threads()); } +void isolate_within_arena(d1::delegate_base& d, std::intptr_t isolation) { + // TODO: Decide what to do if the scheduler is not initialized. Is there a use case for it? + thread_data* tls = governor::get_thread_data(); + assert_pointers_valid(tls, tls->my_task_dispatcher); + task_dispatcher* dispatcher = tls->my_task_dispatcher; + isolation_type previous_isolation = dispatcher->m_execute_data_ext.isolation; + try_call([&] { + // We temporarily change the isolation tag of the currently running task. It will be restored in the destructor of the guard. + isolation_type current_isolation = isolation ? isolation : reinterpret_cast<isolation_type>(&d); + // Save the current isolation value and set new one + previous_isolation = dispatcher->set_isolation(current_isolation); + // Isolation within this callable + d(); + }).on_completion([&] { + __TBB_ASSERT(governor::get_thread_data()->my_task_dispatcher == dispatcher, nullptr); + dispatcher->set_isolation(previous_isolation); + }); +} -} // tbb::interfaceX::internal -} // tbb::interfaceX -} // tbb -#endif /* __TBB_TASK_ARENA */ +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/arena.h b/src/tbb/src/tbb/arena.h index 72e45c74c..1e95f117b 100644 --- a/src/tbb/src/tbb/arena.h +++ b/src/tbb/src/tbb/arena.h @@ -1,396 +1,512 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_arena_H #define _TBB_arena_H -#include "tbb/tbb_stddef.h" -#include "tbb/atomic.h" +#include <atomic> +#include <cstring> -#include "tbb/tbb_machine.h" +#include "oneapi/tbb/detail/_task.h" +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/spin_mutex.h" #include "scheduler_common.h" #include "intrusive_list.h" #include "task_stream.h" -#include "../rml/include/rml_tbb.h" +#include "arena_slot.h" +#include "rml_tbb.h" #include "mailbox.h" -#include "observer_proxy.h" -#include "market.h" #include "governor.h" -#if __TBB_TASK_ARENA #include "concurrent_monitor.h" -#endif +#include "observer_proxy.h" +#include "thread_control_monitor.h" +#include "threading_control_client.h" namespace tbb { +namespace detail { +namespace r1 { +class task_dispatcher; class task_group_context; +class threading_control; class allocate_root_with_context_proxy; -namespace internal { +#if __TBB_ARENA_BINDING +class numa_binding_observer; +#endif /*__TBB_ARENA_BINDING*/ + +//! Bounded coroutines cache LIFO ring buffer +class arena_co_cache { + //! Ring buffer storage + task_dispatcher** my_co_scheduler_cache; + //! Current cache index + unsigned my_head; + //! Cache capacity for arena + unsigned my_max_index; + //! Accessor lock for modification operations + tbb::spin_mutex my_co_cache_mutex; + + unsigned next_index() { + return ( my_head == my_max_index ) ? 0 : my_head + 1; + } + + unsigned prev_index() { + return ( my_head == 0 ) ? my_max_index : my_head - 1; + } + + bool internal_empty() { + return my_co_scheduler_cache[prev_index()] == nullptr; + } -//! arena data except the array of slots -/** Separated in order to simplify padding. 
+ void internal_task_dispatcher_cleanup(task_dispatcher* to_cleanup) { + to_cleanup->~task_dispatcher(); + cache_aligned_deallocate(to_cleanup); + } + +public: + void init(unsigned cache_capacity) { + std::size_t alloc_size = cache_capacity * sizeof(task_dispatcher*); + my_co_scheduler_cache = (task_dispatcher**)cache_aligned_allocate(alloc_size); + std::memset( my_co_scheduler_cache, 0, alloc_size ); + my_head = 0; + my_max_index = cache_capacity - 1; + } + + void cleanup() { + while (task_dispatcher* to_cleanup = pop()) { + internal_task_dispatcher_cleanup(to_cleanup); + } + cache_aligned_deallocate(my_co_scheduler_cache); + } + + //! Insert scheduler to the current available place. + //! Replace an old value, if necessary. + void push(task_dispatcher* s) { + task_dispatcher* to_cleanup = nullptr; + { + tbb::spin_mutex::scoped_lock lock(my_co_cache_mutex); + // Check if we are replacing some existing buffer entrance + if (my_co_scheduler_cache[my_head] != nullptr) { + to_cleanup = my_co_scheduler_cache[my_head]; + } + // Store the cached value + my_co_scheduler_cache[my_head] = s; + // Move head index to the next slot + my_head = next_index(); + } + // Cleanup replaced buffer if any + if (to_cleanup) { + internal_task_dispatcher_cleanup(to_cleanup); + } + } + + //! Get a cached scheduler if any + task_dispatcher* pop() { + tbb::spin_mutex::scoped_lock lock(my_co_cache_mutex); + // No cached coroutine + if (internal_empty()) { + return nullptr; + } + // Move head index to the currently available value + my_head = prev_index(); + // Retrieve the value from the buffer + task_dispatcher* to_return = my_co_scheduler_cache[my_head]; + // Clear the previous entrance value + my_co_scheduler_cache[my_head] = nullptr; + return to_return; + } +}; + +struct stack_anchor_type { + stack_anchor_type() = default; + stack_anchor_type(const stack_anchor_type&) = delete; +}; + +class atomic_flag { + static const std::uintptr_t SET = 1; + static const std::uintptr_t UNSET = 0; + std::atomic<std::uintptr_t> my_state{UNSET}; +public: + bool test_and_set() { + std::uintptr_t state = my_state.load(std::memory_order_acquire); + switch (state) { + case SET: + return false; + default: /* busy */ + if (my_state.compare_exchange_strong(state, SET)) { + // We interrupted clear transaction + return false; + } + if (state != UNSET) { + // We lost our epoch + return false; + } + // We are too late but still in the same epoch + __TBB_fallthrough; + case UNSET: + return my_state.compare_exchange_strong(state, SET); + } + } + template <typename Pred> + bool try_clear_if(Pred&& pred) { + std::uintptr_t busy = std::uintptr_t(&busy); + std::uintptr_t state = my_state.load(std::memory_order_acquire); + if (state == SET && my_state.compare_exchange_strong(state, busy)) { + if (pred()) { + return my_state.compare_exchange_strong(busy, UNSET); + } + // The result of the next operation is discarded, always false should be returned. + my_state.compare_exchange_strong(busy, SET); + } + return false; + } + bool test(std::memory_order order = std::memory_order_acquire) { + return my_state.load(order) != UNSET; + } +}; + +//! The structure of an arena, except the array of slots. +/** Separated in order to simplify padding. Intrusive list node base class is used by market to form a list of arenas. **/ +// TODO: Analyze arena_base cache lines placement struct arena_base : padded<intrusive_list_node> { - //! 
Number of workers that have been marked out by the resource manager to service the arena - unsigned my_num_workers_allotted; // heavy use in stealing loop + //! The number of workers that have been marked out by the resource manager to service the arena. + std::atomic<unsigned> my_num_workers_allotted; // heavy use in stealing loop - //! References of the arena - /** Counts workers and master references separately. Bit 0 indicates reference from implicit - master or explicit task_arena; the next bits contain number of workers servicing the arena.*/ - atomic<unsigned> my_references; // heavy use in stealing loop + //! Reference counter for the arena. + /** Worker and external thread references are counted separately: first several bits are for references + from external thread threads or explicit task_arenas (see arena::ref_external_bits below); + the rest counts the number of workers servicing the arena. */ + std::atomic<unsigned> my_references; // heavy use in stealing loop -#if __TBB_TASK_PRIORITY - //! Highest priority of recently spawned or enqueued tasks. - volatile intptr_t my_top_priority; // heavy use in stealing loop + //! The maximal number of currently busy slots. + std::atomic<unsigned> my_limit; // heavy use in stealing loop - //! Maximal currently busy slot. - atomic<unsigned> my_limit; // heavy use in stealing loop - - //! Task pool for the tasks scheduled via task::enqueue() method - /** Such scheduling guarantees eventual execution even if - - new tasks are constantly coming (by extracting scheduled tasks in - relaxed FIFO order); - - the enqueuing thread does not call any of wait_for_all methods. **/ - task_stream my_task_stream[num_priority_levels]; // heavy use in stealing loop -#else /* !__TBB_TASK_PRIORITY */ - //! Task pool for the tasks scheduled via task::enqueue() method + //! Task pool for the tasks scheduled via task::enqueue() method. /** Such scheduling guarantees eventual execution even if - new tasks are constantly coming (by extracting scheduled tasks in relaxed FIFO order); - the enqueuing thread does not call any of wait_for_all methods. **/ - task_stream my_task_stream; // heavy use in stealing loop - - //! Maximal currently busy slot. - atomic<unsigned> my_limit; // heavy use in stealing loop -#endif /* !__TBB_TASK_PRIORITY */ + task_stream<front_accessor> my_fifo_task_stream; // heavy use in stealing loop + + //! Task pool for the tasks scheduled via tbb::resume() function. + task_stream<front_accessor> my_resume_task_stream; // heavy use in stealing loop + +#if __TBB_PREVIEW_CRITICAL_TASKS + //! Task pool for the tasks with critical property set. + /** Critical tasks are scheduled for execution ahead of other sources (including local task pool + and even bypassed tasks) unless the thread already executes a critical task in an outer + dispatch loop **/ + // used on the hot path of the task dispatch loop + task_stream<back_nonnull_accessor> my_critical_task_stream; +#endif - //! Number of workers that are currently requested from the resource manager - int my_num_workers_requested; + //! The total number of workers that are requested from the resource manager. + int my_total_num_workers_requested; - //! Number of slots in the arena - unsigned my_num_slots; + //! The index in the array of per priority lists of arenas this object is in. + /*const*/ unsigned my_priority_level; - //! Number of workers requested by the master thread owning the arena - unsigned my_max_num_workers; + //! The max priority level of arena in permit manager. 
+ std::atomic<bool> my_is_top_priority{false}; - //! Market owning this arena - market* my_market; + //! Current task pool state and estimate of available tasks amount. + atomic_flag my_pool_state; - //! ABA prevention marker - uintptr_t my_aba_epoch; + //! The list of local observers attached to this arena. + observer_list my_observers; -#if !__TBB_FP_CONTEXT - //! FPU control settings of arena's master thread captured at the moment of arena instantiation. - __TBB_cpu_ctl_env_t my_cpu_ctl_env; -#endif +#if __TBB_ARENA_BINDING + //! Pointer to internal observer that allows to bind threads in arena to certain NUMA node. + numa_binding_observer* my_numa_binding_observer{nullptr}; +#endif /*__TBB_ARENA_BINDING*/ -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - int my_num_workers_present; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ + // Below are rarely modified members - //! Current task pool state and estimate of available tasks amount. - /** The estimate is either 0 (SNAPSHOT_EMPTY) or infinity (SNAPSHOT_FULL). - Special state is "busy" (any other unsigned value). - Note that the implementation of arena::is_busy_or_empty() requires - my_pool_state to be unsigned. */ - tbb::atomic<uintptr_t> my_pool_state; + threading_control* my_threading_control; -#if __TBB_TASK_GROUP_CONTEXT //! Default task group context. - /** Used by root tasks allocated directly by the master thread (not from inside - a TBB task) without explicit context specification. **/ - task_group_context* my_default_ctx; -#endif /* __TBB_TASK_GROUP_CONTEXT */ + d1::task_group_context* my_default_ctx; -#if __TBB_SCHEDULER_OBSERVER - //! List of local observers attached to this arena. - observer_list my_observers; -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_TASK_PRIORITY - //! Lowest normalized priority of available spawned or enqueued tasks. - intptr_t my_bottom_priority; - - //! Tracks events that may bring tasks in offload areas to the top priority level. - /** Incremented when arena top priority changes or a task group priority - is elevated to the current arena's top level. **/ - uintptr_t my_reload_epoch; - - //! List of offloaded tasks abandoned by workers revoked by the market - task* my_orphaned_tasks; + //! Waiting object for external threads that cannot join the arena. + concurrent_monitor my_exit_monitors; - //! Counter used to track the occurrence of recent orphaning and re-sharing operations. - tbb::atomic<uintptr_t> my_abandonment_epoch; + //! Coroutines (task_dispathers) cache buffer + arena_co_cache my_co_cache; - //! Highest priority level containing enqueued tasks - /** It being greater than 0 means that high priority enqueued tasks had to be - bypassed because all workers were blocked in nested dispatch loops and - were unable to progress at then current priority level. **/ - tbb::atomic<intptr_t> my_skipped_fifo_priority; -#endif /* !__TBB_TASK_PRIORITY */ + // arena needs an extra worker despite the arena limit + atomic_flag my_mandatory_concurrency; + // the number of local mandatory concurrency requests + int my_mandatory_requests; - //! Indicates if there is an oversubscribing worker created to service enqueued tasks. - bool my_mandatory_concurrency; + //! The number of slots in the arena. + unsigned my_num_slots; + //! The number of reserved slots (can be occupied only by external threads). + unsigned my_num_reserved_slots; + //! The number of workers requested by the external thread owning the arena. + unsigned my_max_num_workers; -#if __TBB_TASK_ARENA - //! 
exit notifications after arena slot is released - concurrent_monitor my_exit_monitors; -#endif + threading_control_client my_tc_client; #if TBB_USE_ASSERT //! Used to trap accesses to the object after its destruction. - uintptr_t my_guard; + std::uintptr_t my_guard; #endif /* TBB_USE_ASSERT */ }; // struct arena_base -class arena -#if (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__==0) && !__INTEL_COMPILER - : public padded<arena_base> -#else - : private padded<arena_base> -#endif +class arena: public padded<arena_base> { -private: - friend class generic_scheduler; - template<typename SchedulerTraits> friend class custom_scheduler; - friend class governor; - friend class task_scheduler_observer_v3; - friend class market; - friend class tbb::task; - friend class tbb::task_group_context; - friend class allocate_root_with_context_proxy; - friend class intrusive_list<arena>; - friend class interface7::internal::task_arena_base; // declared in scheduler_common.h - friend class interface7::internal::delegated_task; - friend class interface7::internal::wait_task; - - typedef padded<arena_base> base_type; +public: + using base_type = padded<arena_base>; + + //! Types of work advertised by advertise_new_work() + enum new_work_type { + work_spawned, + wakeup, + work_enqueued + }; //! Constructor - arena ( market&, unsigned max_num_workers ); + arena(threading_control* control, unsigned max_num_workers, unsigned num_reserved_slots, unsigned priority_level); //! Allocate an instance of arena. - static arena& allocate_arena( market&, unsigned max_num_workers ); + static arena& allocate_arena(threading_control* control, unsigned num_slots, unsigned num_reserved_slots, + unsigned priority_level); - static int unsigned num_slots_to_reserve ( unsigned max_num_workers ) { - return max(2u, max_num_workers + 1); - } + static arena& create(threading_control* control, unsigned num_slots, unsigned num_reserved_slots, unsigned arena_priority_level, d1::constraints constraints = d1::constraints{}); - static int allocation_size ( unsigned max_num_workers ) { - return sizeof(base_type) + num_slots_to_reserve(max_num_workers) * (sizeof(mail_outbox) + sizeof(arena_slot)); + static int unsigned num_arena_slots ( unsigned num_slots, unsigned num_reserved_slots ) { + return num_reserved_slots == 0 ? num_slots : max(2u, num_slots); } -#if __TBB_TASK_GROUP_CONTEXT - //! Finds all contexts affected by the state change and propagates the new state to them. - /** The propagation is relayed to the market because tasks created by one - master thread can be passed to and executed by other masters. This means - that context trees can span several arenas at once and thus state change - propagation cannot be generally localized to one arena only. **/ - template <typename T> - bool propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! Get reference to mailbox corresponding to given affinity_id. - mail_outbox& mailbox( affinity_id id ) { - __TBB_ASSERT( 0<id, "affinity id must be positive integer" ); - __TBB_ASSERT( id <= my_num_slots, "affinity id out of bounds" ); - - return ((mail_outbox*)this)[-(int)id]; + static int allocation_size( unsigned num_slots ) { + return sizeof(base_type) + num_slots * (sizeof(mail_outbox) + sizeof(arena_slot) + sizeof(task_dispatcher)); } - //! Completes arena shutdown, destructs and deallocates it. - void free_arena (); + //! 
Get reference to mailbox corresponding to given slot_id + mail_outbox& mailbox( d1::slot_id slot ) { + __TBB_ASSERT( slot != d1::no_slot, "affinity should be specified" ); - typedef uintptr_t pool_state_t; + return reinterpret_cast<mail_outbox*>(this)[-(int)(slot+1)]; // cast to 'int' is redundant but left for readability + } - //! No tasks to steal since last snapshot was taken - static const pool_state_t SNAPSHOT_EMPTY = 0; + //! Completes arena shutdown, destructs and deallocates it. + void free_arena(); - //! At least one task has been offered for stealing since the last snapshot started - static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1); + //! The number of least significant bits for external references + static const unsigned ref_external_bits = 12; // up to 4095 external and 1M workers - //! No tasks to steal or snapshot is being taken. - static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; } + //! Reference increment values for externals and workers + static const unsigned ref_external = 1; + static const unsigned ref_worker = 1 << ref_external_bits; //! The number of workers active in the arena. - unsigned num_workers_active( ) { - return my_references >> 1; + unsigned num_workers_active() const { + return my_references.load(std::memory_order_acquire) >> ref_external_bits; + } + + //! Check if the recall is requested by the market. + bool is_recall_requested() const { + return num_workers_active() > my_num_workers_allotted.load(std::memory_order_relaxed); } + void request_workers(int mandatory_delta, int workers_delta, bool wakeup_threads = false); + //! If necessary, raise a flag that there is new job in arena. - template<bool Spawned> void advertise_new_work(); + template<arena::new_work_type work_type> void advertise_new_work(); + + //! Attempts to steal a task from a randomly chosen arena slot + d1::task* steal_task(unsigned arena_index, FastRandom& frnd, execution_data_ext& ed, isolation_type isolation); + + //! Get a task from a global starvation resistant queue + template<task_stream_accessor_type accessor> + d1::task* get_stream_task(task_stream<accessor>& stream, unsigned& hint); + +#if __TBB_PREVIEW_CRITICAL_TASKS + //! Tries to find a critical task in global critical task stream + d1::task* get_critical_task(unsigned& hint, isolation_type isolation); +#endif //! Check if there is job anywhere in arena. - /** Return true if no job or if arena is being cleaned up. */ - bool is_out_of_work(); + void out_of_work(); //! enqueue a task into starvation-resistance queue - void enqueue_task( task&, intptr_t, FastRandom & ); + void enqueue_task(d1::task&, d1::task_group_context&, thread_data&); //! Registers the worker with the arena and enters TBB scheduler dispatch loop - void process( generic_scheduler& ); + void process(thread_data&); - //! Notification that worker or master leaves its arena - template<bool is_master> - inline void on_thread_leaving ( ); + //! Notification that the thread leaves its arena -#if __TBB_STATISTICS - //! Outputs internal statistics accumulated by the arena - void dump_arena_statistics (); -#endif /* __TBB_STATISTICS */ + void on_thread_leaving(unsigned ref_param); -#if __TBB_TASK_PRIORITY - //! Check if recent priority changes may bring some tasks to the current priority level soon - /** /param tasks_present indicates presence of tasks at any priority level. **/ - inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible ); + //! 
Check for the presence of enqueued tasks + bool has_enqueued_tasks(); - //! Puts offloaded tasks into global list of orphaned tasks - void orphan_offloaded_tasks ( generic_scheduler& s ); -#endif /* __TBB_TASK_PRIORITY */ + //! Check for the presence of any tasks + bool has_tasks(); -#if __TBB_COUNT_TASK_NODES - //! Returns the number of task objects "living" in worker threads - intptr_t workers_task_node_count(); -#endif + bool is_empty() { return my_pool_state.test() == /* EMPTY */ false; } + + thread_control_monitor& get_waiting_threads_monitor(); + + static const std::size_t out_of_arena = ~size_t(0); + //! Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available, returns out_of_arena. + template <bool as_worker> + std::size_t occupy_free_slot(thread_data&); + //! Tries to occupy a slot in the specified range. + std::size_t occupy_free_slot_in_range(thread_data& tls, std::size_t lower, std::size_t upper); + + std::uintptr_t calculate_stealing_threshold(); + + unsigned priority_level() { return my_priority_level; } + + bool has_request() { return my_total_num_workers_requested; } + + unsigned references() const { return my_references.load(std::memory_order_acquire); } + + bool is_arena_workerless() const { return my_max_num_workers == 0; } + + void set_top_priority(bool); + + bool is_top_priority() const; + + bool is_joinable() const { + return num_workers_active() < my_num_workers_allotted.load(std::memory_order_relaxed); + } + + bool try_join(); + + void set_allotment(unsigned allotment); + + int update_concurrency(unsigned concurrency); + + std::pair</*min workers = */ int, /*max workers = */ int> update_request(int mandatory_delta, int workers_delta); /** Must be the last data field */ arena_slot my_slots[1]; }; // class arena +template <arena::new_work_type work_type> +void arena::advertise_new_work() { + bool is_mandatory_needed = false; + bool are_workers_needed = false; -template<bool is_master> -inline void arena::on_thread_leaving ( ) { - // - // Implementation of arena destruction synchronization logic contained various - // bugs/flaws at the different stages of its evolution, so below is a detailed - // description of the issues taken into consideration in the framework of the - // current design. - // - // In case of using fire-and-forget tasks (scheduled via task::enqueue()) - // master thread is allowed to leave its arena before all its work is executed, - // and market may temporarily revoke all workers from this arena. Since revoked - // workers never attempt to reset arena state to EMPTY and cancel its request - // to RML for threads, the arena object is destroyed only when both the last - // thread is leaving it and arena's state is EMPTY (that is its master thread - // left and it does not contain any work). - // - // A worker that checks for work presence and transitions arena to the EMPTY - // state (in snapshot taking procedure arena::is_out_of_work()) updates - // arena::my_pool_state first and only then arena::my_num_workers_requested. - // So the check for work absence must be done against the latter field. - // - // In a time window between decrementing the active threads count and checking - // if there is an outstanding request for workers. New worker thread may arrive, - // finish remaining work, set arena state to empty, and leave decrementing its - // refcount and destroying. Then the current thread will destroy the arena - // the second time. 
To preclude it a local copy of the outstanding request - // value can be stored before decrementing active threads count. - // - // But this technique may cause two other problem. When the stored request is - // zero, it is possible that arena still has threads and they can generate new - // tasks and thus re-establish non-zero requests. Then all the threads can be - // revoked (as described above) leaving this thread the last one, and causing - // it to destroy non-empty arena. - // - // The other problem takes place when the stored request is non-zero. Another - // thread may complete the work, set arena state to empty, and leave without - // arena destruction before this thread decrements the refcount. This thread - // cannot destroy the arena either. Thus the arena may be "orphaned". - // - // In both cases we cannot dereference arena pointer after the refcount is - // decremented, as our arena may already be destroyed. - // - // If this is the master thread, market can be concurrently destroyed. - // In case of workers market's liveness is ensured by the RML connection - // rundown protocol, according to which the client (i.e. the market) lives - // until RML server notifies it about connection termination, and this - // notification is fired only after all workers return into RML. - // - // Thus if we decremented refcount to zero we ask the market to check arena - // state (including the fact if it is alive) under the lock. - // - uintptr_t aba_epoch = my_aba_epoch; - market* m = my_market; - __TBB_ASSERT(my_references > int(!is_master), "broken arena reference counter"); - if ( (my_references -= is_master? 1:2 ) == 0 ) // worker's counter starts from bit 1 - market::try_destroy_arena( m, this, aba_epoch, is_master ); -} + if (work_type != work_spawned) { + // Local memory fence here and below is required to avoid missed wakeups; see the comment below. + // Starvation resistant tasks require concurrency, so missed wakeups are unacceptable. + atomic_fence_seq_cst(); + } -template<bool Spawned> void arena::advertise_new_work() { - if( !Spawned ) { // i.e. the work was enqueued - if( my_max_num_workers==0 ) { - my_max_num_workers = 1; - __TBB_ASSERT(!my_mandatory_concurrency, ""); - my_mandatory_concurrency = true; - __TBB_ASSERT(!num_workers_active(), ""); - my_pool_state = SNAPSHOT_FULL; - my_market->adjust_demand( *this, 1 ); - return; - } - // Local memory fence is required to avoid missed wakeups; see the comment below. - // Starvation resistant tasks require mandatory concurrency, so missed wakeups are unacceptable. - atomic_fence(); + if (work_type == work_enqueued && my_num_slots > my_num_reserved_slots) { + is_mandatory_needed = my_mandatory_concurrency.test_and_set(); } + // Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences. - // Technically, to avoid missed wakeups, there should be a full memory fence between the point we - // released the task pool (i.e. spawned task) and read the arena's state. However, adding such a - // fence might hurt overall performance more than it helps, because the fence would be executed - // on every task pool release, even when stealing does not occur. Since TBB allows parallelism, + // Technically, to avoid missed wakeups, there should be a full memory fence between the point we + // released the task pool (i.e. spawned task) and read the arena's state. 
However, adding such a + fence might hurt overall performance more than it helps, because the fence would be executed + on every task pool release, even when stealing does not occur. Since TBB allows parallelism, // but never promises parallelism, the missed wakeup is not a correctness problem. - pool_state_t snapshot = my_pool_state; - if( is_busy_or_empty(snapshot) ) { - // Attempt to mark as full. The compare_and_swap below is a little unusual because the - // result is compared to a value that can be different than the comparand argument. - if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) { - if( snapshot!=SNAPSHOT_EMPTY ) { - // This thread read "busy" into snapshot, and then another thread transitioned - // my_pool_state to "empty" in the meantime, which caused the compare_and_swap above - // to fail. Attempt to transition my_pool_state from "empty" to "full". - if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) { - // Some other thread transitioned my_pool_state from "empty", and hence became - // responsible for waking up workers. - return; - } - } - // This thread transitioned pool from empty to full state, and thus is responsible for - // telling RML that there is work to do. - if( Spawned ) { - if( my_mandatory_concurrency ) { - __TBB_ASSERT(my_max_num_workers==1, ""); - __TBB_ASSERT(!governor::local_scheduler()->is_worker(), ""); - // There was deliberate oversubscription on 1 core for sake of starvation-resistant tasks. - // Now a single active thread (must be the master) supposedly starts a new parallel region - // with relaxed sequential semantics, and oversubscription should be avoided. - // Demand for workers has been decreased to 0 during SNAPSHOT_EMPTY, so just keep it. - my_max_num_workers = 0; - my_mandatory_concurrency = false; - return; - } - } - my_market->adjust_demand( *this, my_max_num_workers ); + are_workers_needed = my_pool_state.test_and_set(); + + if (is_mandatory_needed || are_workers_needed) { + int mandatory_delta = is_mandatory_needed ? 1 : 0; + int workers_delta = are_workers_needed ? my_max_num_workers : 0; + + if (is_mandatory_needed && is_arena_workerless()) { + // Set workers_delta to 1 to keep arena invariants consistent + workers_delta = 1; } + + request_workers(mandatory_delta, workers_delta, /* wakeup_threads = */ true); + } +} + +inline d1::task* arena::steal_task(unsigned arena_index, FastRandom& frnd, execution_data_ext& ed, isolation_type isolation) { + auto slot_num_limit = my_limit.load(std::memory_order_relaxed); + if (slot_num_limit == 1) { + // No slots to steal from + return nullptr; + } + // Try to steal a task from a random victim. + std::size_t k = frnd.get() % (slot_num_limit - 1); + // The following condition excludes the external thread that might have + // already taken our previous place in the arena from the list + // of potential victims. But since such a situation can take + // place only in case of significant oversubscription, keeping + // the checks simple seems to be preferable to complicating the code.
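The `if (k >= arena_index) ++k;` adjustment just below is the standard trick for drawing a random victim while excluding the stealing thread's own slot: draw from `n - 1` candidates, then shift the draw past one's own index, which keeps the distribution uniform over the remaining slots. A minimal standalone sketch of the same idea, with `pick_victim` as a hypothetical name and `std::mt19937` standing in for TBB's `FastRandom`:

    #include <cstddef>
    #include <random>

    // Sample uniformly from {0, ..., n_slots - 1} \ {self};
    // requires n_slots >= 2 and self < n_slots.
    std::size_t pick_victim(std::size_t n_slots, std::size_t self, std::mt19937& rng) {
        std::uniform_int_distribution<std::size_t> dist(0, n_slots - 2); // n - 1 candidates
        std::size_t k = dist(rng);
        if (k >= self) ++k; // shift past our own slot; each other slot keeps probability 1/(n-1)
        return k;
    }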
+ if (k >= arena_index) { + ++k; // Adjusts random distribution to exclude self + } + arena_slot* victim = &my_slots[k]; + d1::task **pool = victim->task_pool.load(std::memory_order_relaxed); + d1::task *t = nullptr; + if (pool == EmptyTaskPool || !(t = victim->steal_task(*this, isolation, k))) { + return nullptr; + } + if (task_accessor::is_proxy_task(*t)) { + task_proxy &tp = *(task_proxy*)t; + d1::slot_id slot = tp.slot; + t = tp.extract_task<task_proxy::pool_bit>(); + if (!t) { + // Proxy was empty, so it's our responsibility to free it + tp.allocator.delete_object(&tp, ed); + return nullptr; + } + // Note affinity is called for any stolen task (proxy or general) + ed.affinity_slot = slot; + } else { + // Note affinity is called for any stolen task (proxy or general) + ed.affinity_slot = d1::any_slot; + } + // Update task owner thread id to identify stealing + ed.original_slot = k; + return t; +} + +template<task_stream_accessor_type accessor> +inline d1::task* arena::get_stream_task(task_stream<accessor>& stream, unsigned& hint) { + if (stream.empty()) + return nullptr; + return stream.pop(subsequent_lane_selector(hint)); +} + +#if __TBB_PREVIEW_CRITICAL_TASKS +// Retrieves critical task respecting isolation level, if provided. The rule is: +// 1) If no outer critical task and no isolation => take any critical task +// 2) If working on an outer critical task and no isolation => cannot take any critical task +// 3) If no outer critical task but isolated => respect isolation +// 4) If working on an outer critical task and isolated => respect isolation +// Hint is used to keep some LIFO-ness, start search with the lane that was used during push operation. +inline d1::task* arena::get_critical_task(unsigned& hint, isolation_type isolation) { + if (my_critical_task_stream.empty()) + return nullptr; + + if ( isolation != no_isolation ) { + return my_critical_task_stream.pop_specific( hint, isolation ); + } else { + return my_critical_task_stream.pop(preceding_lane_selector(hint)); } } +#endif // __TBB_PREVIEW_CRITICAL_TASKS -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_arena_H */
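A note on the `arena::mailbox()` accessor above: it can index backwards from `this` because `allocate_arena()` carves the whole arena out of one block, with one `mail_outbox` per slot laid out immediately before the arena object (and the slots after it), as `allocation_size()` suggests. A simplified sketch of that layout, using hypothetical `mail_outbox`/`arena_header` stand-ins and plain `malloc` in place of the real cache-aligned allocator:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct mail_outbox { /* ... */ };
    struct arena_header { /* ... */ };

    // One block: [ mailbox for slot N-1 | ... | mailbox for slot 0 | arena_header | ... ]
    arena_header* allocate_arena_block(std::size_t num_slots) {
        std::size_t bytes = num_slots * sizeof(mail_outbox) + sizeof(arena_header);
        char* base = static_cast<char*>(std::malloc(bytes));
        return new (base + num_slots * sizeof(mail_outbox)) arena_header{};
    }

    // The mailbox of 'slot' sits at negative index -(slot + 1) from the arena header,
    // mirroring arena::mailbox().
    mail_outbox& mailbox_of(arena_header* a, std::size_t slot) {
        return reinterpret_cast<mail_outbox*>(a)[-static_cast<std::ptrdiff_t>(slot + 1)];
    }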
diff --git a/src/tbb/src/tbb/arena_slot.cpp b/src/tbb/src/tbb/arena_slot.cpp new file mode 100644 index 000000000..bce5701db --- /dev/null +++ b/src/tbb/src/tbb/arena_slot.cpp @@ -0,0 +1,218 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "arena_slot.h" +#include "arena.h" +#include "thread_data.h" + +namespace tbb { +namespace detail { +namespace r1 { + +//------------------------------------------------------------------------ +// Arena Slot +//------------------------------------------------------------------------ +d1::task* arena_slot::get_task_impl(size_t T, execution_data_ext& ed, bool& tasks_omitted, isolation_type isolation) { + __TBB_ASSERT(tail.load(std::memory_order_relaxed) <= T || is_local_task_pool_quiescent(), + "Is it safe to get a task at position T?"); + + d1::task* result = task_pool_ptr[T]; + __TBB_ASSERT(!is_poisoned( result ), "The poisoned task is going to be processed"); + + if (!result) { + return nullptr; + } + bool omit = isolation != no_isolation && isolation != task_accessor::isolation(*result); + if (!omit && !task_accessor::is_proxy_task(*result)) { + return result; + } else if (omit) { + tasks_omitted = true; + return nullptr; + } + + task_proxy& tp = static_cast<task_proxy&>(*result); + d1::slot_id aff_id = tp.slot; + if ( d1::task *t = tp.extract_task<task_proxy::pool_bit>() ) { + ed.affinity_slot = aff_id; + return t; + } + // Proxy was empty, so it's our responsibility to free it + tp.allocator.delete_object(&tp, ed); + + if ( tasks_omitted ) { + task_pool_ptr[T] = nullptr; + } + return nullptr; +} + +d1::task* arena_slot::get_task(execution_data_ext& ed, isolation_type isolation) { + __TBB_ASSERT(is_task_pool_published(), nullptr); + // The current task position in the task pool. + std::size_t T0 = tail.load(std::memory_order_relaxed); + // The bounds of available tasks in the task pool. H0 is only used when the head bound is reached. + std::size_t H0 = (std::size_t)-1, T = T0; + d1::task* result = nullptr; + bool task_pool_empty = false; + bool tasks_omitted = false; + do { + __TBB_ASSERT( !result, nullptr ); + // The full fence is required to sync the store of `tail` with the load of `head` (write-read barrier) + T = --tail; + // The acquire load of head is required to guarantee consistency of our task pool + // when a thief rolls back the head. + if ( (std::intptr_t)( head.load(std::memory_order_acquire) ) > (std::intptr_t)T ) { + acquire_task_pool(); + H0 = head.load(std::memory_order_relaxed); + if ( (std::intptr_t)H0 > (std::intptr_t)T ) { + // The thief has not backed off - nothing to grab. + __TBB_ASSERT( H0 == head.load(std::memory_order_relaxed) + && T == tail.load(std::memory_order_relaxed) + && H0 == T + 1, "victim/thief arbitration algorithm failure" ); + reset_task_pool_and_leave(); + // No tasks in the task pool. + task_pool_empty = true; + break; + } else if ( H0 == T ) { + // There is only one task in the task pool. + reset_task_pool_and_leave(); + task_pool_empty = true; + } else { + // Release task pool if there are still some tasks. + // After the release, the tail will be less than T, thus a thief + // will not attempt to get a task at position T. + release_task_pool(); + } + } + result = get_task_impl( T, ed, tasks_omitted, isolation ); + if ( result ) { + poison_pointer( task_pool_ptr[T] ); + break; + } else if ( !tasks_omitted ) { + poison_pointer( task_pool_ptr[T] ); + __TBB_ASSERT( T0 == T+1, nullptr ); + T0 = T; + } + } while ( !result && !task_pool_empty ); + + if ( tasks_omitted ) { + if ( task_pool_empty ) { + // All tasks have been checked. The task pool should be in reset state. + // We just restore the bounds for the available tasks. + // TODO: Does it make sense to move them to the beginning of the task pool?
+ __TBB_ASSERT( is_quiescent_local_task_pool_reset(), nullptr ); + if ( result ) { + // If we have a task, it should be at H0 position. + __TBB_ASSERT( H0 == T, nullptr ); + ++H0; + } + __TBB_ASSERT( H0 <= T0, nullptr ); + if ( H0 < T0 ) { + // Restore the task pool if there are some tasks. + head.store(H0, std::memory_order_relaxed); + tail.store(T0, std::memory_order_relaxed); + // The release fence is used in publish_task_pool. + publish_task_pool(); + // Synchronize with snapshot as we published some tasks. + ed.task_disp->m_thread_data->my_arena->advertise_new_work<arena::wakeup>(); + } + } else { + // A task has been obtained. We need to make a hole in position T. + __TBB_ASSERT( is_task_pool_published(), nullptr ); + __TBB_ASSERT( result, nullptr ); + task_pool_ptr[T] = nullptr; + tail.store(T0, std::memory_order_release); + // Synchronize with snapshot as we published some tasks. + // TODO: consider some approach not to call wakeup for each time. E.g. check if the tail reached the head. + ed.task_disp->m_thread_data->my_arena->advertise_new_work<arena::wakeup>(); + } + } + + __TBB_ASSERT( (std::intptr_t)tail.load(std::memory_order_relaxed) >= 0, nullptr ); + __TBB_ASSERT( result || tasks_omitted || is_quiescent_local_task_pool_reset(), nullptr ); + return result; +} + +d1::task* arena_slot::steal_task(arena& a, isolation_type isolation, std::size_t slot_index) { + d1::task** victim_pool = lock_task_pool(); + if (!victim_pool) { + return nullptr; + } + d1::task* result = nullptr; + std::size_t H = head.load(std::memory_order_relaxed); // mirror + std::size_t H0 = H; + bool tasks_omitted = false; + do { + // The full fence is required to sync the store of `head` with the load of `tail` (write-read barrier) + H = ++head; + // The acquire load of tail is required to guarantee consistency of victim_pool + // because the owner synchronizes task spawning via tail. + if ((std::intptr_t)H > (std::intptr_t)(tail.load(std::memory_order_acquire))) { + // Stealing attempt failed, deque contents have not been changed by us + head.store( /*dead: H = */ H0, std::memory_order_relaxed ); + __TBB_ASSERT( !result, nullptr ); + goto unlock; + } + result = victim_pool[H-1]; + __TBB_ASSERT( !is_poisoned( result ), nullptr ); + + if (result) { + if (isolation == no_isolation || isolation == task_accessor::isolation(*result)) { + if (!task_accessor::is_proxy_task(*result)) { + break; + } + task_proxy& tp = *static_cast<task_proxy*>(result); + // If mailed task is likely to be grabbed by its destination thread, skip it. + if (!task_proxy::is_shared(tp.task_and_tag) || !tp.outbox->recipient_is_idle() || a.mailbox(slot_index).recipient_is_idle()) { + break; + } + } + // The task cannot be executed either due to isolation or proxy constraints. + result = nullptr; + tasks_omitted = true; + } else if (!tasks_omitted) { + // Cleanup the task pool from holes until a task is skipped. + __TBB_ASSERT( H0 == H-1, nullptr ); + poison_pointer( victim_pool[H0] ); + H0 = H; + } + } while (!result); + __TBB_ASSERT( result, nullptr ); + + // emit "task was consumed" signal + poison_pointer( victim_pool[H-1] ); + if (tasks_omitted) { + // Some proxies in the task pool have been omitted. Set the stolen task to nullptr. + victim_pool[H-1] = nullptr; + // The release store synchronizes the victim_pool update (the store of nullptr).
+ head.store( /*dead: H = */ H0, std::memory_order_release ); + } +unlock: + unlock_task_pool(victim_pool); + +#if __TBB_PREFETCHING + __TBB_cl_evict(&victim_slot.head); + __TBB_cl_evict(&victim_slot.tail); +#endif + if (tasks_omitted) { + // Synchronize with snapshot as the head and tail can be bumped which can falsely trigger EMPTY state + a.advertise_new_work<arena::wakeup>(); + } + return result; +} + +} // namespace r1 +} // namespace detail +} // namespace tbb
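The header that follows defines the locking convention the code above relies on: the slot's `task_pool` pointer doubles as a lock word, with two reserved sentinel values, `EmptyTaskPool` (null, nothing published) and `LockedTaskPool` (all bits set). Acquiring the lock is a compare-exchange from the published array pointer to the sentinel, so the owner and thieves arbitrate on a single atomic word instead of a mutex. A minimal sketch of the idiom, with `mini_slot` as a hypothetical stand-in:

    #include <atomic>
    #include <cstdint>

    using task_ptr = void*;
    static task_ptr* const kEmpty  = nullptr;
    static task_ptr* const kLocked = reinterpret_cast<task_ptr*>(~std::uintptr_t(0));

    struct mini_slot {
        std::atomic<task_ptr*> pool{kEmpty}; // published pool pointer, or a sentinel
        task_ptr* storage = nullptr;         // the real array, tracked on the side

        bool try_lock() {
            task_ptr* expected = storage;
            // Succeeds only if the pool is currently published (== storage).
            return expected != kEmpty && pool.compare_exchange_strong(expected, kLocked);
        }
        void unlock() { pool.store(storage, std::memory_order_release); }
    };

In the real code the acquiring side additionally spins with exponential backoff (`acquire_task_pool()` and `lock_task_pool()` below) rather than failing fast.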
diff --git a/src/tbb/src/tbb/arena_slot.h b/src/tbb/src/tbb/arena_slot.h new file mode 100644 index 000000000..c526e4743 --- /dev/null +++ b/src/tbb/src/tbb/arena_slot.h @@ -0,0 +1,414 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_arena_slot_H +#define _TBB_arena_slot_H + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/detail/_template_helpers.h" +#include "oneapi/tbb/detail/_task.h" + +#include "oneapi/tbb/cache_aligned_allocator.h" + +#include "misc.h" +#include "mailbox.h" +#include "scheduler_common.h" + +#include <atomic> + +namespace tbb { +namespace detail { +namespace r1 { + +class arena; +class task_group_context; + +//-------------------------------------------------------------------------------------------------------- +// Arena Slot +//-------------------------------------------------------------------------------------------------------- + +static d1::task** const EmptyTaskPool = nullptr; +static d1::task** const LockedTaskPool = reinterpret_cast<d1::task**>(~std::intptr_t(0)); + +struct alignas(max_nfs_size) arena_slot_shared_state { + //! Scheduler of the thread attached to the slot + /** Marks the slot as busy, and is used to iterate through the schedulers belonging to this arena **/ + std::atomic<bool> my_is_occupied; + + // Synchronization of access to Task pool + /** Also is used to specify if the slot is empty or locked: + 0 - empty + -1 - locked **/ + std::atomic<d1::task**> task_pool; + + //! Index of the first ready task in the deque. + /** Modified by thieves, and by the owner during compaction/reallocation **/ + std::atomic<std::size_t> head; +}; + +struct alignas(max_nfs_size) arena_slot_private_state { + //! Hint provided for operations with the container of starvation-resistant tasks. + /** Modified by the owner thread (during these operations). **/ + unsigned hint_for_fifo_stream; + +#if __TBB_PREVIEW_CRITICAL_TASKS + //! Similar to 'hint_for_fifo_stream' but for critical tasks. + unsigned hint_for_critical_stream; +#endif + + //! Similar to 'hint_for_fifo_stream' but for the resume tasks. + unsigned hint_for_resume_stream; + + //! Index of the element following the last ready task in the deque. + /** Modified by the owner thread. **/ + std::atomic<std::size_t> tail; + + //! Capacity of the primary task pool (number of elements - pointers to task). + std::size_t my_task_pool_size; + + //! Task pool of the scheduler that owns this slot + // TODO: previously was task**__TBB_atomic, but it seems not to be accessed from other threads + d1::task** task_pool_ptr; +}; + +class arena_slot : private arena_slot_shared_state, private arena_slot_private_state { + friend class arena; + friend class outermost_worker_waiter; + friend class task_dispatcher; + friend class thread_data; + friend class nested_arena_context; + + //! The original task dispatcher associated with this slot + task_dispatcher* my_default_task_dispatcher; + +#if TBB_USE_ASSERT + void fill_with_canary_pattern ( std::size_t first, std::size_t last ) { + for ( std::size_t i = first; i < last; ++i ) + poison_pointer(task_pool_ptr[i]); + } +#else + void fill_with_canary_pattern ( size_t, std::size_t ) {} +#endif /* TBB_USE_ASSERT */ + + static constexpr std::size_t min_task_pool_size = 64; + + void allocate_task_pool( std::size_t n ) { + std::size_t byte_size = ((n * sizeof(d1::task*) + max_nfs_size - 1) / max_nfs_size) * max_nfs_size; + my_task_pool_size = byte_size / sizeof(d1::task*); + task_pool_ptr = (d1::task**)cache_aligned_allocate(byte_size); + // No need to clear the fresh deque since valid items are designated by the head and tail members. + // But fill it with a canary pattern in the high vigilance debug mode. + fill_with_canary_pattern( 0, my_task_pool_size ); + } + +public: + //! Deallocate task pool that was allocated by means of allocate_task_pool. + void free_task_pool( ) { + // TODO: understand the assertion and modify + // __TBB_ASSERT( !task_pool /* TODO: == EmptyTaskPool */, nullptr); + if( task_pool_ptr ) { + __TBB_ASSERT( my_task_pool_size, nullptr); + cache_aligned_deallocate( task_pool_ptr ); + task_pool_ptr = nullptr; + my_task_pool_size = 0; + } + } + + //! Get a task from the local pool. + /** Called only by the pool owner. + Returns the pointer to the task or nullptr if a suitable task is not found. + Resets the pool if it is empty. **/ + d1::task* get_task(execution_data_ext&, isolation_type); + + //! Steal task from slot's ready pool + d1::task* steal_task(arena&, isolation_type, std::size_t); + + //! Some thread is now the owner of this slot + void occupy() { + __TBB_ASSERT(!my_is_occupied.load(std::memory_order_relaxed), nullptr); + my_is_occupied.store(true, std::memory_order_release); + } + + //! Try to occupy the slot + bool try_occupy() { + return !is_occupied() && my_is_occupied.exchange(true) == false; + } + + //! The owner thread releases this slot + void release() { + __TBB_ASSERT(my_is_occupied.load(std::memory_order_relaxed), nullptr); + my_is_occupied.store(false, std::memory_order_release); + } + + //!
Spawn newly created tasks + void spawn(d1::task& t) { + std::size_t T = prepare_task_pool(1); + __TBB_ASSERT(is_poisoned(task_pool_ptr[T]), nullptr); + task_pool_ptr[T] = &t; + commit_spawned_tasks(T + 1); + if (!is_task_pool_published()) { + publish_task_pool(); + } + } + + bool is_task_pool_published() const { + return task_pool.load(std::memory_order_relaxed) != EmptyTaskPool; + } + + bool is_empty() const { + return task_pool.load(std::memory_order_relaxed) == EmptyTaskPool || + head.load(std::memory_order_relaxed) >= tail.load(std::memory_order_relaxed); + } + + bool is_occupied() const { + return my_is_occupied.load(std::memory_order_relaxed); + } + + task_dispatcher& default_task_dispatcher() { + __TBB_ASSERT(my_default_task_dispatcher != nullptr, nullptr); + return *my_default_task_dispatcher; + } + + void init_task_streams(unsigned h) { + hint_for_fifo_stream = h; +#if __TBB_RESUMABLE_TASKS + hint_for_resume_stream = h; +#endif +#if __TBB_PREVIEW_CRITICAL_TASKS + hint_for_critical_stream = h; +#endif + } + +#if __TBB_PREVIEW_CRITICAL_TASKS + unsigned& critical_hint() { + return hint_for_critical_stream; + } +#endif +private: + //! Get a task from the local pool at specified location T. + /** Returns the pointer to the task or nullptr if the task cannot be executed, + e.g. proxy has been deallocated or isolation constraint is not met. + tasks_omitted tells if some tasks have been omitted. + Called only by the pool owner. The caller should guarantee that the + position T is not available for a thief. **/ + d1::task* get_task_impl(size_t T, execution_data_ext& ed, bool& tasks_omitted, isolation_type isolation); + + //! Makes sure that the task pool can accommodate at least num_tasks more elements + /** If necessary relocates existing task pointers or grows the ready task deque. + * Returns the (possibly updated) tail index (not accounting for num_tasks). **/ + std::size_t prepare_task_pool(std::size_t num_tasks) { + std::size_t T = tail.load(std::memory_order_relaxed); // mirror + if ( T + num_tasks <= my_task_pool_size ) { + return T; + } + + std::size_t new_size = num_tasks; + if ( !my_task_pool_size ) { + __TBB_ASSERT( !is_task_pool_published() && is_quiescent_local_task_pool_reset(), nullptr); + __TBB_ASSERT( !task_pool_ptr, nullptr); + if ( num_tasks < min_task_pool_size ) new_size = min_task_pool_size; + allocate_task_pool( new_size ); + return 0; + } + acquire_task_pool(); + std::size_t H = head.load(std::memory_order_relaxed); // mirror + d1::task** new_task_pool = task_pool_ptr; + __TBB_ASSERT( my_task_pool_size >= min_task_pool_size, nullptr); + // Count non-skipped tasks. Consider using std::count_if. + for ( std::size_t i = H; i < T; ++i ) + if ( new_task_pool[i] ) ++new_size; + // If the free space at the beginning of the task pool is too short, we + // are likely facing a pathological single-producer-multiple-consumers + // scenario, and thus it's better to expand the task pool + bool allocate = new_size > my_task_pool_size - min_task_pool_size/4; + if ( allocate ) { + // Grow task pool. As this operation is rare, and its cost is asymptotically + // amortizable, we can tolerate new task pool allocation done under the lock. + if ( new_size < 2 * my_task_pool_size ) + new_size = 2 * my_task_pool_size; + allocate_task_pool( new_size ); // updates my_task_pool_size + } + // Filter out skipped tasks. Consider using std::copy_if.
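One caveat on the `std::copy_if` suggestion above: the manual loop that follows may compact the pool in place (when `allocate` is false, source and destination are the same array), and `std::copy_if` formally requires non-overlapping ranges, so the algorithm would only be a drop-in replacement on the freshly allocated branch. A sketch of that non-overlapping case, with `compact_into` as a hypothetical helper reusing the surrounding `d1::task` type:

    #include <algorithm>
    #include <cstddef>

    // Copy the non-null task pointers from src[head, tail) to the front of dst.
    // Only valid when dst does not overlap src[head, tail), e.g. a fresh allocation.
    inline std::size_t compact_into(d1::task** src, std::size_t head, std::size_t tail,
                                    d1::task** dst) {
        d1::task** end = std::copy_if(src + head, src + tail, dst,
                                      [](d1::task* t) { return t != nullptr; });
        return static_cast<std::size_t>(end - dst); // the new tail index (T1 below)
    }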
+ std::size_t T1 = 0; + for ( std::size_t i = H; i < T; ++i ) { + if ( new_task_pool[i] ) { + task_pool_ptr[T1++] = new_task_pool[i]; + } + } + // Deallocate the previous task pool if a new one has been allocated. + if ( allocate ) + cache_aligned_deallocate( new_task_pool ); + else + fill_with_canary_pattern( T1, tail ); + // Publish the new state. + commit_relocated_tasks( T1 ); + // assert_task_pool_valid(); + return T1; + } + + //! Makes newly spawned tasks visible to thieves + void commit_spawned_tasks(std::size_t new_tail) { + __TBB_ASSERT (new_tail <= my_task_pool_size, "task deque end was overwritten"); + // emit "task was released" signal + // Release fence is necessary to make sure that previously stored task pointers + // are visible to thieves. + tail.store(new_tail, std::memory_order_release); + } + + //! Used by workers to enter the task pool + /** Does not lock the task pool in case if arena slot has been successfully grabbed. **/ + void publish_task_pool() { + __TBB_ASSERT ( task_pool == EmptyTaskPool, "someone else grabbed my arena slot?" ); + __TBB_ASSERT ( head.load(std::memory_order_relaxed) < tail.load(std::memory_order_relaxed), + "entering arena without tasks to share" ); + // Release signal on behalf of previously spawned tasks (when this thread was not in arena yet) + task_pool.store(task_pool_ptr, std::memory_order_release ); + } + + //! Locks the local task pool + /** Garbles task_pool for the duration of the lock. Requires correctly set task_pool_ptr. + ATTENTION: This method is mostly the same as generic_scheduler::lock_task_pool(), with + a little different logic of slot state checks (slot is either locked or points + to our task pool). Thus if either of them is changed, consider changing the counterpart as well. **/ + void acquire_task_pool() { + if (!is_task_pool_published()) { + return; // we are not in arena - nothing to lock + } + bool sync_prepare_done = false; + for( atomic_backoff b;;b.pause() ) { +#if TBB_USE_ASSERT + // Local copy of the arena slot task pool pointer is necessary for the next + // assertion to work correctly to exclude asynchronous state transition effect. + d1::task** tp = task_pool.load(std::memory_order_relaxed); + __TBB_ASSERT( tp == LockedTaskPool || tp == task_pool_ptr, "slot ownership corrupt?" ); +#endif + d1::task** expected = task_pool_ptr; + if( task_pool.load(std::memory_order_relaxed) != LockedTaskPool && + task_pool.compare_exchange_strong(expected, LockedTaskPool ) ) { + // We acquired our own slot + break; + } else if( !sync_prepare_done ) { + // Start waiting + sync_prepare_done = true; + } + // Someone else acquired a lock, so pause and do exponential backoff. + } + __TBB_ASSERT( task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "not really acquired task pool" ); + } + + //! Unlocks the local task pool + /** Restores task_pool munged by acquire_task_pool. Requires + correctly set task_pool_ptr. **/ + void release_task_pool() { + if ( !(task_pool.load(std::memory_order_relaxed) != EmptyTaskPool) ) + return; // we are not in arena - nothing to unlock + __TBB_ASSERT( task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "arena slot is not locked" ); + task_pool.store( task_pool_ptr, std::memory_order_release ); + } + + //! Locks victim's task pool, and returns pointer to it. The pointer can be nullptr. + /** Garbles victim_arena_slot->task_pool for the duration of the lock. 
**/ + d1::task** lock_task_pool() { + d1::task** victim_task_pool; + for ( atomic_backoff backoff;; /*backoff pause embedded in the loop*/) { + victim_task_pool = task_pool.load(std::memory_order_relaxed); + // Microbenchmarks demonstrated that aborting a stealing attempt when the + // victim's task pool is locked degrades performance. + // NOTE: Do not use comparison of head and tail indices to check for + // the presence of work in the victim's task pool, as they may give + // incorrect indication because of task pool relocations and resizes. + if (victim_task_pool == EmptyTaskPool) { + break; + } + d1::task** expected = victim_task_pool; + if (victim_task_pool != LockedTaskPool && task_pool.compare_exchange_strong(expected, LockedTaskPool) ) { + // We've locked victim's task pool + break; + } + // Someone else acquired a lock, so pause and do exponential backoff. + backoff.pause(); + } + __TBB_ASSERT(victim_task_pool == EmptyTaskPool || + (task_pool.load(std::memory_order_relaxed) == LockedTaskPool && + victim_task_pool != LockedTaskPool), "not really locked victim's task pool?"); + return victim_task_pool; + } + + //! Unlocks victim's task pool + /** Restores victim_arena_slot->task_pool munged by lock_task_pool. **/ + void unlock_task_pool(d1::task** victim_task_pool) { + __TBB_ASSERT(task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "victim arena slot is not locked"); + __TBB_ASSERT(victim_task_pool != LockedTaskPool, nullptr); + task_pool.store(victim_task_pool, std::memory_order_release); + } + +#if TBB_USE_ASSERT + bool is_local_task_pool_quiescent() const { + d1::task** tp = task_pool.load(std::memory_order_relaxed); + return tp == EmptyTaskPool || tp == LockedTaskPool; + } + + bool is_quiescent_local_task_pool_empty() const { + __TBB_ASSERT(is_local_task_pool_quiescent(), "Task pool is not quiescent"); + return head.load(std::memory_order_relaxed) == tail.load(std::memory_order_relaxed); + } + + bool is_quiescent_local_task_pool_reset() const { + __TBB_ASSERT(is_local_task_pool_quiescent(), "Task pool is not quiescent"); + return head.load(std::memory_order_relaxed) == 0 && tail.load(std::memory_order_relaxed) == 0; + } +#endif // TBB_USE_ASSERT + + //! Leave the task pool + /** Leaving task pool automatically releases the task pool if it is locked. **/ + void leave_task_pool() { + __TBB_ASSERT(is_task_pool_published(), "Not in arena"); + // Do not reset my_arena_index. It will be used to (attempt to) re-acquire the slot next time + __TBB_ASSERT(task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "Task pool must be locked when leaving arena"); + __TBB_ASSERT(is_quiescent_local_task_pool_empty(), "Cannot leave arena when the task pool is not empty"); + // No release fence is necessary here as this assignment precludes external + // accesses to the local task pool when it becomes visible. Thus it is harmless + // if it gets hoisted above preceding local bookkeeping manipulations. + task_pool.store(EmptyTaskPool, std::memory_order_relaxed); + } + + //! Resets head and tail indices to 0, and leaves task pool + /** The task pool must be locked by the owner (via acquire_task_pool).**/ + void reset_task_pool_and_leave() { + __TBB_ASSERT(task_pool.load(std::memory_order_relaxed) == LockedTaskPool, "Task pool must be locked when resetting task pool"); + tail.store(0, std::memory_order_relaxed); + head.store(0, std::memory_order_relaxed); + leave_task_pool(); + } + + //! Makes relocated tasks visible to thieves and releases the local task pool.
+ /** Obviously, the task pool must be locked when calling this method. **/ + void commit_relocated_tasks(std::size_t new_tail) { + __TBB_ASSERT(is_local_task_pool_quiescent(), "Task pool must be locked when calling commit_relocated_tasks()"); + head.store(0, std::memory_order_relaxed); + // Tail is updated last to minimize probability of a thread making arena + // snapshot being misguided into thinking that this task pool is empty. + tail.store(new_tail, std::memory_order_release); + release_task_pool(); + } +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_arena_slot_H diff --git a/src/tbb/src/tbb/assert_impl.h b/src/tbb/src/tbb/assert_impl.h new file mode 100644 index 000000000..5628658b7 --- /dev/null +++ b/src/tbb/src/tbb/assert_impl.h @@ -0,0 +1,97 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_assert_impl_H +#define __TBB_assert_impl_H + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_utils.h" + +#include <cstdio> +#include <cstdlib> +#include <cstring> +#include <cstdarg> +#if _MSC_VER && _DEBUG +#include <crtdbg.h> +#endif + +#include <mutex> + +#if __TBBMALLOC_BUILD +namespace rml { namespace internal { +#else +namespace tbb { +namespace detail { +namespace r1 { +#endif +// TODO: consider extension for formatted error description string +static void assertion_failure_impl(const char* location, int line, const char* expression, const char* comment) { + + std::fprintf(stderr, "Assertion %s failed (located in the %s function, line in file: %d)\n", + expression, location, line); + + if (comment) { + std::fprintf(stderr, "Detailed description: %s\n", comment); + } +#if _MSC_VER && _DEBUG + if (1 == _CrtDbgReport(_CRT_ASSERT, location, line, "tbb_debug.dll", "%s\r\n%s", expression, comment?comment:"")) { + _CrtDbgBreak(); + } else +#endif + { + std::fflush(stderr); + std::abort(); + } +} + +// Do not move the definition into the assertion_failure function because it will require "magic statics". +// It will bring a dependency on C++ runtime on some platforms while assert_impl.h is reused in tbbmalloc +// that should not depend on C++ runtime +static std::atomic<tbb::detail::do_once_state> assertion_state; + +void __TBB_EXPORTED_FUNC assertion_failure(const char* location, int line, const char* expression, const char* comment) { +#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED + // Workaround for erroneous "unreachable code" during assertion throwing using call_once + // #pragma warning (push) + // #pragma warning (disable: 4702) +#endif + // We cannot use std::call_once because it brings a dependency on C++ runtime on some platforms + // while assert_impl.h is reused in tbbmalloc that should not depend on C++ runtime + atomic_do_once([&](){ assertion_failure_impl(location, line, expression, comment); }, assertion_state); +#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED + // #pragma warning (pop) +#endif +} + +//! Report a runtime warning. +void runtime_warning( const char* format, ... 
) { + char str[1024]; std::memset(str, 0, 1024); + va_list args; va_start(args, format); + vsnprintf( str, 1024-1, format, args); + va_end(args); + fprintf(stderr, "TBB Warning: %s\n", str); +} + +#if __TBBMALLOC_BUILD +}} // namespaces rml::internal +#else +} // namespace r1 +} // namespace detail +} // namespace tbb +#endif + +#endif // __TBB_assert_impl_H + diff --git a/src/tbb/src/tbb/cache_aligned_allocator.cpp b/src/tbb/src/tbb/cache_aligned_allocator.cpp deleted file mode 100644 index 6a90f2468..000000000 --- a/src/tbb/src/tbb/cache_aligned_allocator.cpp +++ /dev/null @@ -1,256 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" -#include "tbb/tbb_exception.h" -#include "tbb_misc.h" -#include "dynamic_link.h" -#include <cstdlib> - -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" -#else -#include <dlfcn.h> -#endif /* _WIN32||_WIN64 */ - -using namespace std; - -#if __TBB_WEAK_SYMBOLS_PRESENT - -#pragma weak scalable_malloc -#pragma weak scalable_free -#pragma weak scalable_aligned_malloc -#pragma weak scalable_aligned_free - -extern "C" { - void* scalable_malloc( size_t ); - void scalable_free( void* ); - void* scalable_aligned_malloc( size_t, size_t ); - void scalable_aligned_free( void* ); -} - -#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - -namespace tbb { - -namespace internal { - -//! Dummy routine used for first indirect call via MallocHandler. -static void* DummyMalloc( size_t size ); - -//! Dummy routine used for first indirect call via FreeHandler. -static void DummyFree( void * ptr ); - -//! Handler for memory allocation -static void* (*MallocHandler)( size_t size ) = &DummyMalloc; - -//! Handler for memory deallocation -static void (*FreeHandler)( void* pointer ) = &DummyFree; - -//! Dummy routine used for first indirect call via padded_allocate_handler. -static void* dummy_padded_allocate( size_t bytes, size_t alignment ); - -//! Dummy routine used for first indirect call via padded_free_handler. -static void dummy_padded_free( void * ptr ); - -// ! Allocates memory using standard malloc. 
It is used when scalable_allocator is not available -static void* padded_allocate( size_t bytes, size_t alignment ); - -// ! Allocates memory using standard free. It is used when scalable_allocator is not available -static void padded_free( void* p ); - -//! Handler for padded memory allocation -static void* (*padded_allocate_handler)( size_t bytes, size_t alignment ) = &dummy_padded_allocate; - -//! Handler for padded memory deallocation -static void (*padded_free_handler)( void* p ) = &dummy_padded_free; - -//! Table describing how to link the handlers. -static const dynamic_link_descriptor MallocLinkTable[] = { - DLD(scalable_malloc, MallocHandler), - DLD(scalable_free, FreeHandler), - DLD(scalable_aligned_malloc, padded_allocate_handler), - DLD(scalable_aligned_free, padded_free_handler), -}; - - -#if TBB_USE_DEBUG -#define DEBUG_SUFFIX "_debug" -#else -#define DEBUG_SUFFIX -#endif /* TBB_USE_DEBUG */ - -// MALLOCLIB_NAME is the name of the TBB memory allocator library. -#if _WIN32||_WIN64 -#define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll" -#elif __APPLE__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".dylib" -#elif __FreeBSD__ || __NetBSD__ || __sun || _AIX || __ANDROID__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so" -#elif __linux__ // Note that order of these #elif's is important! -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION) -#else -#error Unknown OS -#endif - -//! Initialize the allocation/free handler pointers. -/** Caller is responsible for ensuring this routine is called exactly once. - The routine attempts to dynamically link with the TBB memory allocator. - If that allocator is not found, it links to malloc and free. */ -void initialize_handler_pointers() { - __TBB_ASSERT( MallocHandler==&DummyMalloc, NULL ); - bool success = dynamic_link( MALLOCLIB_NAME, MallocLinkTable, 4 ); - if( !success ) { - // If unsuccessful, set the handlers to the default routines. - // This must be done now, and not before FillDynamicLinks runs, because if other - // threads call the handlers, we want them to go through the DoOneTimeInitializations logic, - // which forces them to wait. - FreeHandler = &free; - MallocHandler = &malloc; - padded_allocate_handler = &padded_allocate; - padded_free_handler = &padded_free; - } -#if !__TBB_RML_STATIC - PrintExtraVersionInfo( "ALLOCATOR", success?"scalable_malloc":"malloc" ); -#endif -} - -static tbb::atomic<do_once_state> initialization_state; -void initialize_cache_aligned_allocator() { - atomic_do_once( &initialize_handler_pointers, initialization_state ); -} - -//! Executed on very first call through MallocHandler -static void* DummyMalloc( size_t size ) { - initialize_cache_aligned_allocator(); - __TBB_ASSERT( MallocHandler!=&DummyMalloc, NULL ); - return (*MallocHandler)( size ); -} - -//! Executed on very first call through FreeHandler -static void DummyFree( void * ptr ) { - initialize_cache_aligned_allocator(); - __TBB_ASSERT( FreeHandler!=&DummyFree, NULL ); - (*FreeHandler)( ptr ); -} - -//! Executed on very first call through padded_allocate_handler -static void* dummy_padded_allocate( size_t bytes, size_t alignment ) { - initialize_cache_aligned_allocator(); - __TBB_ASSERT( padded_allocate_handler!=&dummy_padded_allocate, NULL ); - return (*padded_allocate_handler)(bytes, alignment); -} - -//! 
Executed on very first call through padded_free_handler -static void dummy_padded_free( void * ptr ) { - initialize_cache_aligned_allocator(); - __TBB_ASSERT( padded_free_handler!=&dummy_padded_free, NULL ); - (*padded_free_handler)( ptr ); -} - -static size_t NFS_LineSize = 128; - -size_t NFS_GetLineSize() { - return NFS_LineSize; -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( disable: 4146 4706 ) -#endif - -void* NFS_Allocate( size_t n, size_t element_size, void* /*hint*/ ) { - size_t m = NFS_LineSize; - __TBB_ASSERT( m<=NFS_MaxLineSize, "illegal value for NFS_LineSize" ); - __TBB_ASSERT( (m & (m-1))==0, "must be power of two" ); - size_t bytes = n*element_size; - - if (bytes<n || bytes+m<bytes) { - // Overflow - throw_exception(eid_bad_alloc); - } - // scalable_aligned_malloc considers zero size request an error, and returns NULL - if (bytes==0) bytes = 1; - - void* result = (*padded_allocate_handler)( bytes, m ); - if (!result) - throw_exception(eid_bad_alloc); - - __TBB_ASSERT( ((size_t)result&(m-1)) == 0, "The address returned isn't aligned to cache line size" ); - return result; -} - -void NFS_Free( void* p ) { - (*padded_free_handler)( p ); -} - -static void* padded_allocate( size_t bytes, size_t alignment ) { - unsigned char* result = NULL; - unsigned char* base = (unsigned char*)malloc(alignment+bytes); - if( base ) { - // Round up to the next line - result = (unsigned char*)((uintptr_t)(base+alignment)&-alignment); - // Record where block actually starts. - ((uintptr_t*)result)[-1] = uintptr_t(base); - } - return result; -} - -static void padded_free( void* p ) { - if( p ) { - __TBB_ASSERT( (uintptr_t)p>=0x4096, "attempt to free block not obtained from cache_aligned_allocator" ); - // Recover where block actually starts - unsigned char* base = ((unsigned char**)p)[-1]; - __TBB_ASSERT( (void*)((uintptr_t)(base+NFS_LineSize)&-NFS_LineSize)==p, "not allocated by NFS_Allocate?" ); - free(base); - } -} - -void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ) { - void* result = (*MallocHandler) (n); - if (!result) { - throw_exception(eid_bad_alloc); - } - return result; -} - -void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ) { - if( p ) { - (*FreeHandler)( p ); - } -} - -bool __TBB_EXPORTED_FUNC is_malloc_used_v3() { - if (MallocHandler == &DummyMalloc) { - void* void_ptr = (*MallocHandler)(1); - (*FreeHandler)(void_ptr); - } - __TBB_ASSERT( MallocHandler!=&DummyMalloc && FreeHandler!=&DummyFree, NULL ); - // Cast to void avoids type mismatch errors on some compilers (e.g. __IBMCPP__) - __TBB_ASSERT( !(((void*)MallocHandler==(void*)&malloc) ^ ((void*)FreeHandler==(void*)&free)), - "Both shim pointers must refer to routines from the same package (either TBB or CRT)" ); - return (void*)MallocHandler == (void*)&malloc; -} - -} // namespace internal - -} // namespace tbb diff --git a/src/tbb/src/tbb/cancellation_disseminator.h b/src/tbb/src/tbb/cancellation_disseminator.h new file mode 100644 index 000000000..724458296 --- /dev/null +++ b/src/tbb/src/tbb/cancellation_disseminator.h @@ -0,0 +1,85 @@ +/* + Copyright (c) 2022-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_cancellation_disseminator_H +#define _TBB_cancellation_disseminator_H + +#include "oneapi/tbb/mutex.h" +#include "oneapi/tbb/task_group.h" + +#include "intrusive_list.h" +#include "thread_data.h" + +namespace tbb { +namespace detail { +namespace r1 { + +class cancellation_disseminator { +public: + //! Finds all contexts affected by the state change and propagates the new state to them. + /* The propagation is relayed to the cancellation_disseminator because tasks created by one + external thread can be passed to and executed by other external threads. This means + that context trees can span several arenas at once and thus state change + propagation cannot be generally localized to one arena only. + */ + bool propagate_task_group_state(std::atomic<uint32_t> d1::task_group_context::*mptr_state, d1::task_group_context& src, uint32_t new_state) { + if (src.my_may_have_children.load(std::memory_order_relaxed) != d1::task_group_context::may_have_children) { + return true; + } + + // The whole propagation algorithm is under the lock in order to ensure correctness + // in case of concurrent state changes at the different levels of the context tree. + threads_list_mutex_type::scoped_lock lock(my_threads_list_mutex); + // TODO: consider to use double-check idiom + if ((src.*mptr_state).load(std::memory_order_relaxed) != new_state) { + // Another thread has concurrently changed the state. Back down. + return false; + } + + // Advance global state propagation epoch + ++the_context_state_propagation_epoch; + // Propagate to all workers and external threads and sync up their local epochs with the global one + // The whole propagation sequence is locked, thus no contention is expected + for (auto& thr_data : my_threads_list) { + thr_data.propagate_task_group_state(mptr_state, src, new_state); + } + + return true; + } + + void register_thread(thread_data& td) { + threads_list_mutex_type::scoped_lock lock(my_threads_list_mutex); + my_threads_list.push_front(td); + } + + void unregister_thread(thread_data& td) { + threads_list_mutex_type::scoped_lock lock(my_threads_list_mutex); + my_threads_list.remove(td); + } + +private: + using thread_data_list_type = intrusive_list<thread_data>; + using threads_list_mutex_type = d1::mutex; + + threads_list_mutex_type my_threads_list_mutex; + thread_data_list_type my_threads_list; +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // _TBB_cancellation_disseminator_H diff --git a/src/tbb/src/tbb/cilk-tbb-interop.h b/src/tbb/src/tbb/cilk-tbb-interop.h deleted file mode 100644 index bc1390240..000000000 --- a/src/tbb/src/tbb/cilk-tbb-interop.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* The API to enable interoperability between Intel(R) Cilk(TM) Plus and - Intel(R) Threading Building Blocks. */ - -#ifndef CILK_TBB_INTEROP_H -#define CILK_TBB_INTEROP_H - -#ifndef _WIN32 -#ifdef IN_CILK_RUNTIME -#define CILK_EXPORT __attribute__((visibility("protected"))) -#else -#define CILK_EXPORT /* nothing */ -#endif -#else -#ifdef IN_CILK_RUNTIME -#define CILK_EXPORT __declspec(dllexport) -#else -#define CILK_EXPORT __declspec(dllimport) -#endif // IN_CILK_RUNTIME -#endif // _WIN32 - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* A return code. 0 indicates success */ -typedef int __cilk_tbb_retcode; - -enum __cilk_tbb_stack_op { - CILK_TBB_STACK_ORPHAN, // disconnecting stack from a thread - CILK_TBB_STACK_ADOPT, // reconnecting orphaned stack to a trhead - CILK_TBB_STACK_RELEASE // releasing stack -}; - -typedef __cilk_tbb_retcode (*__cilk_tbb_pfn_stack_op)(enum __cilk_tbb_stack_op, void* data); - -typedef __cilk_tbb_retcode (*__cilk_tbb_pfn_unwatch_stacks)(void *data); - -/* Each thunk structure has two pointers: "routine" and "data". - The caller of the thunk invokes *routine, passing "data" as the void* parameter. */ - -/* Thunk invoked by Intel Cilk Plus runtime (cilkrts) when it changes the relationship - between a stack and a thread. It does not matter what stack the thunk runs on. - The thread (not fiber) on which the thunk runs is important. - - CILK_TBB_STACK_ORPHAN - The thunk must be invoked on the thread disconnecting itself from the stack. - Must "happen before" the stack is adopted elsewhere. - CILK_TBB_STACK_ADOPT - The thunk must be invoked on the thread adopting the stack. - CILK_TBB_STACK_RELEASE - The thunk must be invoked on the thread doing the releasing, - Must "happen before" the stack is used elsewhere. - - When a non-empty stack is transfered between threads, the first thread must orphan it - and the second thread must adopt it. - - An empty stack can be transfered similarly, or simply released by the first thread. - - Here is a summary of the actions as transitions on a state machine. 
- - watch ORPHAN - -->--> -->-- - / \ / \ - (freed empty stack) (TBB sees stack running on thread) (stack in limbo) - | \ / \ / | - | --<-- --<-- | - ^ RELEASE or ADOPT V - \ unwatch / - \ / - --------------------------<--------------------------- - RELEASE -*/ -struct __cilk_tbb_stack_op_thunk { - __cilk_tbb_pfn_stack_op routine; - void* data; /* Set by TBB */ -}; - -/* Thunk invoked by TBB when it is no longer interested in watching the stack bound to the current thread. */ -struct __cilk_tbb_unwatch_thunk { - __cilk_tbb_pfn_unwatch_stacks routine; - void* data; -}; - -/* Defined by cilkrts, called by TBB. - Requests that cilkrts invoke __cilk_tbb_stack_op_thunk when it orphans a stack. - cilkrts sets *u to a thunk that TBB should call when it is no longer interested in watching the stack. */ -CILK_EXPORT -__cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk* u, - struct __cilk_tbb_stack_op_thunk o); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif // CILK_TBB_INTEROP_H diff --git a/src/tbb/src/tbb/co_context.h b/src/tbb/src/tbb/co_context.h new file mode 100644 index 000000000..ed5143275 --- /dev/null +++ b/src/tbb/src/tbb/co_context.h @@ -0,0 +1,378 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_co_context_H +#define _TBB_co_context_H + +#include "oneapi/tbb/detail/_config.h" + +#if __TBB_RESUMABLE_TASKS + +#include <cstddef> +#include <cstdint> + +#if __TBB_RESUMABLE_TASKS_USE_THREADS + +#if _WIN32 || _WIN64 +#include <windows.h> +#else +#include <pthread.h> +#endif + +#include <condition_variable> +#include "governor.h" + +#elif _WIN32 || _WIN64 +#include <windows.h> +#else +// ucontext.h API is deprecated since macOS 10.6 +#if __APPLE__ + #if __INTEL_COMPILER + // #pragma warning(push) + // #pragma warning(disable:1478) + #elif __clang__ + // #pragma clang diagnostic push + // #pragma clang diagnostic ignored "-Wdeprecated-declarations" + #endif +#endif // __APPLE__ + +#include <ucontext.h> +#include <sys/mman.h> // mprotect + +#include "governor.h" // default_page_size() + +#ifndef MAP_STACK +// macOS* does not define MAP_STACK +#define MAP_STACK 0 +#endif +#ifndef MAP_ANONYMOUS +// macOS* defines MAP_ANON, which is deprecated in Linux*. +#define MAP_ANONYMOUS MAP_ANON +#endif +#endif // _WIN32 || _WIN64 + +namespace tbb { +namespace detail { +namespace r1 { + +#if __TBB_RESUMABLE_TASKS_USE_THREADS + struct coroutine_type { +#if _WIN32 || _WIN64 + using handle_type = HANDLE; +#else + using handle_type = pthread_t; +#endif + + handle_type my_thread; + std::condition_variable my_condvar; + std::mutex my_mutex; + thread_data* my_thread_data{ nullptr }; + bool my_is_active{ true }; + }; +#elif _WIN32 || _WIN64 + typedef LPVOID coroutine_type; +#else + struct coroutine_type { + coroutine_type() : my_context(), my_stack(), my_stack_size() {} + ucontext_t my_context; + void* my_stack; + std::size_t my_stack_size; + }; +#endif + + // Forward declaration of the coroutine API. 
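The four functions forward-declared below are implemented per platform further down in this header. As a rough, self-contained illustration of the same create/current/swap sequence, here is a plain ucontext demo along the lines of the POSIX branch (illustration only, not TBB code; note that ucontext is deprecated on macOS, as the pragma block above records):

```cpp
#include <ucontext.h>
#include <cstdio>

static ucontext_t main_ctx, co_ctx;
static char co_stack[64 * 1024];

static void co_body() {
    std::puts("inside coroutine");
    swapcontext(&co_ctx, &main_ctx);   // suspend back to the creator
}

int main() {
    getcontext(&co_ctx);               // seed the new context ("current_coroutine")
    co_ctx.uc_stack.ss_sp = co_stack;  // give it its own stack
    co_ctx.uc_stack.ss_size = sizeof(co_stack);
    co_ctx.uc_link = nullptr;
    makecontext(&co_ctx, co_body, 0);  // bind the entry point ("create_coroutine")
    swapcontext(&main_ctx, &co_ctx);   // "swap_coroutine": run co_body
    std::puts("back in main");
    return 0;
}
```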
+ void create_coroutine(coroutine_type& c, std::size_t stack_size, void* arg); + void current_coroutine(coroutine_type& c); + void swap_coroutine(coroutine_type& prev_coroutine, coroutine_type& new_coroutine); + void destroy_coroutine(coroutine_type& c); + +class co_context { + enum co_state { + co_invalid, + co_suspended, + co_executing, + co_destroyed + }; + coroutine_type my_coroutine; + co_state my_state; + +public: + co_context(std::size_t stack_size, void* arg) + : my_state(stack_size ? co_suspended : co_executing) + { + if (stack_size) { + __TBB_ASSERT(arg != nullptr, nullptr); + create_coroutine(my_coroutine, stack_size, arg); + } else { + current_coroutine(my_coroutine); + } + } + + ~co_context() { + __TBB_ASSERT(1 << my_state & (1 << co_suspended | 1 << co_executing), nullptr); + if (my_state == co_suspended) { +#if __TBB_RESUMABLE_TASKS_USE_THREADS + my_state = co_executing; +#endif + destroy_coroutine(my_coroutine); + } + my_state = co_destroyed; + } + + void resume(co_context& target) { + // Do not create non-trivial objects on the stack of this function. They might never be destroyed. + __TBB_ASSERT(my_state == co_executing, nullptr); + __TBB_ASSERT(target.my_state == co_suspended, nullptr); + + my_state = co_suspended; + target.my_state = co_executing; + + // 'target' can reference an invalid object after swap_coroutine. Do not access it. + swap_coroutine(my_coroutine, target.my_coroutine); + + __TBB_ASSERT(my_state == co_executing, nullptr); + } +}; + +#if _WIN32 || _WIN64 +/* [[noreturn]] */ void __stdcall co_local_wait_for_all(void* arg) noexcept; +#else +/* [[noreturn]] */ void co_local_wait_for_all(unsigned hi, unsigned lo) noexcept; +#endif + +#if __TBB_RESUMABLE_TASKS_USE_THREADS +void handle_perror(int error_code, const char* what); + +inline void check(int error_code, const char* routine) { + if (error_code) { + handle_perror(error_code, routine); + } +} + +using thread_data_t = std::pair<coroutine_type&, void*&>; + +#if _WIN32 || _WIN64 +inline unsigned WINAPI coroutine_thread_func(void* d) +#else +inline void* coroutine_thread_func(void* d) +#endif +{ + thread_data_t& data = *static_cast<thread_data_t*>(d); + coroutine_type& c = data.first; + void* arg = data.second; + { + std::unique_lock<std::mutex> lock(c.my_mutex); + __TBB_ASSERT(c.my_thread_data == nullptr, nullptr); + c.my_is_active = false; + + // We read the data notify the waiting thread + data.second = nullptr; + c.my_condvar.notify_one(); + + c.my_condvar.wait(lock, [&c] { return c.my_is_active == true; }); + } + __TBB_ASSERT(c.my_thread_data != nullptr, nullptr); + governor::set_thread_data(*c.my_thread_data); + +#if _WIN32 || _WIN64 + co_local_wait_for_all(arg); + + return 0; +#else + std::uintptr_t addr = std::uintptr_t(arg); + unsigned lo = unsigned(addr); + unsigned hi = unsigned(std::uint64_t(addr) >> 32); + __TBB_ASSERT(sizeof(addr) == 8 || hi == 0, nullptr); + + co_local_wait_for_all(hi, lo); + + return nullptr; +#endif +}; + +inline void create_coroutine(coroutine_type& c, std::size_t stack_size, void* arg) { + thread_data_t data{ c, arg }; + +#if _WIN32 || _WIN64 + c.my_thread = (HANDLE)_beginthreadex(nullptr, unsigned(stack_size), coroutine_thread_func, &data, STACK_SIZE_PARAM_IS_A_RESERVATION, nullptr); + if (!c.my_thread) { + handle_perror(0, "create_coroutine: _beginthreadex failed\n"); + } +#else + pthread_attr_t s; + check(pthread_attr_init(&s), "pthread_attr_init has failed"); + if (stack_size > 0) { + check(pthread_attr_setstacksize(&s, stack_size), "pthread_attr_setstack_size has 
failed"); + } + check(pthread_create(&c.my_thread, &s, coroutine_thread_func, &data), "pthread_create has failed"); + check(pthread_attr_destroy(&s), "pthread_attr_destroy has failed"); +#endif + + // Wait for the just created thread to read the data + std::unique_lock<std::mutex> lock(c.my_mutex); + c.my_condvar.wait(lock, [&arg] { return arg == nullptr; }); +} + +inline void current_coroutine(coroutine_type& c) { +#if _WIN32 || _WIN64 + c.my_thread = GetCurrentThread(); +#else + c.my_thread = pthread_self(); +#endif +} + +inline void swap_coroutine(coroutine_type& prev_coroutine, coroutine_type& new_coroutine) { + thread_data* td = governor::get_thread_data(); + __TBB_ASSERT(prev_coroutine.my_is_active == true, "The current thread should be active"); + + // Detach our state before notification other thread + // (because we might be notified just after other thread notification) + prev_coroutine.my_thread_data = nullptr; + prev_coroutine.my_is_active = false; + governor::clear_thread_data(); + + { + std::unique_lock<std::mutex> lock(new_coroutine.my_mutex); + __TBB_ASSERT(new_coroutine.my_is_active == false, "The sleeping thread should not be active"); + __TBB_ASSERT(new_coroutine.my_thread_data == nullptr, "The sleeping thread should not be active"); + + new_coroutine.my_thread_data = td; + new_coroutine.my_is_active = true; + new_coroutine.my_condvar.notify_one(); + } + + std::unique_lock<std::mutex> lock(prev_coroutine.my_mutex); + prev_coroutine.my_condvar.wait(lock, [&prev_coroutine] { return prev_coroutine.my_is_active == true; }); + __TBB_ASSERT(governor::get_thread_data() != nullptr, nullptr); + governor::set_thread_data(*prev_coroutine.my_thread_data); +} + +inline void destroy_coroutine(coroutine_type& c) { + { + std::unique_lock<std::mutex> lock(c.my_mutex); + __TBB_ASSERT(c.my_thread_data == nullptr, "The sleeping thread should not be active"); + __TBB_ASSERT(c.my_is_active == false, "The sleeping thread should not be active"); + c.my_is_active = true; + c.my_condvar.notify_one(); + } +#if _WIN32 || _WIN64 + WaitForSingleObject(c.my_thread, INFINITE); + CloseHandle(c.my_thread); +#else + check(pthread_join(c.my_thread, nullptr), "pthread_join has failed"); +#endif +} +#elif _WIN32 || _WIN64 +inline void create_coroutine(coroutine_type& c, std::size_t stack_size, void* arg) { + __TBB_ASSERT(arg, nullptr); + c = CreateFiber(stack_size, co_local_wait_for_all, arg); + __TBB_ASSERT(c, nullptr); +} + +inline void current_coroutine(coroutine_type& c) { + c = IsThreadAFiber() ? 
GetCurrentFiber() : + ConvertThreadToFiberEx(nullptr, FIBER_FLAG_FLOAT_SWITCH); + __TBB_ASSERT(c, nullptr); +} + +inline void swap_coroutine(coroutine_type& prev_coroutine, coroutine_type& new_coroutine) { + if (!IsThreadAFiber()) { + ConvertThreadToFiberEx(nullptr, FIBER_FLAG_FLOAT_SWITCH); + } + __TBB_ASSERT(new_coroutine, nullptr); + prev_coroutine = GetCurrentFiber(); + __TBB_ASSERT(prev_coroutine, nullptr); + SwitchToFiber(new_coroutine); +} + +inline void destroy_coroutine(coroutine_type& c) { + __TBB_ASSERT(c, nullptr); + DeleteFiber(c); +} +#else // !(_WIN32 || _WIN64) + +inline void create_coroutine(coroutine_type& c, std::size_t stack_size, void* arg) { + const std::size_t REG_PAGE_SIZE = governor::default_page_size(); + const std::size_t page_aligned_stack_size = (stack_size + (REG_PAGE_SIZE - 1)) & ~(REG_PAGE_SIZE - 1); + const std::size_t protected_stack_size = page_aligned_stack_size + 2 * REG_PAGE_SIZE; + + // Allocate the stack with protection property + std::uintptr_t stack_ptr = (std::uintptr_t)mmap(nullptr, protected_stack_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + __TBB_ASSERT((void*)stack_ptr != MAP_FAILED, nullptr); + + // Allow read write on our stack (guarded pages are still protected) + int err = mprotect((void*)(stack_ptr + REG_PAGE_SIZE), page_aligned_stack_size, PROT_READ | PROT_WRITE); + __TBB_ASSERT_EX(!err, nullptr); + + // Remember the stack state + c.my_stack = (void*)(stack_ptr + REG_PAGE_SIZE); + c.my_stack_size = page_aligned_stack_size; + + err = getcontext(&c.my_context); + __TBB_ASSERT_EX(!err, nullptr); + + c.my_context.uc_link = nullptr; + // cast to char* to disable FreeBSD clang-3.4.1 'incompatible type' error + c.my_context.uc_stack.ss_sp = (char*)c.my_stack; + c.my_context.uc_stack.ss_size = c.my_stack_size; + c.my_context.uc_stack.ss_flags = 0; + + typedef void(*coroutine_func_t)(); + + std::uintptr_t addr = std::uintptr_t(arg); + unsigned lo = unsigned(addr); + unsigned hi = unsigned(std::uint64_t(addr) >> 32); + __TBB_ASSERT(sizeof(addr) == 8 || hi == 0, nullptr); + + makecontext(&c.my_context, (coroutine_func_t)co_local_wait_for_all, 2, hi, lo); +} + +inline void current_coroutine(coroutine_type& c) { + int err = getcontext(&c.my_context); + __TBB_ASSERT_EX(!err, nullptr); +} + +inline void swap_coroutine(coroutine_type& prev_coroutine, coroutine_type& new_coroutine) { + int err = swapcontext(&prev_coroutine.my_context, &new_coroutine.my_context); + __TBB_ASSERT_EX(!err, nullptr); +} + +inline void destroy_coroutine(coroutine_type& c) { + const std::size_t REG_PAGE_SIZE = governor::default_page_size(); + // Free stack memory with guarded pages + munmap((void*)((std::uintptr_t)c.my_stack - REG_PAGE_SIZE), c.my_stack_size + 2 * REG_PAGE_SIZE); + // Clear the stack state afterwards + c.my_stack = nullptr; + c.my_stack_size = 0; +} + +#if __APPLE__ + #if __INTEL_COMPILER + // #pragma warning(pop) // 1478 warning + #elif __clang__ + // #pragma clang diagnostic pop // "-Wdeprecated-declarations" + #endif +#endif + +#endif // _WIN32 || _WIN64 + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif /* __TBB_RESUMABLE_TASKS */ + +#endif /* _TBB_co_context_H */ diff --git a/src/tbb/src/tbb/concurrent_bounded_queue.cpp b/src/tbb/src/tbb/concurrent_bounded_queue.cpp new file mode 100644 index 000000000..14617175d --- /dev/null +++ b/src/tbb/src/tbb/concurrent_bounded_queue.cpp @@ -0,0 +1,84 @@ +/* + Copyright (c) 2020-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + 
you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/concurrent_queue.h" +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "concurrent_monitor.h" + +namespace tbb { +namespace detail { +namespace r1 { + +static constexpr std::size_t monitors_number = 2; + +std::uint8_t* __TBB_EXPORTED_FUNC allocate_bounded_queue_rep( std::size_t queue_rep_size ) +{ + std::size_t monitors_mem_size = sizeof(concurrent_monitor) * monitors_number; + std::uint8_t* mem = static_cast<std::uint8_t*>(cache_aligned_allocate(queue_rep_size + monitors_mem_size)); + + concurrent_monitor* monitors = reinterpret_cast<concurrent_monitor*>(mem + queue_rep_size); + for (std::size_t i = 0; i < monitors_number; ++i) { + new (monitors + i) concurrent_monitor(); + } + + return mem; +} + +void __TBB_EXPORTED_FUNC deallocate_bounded_queue_rep( std::uint8_t* mem, std::size_t queue_rep_size ) +{ + concurrent_monitor* monitors = reinterpret_cast<concurrent_monitor*>(mem + queue_rep_size); + for (std::size_t i = 0; i < monitors_number; ++i) { + monitors[i].~concurrent_monitor(); + } + + cache_aligned_deallocate(mem); +} + +void __TBB_EXPORTED_FUNC wait_bounded_queue_monitor( concurrent_monitor* monitors, std::size_t monitor_tag, + std::ptrdiff_t target, d1::delegate_base& predicate ) +{ + __TBB_ASSERT(monitor_tag < monitors_number, nullptr); + concurrent_monitor& monitor = monitors[monitor_tag]; + + monitor.wait<concurrent_monitor::thread_context>([&] { return !predicate(); }, std::uintptr_t(target)); +} + +void __TBB_EXPORTED_FUNC abort_bounded_queue_monitors( concurrent_monitor* monitors ) { + concurrent_monitor& items_avail = monitors[d2::cbq_items_avail_tag]; + concurrent_monitor& slots_avail = monitors[d2::cbq_slots_avail_tag]; + + items_avail.abort_all(); + slots_avail.abort_all(); +} + +struct predicate_leq { + std::size_t my_ticket; + predicate_leq( std::size_t ticket ) : my_ticket(ticket) {} + bool operator() ( std::uintptr_t ticket ) const { return static_cast<std::size_t>(ticket) <= my_ticket; } +}; + +void __TBB_EXPORTED_FUNC notify_bounded_queue_monitor( concurrent_monitor* monitors, + std::size_t monitor_tag, std::size_t ticket) +{ + __TBB_ASSERT(monitor_tag < monitors_number, nullptr); + concurrent_monitor& monitor = monitors[monitor_tag]; + monitor.notify(predicate_leq(ticket)); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/concurrent_hash_map.cpp b/src/tbb/src/tbb/concurrent_hash_map.cpp deleted file mode 100644 index 8ea35a188..000000000 --- a/src/tbb/src/tbb/concurrent_hash_map.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
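allocate_bounded_queue_rep() above places the two monitors in the tail of a single allocation and constructs them with placement new; deallocation destroys them explicitly before freeing the block. A standalone sketch of that co-allocation pattern, with hypothetical Rep/Monitor stand-ins and plain malloc (the real code uses cache-aligned allocation, which also takes care of the tail's alignment):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

struct Rep     { /* queue state would live here */ };
struct Monitor { Monitor() {} ~Monitor() {} };

static constexpr std::size_t kMonitors = 2;

std::uint8_t* allocate_rep_with_monitors(std::size_t rep_size) {
    std::uint8_t* mem = static_cast<std::uint8_t*>(
        std::malloc(rep_size + sizeof(Monitor) * kMonitors));
    auto* monitors = reinterpret_cast<Monitor*>(mem + rep_size);
    for (std::size_t i = 0; i < kMonitors; ++i)
        new (monitors + i) Monitor();      // construct in the tail of the block
    return mem;
}

void deallocate_rep_with_monitors(std::uint8_t* mem, std::size_t rep_size) {
    auto* monitors = reinterpret_cast<Monitor*>(mem + rep_size);
    for (std::size_t i = 0; i < kMonitors; ++i)
        monitors[i].~Monitor();            // explicit destruction, no delete
    std::free(mem);
}
```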
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/concurrent_hash_map.h" - -namespace tbb { - -namespace internal { -#if !TBB_NO_LEGACY -struct hash_map_segment_base { - typedef spin_rw_mutex segment_mutex_t; - //! Type of a hash code. - typedef size_t hashcode_t; - //! Log2 of n_segment - static const size_t n_segment_bits = 6; - //! Maximum size of array of chains - static const size_t max_physical_size = size_t(1)<<(8*sizeof(hashcode_t)-n_segment_bits); - //! Mutex that protects this segment - segment_mutex_t my_mutex; - // Number of nodes - atomic<size_t> my_logical_size; - // Size of chains - /** Always zero or a power of two */ - size_t my_physical_size; - //! True if my_logical_size>=my_physical_size. - /** Used to support Intel(R) Thread Checker. */ - bool __TBB_EXPORTED_METHOD internal_grow_predicate() const; -}; - -bool hash_map_segment_base::internal_grow_predicate() const { - // Intel(R) Thread Checker considers the following reads to be races, so we hide them in the - // library so that Intel(R) Thread Checker will ignore them. The reads are used in a double-check - // context, so the program is nonetheless correct despite the race. - return my_logical_size >= my_physical_size && my_physical_size < max_physical_size; -} -#endif//!TBB_NO_LEGACY - -} // namespace internal - -} // namespace tbb - diff --git a/src/tbb/src/tbb/concurrent_monitor.cpp b/src/tbb/src/tbb/concurrent_monitor.cpp deleted file mode 100644 index 0f670c8e1..000000000 --- a/src/tbb/src/tbb/concurrent_monitor.cpp +++ /dev/null @@ -1,137 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
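The deleted grow predicate above relies on the double-check idiom its comment describes: a racy, unlocked read serves only as a hint, and the decision is re-validated under the lock before acting. A minimal sketch of that idiom with std::atomic and std::mutex (grow() and the sizing rule are hypothetical stand-ins):

```cpp
#include <atomic>
#include <cstddef>
#include <mutex>

class Segment {
public:
    void insert_one() {
        // Unlocked hint: cheap and possibly stale, but never wrong after re-check.
        if (logical_size_.load(std::memory_order_relaxed) >=
            physical_size_.load(std::memory_order_relaxed)) {
            std::lock_guard<std::mutex> lock(mutex_);
            if (logical_size_.load(std::memory_order_relaxed) >=
                physical_size_.load(std::memory_order_relaxed))
                grow();                    // decision re-validated under the lock
        }
        logical_size_.fetch_add(1, std::memory_order_relaxed);
    }
private:
    void grow() {
        // Placeholder sizing rule; the real table rebuilds its chain array.
        std::size_t p = physical_size_.load(std::memory_order_relaxed);
        physical_size_.store(p * 2 + 2, std::memory_order_relaxed);
    }
    std::mutex mutex_;
    std::atomic<std::size_t> logical_size_{0};
    std::atomic<std::size_t> physical_size_{0};
};
```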
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "concurrent_monitor.h" - -namespace tbb { -namespace internal { - -void concurrent_monitor::thread_context::init() { - new (sema.begin()) binary_semaphore; - ready = true; -} - -concurrent_monitor::~concurrent_monitor() { - abort_all(); - __TBB_ASSERT( waitset_ec.empty(), "waitset not empty?" ); -} - -void concurrent_monitor::prepare_wait( thread_context& thr, uintptr_t ctx ) { - if( !thr.ready ) - thr.init(); - // this is good place to pump previous spurious wakeup - else if( thr.spurious ) { - thr.spurious = false; - thr.semaphore().P(); - } - thr.context = ctx; - thr.in_waitset = true; - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - __TBB_store_relaxed( thr.epoch, __TBB_load_relaxed(epoch) ); - waitset_ec.add( (waitset_t::node_t*)&thr ); - } - atomic_fence(); -} - -void concurrent_monitor::cancel_wait( thread_context& thr ) { - // spurious wakeup will be pumped in the following prepare_wait() - thr.spurious = true; - // try to remove node from waitset - bool th_in_waitset = thr.in_waitset; - if( th_in_waitset ) { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - if (thr.in_waitset) { - // successfully removed from waitset, - // so there will be no spurious wakeup - thr.in_waitset = false; - thr.spurious = false; - waitset_ec.remove( (waitset_t::node_t&)thr ); - } - } -} - -void concurrent_monitor::notify_one_relaxed() { - if( waitset_ec.empty() ) - return; - waitset_node_t* n; - const waitset_node_t* end = waitset_ec.end(); - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 ); - n = waitset_ec.front(); - if( n!=end ) { - waitset_ec.remove( *n ); - to_thread_context(n)->in_waitset = false; - } - } - if( n!=end ) - to_thread_context(n)->semaphore().V(); -} - -void concurrent_monitor::notify_all_relaxed() { - if( waitset_ec.empty() ) - return; - dllist_t temp; - const waitset_node_t* end; - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 ); - waitset_ec.flush_to( temp ); - end = temp.end(); - for( waitset_node_t* n=temp.front(); n!=end; n=n->next ) - to_thread_context(n)->in_waitset = false; - } - waitset_node_t* nxt; - for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) { - nxt = n->next; - to_thread_context(n)->semaphore().V(); - } -#if TBB_USE_ASSERT - temp.clear(); -#endif -} - -void concurrent_monitor::abort_all_relaxed() { - if( waitset_ec.empty() ) - return; - dllist_t temp; - const waitset_node_t* end; - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 ); - waitset_ec.flush_to( temp ); - end = temp.end(); - for( waitset_node_t* n=temp.front(); n!=end; n=n->next ) - to_thread_context(n)->in_waitset = false; - } - waitset_node_t* nxt; - for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) { - nxt = n->next; - to_thread_context(n)->aborted = true; - to_thread_context(n)->semaphore().V(); - } -#if TBB_USE_ASSERT - temp.clear(); -#endif -} - -} // namespace internal -} // namespace tbb diff --git a/src/tbb/src/tbb/concurrent_monitor.h 
b/src/tbb/src/tbb/concurrent_monitor.h index a7d6ca881..b9f8a5897 100644 --- a/src/tbb/src/tbb/concurrent_monitor.h +++ b/src/tbb/src/tbb/concurrent_monitor.h @@ -1,62 +1,61 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_concurrent_monitor_H #define __TBB_concurrent_monitor_H -#include "tbb/tbb_stddef.h" -#include "tbb/atomic.h" -#include "tbb/spin_mutex.h" -#include "tbb/tbb_exception.h" -#include "tbb/aligned_space.h" - +#include "oneapi/tbb/spin_mutex.h" +#include "oneapi/tbb/detail/_exception.h" +#include "oneapi/tbb/detail/_aligned_space.h" +#include "concurrent_monitor_mutex.h" #include "semaphore.h" +#include <atomic> + namespace tbb { -namespace internal { +namespace detail { +namespace r1 { //! 
Circular doubly-linked list with sentinel /** head.next points to the front and head.prev points to the back */ -class circular_doubly_linked_list_with_sentinel : no_copy { +class circular_doubly_linked_list_with_sentinel { public: - struct node_t { - node_t* next; - node_t* prev; - explicit node_t() : next((node_t*)(uintptr_t)0xcdcdcdcd), prev((node_t*)(uintptr_t)0xcdcdcdcd) {} + struct base_node { + base_node* next; + base_node* prev; + + constexpr base_node(base_node* n, base_node* p) : next(n), prev(p) {} + explicit base_node() : next((base_node*)(uintptr_t)0xcdcdcdcd), prev((base_node*)(uintptr_t)0xcdcdcdcd) {} }; // ctor - circular_doubly_linked_list_with_sentinel() {clear();} - // dtor - ~circular_doubly_linked_list_with_sentinel() {__TBB_ASSERT( head.next==&head && head.prev==&head, "the list is not empty" );} + constexpr circular_doubly_linked_list_with_sentinel() : count(0), head(&head, &head) {} + + circular_doubly_linked_list_with_sentinel(const circular_doubly_linked_list_with_sentinel&) = delete; + circular_doubly_linked_list_with_sentinel& operator=(const circular_doubly_linked_list_with_sentinel&) = delete; - inline size_t size() const {return count;} - inline bool empty() const {return size()==0;} - inline node_t* front() const {return head.next;} - inline node_t* last() const {return head.prev;} - inline node_t* begin() const {return front();} - inline const node_t* end() const {return &head;} + inline std::size_t size() const { return count.load(std::memory_order_relaxed); } + inline bool empty() const { return size() == 0; } + inline base_node* front() const { return head.next; } + inline base_node* last() const { return head.prev; } + inline const base_node* end() const { return &head; } //! add to the back of the list - inline void add( node_t* n ) { - __TBB_store_relaxed(count, __TBB_load_relaxed(count) + 1); + inline void add( base_node* n ) { + count.store(count.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); n->prev = head.prev; n->next = &head; head.prev->next = n; @@ -64,16 +63,18 @@ class circular_doubly_linked_list_with_sentinel : no_copy { } //! remove node 'n' - inline void remove( node_t& n ) { - __TBB_store_relaxed(count, __TBB_load_relaxed(count) - 1); + inline void remove( base_node& n ) { + __TBB_ASSERT(count.load(std::memory_order_relaxed) > 0, "attempt to remove an item from an empty list"); + count.store(count.load( std::memory_order_relaxed ) - 1, std::memory_order_relaxed); n.prev->next = n.next; n.next->prev = n.prev; } //! 
move all elements to 'lst' and initialize the 'this' list inline void flush_to( circular_doubly_linked_list_with_sentinel& lst ) { - if( const size_t l_count = __TBB_load_relaxed(count) ) { - __TBB_store_relaxed(lst.count, l_count); + const std::size_t l_count = size(); + if (l_count > 0) { + lst.count.store(l_count, std::memory_order_relaxed); lst.head.next = head.next; lst.head.prev = head.prev; head.next->prev = &lst.head; @@ -82,160 +83,416 @@ class circular_doubly_linked_list_with_sentinel : no_copy { } } - void clear() {head.next = head.prev = &head; __TBB_store_relaxed(count, 0);} + void clear() { + head.next = &head; + head.prev = &head; + count.store(0, std::memory_order_relaxed); + } private: - __TBB_atomic size_t count; - node_t head; + std::atomic<std::size_t> count; + base_node head; }; -typedef circular_doubly_linked_list_with_sentinel waitset_t; -typedef circular_doubly_linked_list_with_sentinel dllist_t; -typedef circular_doubly_linked_list_with_sentinel::node_t waitset_node_t; +using base_list = circular_doubly_linked_list_with_sentinel; +using base_node = circular_doubly_linked_list_with_sentinel::base_node; -//! concurrent_monitor -/** fine-grained concurrent_monitor implementation */ -class concurrent_monitor : no_copy { +template <typename Context> +class concurrent_monitor_base; + +template <typename Context> +class wait_node : public base_node { public: - /** per-thread descriptor for concurrent_monitor */ - class thread_context : waitset_node_t, no_copy { - friend class concurrent_monitor; - public: - thread_context() : spurious(false), aborted(false), ready(false), context(0) { - epoch = 0; - in_waitset = false; - } - ~thread_context() { - if (ready) { - if( spurious ) semaphore().P(); - semaphore().~binary_semaphore(); - } + +#if __TBB_GLIBCXX_VERSION >= 40800 && __TBB_GLIBCXX_VERSION < 40900 + wait_node(Context ctx) : my_context(ctx), my_is_in_list(false) {} +#else + wait_node(Context ctx) : my_context(ctx) {} +#endif + + virtual ~wait_node() = default; + + virtual void init() { + __TBB_ASSERT(!my_initialized, nullptr); + my_initialized = true; + } + + virtual void wait() = 0; + + virtual void reset() { + __TBB_ASSERT(my_skipped_wakeup, nullptr); + my_skipped_wakeup = false; + } + + virtual void notify() = 0; + +protected: + friend class concurrent_monitor_base<Context>; + friend class thread_data; + + Context my_context{}; +#if __TBB_GLIBCXX_VERSION >= 40800 && __TBB_GLIBCXX_VERSION < 40900 + std::atomic<bool> my_is_in_list; +#else + std::atomic<bool> my_is_in_list{false}; +#endif + + bool my_initialized{false}; + bool my_skipped_wakeup{false}; + bool my_aborted{false}; + unsigned my_epoch{0}; +}; + +template <typename Context> +class sleep_node : public wait_node<Context> { + using base_type = wait_node<Context>; +public: + using base_type::base_type; + + ~sleep_node() override { + if (this->my_initialized) { + if (this->my_skipped_wakeup) semaphore().P(); + semaphore().~binary_semaphore(); } - binary_semaphore& semaphore() { return *sema.begin(); } - private: - //! The method for lazy initialization of the thread_context's semaphore. - // Inlining of the method is undesirable, due to extra instructions for - // exception support added at caller side. - __TBB_NOINLINE( void init() ); - tbb::aligned_space<binary_semaphore> sema; - __TBB_atomic unsigned epoch; - tbb::atomic<bool> in_waitset; - bool spurious; - bool aborted; - bool ready; - uintptr_t context; - }; + } - //! 
ctor - concurrent_monitor() {__TBB_store_relaxed(epoch, 0);} + binary_semaphore& semaphore() { return *sema.begin(); } + void init() override { + if (!this->my_initialized) { + new (sema.begin()) binary_semaphore; + base_type::init(); + } + } + + void wait() override { + __TBB_ASSERT(this->my_initialized, + "Use of commit_wait() without prior prepare_wait()"); + semaphore().P(); + __TBB_ASSERT(!this->my_is_in_list.load(std::memory_order_relaxed), "Still in the queue?"); + if (this->my_aborted) + throw_exception(exception_id::user_abort); + } + + void reset() override { + base_type::reset(); + semaphore().P(); + } + + void notify() override { + semaphore().V(); + } + +private: + tbb::detail::aligned_space<binary_semaphore> sema; +}; + +//! concurrent_monitor +/** fine-grained concurrent_monitor implementation */ +template <typename Context> +class concurrent_monitor_base { +public: + //! ctor + constexpr concurrent_monitor_base() {} //! dtor - ~concurrent_monitor() ; + ~concurrent_monitor_base() = default; + + concurrent_monitor_base(const concurrent_monitor_base&) = delete; + concurrent_monitor_base& operator=(const concurrent_monitor_base&) = delete; //! prepare wait by inserting 'thr' into the wait queue - void prepare_wait( thread_context& thr, uintptr_t ctx = 0 ); + void prepare_wait( wait_node<Context>& node) { + // TODO: consider making even more lazy instantiation of the semaphore, that is only when it is actually needed, e.g. move it in node::wait() + if (!node.my_initialized) { + node.init(); + } + // this is good place to pump previous skipped wakeup + else if (node.my_skipped_wakeup) { + node.reset(); + } + + node.my_is_in_list.store(true, std::memory_order_relaxed); + + { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + node.my_epoch = my_epoch.load(std::memory_order_relaxed); + my_waitset.add(&node); + } + + // Prepare wait guarantees Write Read memory barrier. + // In C++ only full fence covers this type of barrier. + atomic_fence_seq_cst(); + } //! Commit wait if event count has not changed; otherwise, cancel wait. /** Returns true if committed, false if canceled. */ - inline bool commit_wait( thread_context& thr ) { - const bool do_it = thr.epoch == __TBB_load_relaxed(epoch); + inline bool commit_wait( wait_node<Context>& node ) { + const bool do_it = node.my_epoch == my_epoch.load(std::memory_order_relaxed); // this check is just an optimization - if( do_it ) { - __TBB_ASSERT( thr.ready, "use of commit_wait() without prior prepare_wait()"); - thr.semaphore().P(); - __TBB_ASSERT( !thr.in_waitset, "still in the queue?" ); - if( thr.aborted ) - throw_exception( eid_user_abort ); + if (do_it) { + node.wait(); } else { - cancel_wait( thr ); + cancel_wait( node ); } return do_it; } + //! Cancel the wait. Removes the thread from the wait queue if not removed yet. - void cancel_wait( thread_context& thr ); + void cancel_wait( wait_node<Context>& node ) { + // possible skipped wakeup will be pumped in the following prepare_wait() + node.my_skipped_wakeup = true; + // try to remove node from waitset + // Cancel wait guarantees acquire memory barrier. + bool in_list = node.my_is_in_list.load(std::memory_order_acquire); + if (in_list) { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + if (node.my_is_in_list.load(std::memory_order_relaxed)) { + my_waitset.remove(node); + // node is removed from waitset, so there will be no wakeup + node.my_is_in_list.store(false, std::memory_order_relaxed); + node.my_skipped_wakeup = false; + } + } + } + + //! 
Wait for a condition to be satisfied with waiting-on my_context + template <typename NodeType, typename Pred> + bool wait(Pred&& pred, NodeType&& node) { + prepare_wait(node); + while (!guarded_call(std::forward<Pred>(pred), node)) { + if (commit_wait(node)) { + return true; + } - //! Wait for a condition to be satisfied with waiting-on context - template<typename WaitUntil, typename Context> - void wait( WaitUntil until, Context on ); + prepare_wait(node); + } + + cancel_wait(node); + return false; + } //! Notify one thread about the event - void notify_one() {atomic_fence(); notify_one_relaxed();} + void notify_one() { + atomic_fence_seq_cst(); + notify_one_relaxed(); + } //! Notify one thread about the event. Relaxed version. - void notify_one_relaxed(); + void notify_one_relaxed() { + if (my_waitset.empty()) { + return; + } - //! Notify all waiting threads of the event - void notify_all() {atomic_fence(); notify_all_relaxed();} + base_node* n; + const base_node* end = my_waitset.end(); + { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + my_epoch.store(my_epoch.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); + n = my_waitset.front(); + if (n != end) { + my_waitset.remove(*n); + +// GCC 12.x-13.x issues a warning here that to_wait_node(n)->my_is_in_list might have size 0, since n is +// a base_node pointer. (This cannot happen, because only wait_node pointers are added to my_waitset.) +#if (__TBB_GCC_VERSION >= 120100 && __TBB_GCC_VERSION < 140000 ) && !__clang__ && !__INTEL_COMPILER +// #pragma GCC diagnostic push +// #pragma GCC diagnostic ignored "-Wstringop-overflow" +#endif + to_wait_node(n)->my_is_in_list.store(false, std::memory_order_relaxed); +#if (__TBB_GCC_VERSION >= 120100 && __TBB_GCC_VERSION < 140000 ) && !__clang__ && !__INTEL_COMPILER +// #pragma GCC diagnostic pop +#endif + } + } - //! Notify all waiting threads of the event; Relaxed version - void notify_all_relaxed(); + if (n != end) { + to_wait_node(n)->notify(); + } + } - //! Notify waiting threads of the event that satisfies the given predicate - template<typename P> void notify( const P& predicate ) {atomic_fence(); notify_relaxed( predicate );} + //! Notify all waiting threads of the event + void notify_all() { + atomic_fence_seq_cst(); + notify_all_relaxed(); + } - //! Notify waiting threads of the event that satisfies the given predicate; Relaxed version - template<typename P> void notify_relaxed( const P& predicate ); + // ! Notify all waiting threads of the event; Relaxed version + void notify_all_relaxed() { + if (my_waitset.empty()) { + return; + } - //! Abort any sleeping threads at the time of the call - void abort_all() {atomic_fence(); abort_all_relaxed(); } - - //! 
Abort any sleeping threads at the time of the call; Relaxed version - void abort_all_relaxed(); + base_list temp; + const base_node* end; + { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + my_epoch.store(my_epoch.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); + // TODO: Possible optimization, don't change node state under lock, just do flush + my_waitset.flush_to(temp); + end = temp.end(); + for (base_node* n = temp.front(); n != end; n = n->next) { + to_wait_node(n)->my_is_in_list.store(false, std::memory_order_relaxed); + } + } -private: - tbb::spin_mutex mutex_ec; - waitset_t waitset_ec; - __TBB_atomic unsigned epoch; - thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); } -}; + base_node* nxt; + for (base_node* n = temp.front(); n != end; n=nxt) { + nxt = n->next; + to_wait_node(n)->notify(); + } +#if TBB_USE_ASSERT + temp.clear(); +#endif + } -template<typename WaitUntil, typename Context> -void concurrent_monitor::wait( WaitUntil until, Context on ) -{ - bool slept = false; - thread_context thr_ctx; - prepare_wait( thr_ctx, on() ); - while( !until() ) { - if( (slept = commit_wait( thr_ctx ) )==true ) - if( until() ) break; - slept = false; - prepare_wait( thr_ctx, on() ); - } - if( !slept ) - cancel_wait( thr_ctx ); -} - -template<typename P> -void concurrent_monitor::notify_relaxed( const P& predicate ) { - if( waitset_ec.empty() ) + //! Notify waiting threads of the event that satisfies the given predicate + template <typename P> + void notify( const P& predicate ) { + atomic_fence_seq_cst(); + notify_relaxed( predicate ); + } + + //! Notify waiting threads of the event that satisfies the given predicate; + //! the predicate is called under the lock. Relaxed version. + template<typename P> + void notify_relaxed( const P& predicate ) { + if (my_waitset.empty()) { return; - dllist_t temp; - waitset_node_t* nxt; - const waitset_node_t* end = waitset_ec.end(); + } + + base_list temp; + base_node* nxt; + const base_node* end = my_waitset.end(); { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - __TBB_store_relaxed(epoch, __TBB_load_relaxed(epoch) + 1); - for( waitset_node_t* n=waitset_ec.last(); n!=end; n=nxt ) { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + my_epoch.store(my_epoch.load( std::memory_order_relaxed ) + 1, std::memory_order_relaxed); + for (base_node* n = my_waitset.last(); n != end; n = nxt) { nxt = n->prev; - thread_context* thr = to_thread_context( n ); - if( predicate( thr->context ) ) { - waitset_ec.remove( *n ); - thr->in_waitset = false; - temp.add( n ); + auto* node = static_cast<wait_node<Context>*>(n); + if (predicate(node->my_context)) { + my_waitset.remove(*n); + node->my_is_in_list.store(false, std::memory_order_relaxed); + temp.add(n); } } } end = temp.end(); - for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) { + for (base_node* n=temp.front(); n != end; n = nxt) { nxt = n->next; - to_thread_context(n)->semaphore().V(); + to_wait_node(n)->notify(); } #if TBB_USE_ASSERT temp.clear(); #endif -} + } + + //! Notify waiting threads of the event that satisfies the given predicate; + //! the predicate is called under the lock. Relaxed version. 
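The notify_relaxed() overload above shows the monitor's notification discipline: matching waiters are unlinked into a private list while the mutex is held, and their semaphores are signalled only after the lock is dropped, keeping the critical section short. A condensed model of that discipline, with std::mutex and std::vector standing in for the monitor mutex and intrusive list, and a hypothetical Waiter type:

```cpp
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

struct Waiter {
    std::uintptr_t context;
    void notify() { /* semaphore V() in the real code; elided here */ }
};

class MiniMonitor {
public:
    template <typename Pred>
    void notify_matching(const Pred& pred) {
        std::vector<Waiter*> grabbed;
        {
            std::lock_guard<std::mutex> lock(mutex_);
            ++epoch_;                                  // invalidate pending commits
            for (std::size_t i = waiters_.size(); i-- > 0; ) {
                if (pred(waiters_[i]->context)) {      // predicate runs under the lock
                    grabbed.push_back(waiters_[i]);
                    waiters_.erase(waiters_.begin() + i);
                }
            }
        }
        for (Waiter* w : grabbed)                      // wake-ups happen unlocked
            w->notify();
    }
private:
    std::mutex mutex_;
    std::vector<Waiter*> waiters_;
    unsigned epoch_ = 0;
};
```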
+ template<typename P> + void notify_one_relaxed( const P& predicate ) { + if (my_waitset.empty()) { + return; + } + + base_node* tmp = nullptr; + base_node* next{}; + const base_node* end = my_waitset.end(); + { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + my_epoch.store(my_epoch.load( std::memory_order_relaxed ) + 1, std::memory_order_relaxed); + for (base_node* n = my_waitset.last(); n != end; n = next) { + next = n->prev; + auto* node = static_cast<wait_node<Context>*>(n); + if (predicate(node->my_context)) { + my_waitset.remove(*n); + node->my_is_in_list.store(false, std::memory_order_relaxed); + tmp = n; + break; + } + } + } + + if (tmp) { + to_wait_node(tmp)->notify(); + } + } + + //! Abort any sleeping threads at the time of the call + void abort_all() { + atomic_fence_seq_cst(); + abort_all_relaxed(); + } + + //! Abort any sleeping threads at the time of the call; Relaxed version + void abort_all_relaxed() { + if (my_waitset.empty()) { + return; + } + + base_list temp; + const base_node* end; + { + concurrent_monitor_mutex::scoped_lock l(my_mutex); + my_epoch.store(my_epoch.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); + my_waitset.flush_to(temp); + end = temp.end(); + for (base_node* n = temp.front(); n != end; n = n->next) { + to_wait_node(n)->my_is_in_list.store(false, std::memory_order_relaxed); + } + } + + base_node* nxt; + for (base_node* n = temp.front(); n != end; n = nxt) { + nxt = n->next; + to_wait_node(n)->my_aborted = true; + to_wait_node(n)->notify(); + } +#if TBB_USE_ASSERT + temp.clear(); +#endif + } + + void destroy() { + this->abort_all(); + my_mutex.destroy(); + __TBB_ASSERT(this->my_waitset.empty(), "waitset not empty?"); + } + +private: + template <typename NodeType, typename Pred> + bool guarded_call(Pred&& predicate, NodeType& node) { + bool res = false; + tbb::detail::d0::try_call( [&] { + res = std::forward<Pred>(predicate)(); + }).on_exception( [&] { + cancel_wait(node); + }); + + return res; + } + + concurrent_monitor_mutex my_mutex{}; + base_list my_waitset{}; + std::atomic<unsigned> my_epoch{}; + + wait_node<Context>* to_wait_node( base_node* node ) { return static_cast<wait_node<Context>*>(node); } +}; + +class concurrent_monitor : public concurrent_monitor_base<std::uintptr_t> { + using base_type = concurrent_monitor_base<std::uintptr_t>; +public: + using base_type::base_type; + + ~concurrent_monitor() { + destroy(); + } + + /** per-thread descriptor for concurrent_monitor */ + using thread_context = sleep_node<std::uintptr_t>; +}; -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* __TBB_concurrent_monitor_H */ diff --git a/src/tbb/src/tbb/concurrent_monitor_mutex.h b/src/tbb/src/tbb/concurrent_monitor_mutex.h new file mode 100644 index 000000000..cae6526dd --- /dev/null +++ b/src/tbb/src/tbb/concurrent_monitor_mutex.h @@ -0,0 +1,113 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
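Before the mutex implementation, it may help to reduce the prepare/commit protocol used by concurrent_monitor_base above to its core: the waiter snapshots an epoch while enqueuing itself, and commits to blocking only if no notification has advanced the epoch since; otherwise it cancels and re-tests its condition. A deliberately stripped-down model (the blocking itself is elided; this is not the library's API):

```cpp
#include <atomic>
#include <mutex>

class EpochGate {
public:
    unsigned prepare_wait() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++waiters_;                                     // enqueue (reduced to a count)
        return epoch_.load(std::memory_order_relaxed);  // snapshot taken under the lock
    }
    bool commit_wait(unsigned snapshot) {
        // Block only if nothing was published since prepare_wait().
        if (snapshot != epoch_.load(std::memory_order_relaxed)) {
            cancel_wait();
            return false;                               // caller re-tests and retries
        }
        // ... block on a per-thread semaphore here ...
        return true;
    }
    void cancel_wait() {
        std::lock_guard<std::mutex> lock(mutex_);
        --waiters_;
    }
    void notify_all() {
        std::lock_guard<std::mutex> lock(mutex_);
        epoch_.fetch_add(1, std::memory_order_relaxed); // invalidates open snapshots
        // ... V() every queued semaphore here ...
        waiters_ = 0;
    }
private:
    std::mutex mutex_;
    std::atomic<unsigned> epoch_{0};
    unsigned waiters_ = 0;
};
```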
+*/ + +#ifndef __TBB_monitor_mutex_H +#define __TBB_monitor_mutex_H + +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/detail/_aligned_space.h" +#include "semaphore.h" + +#include <mutex> + +namespace tbb { +namespace detail { +namespace r1 { + +class concurrent_monitor_mutex { +public: + using scoped_lock = std::lock_guard<concurrent_monitor_mutex>; + + constexpr concurrent_monitor_mutex() {} + + ~concurrent_monitor_mutex() = default; + + void destroy() { +#if !__TBB_USE_FUTEX + if (my_init_flag.load(std::memory_order_relaxed)) { + get_semaphore().~semaphore(); + } +#endif + } + + void lock() { + auto wakeup_condition = [&] { + return my_flag.load(std::memory_order_relaxed) == 0; + }; + + while (my_flag.exchange(1)) { + if (!timed_spin_wait_until(wakeup_condition)) { + ++my_waiters; + while (!wakeup_condition()) { + wait(); + } + --my_waiters; + } + } + } + + void unlock() { + my_flag.exchange(0); // full fence, so the next load is relaxed + if (my_waiters.load(std::memory_order_relaxed)) { + wakeup(); + } + } + +private: + void wait() { +#if __TBB_USE_FUTEX + futex_wait(&my_flag, 1); +#else + get_semaphore().P(); +#endif + } + + void wakeup() { +#if __TBB_USE_FUTEX + futex_wakeup_one(&my_flag); +#else + get_semaphore().V(); +#endif + } + + // The flag should be int for the futex operations + std::atomic<int> my_flag{0}; + std::atomic<int> my_waiters{0}; + +#if !__TBB_USE_FUTEX + semaphore& get_semaphore() { + if (!my_init_flag.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(my_init_mutex); + if (!my_init_flag.load(std::memory_order_relaxed)) { + new (my_semaphore.begin()) semaphore(); + my_init_flag.store(true, std::memory_order_release); + } + } + + return *my_semaphore.begin(); + } + + static std::mutex my_init_mutex; + std::atomic<bool> my_init_flag{false}; + aligned_space<semaphore> my_semaphore{}; +#endif +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_monitor_mutex_H diff --git a/src/tbb/src/tbb/concurrent_queue.cpp b/src/tbb/src/tbb/concurrent_queue.cpp deleted file mode 100644 index 71af714f0..000000000 --- a/src/tbb/src/tbb/concurrent_queue.cpp +++ /dev/null @@ -1,670 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
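get_semaphore() above lazily constructs the semaphore with the classic double-checked pattern: an acquire load on the fast path, and placement-new under a mutex followed by a release store, so other threads observe a fully built object. The same shape in standalone form (a plain struct replaces the semaphore; names are illustrative):

```cpp
#include <atomic>
#include <mutex>
#include <new>

struct Lazy {
    int value = 42;
};

class LazyHolder {
public:
    Lazy& get() {
        if (!init_flag_.load(std::memory_order_acquire)) {     // fast path
            std::lock_guard<std::mutex> lock(init_mutex_);
            if (!init_flag_.load(std::memory_order_relaxed)) { // re-check, locked
                new (&storage_) Lazy();
                init_flag_.store(true, std::memory_order_release);
            }
        }
        return *reinterpret_cast<Lazy*>(&storage_);
    }
    ~LazyHolder() {
        if (init_flag_.load(std::memory_order_relaxed))
            reinterpret_cast<Lazy*>(&storage_)->~Lazy();
    }
private:
    std::mutex init_mutex_;
    std::atomic<bool> init_flag_{false};
    alignas(Lazy) unsigned char storage_[sizeof(Lazy)];
};
```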
-*/ - -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_machine.h" -#include "tbb/tbb_exception.h" -// Define required to satisfy test in internal file. -#define __TBB_concurrent_queue_H -#include "tbb/internal/_concurrent_queue_impl.h" -#include "concurrent_monitor.h" -#include "itt_notify.h" -#include <new> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> // for memset() - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -using namespace std; - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -#define RECORD_EVENTS 0 - - -namespace tbb { - -namespace internal { - -typedef concurrent_queue_base_v3 concurrent_queue_base; - -typedef size_t ticket; - -//! A queue using simple locking. -/** For efficiency, this class has no constructor. - The caller is expected to zero-initialize it. */ -struct micro_queue { - typedef concurrent_queue_base::page page; - - friend class micro_queue_pop_finalizer; - - atomic<page*> head_page; - atomic<ticket> head_counter; - - atomic<page*> tail_page; - atomic<ticket> tail_counter; - - spin_mutex page_mutex; - - void push( const void* item, ticket k, concurrent_queue_base& base, - concurrent_queue_base::copy_specifics op_type ); - - void abort_push( ticket k, concurrent_queue_base& base ); - - bool pop( void* dst, ticket k, concurrent_queue_base& base ); - - micro_queue& assign( const micro_queue& src, concurrent_queue_base& base, - concurrent_queue_base::copy_specifics op_type ); - - page* make_copy ( concurrent_queue_base& base, const page* src_page, size_t begin_in_page, - size_t end_in_page, ticket& g_index, concurrent_queue_base::copy_specifics op_type ) ; - - void make_invalid( ticket k ); -}; - -// we need to yank it out of micro_queue because of concurrent_queue_base::deallocate_page being virtual. -class micro_queue_pop_finalizer: no_copy { - typedef concurrent_queue_base::page page; - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - concurrent_queue_base &base; -public: - micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base& b, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p), base(b) - {} - ~micro_queue_pop_finalizer() { - page* p = my_page; - if( p ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !q ) { - my_queue.tail_page = NULL; - } - } - my_queue.head_counter = my_ticket; - if( p ) - base.deallocate_page( p ); - } -}; - -struct predicate_leq { - ticket t; - predicate_leq( ticket t_ ) : t(t_) {} - bool operator() ( uintptr_t p ) const {return (ticket)p<=t;} -}; - -//! Internal representation of a ConcurrentQueue. -/** For efficiency, this class has no constructor. - The caller is expected to zero-initialize it. */ -class concurrent_queue_rep { -public: -private: - friend struct micro_queue; - - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - //! Must be power of 2 - static const size_t n_queue = 8; - - //! 
Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - atomic<ticket> head_counter; - concurrent_monitor items_avail; - atomic<size_t> n_invalid_entries; - char pad1[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor)+sizeof(atomic<size_t>))&(NFS_MaxLineSize-1))]; - - atomic<ticket> tail_counter; - concurrent_monitor slots_avail; - char pad2[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor))&(NFS_MaxLineSize-1))]; - micro_queue array[n_queue]; - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } - - //! Value for effective_capacity that denotes unbounded queue. - static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2); -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( push ) - #pragma warning( disable: 4146 ) -#endif - -static void* invalid_page; - -//------------------------------------------------------------------------ -// micro_queue -//------------------------------------------------------------------------ -void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base, - concurrent_queue_base::copy_specifics op_type ) { - k &= -concurrent_queue_rep::n_queue; - page* p = NULL; - // find index on page where we would put the data - size_t index = modulo_power_of_two( k/concurrent_queue_rep::n_queue, base.items_per_page ); - if( !index ) { // make a new page - __TBB_TRY { - p = base.allocate_page(); - } __TBB_CATCH(...) { - ++base.my_rep->n_invalid_entries; - make_invalid( k ); - } - p->mask = 0; - p->next = NULL; - } - - // wait for my turn - if( tail_counter!=k ) // The developer insisted on keeping first check out of the backoff loop - for( atomic_backoff b(true);;b.pause() ) { - ticket tail = tail_counter; - if( tail==k ) break; - else if( tail&0x1 ) { - // no memory. throws an exception; assumes concurrent_queue_rep::n_queue>1 - ++base.my_rep->n_invalid_entries; - throw_exception( eid_bad_last_alloc ); - } - } - - if( p ) { // page is newly allocated; insert in micro_queue - spin_mutex::scoped_lock lock( page_mutex ); - if( page* q = tail_page ) - q->next = p; - else - head_page = p; - tail_page = p; - } - - if (item) { - p = tail_page; - ITT_NOTIFY( sync_acquired, p ); - __TBB_TRY { - if( concurrent_queue_base::copy == op_type ) { - base.copy_item( *p, index, item ); - } else { - __TBB_ASSERT( concurrent_queue_base::move == op_type, NULL ); - static_cast<concurrent_queue_base_v8&>(base).move_item( *p, index, item ); - } - } __TBB_CATCH(...) { - ++base.my_rep->n_invalid_entries; - tail_counter += concurrent_queue_rep::n_queue; - __TBB_RETHROW(); - } - ITT_NOTIFY( sync_releasing, p ); - // If no exception was thrown, mark item as present. 
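concurrent_queue_rep::index() above spreads tickets over the eight micro-queues with k*phi % n_queue; because phi is coprime with the power-of-two lane count, every lane is visited once per n_queue consecutive tickets, in the scattered order the comment describes as approximating LRU. A toy single-threaded demo of just that mapping:

```cpp
#include <cstddef>
#include <cstdio>

static constexpr std::size_t n_queue = 8;  // must be a power of two
static constexpr std::size_t phi = 3;      // ~ n_queue / golden ratio

static std::size_t lane(std::size_t ticket) { return ticket * phi % n_queue; }

int main() {
    // Tickets 0..7 land on lanes 0,3,6,1,4,7,2,5: each lane exactly once,
    // but scattered rather than sequential.
    for (std::size_t k = 0; k < 16; ++k)
        std::printf("ticket %2zu -> lane %zu\n", k, lane(k));
    return 0;
}
```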
- p->mask |= uintptr_t(1)<<index; - } - else // no item; this was called from abort_push - ++base.my_rep->n_invalid_entries; - - tail_counter += concurrent_queue_rep::n_queue; -} - - -void micro_queue::abort_push( ticket k, concurrent_queue_base& base ) { - push(NULL, k, base, concurrent_queue_base::copy); -} - -bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) { - k &= -concurrent_queue_rep::n_queue; - spin_wait_until_eq( head_counter, k ); - spin_wait_while_eq( tail_counter, k ); - page& p = *head_page; - __TBB_ASSERT( &p, NULL ); - size_t index = modulo_power_of_two( k/concurrent_queue_rep::n_queue, base.items_per_page ); - bool success = false; - { - micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? &p : NULL ); - if( p.mask & uintptr_t(1)<<index ) { - success = true; - ITT_NOTIFY( sync_acquired, dst ); - ITT_NOTIFY( sync_acquired, head_page ); - base.assign_and_destroy_item( dst, p, index ); - ITT_NOTIFY( sync_releasing, head_page ); - } else { - --base.my_rep->n_invalid_entries; - } - } - return success; -} - -micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base& base, - concurrent_queue_base::copy_specifics op_type ) -{ - head_counter = src.head_counter; - tail_counter = src.tail_counter; - - const page* srcp = src.head_page; - if( srcp ) { - ticket g_index = head_counter; - __TBB_TRY { - size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep::n_queue; - size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep::n_queue, base.items_per_page ); - size_t end_in_first_page = (index+n_items<base.items_per_page)?(index+n_items):base.items_per_page; - - head_page = make_copy( base, srcp, index, end_in_first_page, g_index, op_type ); - page* cur_page = head_page; - - if( srcp != src.tail_page ) { - for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { - cur_page->next = make_copy( base, srcp, 0, base.items_per_page, g_index, op_type ); - cur_page = cur_page->next; - } - - __TBB_ASSERT( srcp==src.tail_page, NULL ); - - size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep::n_queue, base.items_per_page ); - if( last_index==0 ) last_index = base.items_per_page; - - cur_page->next = make_copy( base, srcp, 0, last_index, g_index, op_type ); - cur_page = cur_page->next; - } - tail_page = cur_page; - } __TBB_CATCH(...) { - make_invalid( g_index ); - } - } else { - head_page = tail_page = NULL; - } - return *this; -} - -concurrent_queue_base::page* micro_queue::make_copy( concurrent_queue_base& base, - const concurrent_queue_base::page* src_page, size_t begin_in_page, size_t end_in_page, - ticket& g_index, concurrent_queue_base::copy_specifics op_type ) -{ - page* new_page = base.allocate_page(); - new_page->next = NULL; - new_page->mask = src_page->mask; - for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) - if( new_page->mask & uintptr_t(1)<<begin_in_page ) - if( concurrent_queue_base::copy == op_type ) { - base.copy_page_item( *new_page, begin_in_page, *src_page, begin_in_page ); - } else { - __TBB_ASSERT( concurrent_queue_base::move == op_type, NULL ); - static_cast<concurrent_queue_base_v8&>(base).move_page_item( *new_page, begin_in_page, *src_page, begin_in_page ); - } - return new_page; -} - -void micro_queue::make_invalid( ticket k ) -{ - static concurrent_queue_base::page dummy = {static_cast<page*>((void*)1), 0}; - // mark it so that no more pushes are allowed. 
- invalid_page = &dummy; - { - spin_mutex::scoped_lock lock( page_mutex ); - tail_counter = k+concurrent_queue_rep::n_queue+1; - if( page* q = tail_page ) - q->next = static_cast<page*>(invalid_page); - else - head_page = static_cast<page*>(invalid_page); - tail_page = static_cast<page*>(invalid_page); - } - __TBB_RETHROW(); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4146 is back - -//------------------------------------------------------------------------ -// concurrent_queue_base -//------------------------------------------------------------------------ -concurrent_queue_base_v3::concurrent_queue_base_v3( size_t item_sz ) { - items_per_page = item_sz<= 8 ? 32 : - item_sz<= 16 ? 16 : - item_sz<= 32 ? 8 : - item_sz<= 64 ? 4 : - item_sz<=128 ? 2 : - 1; - my_capacity = size_t(-1)/(item_sz>1 ? item_sz : 2); - my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - new ( &my_rep->items_avail ) concurrent_monitor(); - new ( &my_rep->slots_avail ) concurrent_monitor(); - this->item_size = item_sz; -} - -concurrent_queue_base_v3::~concurrent_queue_base_v3() { - size_t nq = my_rep->n_queue; - for( size_t i=0; i<nq; i++ ) - __TBB_ASSERT( my_rep->array[i].tail_page==NULL, "pages were not freed properly" ); - cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1); -} - -void concurrent_queue_base_v3::internal_push( const void* src ) { - internal_insert_item( src, copy ); -} - -void concurrent_queue_base_v8::internal_push_move( const void* src ) { - internal_insert_item( src, move ); -} - -void concurrent_queue_base_v3::internal_insert_item( const void* src, copy_specifics op_type ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter++; - ptrdiff_t e = my_capacity; -#if DO_ITT_NOTIFY - bool sync_prepare_done = false; -#endif - if( (ptrdiff_t)(k-r.head_counter)>=e ) { // queue is full -#if DO_ITT_NOTIFY - if( !sync_prepare_done ) { - ITT_NOTIFY( sync_prepare, &sync_prepare_done ); - sync_prepare_done = true; - } -#endif - bool slept = false; - concurrent_monitor::thread_context thr_ctx; - r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) ); - while( (ptrdiff_t)(k-r.head_counter)>=const_cast<volatile ptrdiff_t&>(e = my_capacity) ) { - __TBB_TRY { - slept = r.slots_avail.commit_wait( thr_ctx ); - } __TBB_CATCH( tbb::user_abort& ) { - r.choose(k).abort_push(k, *this); - __TBB_RETHROW(); - } __TBB_CATCH(...) 
{ - __TBB_RETHROW(); - } - if (slept == true) break; - r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) ); - } - if( !slept ) - r.slots_avail.cancel_wait( thr_ctx ); - } - ITT_NOTIFY( sync_acquired, &sync_prepare_done ); - __TBB_ASSERT( (ptrdiff_t)(k-r.head_counter)<my_capacity, NULL); - r.choose( k ).push( src, k, *this, op_type ); - r.items_avail.notify( predicate_leq(k) ); -} - -void concurrent_queue_base_v3::internal_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; -#if DO_ITT_NOTIFY - bool sync_prepare_done = false; -#endif - do { - k=r.head_counter++; - if ( (ptrdiff_t)(r.tail_counter-k)<=0 ) { // queue is empty -#if DO_ITT_NOTIFY - if( !sync_prepare_done ) { - ITT_NOTIFY( sync_prepare, dst ); - sync_prepare_done = true; - } -#endif - bool slept = false; - concurrent_monitor::thread_context thr_ctx; - r.items_avail.prepare_wait( thr_ctx, k ); - while( (ptrdiff_t)(r.tail_counter-k)<=0 ) { - __TBB_TRY { - slept = r.items_avail.commit_wait( thr_ctx ); - } __TBB_CATCH( tbb::user_abort& ) { - r.head_counter--; - __TBB_RETHROW(); - } __TBB_CATCH(...) { - __TBB_RETHROW(); - } - if (slept == true) break; - r.items_avail.prepare_wait( thr_ctx, k ); - } - if( !slept ) - r.items_avail.cancel_wait( thr_ctx ); - } - __TBB_ASSERT((ptrdiff_t)(r.tail_counter-k)>0, NULL); - } while( !r.choose(k).pop(dst,k,*this) ); - - // wake up a producer.. - r.slots_avail.notify( predicate_leq(k) ); -} - -void concurrent_queue_base_v3::internal_abort() { - concurrent_queue_rep& r = *my_rep; - r.items_avail.abort_all(); - r.slots_avail.abort_all(); -} - -bool concurrent_queue_base_v3::internal_pop_if_present( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - do { - k = r.head_counter; - for(;;) { - if( (ptrdiff_t)(r.tail_counter-k)<=0 ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. - ticket tk=k; - k = r.head_counter.compare_and_swap( tk+1, tk ); - if( k==tk ) - break; - // Another thread snatched the item, retry. - } - } while( !r.choose( k ).pop( dst, k, *this ) ); - - r.slots_avail.notify( predicate_leq(k) ); - - return true; -} - -bool concurrent_queue_base_v3::internal_push_if_not_full( const void* src ) { - return internal_insert_if_not_full( src, copy ); -} - -bool concurrent_queue_base_v8::internal_push_move_if_not_full( const void* src ) { - return internal_insert_if_not_full( src, move ); -} - -bool concurrent_queue_base_v3::internal_insert_if_not_full( const void* src, copy_specifics op_type ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter; - for(;;) { - if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) { - // Queue is full - return false; - } - // Queue had empty slot with ticket k when we looked. Attempt to claim that slot. - ticket tk=k; - k = r.tail_counter.compare_and_swap( tk+1, tk ); - if( k==tk ) - break; - // Another thread claimed the slot, so retry. - } - r.choose(k).push(src, k, *this, op_type); - r.items_avail.notify( predicate_leq(k) ); - return true; -} - -ptrdiff_t concurrent_queue_base_v3::internal_size() const { - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter-my_rep->n_invalid_entries); -} - -bool concurrent_queue_base_v3::internal_empty() const { - ticket tc = my_rep->tail_counter; - ticket hc = my_rep->head_counter; - // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. 
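A side note on the ticket arithmetic: internal_size() above can legitimately return a negative number, because head and tail are free-running counters and pops may reserve tickets before the matching pushes arrive; the same arithmetic also makes internal_empty() below tolerant of counter wrap-around. A tiny self-contained model of that behaviour (my own counters, not the TBB types):

    #include <cstdio>
    #include <cstddef>

    int main() {
        // Free-running counters, as in concurrent_queue_rep.
        std::size_t head_counter = 0, tail_counter = 0;
        tail_counter += 2;  // two pushes claim tickets 0 and 1
        head_counter += 3;  // three pops claim tickets 0, 1 and 2
        // Unsigned subtraction reinterpreted as signed gives -1: one pop
        // is waiting for a push that has not arrived yet.
        std::ptrdiff_t size = std::ptrdiff_t(tail_counter - head_counter);
        std::printf("apparent size = %td\n", size);
        return 0;
    }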
- return ( tc==my_rep->tail_counter && ptrdiff_t(tc-hc-my_rep->n_invalid_entries)<=0 ); -} - -void concurrent_queue_base_v3::internal_set_capacity( ptrdiff_t capacity, size_t /*item_sz*/ ) { - my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity; -} - -void concurrent_queue_base_v3::internal_finish_clear() { - size_t nq = my_rep->n_queue; - for( size_t i=0; i<nq; ++i ) { - page* tp = my_rep->array[i].tail_page; - __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" ); - if( tp!=NULL) { - if( tp!=invalid_page ) deallocate_page( tp ); - my_rep->array[i].tail_page = NULL; - } - } -} - -void concurrent_queue_base_v3::internal_throw_exception() const { - throw_exception( eid_bad_alloc ); -} - -void concurrent_queue_base_v3::internal_assign( const concurrent_queue_base& src, copy_specifics op_type ) { - items_per_page = src.items_per_page; - my_capacity = src.my_capacity; - - // copy concurrent_queue_rep. - my_rep->head_counter = src.my_rep->head_counter; - my_rep->tail_counter = src.my_rep->tail_counter; - my_rep->n_invalid_entries = src.my_rep->n_invalid_entries; - - // copy micro_queues - for( size_t i = 0; i<my_rep->n_queue; ++i ) - my_rep->array[i].assign( src.my_rep->array[i], *this, op_type ); - - __TBB_ASSERT( my_rep->head_counter==src.my_rep->head_counter && my_rep->tail_counter==src.my_rep->tail_counter, - "the source concurrent queue should not be concurrently modified." ); -} - -void concurrent_queue_base_v3::assign( const concurrent_queue_base& src ) { - internal_assign( src, copy ); -} - -void concurrent_queue_base_v8::move_content( concurrent_queue_base_v8& src ) { - internal_assign( src, move ); -} - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_rep -//------------------------------------------------------------------------ -class concurrent_queue_iterator_rep: no_assign { -public: - ticket head_counter; - const concurrent_queue_base& my_queue; - const size_t offset_of_last; - concurrent_queue_base::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base& queue, size_t offset_of_last_ ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue), - offset_of_last(offset_of_last_) - { - const concurrent_queue_rep& rep = *queue.my_rep; - for( size_t k=0; k<concurrent_queue_rep::n_queue; ++k ) - array[k] = rep.array[k].head_page; - } - //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. 
- bool get_item( void*& item, size_t k ) { - if( k==my_queue.my_rep->tail_counter ) { - item = NULL; - return true; - } else { - concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, my_queue.items_per_page ); - item = static_cast<unsigned char*>(static_cast<void*>(p)) + offset_of_last + my_queue.item_size*i; - return (p->mask & uintptr_t(1)<<i)!=0; - } - } -}; - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_base -//------------------------------------------------------------------------ - -void concurrent_queue_iterator_base_v3::initialize( const concurrent_queue_base& queue, size_t offset_of_last ) { - my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep(queue,offset_of_last); - size_t k = my_rep->head_counter; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue ) { - initialize(queue,0); -} - -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue, size_t offset_of_last ) { - initialize(queue,offset_of_last); -} - -void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1); - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -void concurrent_queue_iterator_base_v3::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base& queue = my_rep->my_queue; -#if TBB_USE_ASSERT - void* tmp; - my_rep->get_item(tmp,k); - __TBB_ASSERT( my_item==tmp, NULL ); -#endif /* TBB_USE_ASSERT */ - size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, queue.items_per_page ); - if( i==queue.items_per_page-1 ) { - concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - // advance k - my_rep->head_counter = ++k; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -concurrent_queue_iterator_base_v3::~concurrent_queue_iterator_base_v3() { - //delete my_rep; - cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1); - my_rep = NULL; -} - -} // namespace internal - -} // namespace tbb diff --git a/src/tbb/src/tbb/concurrent_vector.cpp b/src/tbb/src/tbb/concurrent_vector.cpp deleted file mode 100644 index facbbbbd9..000000000 --- a/src/tbb/src/tbb/concurrent_vector.cpp +++ /dev/null @@ -1,631 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if (_MSC_VER) - //MSVC 10 "deprecated" application of some std:: algorithms to raw pointers as not safe. - //The reason is that destination is not checked against bounds/having enough place. - #define _SCL_SECURE_NO_WARNINGS -#endif - -#include "tbb/concurrent_vector.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_exception.h" -#include "tbb_misc.h" -#include "itt_notify.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> -#include <memory> //for uninitialized_fill_n - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -using namespace std; - -namespace tbb { - -namespace internal { - class concurrent_vector_base_v3::helper :no_assign { -public: - //! memory page size - static const size_type page_size = 4096; - - inline static bool incompact_predicate(size_type size) { // assert size != 0, see source/test/test_vector_layout.cpp - return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details - } - - inline static size_type find_segment_end(const concurrent_vector_base_v3 &v) { - segment_t *s = v.my_segment; - segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table; - segment_index_t k = 0; - while( k < u && (s[k].load<relaxed>()==segment_allocated() )) - ++k; - return k; - } - - // TODO: optimize accesses to my_first_block - //! assign first segment size. k - is index of last segment to be allocated, not a count of segments - inline static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k) { - if( !v.my_first_block ) { - /* There was a suggestion to set first segment according to incompact_predicate: - while( k && !helper::incompact_predicate(segment_size( k ) * element_size) ) - --k; // while previous vector size is compact, decrement - // reasons to not do it: - // * constructor(n) is not ready to accept fragmented segments - // * backward compatibility due to that constructor - // * current version gives additional guarantee and faster init. - // * two calls to reserve() will give the same effect. - */ - v.my_first_block.compare_and_swap(k+1, 0); // store number of segments - } - } - - inline static void *allocate_segment(concurrent_vector_base_v3 &v, size_type n) { - void *ptr = v.vector_allocator_ptr(v, n); - if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception - return ptr; - } - - //! 
Publish segment so other threads can see it. - template<typename argument_type> - inline static void publish_segment( segment_t& s, argument_type rhs ) { - // see also itt_store_pointer_with_release_v3() - ITT_NOTIFY( sync_releasing, &s ); - s.store<release>(rhs); - } - - static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure = false); - - // TODO: rename as get_segments_table() and return segment pointer - inline static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start ) { - if(k >= pointers_per_short_table && v.my_segment == v.my_storage) - extend_segment_table(v, start ); - } - - static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start); - - struct segment_not_used_predicate: no_assign { - segment_t &s; - segment_not_used_predicate(segment_t &segment) : s(segment) {} - bool operator()() const { return s.load<relaxed>() == segment_not_used ();} - }; - inline static segment_t& acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner) { - segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument - if( s.load<acquire>() == segment_not_used() ) { // do not check for segment_allocation_failed state - if( owner ) { - enable_segment( v, index, element_size ); - } else { - ITT_NOTIFY(sync_prepare, &s); - spin_wait_while(segment_not_used_predicate(s)); - ITT_NOTIFY(sync_acquired, &s); - } - } else { - ITT_NOTIFY(sync_acquired, &s); - } - if(s.load<relaxed>() != segment_allocated()) - throw_exception(eid_bad_last_alloc); // throw custom exception, because it's hard to recover correctly after segment_allocation_failed state - return s; - } - - ///// non-static fields of helper for exception-safe iteration across segments - segment_t *table;// TODO: review all segment_index_t as just short type - size_type first_block, k, sz, start, finish, element_size; - helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw() - : table(segments), first_block(fb), k(index), sz(0), start(s), finish(f), element_size(esize) {} - inline void first_segment() throw() { - __TBB_ASSERT( start <= finish, NULL ); - __TBB_ASSERT( first_block || !finish, NULL ); - if( k < first_block ) k = 0; // process solid segment at a time - size_type base = segment_base( k ); - __TBB_ASSERT( base <= start, NULL ); - finish -= base; start -= base; // rebase as offsets from segment k - sz = k ? 
base : segment_size( first_block ); // sz==base for k>0 - } - inline void next_segment() throw() { - finish -= sz; start = 0; // offsets from next segment - if( !k ) k = first_block; - else { ++k; sz = segment_size( k ); } - } - template<typename F> - inline size_type apply(const F &func) { - first_segment(); - while( sz < finish ) { // work for more than one segment - //TODO: remove extra load() of table[k] inside func - func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, sz - start ); - next_segment(); - } - func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, finish - start ); - return k; - } - inline segment_value_t get_segment_value(size_type index, bool wait) { - segment_t &s = table[index]; - if( wait && (s.load<acquire>() == segment_not_used()) ) { - ITT_NOTIFY(sync_prepare, &s); - spin_wait_while(segment_not_used_predicate(s)); - ITT_NOTIFY(sync_acquired, &s); - } - return s.load<relaxed>(); - } - ~helper() { - if( sz >= finish ) return; // the work is done correctly - cleanup(); - } - - //! Out of line code to assists destructor in infrequent cases. - void cleanup(); - - /// TODO: turn into lambda functions when available - struct init_body { - internal_array_op2 func; - const void *arg; - init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {} - void operator()(segment_t &, void *begin, size_type n) const { - func( begin, arg, n ); - } - }; - struct safe_init_body { - internal_array_op2 func; - const void *arg; - safe_init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {} - void operator()(segment_t &s, void *begin, size_type n) const { - if(s.load<relaxed>() != segment_allocated()) - throw_exception(eid_bad_last_alloc); // throw custom exception - func( begin, arg, n ); - } - }; - struct destroy_body { - internal_array_op1 func; - destroy_body(internal_array_op1 destroy) : func(destroy) {} - void operator()(segment_t &s, void *begin, size_type n) const { - if(s.load<relaxed>() == segment_allocated()) - func( begin, n ); - } - }; -}; - -void concurrent_vector_base_v3::helper::extend_segment_table(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type start) { - if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table); - // If other threads are trying to set pointers in the short segment, wait for them to finish their - // assignments before we copy the short segment to the long segment. 
Note: grow_to_at_least depends on it - for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ){ - if(v.my_storage[i].load<relaxed>() == segment_not_used()) { - ITT_NOTIFY(sync_prepare, &v.my_storage[i]); - atomic_backoff backoff(true); - while( v.my_segment == v.my_storage && (v.my_storage[i].load<relaxed>() == segment_not_used()) ) - backoff.pause(); - ITT_NOTIFY(sync_acquired, &v.my_storage[i]); - } - } - if( v.my_segment != v.my_storage ) return; - - segment_t* new_segment_table = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL ); - __TBB_ASSERT(new_segment_table, "NFS_Allocate should throw an exception if it cannot allocate the requested storage, not return a null pointer" ); - std::uninitialized_fill_n(new_segment_table,size_t(pointers_per_long_table),segment_t()); //init newly allocated table - //TODO: replace with static assert - __TBB_STATIC_ASSERT(pointers_per_long_table >= pointers_per_short_table, "the big table must be at least as large as the small one, as we copy values to it" ); - std::copy(v.my_storage, v.my_storage+pointers_per_short_table, new_segment_table);//copy values from old table, here operator= of segment_t is used - if( v.my_segment.compare_and_swap( new_segment_table, v.my_storage ) != v.my_storage ) - NFS_Free( new_segment_table ); - // else TODO: add ITT_NOTIFY signals for v.my_segment? -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::helper::enable_segment(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type k, concurrent_vector_base_v3::size_type element_size, - bool mark_as_not_used_on_failure ) { - - struct segment_scope_guard : no_copy{ - segment_t* my_segment_ptr; - bool my_mark_as_not_used; - segment_scope_guard(segment_t& segment, bool mark_as_not_used) : my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used){} - void dismiss(){ my_segment_ptr = 0;} - ~segment_scope_guard(){ - if (my_segment_ptr){ - if (!my_mark_as_not_used){ - publish_segment(*my_segment_ptr, segment_allocation_failed()); - }else{ - publish_segment(*my_segment_ptr, segment_not_used()); - } - } - } - }; - - segment_t* s = v.my_segment; // TODO: optimize out as argument? 
Optimize accesses to my_first_block - __TBB_ASSERT(s[k].load<relaxed>() != segment_allocated(), "concurrent operation during growth?"); - - size_type size_of_enabled_segment = segment_size(k); - size_type size_to_allocate = size_of_enabled_segment; - if( !k ) { - assign_first_segment_if_necessary(v, default_initial_segments-1); - size_of_enabled_segment = 2 ; - size_to_allocate = segment_size(v.my_first_block); - - } else { - spin_wait_while_eq( v.my_first_block, segment_index_t(0) ); - } - - if( k && (k < v.my_first_block)){ //no need to allocate anything - // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory - segment_value_t array0 = s[0].load<acquire>(); - if(array0 == segment_not_used()){ - // sync_prepare called only if there is a wait - ITT_NOTIFY(sync_prepare, &s[0]); - spin_wait_while( segment_not_used_predicate(s[0])); - array0 = s[0].load<acquire>(); - } - ITT_NOTIFY(sync_acquired, &s[0]); - if(array0 != segment_allocated()) { // check for segment_allocation_failed state of initial segment - publish_segment(s[k], segment_allocation_failed()); // and assign segment_allocation_failed state here - throw_exception(eid_bad_last_alloc); // throw custom exception - } - publish_segment( s[k], - static_cast<void*>(array0.pointer<char>() + segment_base(k)*element_size ) - ); - } else { - segment_scope_guard k_segment_guard(s[k], mark_as_not_used_on_failure); - publish_segment(s[k], allocate_segment(v, size_to_allocate)); - k_segment_guard.dismiss(); - } - return size_of_enabled_segment; -} - -void concurrent_vector_base_v3::helper::cleanup() { - if( !sz ) { // allocation failed, restore the table - segment_index_t k_start = k, k_end = segment_index_of(finish-1); - if( segment_base( k_start ) < start ) - get_segment_value(k_start++, true); // wait - if( k_start < first_block ) { - segment_value_t segment0 = get_segment_value(0, start>0); // wait if necessary - if((segment0 != segment_not_used()) && !k_start ) ++k_start; - if(segment0 != segment_allocated()) - for(; k_start < first_block && k_start <= k_end; ++k_start ) - publish_segment(table[k_start], segment_allocation_failed()); - else for(; k_start < first_block && k_start <= k_end; ++k_start ) - publish_segment(table[k_start], static_cast<void*>( - (segment0.pointer<char>()) + segment_base(k_start)*element_size) ); - } - for(; k_start <= k_end; ++k_start ) // not in first block - if(table[k_start].load<acquire>() == segment_not_used()) - publish_segment(table[k_start], segment_allocation_failed()); - // fill allocated items - first_segment(); - goto recover; - } - while( sz <= finish ) { // there is still work for at least one segment - next_segment(); -recover: - segment_value_t array = table[k].load<relaxed>(); - if(array == segment_allocated()) - std::memset( (array.pointer<char>()) + element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size ); - else __TBB_ASSERT( array == segment_allocation_failed(), NULL ); - } -} - -concurrent_vector_base_v3::~concurrent_vector_base_v3() { - segment_t* s = my_segment; - if( s != my_storage ) { -#if TBB_USE_ASSERT - //to please assert in segment_t destructor - std::fill_n(my_storage,size_t(pointers_per_short_table),segment_t()); -#endif /* TBB_USE_ASSERT */ -#if TBB_USE_DEBUG - for( segment_index_t i = 0; i < pointers_per_long_table; i++) - __TBB_ASSERT( my_segment[i].load<relaxed>() != segment_allocated(), "Segment should have been freed. 
Please recompile with new TBB before using exceptions."); -#endif - my_segment = my_storage; - NFS_Free( s ); - } -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_capacity() const { - return segment_base( helper::find_segment_end(*this) ); -} - -void concurrent_vector_base_v3::internal_throw_exception(size_type t) const { - switch(t) { - case 0: throw_exception(eid_out_of_range); - case 1: throw_exception(eid_segment_range_error); - case 2: throw_exception(eid_index_range_error); - } -} - -void concurrent_vector_base_v3::internal_reserve( size_type n, size_type element_size, size_type max_size ) { - if( n>max_size ) - throw_exception(eid_reservation_length_error); - __TBB_ASSERT( n, NULL ); - helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1)); - segment_index_t k = helper::find_segment_end(*this); - - for( ; segment_base(k)<n; ++k ) { - helper::extend_table_if_necessary(*this, k, 0); - if(my_segment[k].load<relaxed>() != segment_allocated()) - helper::enable_segment(*this, k, element_size, true ); //in case of failure mark segments as not used - } -} - -//TODO: Looks like atomic loads can be done relaxed here, as the only place this method is called from -//is the constructor, which does not require synchronization (for more details see comment in the -// concurrent_vector_base constructor). -void concurrent_vector_base_v3::internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy ) { - size_type n = src.my_early_size; - __TBB_ASSERT( my_segment == my_storage, NULL); - if( n ) { - helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1)); - size_type b; - for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) { - if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table) - || (src.my_segment[k].load<relaxed>() != segment_allocated())) { - my_early_size = b; break; - } - helper::extend_table_if_necessary(*this, k, 0); - size_type m = helper::enable_segment(*this, k, element_size); - if( m > n-b ) m = n-b; - my_early_size = b+m; - copy( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), m ); - } - } -} - -void concurrent_vector_base_v3::internal_assign( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) { - size_type n = src.my_early_size; - while( my_early_size>n ) { // TODO: improve - segment_index_t k = segment_index_of( my_early_size-1 ); - size_type b=segment_base(k); - size_type new_end = b>=n ? 
b : n; - __TBB_ASSERT( my_early_size>new_end, NULL ); - if( my_segment[k].load<relaxed>() != segment_allocated()) // check vector was broken before - throw_exception(eid_bad_last_alloc); // throw custom exception - // destructors are supposed to not throw any exceptions - destroy( my_segment[k].load<relaxed>().pointer<char>() + element_size*(new_end-b), my_early_size-new_end ); - my_early_size = new_end; - } - size_type dst_initialized_size = my_early_size; - my_early_size = n; - helper::assign_first_segment_if_necessary(*this, segment_index_of(n)); - size_type b; - for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) { - if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table) - || src.my_segment[k].load<relaxed>() != segment_allocated() ) { // if source is damaged - my_early_size = b; break; // TODO: it may cause undestructed items - } - helper::extend_table_if_necessary(*this, k, 0); - if( my_segment[k].load<relaxed>() == segment_not_used()) - helper::enable_segment(*this, k, element_size); - else if( my_segment[k].load<relaxed>() != segment_allocated() ) - throw_exception(eid_bad_last_alloc); // throw custom exception - size_type m = k? segment_size(k) : 2; - if( m > n-b ) m = n-b; - size_type a = 0; - if( dst_initialized_size>b ) { - a = dst_initialized_size-b; - if( a>m ) a = m; - assign( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), a ); - m -= a; - a *= element_size; - } - if( m>0 ) - copy( my_segment[k].load<relaxed>().pointer<char>() + a, src.my_segment[k].load<relaxed>().pointer<char>() + a, m ); - } - __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" ); -} - -void* concurrent_vector_base_v3::internal_push_back( size_type element_size, size_type& index ) { - __TBB_ASSERT( sizeof(my_early_size)==sizeof(uintptr_t), NULL ); - size_type tmp = my_early_size.fetch_and_increment<acquire>(); - index = tmp; - segment_index_t k_old = segment_index_of( tmp ); - size_type base = segment_base(k_old); - helper::extend_table_if_necessary(*this, k_old, tmp); - segment_t& s = helper::acquire_segment(*this, k_old, element_size, base==tmp); - size_type j_begin = tmp-base; - return (void*)(s.load<relaxed>().pointer<char>() + element_size*j_begin); -} - -void concurrent_vector_base_v3::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) { - internal_grow_to_at_least_with_result( new_size, element_size, init, src ); -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) { - size_type e = my_early_size; - while( e<new_size ) { - size_type f = my_early_size.compare_and_swap(new_size,e); - if( f==e ) { - internal_grow( e, new_size, element_size, init, src ); - break; - } - e = f; - } - // Check/wait for segments allocation completes - segment_index_t i, k_old = segment_index_of( new_size-1 ); - if( k_old >= pointers_per_short_table && my_segment == my_storage ) { - spin_wait_while_eq( my_segment, my_storage ); - } - for( i = 0; i <= k_old; ++i ) { - segment_t &s = my_segment[i]; - if(s.load<relaxed>() == segment_not_used()) { - ITT_NOTIFY(sync_prepare, &s); - atomic_backoff backoff(true); - while( my_segment[i].load<acquire>() == segment_not_used() ) // my_segment may change concurrently - backoff.pause(); - ITT_NOTIFY(sync_acquired, &s); - } 
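An aside on internal_push_back() above, which is the template for every growth path in this file: claim an index with a single atomic increment, then make sure the storage backing it exists before writing. The following deliberately simplified model (fixed capacity, a class of my own, none of the segment machinery) isolates that claim-then-fill protocol and its visible side effect, a size counter that can run ahead of the filled slots, just like my_early_size:

    #include <atomic>
    #include <algorithm>
    #include <cstddef>

    template <typename T, std::size_t Capacity>
    class claim_then_fill {
        T slots_[Capacity];
        std::atomic<std::size_t> early_size_{0}; // counts claimed (not necessarily filled) slots
    public:
        bool push_back(const T& value) {
            // Claim an index first; no two pushers can receive the same slot.
            std::size_t index = early_size_.fetch_add(1, std::memory_order_relaxed);
            if (index >= Capacity)
                return false;          // the real code would enable a new segment here
            slots_[index] = value;     // then fill the claimed slot
            return true;
        }
        // Like my_early_size, this may run ahead of the slots actually
        // written; concurrent readers must tolerate that.
        std::size_t size() const {
            return std::min(early_size_.load(std::memory_order_relaxed), Capacity);
        }
    };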
- if( my_segment[i].load<relaxed>() != segment_allocated() ) - throw_exception(eid_bad_last_alloc); - } -#if TBB_USE_DEBUG - size_type capacity = internal_capacity(); - __TBB_ASSERT( capacity >= new_size, NULL); -#endif - return e; -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src ) { - size_type result = my_early_size.fetch_and_add(delta); - internal_grow( result, result+delta, element_size, init, src ); - return result; -} - -void concurrent_vector_base_v3::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src ) { - __TBB_ASSERT( start<finish, "start must be less than finish" ); - segment_index_t k_start = segment_index_of(start), k_end = segment_index_of(finish-1); - helper::assign_first_segment_if_necessary(*this, k_end); - helper::extend_table_if_necessary(*this, k_end, start); - helper range(my_segment, my_first_block, element_size, k_start, start, finish); - for(; k_end > k_start && k_end >= range.first_block; --k_end ) // allocate segments in reverse order - helper::acquire_segment(*this, k_end, element_size, true/*for k_end>k_start*/); - for(; k_start <= k_end; ++k_start ) // but allocate first block in straight order - helper::acquire_segment(*this, k_start, element_size, segment_base( k_start ) >= start ); - range.apply( helper::init_body(init, src) ); -} - -void concurrent_vector_base_v3::internal_resize( size_type n, size_type element_size, size_type max_size, const void *src, - internal_array_op1 destroy, internal_array_op2 init ) { - size_type j = my_early_size; - if( n > j ) { // construct items - internal_reserve(n, element_size, max_size); - my_early_size = n; - helper for_each(my_segment, my_first_block, element_size, segment_index_of(j), j, n); - for_each.apply( helper::safe_init_body(init, src) ); - } else { - my_early_size = n; - helper for_each(my_segment, my_first_block, element_size, segment_index_of(n), n, j); - for_each.apply( helper::destroy_body(destroy) ); - } -} - -concurrent_vector_base_v3::segment_index_t concurrent_vector_base_v3::internal_clear( internal_array_op1 destroy ) { - __TBB_ASSERT( my_segment, NULL ); - size_type j = my_early_size; - my_early_size = 0; - helper for_each(my_segment, my_first_block, 0, 0, 0, j); // element_size is safe to be zero if 'start' is zero - j = for_each.apply( helper::destroy_body(destroy) ); - size_type i = helper::find_segment_end(*this); - return j < i? i : j+1; -} - -void *concurrent_vector_base_v3::internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy ) -{ - const size_type my_size = my_early_size; - const segment_index_t k_end = helper::find_segment_end(*this); // allocated segments - const segment_index_t k_stop = my_size? segment_index_of(my_size-1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;.. 
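The compaction above, like the rest of the file, leans on one layout invariant: segment 0 holds two elements and segment k > 0 holds 2^k, which is the `0=>0; 1,2=>1; 3,4=>2; [5-8]=>3` progression in the comment just above. A standalone reconstruction of that arithmetic, assuming the usual TBB power-of-two scheme for segment_base/segment_size/segment_index_of (the loop-based log2 stand-in is mine):

    #include <cstdio>
    #include <cstddef>

    static std::size_t segment_size(std::size_t k)  { return k ? std::size_t(1) << k : 2; }
    static std::size_t segment_base(std::size_t k)  { return (std::size_t(1) << k) & ~std::size_t(1); }
    static std::size_t segment_index_of(std::size_t i) {
        std::size_t k = 0;
        while (segment_base(k + 1) <= i) ++k; // portable stand-in for log2(i|1)
        return k;
    }

    int main() {
        // Capacity doubles per segment: [0,2) [2,4) [4,8) [8,16) ...
        for (std::size_t i : {0, 1, 2, 3, 4, 7, 8, 15, 16}) {
            std::size_t k = segment_index_of(i);
            std::printf("element %2zu -> segment %zu (base %zu, size %zu)\n",
                        i, k, segment_base(k), segment_size(k));
        }
        return 0;
    }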
- const segment_index_t first_block = my_first_block; // number of merged segments, getting values from atomics - - segment_index_t k = first_block; - if(k_stop < first_block) - k = k_stop; - else - while (k < k_stop && helper::incompact_predicate(segment_size( k ) * element_size) ) k++; - if(k_stop == k_end && k == first_block) - return NULL; - - segment_t *const segment_table = my_segment; - internal_segments_table &old = *static_cast<internal_segments_table*>( table ); - //this call is left here for sake of backward compatibility, and as a placeholder for table initialization - std::fill_n(old.table,sizeof(old.table)/sizeof(old.table[0]),segment_t()); - old.first_block=0; - - if ( k != first_block && k ) // first segment optimization - { - // exception can occur here - void *seg = helper::allocate_segment(*this, segment_size(k)); - old.table[0].store<relaxed>(seg); - old.first_block = k; // fill info for freeing new segment if exception occurs - // copy items to the new segment - size_type my_segment_size = segment_size( first_block ); - for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) { - __TBB_ASSERT( segment_table[i].load<relaxed>() == segment_allocated(), NULL); - void *s = static_cast<void*>( - static_cast<char*>(seg) + segment_base(i)*element_size ); - //TODO: refactor to use std::min - if(j + my_segment_size >= my_size) my_segment_size = my_size - j; - __TBB_TRY { // exception can occur here - copy( s, segment_table[i].load<relaxed>().pointer<void>(), my_segment_size ); - } __TBB_CATCH(...) { // destroy all the already copied items - helper for_each(&old.table[0], old.first_block, element_size, - 0, 0, segment_base(i)+ my_segment_size); - for_each.apply( helper::destroy_body(destroy) ); - __TBB_RETHROW(); - } - my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block ); - } - // commit the changes - std::copy(segment_table,segment_table + k,old.table); - for (segment_index_t i = 0; i < k; i++) { - segment_table[i].store<relaxed>(static_cast<void*>( - static_cast<char*>(seg) + segment_base(i)*element_size )); - } - old.first_block = first_block; my_first_block = k; // now, first_block != my_first_block - // destroy original copies - my_segment_size = segment_size( first_block ); // old.first_block actually - for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) { - if(j + my_segment_size >= my_size) my_segment_size = my_size - j; - // destructors are supposed to not throw any exceptions - destroy( old.table[i].load<relaxed>().pointer<void>(), my_segment_size ); - my_segment_size = i? 
segment_size( ++i ) : segment_size( i = first_block ); - } - } - // free unnecessary segments allocated by reserve() call - if ( k_stop < k_end ) { - old.first_block = first_block; - std::copy(segment_table+k_stop, segment_table+k_end, old.table+k_stop ); - std::fill_n(segment_table+k_stop, (k_end-k_stop), segment_t()); - if( !k ) my_first_block = 0; - } - return table; -} - -void concurrent_vector_base_v3::internal_swap(concurrent_vector_base_v3& v) -{ - size_type my_sz = my_early_size.load<acquire>(); - size_type v_sz = v.my_early_size.load<relaxed>(); - if(!my_sz && !v_sz) return; - - bool my_was_short = (my_segment.load<relaxed>() == my_storage); - bool v_was_short = (v.my_segment.load<relaxed>() == v.my_storage); - - //In C++11, this would be: swap(my_storage, v.my_storage); - for (int i=0; i < pointers_per_short_table; ++i){ - swap(my_storage[i], v.my_storage[i]); - } - tbb::internal::swap<relaxed>(my_first_block, v.my_first_block); - tbb::internal::swap<relaxed>(my_segment, v.my_segment); - if (my_was_short){ - v.my_segment.store<relaxed>(v.my_storage); - } - if(v_was_short){ - my_segment.store<relaxed>(my_storage); - } - - my_early_size.store<relaxed>(v_sz); - v.my_early_size.store<release>(my_sz); -} - -} // namespace internal - -} // tbb diff --git a/src/tbb/src/tbb/condition_variable.cpp b/src/tbb/src/tbb/condition_variable.cpp deleted file mode 100644 index 08b5927fd..000000000 --- a/src/tbb/src/tbb/condition_variable.cpp +++ /dev/null @@ -1,199 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#include "tbb/compat/condition_variable" -#include "tbb/atomic.h" -#include "tbb_misc.h" -#include "dynamic_link.h" -#include "itt_notify.h" - -namespace tbb { - -namespace internal { - -//condition_variable -#if _WIN32||_WIN64 -using tbb::interface5::internal::condition_variable_using_event; - -static atomic<do_once_state> condvar_api_state; - -void WINAPI init_condvar_using_event( condition_variable_using_event* cv_event ) -{ - // TODO: For Metro port, we can always use the API for condition variables, without dynamic_link etc. 
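The file being removed here (condition_variable.cpp) does exactly two things: it emulates condition variables with a manual-reset event for pre-Vista Windows, and it binds the native kernel32 entry points at run time via dynamic_link (see init_condvar_module further down). A compact sketch of that resolve-or-fall-back pattern in isolation; it assumes a Vista-or-later SDK for the CONDITION_VARIABLE type, the helper names are mine, and the event-based emulation body is elided:

    #if defined(_WIN32)
    #include <windows.h>

    typedef VOID (WINAPI *wake_one_fn)(PCONDITION_VARIABLE);

    // Event-based stand-in used when the native API is unavailable; the
    // real emulation is the surrounding deleted code.
    static VOID WINAPI wake_one_using_event(PCONDITION_VARIABLE) { /* ... */ }

    static wake_one_fn resolve_wake_one() {
        HMODULE kernel32 = GetModuleHandleA("Kernel32.dll");
        FARPROC entry = kernel32
            ? GetProcAddress(kernel32, "WakeConditionVariable")
            : NULL;
        // Vista and later: use the OS primitive; otherwise keep the emulation.
        return entry ? reinterpret_cast<wake_one_fn>(entry) : &wake_one_using_event;
    }
    #endif // _WIN32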
- cv_event->event = CreateEventEx(NULL, NULL, 0x1 /*CREATE_EVENT_MANUAL_RESET*/, EVENT_ALL_ACCESS ); - InitializeCriticalSectionEx( &cv_event->mutex, 4000, 0 ); - cv_event->n_waiters = 0; - cv_event->release_count = 0; - cv_event->epoch = 0; -} - -BOOL WINAPI sleep_condition_variable_cs_using_event( condition_variable_using_event* cv_event, LPCRITICAL_SECTION cs, DWORD dwMilliseconds ) -{ - EnterCriticalSection( &cv_event->mutex ); - ++cv_event->n_waiters; - unsigned my_generation = cv_event->epoch; - LeaveCriticalSection( &cv_event->mutex ); - LeaveCriticalSection( cs ); - for (;;) { - // should come here at least once - DWORD rc = WaitForSingleObjectEx( cv_event->event, dwMilliseconds, FALSE ); - EnterCriticalSection( &cv_event->mutex ); - if( rc!=WAIT_OBJECT_0 ) { - --cv_event->n_waiters; - LeaveCriticalSection( &cv_event->mutex ); - if( rc==WAIT_TIMEOUT ) { - SetLastError( WAIT_TIMEOUT ); - EnterCriticalSection( cs ); - } - return false; - } - __TBB_ASSERT( rc==WAIT_OBJECT_0, NULL ); - if( cv_event->release_count>0 && cv_event->epoch!=my_generation ) - break; - LeaveCriticalSection( &cv_event->mutex ); - } - - // still in the critical section - --cv_event->n_waiters; - int count = --cv_event->release_count; - LeaveCriticalSection( &cv_event->mutex ); - - if( count==0 ) { - __TBB_ASSERT( cv_event->event, "Premature destruction of condition variable?" ); - ResetEvent( cv_event->event ); - } - EnterCriticalSection( cs ); - return true; -} - -void WINAPI wake_condition_variable_using_event( condition_variable_using_event* cv_event ) -{ - EnterCriticalSection( &cv_event->mutex ); - if( cv_event->n_waiters>cv_event->release_count ) { - SetEvent( cv_event->event ); // Signal the manual-reset event. - ++cv_event->release_count; - ++cv_event->epoch; - } - LeaveCriticalSection( &cv_event->mutex ); -} - -void WINAPI wake_all_condition_variable_using_event( condition_variable_using_event* cv_event ) -{ - EnterCriticalSection( &cv_event->mutex ); - if( cv_event->n_waiters>0 ) { - SetEvent( cv_event->event ); - cv_event->release_count = cv_event->n_waiters; - ++cv_event->epoch; - } - LeaveCriticalSection( &cv_event->mutex ); -} - -void WINAPI destroy_condvar_using_event( condition_variable_using_event* cv_event ) -{ - HANDLE my_event = cv_event->event; - EnterCriticalSection( &cv_event->mutex ); - // NULL is an invalid HANDLE value - cv_event->event = NULL; - if( cv_event->n_waiters>0 ) { - LeaveCriticalSection( &cv_event->mutex ); - spin_wait_until_eq( cv_event->n_waiters, 0 ); - // make sure the last thread completes its access to cv - EnterCriticalSection( &cv_event->mutex ); - } - LeaveCriticalSection( &cv_event->mutex ); - CloseHandle( my_event ); -} - -void WINAPI destroy_condvar_noop( CONDITION_VARIABLE* /*cv*/ ) { /*no op*/ } - -static void (WINAPI *__TBB_init_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&init_condvar_using_event; -static BOOL (WINAPI *__TBB_condvar_wait)( PCONDITION_VARIABLE, LPCRITICAL_SECTION, DWORD ) = (BOOL (WINAPI *)(PCONDITION_VARIABLE,LPCRITICAL_SECTION, DWORD))&sleep_condition_variable_cs_using_event; -static void (WINAPI *__TBB_condvar_notify_one)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_condition_variable_using_event; -static void (WINAPI *__TBB_condvar_notify_all)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_all_condition_variable_using_event; -static void (WINAPI *__TBB_destroy_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_using_event; - -//! 
Table describing how to link the handlers. -static const dynamic_link_descriptor CondVarLinkTable[] = { - DLD(InitializeConditionVariable, __TBB_init_condvar), - DLD(SleepConditionVariableCS, __TBB_condvar_wait), - DLD(WakeConditionVariable, __TBB_condvar_notify_one), - DLD(WakeAllConditionVariable, __TBB_condvar_notify_all) -}; - -void init_condvar_module() -{ - __TBB_ASSERT( (uintptr_t)__TBB_init_condvar==(uintptr_t)&init_condvar_using_event, NULL ); - if( dynamic_link( "Kernel32.dll", CondVarLinkTable, 4 ) ) - __TBB_destroy_condvar = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_noop; -} -#endif /* _WIN32||_WIN64 */ - -} // namespace internal - -#if _WIN32||_WIN64 - -namespace interface5 { -namespace internal { - -using tbb::internal::condvar_api_state; -using tbb::internal::__TBB_init_condvar; -using tbb::internal::__TBB_condvar_wait; -using tbb::internal::__TBB_condvar_notify_one; -using tbb::internal::__TBB_condvar_notify_all; -using tbb::internal::__TBB_destroy_condvar; -using tbb::internal::init_condvar_module; - -void internal_initialize_condition_variable( condvar_impl_t& cv ) -{ - atomic_do_once( &init_condvar_module, condvar_api_state ); - __TBB_init_condvar( &cv.cv_native ); -} - -void internal_destroy_condition_variable( condvar_impl_t& cv ) -{ - __TBB_destroy_condvar( &cv.cv_native ); -} - -void internal_condition_variable_notify_one( condvar_impl_t& cv ) -{ - __TBB_condvar_notify_one ( &cv.cv_native ); -} - -void internal_condition_variable_notify_all( condvar_impl_t& cv ) -{ - __TBB_condvar_notify_all( &cv.cv_native ); -} - -bool internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i ) -{ - DWORD duration = i ? DWORD((i->seconds()*1000)) : INFINITE; - mtx->set_state( mutex::INITIALIZED ); - BOOL res = __TBB_condvar_wait( &cv.cv_native, mtx->native_handle(), duration ); - mtx->set_state( mutex::HELD ); - return res?true:false; -} - -} // namespace internal -} // nameespace interface5 - -#endif /* _WIN32||_WIN64 */ - -} // namespace tbb diff --git a/src/tbb/src/tbb/critical_section.cpp b/src/tbb/src/tbb/critical_section.cpp deleted file mode 100644 index 8a2ac27d2..000000000 --- a/src/tbb/src/tbb/critical_section.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/critical_section.h" -#include "itt_notify.h" - -namespace tbb { - namespace internal { - -void critical_section_v4::internal_construct() { - ITT_SYNC_CREATE(&my_impl, _T("ppl::critical_section"), _T("")); -} -} // namespace internal -} // namespace tbb diff --git a/src/tbb/src/tbb/custom_scheduler.h b/src/tbb/src/tbb/custom_scheduler.h deleted file mode 100644 index 1248f192f..000000000 --- a/src/tbb/src/tbb/custom_scheduler.h +++ /dev/null @@ -1,684 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _TBB_custom_scheduler_H -#define _TBB_custom_scheduler_H - -#include "scheduler.h" -#include "observer_proxy.h" -#include "itt_notify.h" - -namespace tbb { -namespace internal { - -//! Amount of time to pause between steals. -/** The default values below were found to be best empirically for K-Means - on the 32-way Altix and 4-way (*2 for HT) fxqlin04. */ -#ifdef __TBB_STEALING_PAUSE -static const long PauseTime = __TBB_STEALING_PAUSE; -#elif __TBB_ipf -static const long PauseTime = 1500; -#else -static const long PauseTime = 80; -#endif - -//------------------------------------------------------------------------ -//! Traits classes for scheduler -//------------------------------------------------------------------------ - -struct DefaultSchedulerTraits { - static const bool itt_possible = true; - static const bool has_slow_atomic = false; -}; - -struct IntelSchedulerTraits { - static const bool itt_possible = false; -#if __TBB_x86_32||__TBB_x86_64 - static const bool has_slow_atomic = true; -#else - static const bool has_slow_atomic = false; -#endif /* __TBB_x86_32||__TBB_x86_64 */ -}; - -//------------------------------------------------------------------------ -// custom_scheduler -//------------------------------------------------------------------------ - -//! A scheduler with a customized evaluation loop. -/** The customization can use SchedulerTraits to make decisions without needing a run-time check. */ -template<typename SchedulerTraits> -class custom_scheduler: private generic_scheduler { - typedef custom_scheduler<SchedulerTraits> scheduler_type; - - //! 
Scheduler loop that dispatches tasks. - /** If child is non-NULL, it is dispatched first. - Then, until "parent" has a reference count of 1, other tasks are dispatched or stolen. */ - /*override*/ - void local_wait_for_all( task& parent, task* child ); - - //! Entry point from client code to the scheduler loop that dispatches tasks. - /** The method is virtual, but the *this object is used only for the sake of dispatching on the correct vtable, - not necessarily the correct *this object. The correct *this object is looked up in TLS. */ - /*override*/ - void wait_for_all( task& parent, task* child ) { - static_cast<custom_scheduler*>(governor::local_scheduler())->scheduler_type::local_wait_for_all( parent, child ); - } - - //! Construct a custom_scheduler - custom_scheduler( arena* a, size_t index ) : generic_scheduler(a, index) {} - - //! Decrements ref_count of a predecessor. - /** If it achieves 0, the predecessor is scheduled for execution. - When changing, remember that this is a hot path function. */ - void tally_completion_of_predecessor( task& s, task*& bypass_slot ) { - task_prefix& p = s.prefix(); - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_releasing, &p.ref_count); - if( SchedulerTraits::has_slow_atomic && p.ref_count==1 ) - p.ref_count=0; - else if( __TBB_FetchAndDecrementWrelease(&p.ref_count) > 1 ) {// more references exist - // '__TBB_cl_evict(&p)' degraded performance of parallel_preorder example - return; - } - - // Ordering on p.ref_count (superfluous if SchedulerTraits::has_slow_atomic) - __TBB_control_consistency_helper(); - __TBB_ASSERT(p.ref_count==0, "completion of task caused predecessor's reference count to underflow"); - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_acquired, &p.ref_count); -#if TBB_USE_ASSERT - p.extra_state &= ~es_ref_count_active; -#endif /* TBB_USE_ASSERT */ - -#if __TBB_RECYCLE_TO_ENQUEUE - if (p.state==task::to_enqueue) { - // related to __TBB_TASK_ARENA TODO: try keep priority of the task - // e.g. rework task_prefix to remember priority of received task and use here - my_arena->enqueue_task(s, 0, my_random ); - } else -#endif /*__TBB_RECYCLE_TO_ENQUEUE*/ - if( bypass_slot==NULL ) - bypass_slot = &s; - else - local_spawn( s, s.prefix().next ); - } - -public: - static generic_scheduler* allocate_scheduler( arena* a, size_t index ) { - scheduler_type* s = (scheduler_type*)NFS_Allocate(1,sizeof(scheduler_type),NULL); - new( s ) scheduler_type( a, index ); - s->assert_task_pool_valid(); - ITT_SYNC_CREATE(s, SyncType_Scheduler, SyncObj_TaskPoolSpinning); - return s; - } - - //! Try getting a task from the mailbox or stealing from another scheduler. - /** Returns the stolen task or NULL if all attempts fail. 
*/ - /* override */ task* receive_or_steal_task( __TBB_atomic reference_count& completion_ref_count, bool return_if_no_work ); - -}; // class custom_scheduler<> - -//------------------------------------------------------------------------ -// custom_scheduler methods -//------------------------------------------------------------------------ -template<typename SchedulerTraits> -task* custom_scheduler<SchedulerTraits>::receive_or_steal_task( __TBB_atomic reference_count& completion_ref_count, - bool return_if_no_work ) { - task* t = NULL; - bool outermost_dispatch_level = return_if_no_work || master_outermost_level(); - bool can_steal_here = can_steal(); - my_inbox.set_is_idle( true ); -#if __TBB_HOARD_NONLOCAL_TASKS - __TBB_ASSERT(!my_nonlocal_free_list, NULL); -#endif -#if __TBB_TASK_PRIORITY - if ( return_if_no_work && my_arena->my_skipped_fifo_priority ) { - // This thread can dequeue FIFO tasks, and some priority levels of - // FIFO tasks have been bypassed (to prevent deadlock caused by - // dynamic priority changes in nested task group hierarchy). - intptr_t skipped_priority = my_arena->my_skipped_fifo_priority; - if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority && - skipped_priority > my_arena->my_top_priority ) - { - my_market->update_arena_priority( *my_arena, skipped_priority ); - } - } - task_stream *ts; -#else /* !__TBB_TASK_PRIORITY */ - task_stream *ts = &my_arena->my_task_stream; -#endif /* !__TBB_TASK_PRIORITY */ - // TODO: Try to find a place to reset my_limit (under market's lock) - // The number of slots potentially used in the arena. Updated once in a while, as my_limit changes rarely. - size_t n = my_arena->my_limit-1; - int yield_count = 0; - // The state "failure_count==-1" is used only when itt_possible is true, - // and denotes that a sync_prepare has not yet been issued. - for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) { - __TBB_ASSERT( my_arena->my_limit > 0, NULL ); - __TBB_ASSERT( my_arena_index <= n, NULL ); - if( completion_ref_count==1 ) { - if( SchedulerTraits::itt_possible ) { - if( failure_count!=-1 ) { - ITT_NOTIFY(sync_prepare, &completion_ref_count); - // Notify Intel(R) Thread Profiler that thread has stopped spinning. - ITT_NOTIFY(sync_acquired, this); - } - ITT_NOTIFY(sync_acquired, &completion_ref_count); - } - __TBB_ASSERT( !t, NULL ); - __TBB_control_consistency_helper(); // on ref_count - break; // exit stealing loop and return; - } - // Check if the resource manager requires our arena to relinquish some threads - if ( return_if_no_work && my_arena->my_num_workers_allotted < my_arena->num_workers_active() ) { -#if !__TBB_TASK_ARENA - __TBB_ASSERT( is_worker(), NULL ); -#endif - if( SchedulerTraits::itt_possible && failure_count != -1 ) - ITT_NOTIFY(sync_cancel, this); - return NULL; - } -#if __TBB_TASK_PRIORITY - ts = &my_arena->my_task_stream[my_arena->my_top_priority]; -#endif - // Check if there are tasks mailed to this thread via task-to-thread affinity mechanism. - __TBB_ASSERT(my_affinity_id, NULL); - if ( n && !my_inbox.empty() && (t = get_mailbox_task()) ) { - GATHER_STATISTIC( ++my_counters.mails_received ); - } - // Check if there are tasks in starvation-resistant stream. - // Only allowed for workers with empty stack, which is identified by return_if_no_work. 
- else if ( outermost_dispatch_level && !ts->empty() && (t = ts->pop( my_arena_slot->hint_for_pop)) ) { - ITT_NOTIFY(sync_acquired, ts); - // just proceed with the obtained task - } -#if __TBB_TASK_PRIORITY - // Check if any earlier offloaded non-top priority tasks have been returned to the top level - else if ( my_offloaded_tasks && (t=reload_tasks()) ) { - // just proceed with the obtained task - } -#endif /* __TBB_TASK_PRIORITY */ - else if ( can_steal_here && n ) { - // Try to steal a task from a random victim. - size_t k = my_random.get() % n; - arena_slot* victim = &my_arena->my_slots[k]; - // The following condition excludes the master that might have - // already taken our previous place in the arena from the list - // of potential victims. But since such a situation can take - // place only in case of significant oversubscription, keeping - // the checks simple seems to be preferable to complicating the code. - if( k >= my_arena_index ) - ++victim; // Adjusts random distribution to exclude self - task **pool = victim->task_pool; - if( pool == EmptyTaskPool || !(t = steal_task( *victim )) ) - goto fail; - if( is_proxy(*t) ) { - task_proxy &tp = *(task_proxy*)t; - t = tp.extract_task<task_proxy::pool_bit>(); - if ( !t ) { - // Proxy was empty, so it's our responsibility to free it - free_task<no_cache_small_task>(tp); - goto fail; - } - GATHER_STATISTIC( ++my_counters.proxies_stolen ); - } - t->prefix().extra_state |= es_task_is_stolen; - if( is_version_3_task(*t) ) { - my_innermost_running_task = t; - t->prefix().owner = this; - t->note_affinity( my_affinity_id ); - } - GATHER_STATISTIC( ++my_counters.steals_committed ); - } // end of stealing branch - else - goto fail; - // A task was successfully obtained somewhere - __TBB_ASSERT(t,NULL); -#if __TBB_SCHEDULER_OBSERVER - my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() ); - the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - if ( SchedulerTraits::itt_possible && failure_count != -1 ) { - // FIXME - might be victim, or might be selected from a mailbox - // Notify Intel(R) Thread Profiler that thread has stopped spinning. - ITT_NOTIFY(sync_acquired, this); - } - break; // exit stealing loop and return -fail: - GATHER_STATISTIC( ++my_counters.steals_failed ); - if( SchedulerTraits::itt_possible && failure_count==-1 ) { - // The first attempt to steal work failed, so notify Intel(R) Thread Profiler that - // the thread has started spinning. Ideally, we would do this notification - // *before* the first failed attempt to steal, but at that point we do not - // know that the steal will fail. - ITT_NOTIFY(sync_prepare, this); - failure_count = 0; - } - // Pause, even if we are going to yield, because the yield might return immediately. 
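The failure handling that follows escalates in stages: every failed steal pauses for `PauseTime` cycles, after roughly `2*(n+1)` consecutive failures the thread starts yielding, and after about 100 yields it checks whether the arena is out of work and returns the thread to RML. A minimal, self-contained sketch of that pause-then-yield ladder (names and thresholds here are illustrative, not TBB's):

```cpp
#include <atomic>
#include <thread>

// Spin cheaply first, escalate to yielding, and finally give up --
// the same ladder as __TBB_Pause / __TBB_Yield / return-to-RML below.
bool wait_for_work(std::atomic<bool>& has_work, int spin_limit, int yield_limit) {
    int failures = 0, yields = 0;
    while (!has_work.load(std::memory_order_acquire)) {
        if (failures < spin_limit) {
            ++failures;                  // analogue of __TBB_Pause(PauseTime)
        } else if (yields < yield_limit) {
            ++yields;
            std::this_thread::yield();   // analogue of __TBB_Yield()
        } else {
            return false;                // give up, as when the worker returns to RML
        }
    }
    return true;
}
```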
- __TBB_Pause(PauseTime); - const int failure_threshold = 2*int(n+1); - if( failure_count>=failure_threshold ) { -#if __TBB_YIELD2P - failure_count = 0; -#else - failure_count = failure_threshold; -#endif - __TBB_Yield(); -#if __TBB_TASK_PRIORITY - // Check if there are tasks abandoned by other workers - if ( my_arena->my_orphaned_tasks ) { - // Epoch must be advanced before seizing the list pointer - ++my_arena->my_abandonment_epoch; - task* orphans = (task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 ); - if ( orphans ) { - task** link = NULL; - // Get local counter out of the way (we've just brought in external tasks) - my_local_reload_epoch--; - t = reload_tasks( orphans, link, effective_reference_priority() ); - if ( orphans ) { - *link = my_offloaded_tasks; - if ( !my_offloaded_tasks ) - my_offloaded_task_list_tail_link = link; - my_offloaded_tasks = orphans; - } - __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL ); - if ( t ) { - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_cancel, this); - break; // exit stealing loop and return - } - } - } -#endif /* __TBB_TASK_PRIORITY */ - const int yield_threshold = 100; - if( yield_count++ >= yield_threshold ) { - // When a worker thread has nothing to do, return it to RML. - // For purposes of affinity support, the thread is considered idle while in RML. -#if __TBB_TASK_PRIORITY - if( return_if_no_work || my_arena->my_top_priority > my_arena->my_bottom_priority ) { - if ( my_arena->is_out_of_work() && return_if_no_work ) { -#else /* !__TBB_TASK_PRIORITY */ - if ( return_if_no_work && my_arena->is_out_of_work() ) { -#endif /* !__TBB_TASK_PRIORITY */ - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_cancel, this); - return NULL; - } -#if __TBB_TASK_PRIORITY - } - if ( my_offloaded_tasks ) { - // Safeguard against any sloppiness in managing reload epoch - // counter (e.g. on the hot path for performance reasons). - my_local_reload_epoch--; - // Break the deadlock caused by a higher priority dispatch loop - // stealing and offloading a lower priority task. Priority check - // at the stealing moment cannot completely preclude such cases - // because priorities can change dynamically. - if ( !return_if_no_work && *my_ref_top_priority > my_arena->my_top_priority ) { - GATHER_STATISTIC( ++my_counters.prio_ref_fixups ); - my_ref_top_priority = &my_arena->my_top_priority; - // it's expected that only outermost workers can use global reload epoch - __TBB_ASSERT(!worker_outermost_level(), NULL); - __TBB_ASSERT(my_ref_reload_epoch == &my_arena->my_reload_epoch, NULL); - } - } -#endif /* __TBB_TASK_PRIORITY */ - } // end of arena snapshot branch - // If several attempts did not find work, re-read the arena limit. - n = my_arena->my_limit-1; - } // end of yielding branch - } // end of nonlocal task retrieval loop - my_inbox.set_is_idle( false ); - return t; -} - -template<typename SchedulerTraits> -void custom_scheduler<SchedulerTraits>::local_wait_for_all( task& parent, task* child ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - __TBB_ASSERT( parent.ref_count() >= (child && child->parent() == &parent ? 2 : 1), "ref_count is too small" ); - assert_task_pool_valid(); - // Using parent's refcount in sync_prepare (in the stealing loop below) is - // a workaround for TP. We need to name it here to display correctly in Ampl. 
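At its core, `local_wait_for_all()` runs tasks until the parent's reference count drops back to `quit_point` (normally 1, the waiter's own reference; the `parents_work_done` constant defined just below). A distilled sketch of that contract, using hypothetical stand-ins for TBB's task machinery:

```cpp
#include <atomic>

// Hypothetical model: ref_count is 1 (the waiter) plus the number of
// outstanding children; each completing child decrements it, as
// tally_completion_of_predecessor() does above.
struct toy_parent {
    std::atomic<long> ref_count{1};
};

void toy_wait_for_all(toy_parent& parent) {
    while (parent.ref_count.load(std::memory_order_acquire) != 1) {
        // ...dispatch a local task, or steal one; each finished child
        // releases its reference on 'parent'...
    }
}
```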
- if( SchedulerTraits::itt_possible ) - ITT_SYNC_CREATE(&parent.prefix().ref_count, SyncType_Scheduler, SyncObj_TaskStealingLoop); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( parent.prefix().context || (is_worker() && &parent == my_dummy_task), "parent task does not have context" ); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - task* t = child; - // Constant all_local_work_done is an unreachable refcount value that prevents - // quitting the dispatch loop early. It is defined to be in the middle of the range - // of negative values representable by the reference_count type. - static const reference_count - // For normal dispatch loops - parents_work_done = 1, - // For termination dispatch loops in masters - all_local_work_done = (reference_count)3 << (sizeof(reference_count) * 8 - 2); - reference_count quit_point; -#if __TBB_TASK_PRIORITY - __TBB_ASSERT( (uintptr_t)*my_ref_top_priority < (uintptr_t)num_priority_levels, NULL ); - volatile intptr_t *old_ref_top_priority = my_ref_top_priority; - // When entering a nested parallelism level, the market-level counter - // must be replaced with the one local to this arena. - volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ - task* old_dispatching_task = my_dispatching_task; - my_dispatching_task = my_innermost_running_task; - if( master_outermost_level() ) { - // We are in the outermost task dispatch loop of a master thread or a worker which mimics a master - __TBB_ASSERT( !is_worker() || my_dispatching_task != old_dispatching_task, NULL ); - quit_point = &parent == my_dummy_task ? all_local_work_done : parents_work_done; - } else { - quit_point = parents_work_done; -#if __TBB_TASK_PRIORITY - if ( &parent != my_dummy_task ) { - // We are in a nested dispatch loop. - // Market or arena priority must not prevent child tasks from being - // executed so that dynamic priority changes do not cause deadlock. - my_ref_top_priority = &parent.prefix().context->my_priority; - my_ref_reload_epoch = &my_arena->my_reload_epoch; - if(my_ref_reload_epoch != old_ref_reload_epoch) - my_local_reload_epoch = *my_ref_reload_epoch-1; - } -#endif /* __TBB_TASK_PRIORITY */ - } - - cpu_ctl_env_helper cpu_ctl_helper; - if ( t ) - cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(t->prefix().context) ); - -#if TBB_USE_EXCEPTIONS - // Infinite safeguard EH loop - for (;;) { - try { -#endif /* TBB_USE_EXCEPTIONS */ - // Outer loop receives tasks from the global environment (via mailbox, FIFO queue(s), - // and by stealing from other threads' task pools). - // All exit points from the dispatch loop are located in its immediate scope. - for(;;) { - // Middle loop retrieves tasks from the local task pool. - for(;;) { - // Inner loop evaluates tasks coming from nesting loops and those returned - // by just executed tasks (bypassing spawn or enqueue calls). 
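The inner loop that follows implements TBB's scheduler-bypass convention: `task::execute()` may return the task that should run next, and the dispatcher runs it immediately instead of spawning it and paying for a task-pool round-trip (the `spawns_bypassed` counter below tracks this). A toy rendering of the convention, with hypothetical types:

```cpp
// Toy model of scheduler bypass: execute() may hand back a successor,
// which runs at once without going through the task pool.
struct toy_task {
    virtual ~toy_task() = default;
    virtual toy_task* execute() = 0;   // may return the next task to run
};

void toy_dispatch(toy_task* t) {
    while (t) {
        toy_task* next = t->execute(); // bypassed successor, if any
        delete t;
        t = next;
    }
}
```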
- while(t) { - __TBB_ASSERT( my_inbox.is_idle_state(false), NULL ); - __TBB_ASSERT(!is_proxy(*t),"unexpected proxy"); - __TBB_ASSERT( t->prefix().owner, NULL ); - assert_task_valid(*t); -#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT - assert_context_valid(t->prefix().context); - if ( !t->prefix().context->my_cancellation_requested ) -#endif - __TBB_ASSERT( 1L<<t->state() & (1L<<task::allocated|1L<<task::ready|1L<<task::reexecute), NULL ); - assert_task_pool_valid(); -#if __TBB_TASK_PRIORITY - intptr_t p = priority(*t); - if ( p != *my_ref_top_priority && (t->prefix().extra_state & es_task_enqueued) == 0) { - assert_priority_valid(p); - if ( p != my_arena->my_top_priority ) { - my_market->update_arena_priority( *my_arena, p ); - } - if ( p < effective_reference_priority() ) { - if ( !my_offloaded_tasks ) { - my_offloaded_task_list_tail_link = &t->prefix().next_offloaded; - // Erase possible reference to the owner scheduler (next_offloaded is a union member) - *my_offloaded_task_list_tail_link = NULL; - } - offload_task( *t, p ); - if ( in_arena() ) { - t = winnow_task_pool(); - if ( t ) - continue; - } - else { - // Mark arena as full to unlock arena priority level adjustment - // by arena::is_out_of_work(), and ensure worker's presence. - my_arena->advertise_new_work<false>(); - } - goto stealing_ground; - } - } -#endif /* __TBB_TASK_PRIORITY */ - task* t_next = NULL; - my_innermost_running_task = t; - t->prefix().owner = this; - t->prefix().state = task::executing; -#if __TBB_TASK_GROUP_CONTEXT - if ( !t->prefix().context->my_cancellation_requested ) -#endif - { - GATHER_STATISTIC( ++my_counters.tasks_executed ); - GATHER_STATISTIC( my_counters.avg_arena_concurrency += my_arena->num_workers_active() ); - GATHER_STATISTIC( my_counters.avg_assigned_workers += my_arena->my_num_workers_allotted ); -#if __TBB_TASK_PRIORITY - GATHER_STATISTIC( my_counters.avg_arena_prio += p ); - GATHER_STATISTIC( my_counters.avg_market_prio += my_market->my_global_top_priority ); -#endif /* __TBB_TASK_PRIORITY */ - ITT_STACK(SchedulerTraits::itt_possible, callee_enter, t->prefix().context->itt_caller); - t_next = t->execute(); - ITT_STACK(SchedulerTraits::itt_possible, callee_leave, t->prefix().context->itt_caller); - if (t_next) { - __TBB_ASSERT( t_next->state()==task::allocated, - "if task::execute() returns task, it must be marked as allocated" ); - reset_extra_state(t_next); -#if TBB_USE_ASSERT - affinity_id next_affinity=t_next->prefix().affinity; - if (next_affinity != 0 && next_affinity != my_affinity_id) - GATHER_STATISTIC( ++my_counters.affinity_ignored ); -#endif - } - } - assert_task_pool_valid(); - switch( t->state() ) { - case task::executing: { - task* s = t->parent(); - __TBB_ASSERT( my_innermost_running_task==t, NULL ); - __TBB_ASSERT( t->prefix().ref_count==0, "Task still has children after it has been executed" ); - t->~task(); - if( s ) - tally_completion_of_predecessor(*s, t_next); - free_task<no_hint>( *t ); - assert_task_pool_valid(); - break; - } - - case task::recycle: // set by recycle_as_safe_continuation() - t->prefix().state = task::allocated; -#if __TBB_RECYCLE_TO_ENQUEUE - case task::to_enqueue: // set by recycle_to_enqueue() -#endif - __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" ); - reset_extra_state(t); - // for safe continuation, need atomically decrement ref_count; - tally_completion_of_predecessor(*t, t_next); - assert_task_pool_valid(); - break; - - case task::reexecute: // set by recycle_to_reexecute() - 
__TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" ); - __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" ); - t->prefix().state = task::allocated; - reset_extra_state(t); - local_spawn( *t, t->prefix().next ); - assert_task_pool_valid(); - break; - case task::allocated: - reset_extra_state(t); - break; -#if TBB_USE_ASSERT - case task::ready: - __TBB_ASSERT( false, "task is in READY state upon return from method execute()" ); - break; - default: - __TBB_ASSERT( false, "illegal state" ); -#else - default: // just to shut up some compilation warnings - break; -#endif /* TBB_USE_ASSERT */ - } - GATHER_STATISTIC( t_next ? ++my_counters.spawns_bypassed : 0 ); - t = t_next; - } // end of scheduler bypass loop - - assert_task_pool_valid(); - if ( parent.prefix().ref_count == quit_point ) { - __TBB_ASSERT( quit_point != all_local_work_done, NULL ); - __TBB_control_consistency_helper(); // on ref_count - ITT_NOTIFY(sync_acquired, &parent.prefix().ref_count); - goto done; - } - if ( in_arena() ) { - t = get_task(); - } - else { - __TBB_ASSERT( is_quiescent_local_task_pool_reset(), NULL ); - break; - } - __TBB_ASSERT(!t || !is_proxy(*t),"unexpected proxy"); - assert_task_pool_valid(); - - if ( !t ) break; - - cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(t->prefix().context) ); - }; // end of local task pool retrieval loop - -#if __TBB_TASK_PRIORITY -stealing_ground: -#endif /* __TBB_TASK_PRIORITY */ -#if __TBB_HOARD_NONLOCAL_TASKS - // before stealing, previously stolen task objects are returned - for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) { - t = my_nonlocal_free_list->prefix().next; - free_nonlocal_small_task( *my_nonlocal_free_list ); - } -#endif - if ( quit_point == all_local_work_done ) { - __TBB_ASSERT( !in_arena() && is_quiescent_local_task_pool_reset(), NULL ); - __TBB_ASSERT( !worker_outermost_level(), NULL ); - my_innermost_running_task = my_dispatching_task; - my_dispatching_task = old_dispatching_task; -#if __TBB_TASK_PRIORITY - my_ref_top_priority = old_ref_top_priority; - if(my_ref_reload_epoch != old_ref_reload_epoch) - my_local_reload_epoch = *old_ref_reload_epoch-1; - my_ref_reload_epoch = old_ref_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ - return; - } - // The following assertion may be falsely triggered in the presence of enqueued tasks - //__TBB_ASSERT( my_arena->my_max_num_workers > 0 || my_market->my_ref_count > 1 - // || parent.prefix().ref_count == 1, "deadlock detected" ); - - // Dispatching task pointer is NULL *iff* this is a worker thread in its outermost - // dispatch loop (i.e. its execution stack is empty). In this case it should exit it - // either when there is no more work in the current arena, or when revoked by the market. - - t = receive_or_steal_task( parent.prefix().ref_count, worker_outermost_level() ); - if ( !t ) - goto done; - __TBB_ASSERT(!is_proxy(*t),"unexpected proxy"); - - // The user can capture different FPU settings to the context, so the - // cached data in the helper can be out-of-date and we cannot do a fast - // check. - cpu_ctl_helper.set_env( __TBB_CONTEXT_ARG1(t->prefix().context) ); - } // end of infinite stealing loop -#if TBB_USE_EXCEPTIONS - __TBB_ASSERT( false, "Must never get here" ); - } // end of try-block - TbbCatchAll( t->prefix().context ); - // Complete post-processing ... 
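Both `tally_completion_of_predecessor()` above and the recycled-task post-processing below rely on the same handshake: a release-ordered fetch-and-decrement of `ref_count`, where the caller that observes the count falling from 1 to 0 takes ownership of the task. In portable C++ terms, roughly:

```cpp
#include <atomic>

// Rough portable analogue of __TBB_FetchAndDecrementWrelease(): returns true
// for exactly one caller -- the one that dropped the count from 1 to 0 and
// therefore owns the task afterwards.
bool release_reference(std::atomic<long>& ref_count) {
    return ref_count.fetch_sub(1, std::memory_order_release) == 1;
}
```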
- if( t->state() == task::recycle -#if __TBB_RECYCLE_TO_ENQUEUE - // TODO: the enqueue semantics gets lost below, consider reimplementing - || t->state() == task::to_enqueue -#endif - ) { - // ... for recycled tasks to atomically decrement ref_count - t->prefix().state = task::allocated; - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_releasing, &t->prefix().ref_count); - if( __TBB_FetchAndDecrementWrelease(&t->prefix().ref_count)==1 ) { - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_acquired, &t->prefix().ref_count); - }else{ - t = NULL; - } - } - } // end of infinite EH loop - __TBB_ASSERT( false, "Must never get here too" ); -#endif /* TBB_USE_EXCEPTIONS */ -done: - my_innermost_running_task = my_dispatching_task; - my_dispatching_task = old_dispatching_task; -#if __TBB_TASK_PRIORITY - my_ref_top_priority = old_ref_top_priority; - if(my_ref_reload_epoch != old_ref_reload_epoch) - my_local_reload_epoch = *old_ref_reload_epoch-1; - my_ref_reload_epoch = old_ref_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ - if ( !ConcurrentWaitsEnabled(parent) ) { - if ( parent.prefix().ref_count != parents_work_done ) { - // This is a worker that was revoked by the market. -#if __TBB_TASK_ARENA - __TBB_ASSERT( worker_outermost_level(), - "Worker thread exits nested dispatch loop prematurely" ); -#else - __TBB_ASSERT( is_worker() && worker_outermost_level(), - "Worker thread exits nested dispatch loop prematurely" ); -#endif - return; - } - parent.prefix().ref_count = 0; - } -#if TBB_USE_ASSERT - parent.prefix().extra_state &= ~es_ref_count_active; -#endif /* TBB_USE_ASSERT */ -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT(parent.prefix().context && default_context(), NULL); - task_group_context* parent_ctx = parent.prefix().context; - if ( parent_ctx->my_cancellation_requested ) { - task_group_context::exception_container_type *pe = parent_ctx->my_exception; - if ( master_outermost_level() && parent_ctx == default_context() ) { - // We are in the outermost task dispatch loop of a master thread, and - // the whole task tree has been collapsed. So we may clear cancellation data. - parent_ctx->my_cancellation_requested = 0; - // TODO: Add assertion that master's dummy task context does not have children - parent_ctx->my_state &= ~(uintptr_t)task_group_context::may_have_children; - } - if ( pe ) { - // On Windows, FPU control settings changed in the helper destructor are not visible - // outside a catch block. So restore the default settings manually before rethrowing - // the exception. - cpu_ctl_helper.restore_default(); - pe->throw_self(); - } - } - __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*my_dummy_task), - "Worker's dummy task context modified"); - __TBB_ASSERT(!master_outermost_level() || !CancellationInfoPresent(*my_dummy_task), - "Unexpected exception or cancellation data in the master's dummy task"); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - assert_task_pool_valid(); -} - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_custom_scheduler_H */ diff --git a/src/tbb/src/tbb/def/lin32-tbb.def b/src/tbb/src/tbb/def/lin32-tbb.def new file mode 100644 index 000000000..2aef269a6 --- /dev/null +++ b/src/tbb/src/tbb/def/lin32-tbb.def @@ -0,0 +1,183 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +{ +global: + +/* Needed for backwards compatibility */ +_ZNSt13runtime_errorD1Ev; +_ZTISt13runtime_error; +_ZTSSt13runtime_error; +_ZNSt16invalid_argumentD1Ev; +_ZTISt16invalid_argument; +_ZTSSt16invalid_argument; +_ZNSt11range_errorD1Ev; +_ZTISt11range_error; +_ZTSSt11range_error; +_ZNSt12length_errorD1Ev; +_ZTISt12length_error; +_ZTSSt12length_error; +_ZNSt12out_of_rangeD1Ev; +_ZTISt12out_of_range; +_ZTSSt12out_of_range; + +/* Needed by rstan */ +_ZN3tbb8internal26task_scheduler_observer_v37observeEb; + +/* Assertions (assert.cpp) */ +_ZN3tbb6detail2r117assertion_failureEPKciS3_S3_; + +/* ITT (profiling.cpp) */ +_ZN3tbb6detail2r112itt_task_endENS0_2d115itt_domain_enumE; +_ZN3tbb6detail2r114itt_region_endENS0_2d115itt_domain_enumEPvy; +_ZN3tbb6detail2r114itt_task_beginENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE; +_ZN3tbb6detail2r115call_itt_notifyEiPv; +_ZN3tbb6detail2r115create_itt_syncEPvPKcS4_; +_ZN3tbb6detail2r116itt_region_beginENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE; +_ZN3tbb6detail2r116itt_relation_addENS0_2d115itt_domain_enumEPvyNS0_2d012itt_relationES4_y; +_ZN3tbb6detail2r117itt_set_sync_nameEPvPKc; +_ZN3tbb6detail2r119itt_make_task_groupENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE; +_ZN3tbb6detail2r120itt_metadata_str_addENS0_2d115itt_domain_enumEPvyNS0_2d021string_resource_indexEPKc; +_ZN3tbb6detail2r120itt_metadata_ptr_addENS0_2d115itt_domain_enumEPvyNS0_2d021string_resource_indexES4_; + +/* Allocators (allocator.cpp) */ +_ZN3tbb6detail2r115allocate_memoryEj; +_ZN3tbb6detail2r117deallocate_memoryEPv; +_ZN3tbb6detail2r122cache_aligned_allocateEj; +_ZN3tbb6detail2r124cache_aligned_deallocateEPv; +_ZN3tbb6detail2r115cache_line_sizeEv; +_ZN3tbb6detail2r117is_tbbmalloc_usedEv; + +/* Small object pool (small_object_pool.cpp) */ +_ZN3tbb6detail2r18allocateERPNS0_2d117small_object_poolEj; +_ZN3tbb6detail2r18allocateERPNS0_2d117small_object_poolEjRKNS2_14execution_dataE; +_ZN3tbb6detail2r110deallocateERNS0_2d117small_object_poolEPvj; +_ZN3tbb6detail2r110deallocateERNS0_2d117small_object_poolEPvjRKNS2_14execution_dataE; + +/* Error handling (exception.cpp) */ +_ZN3tbb6detail2r115throw_exceptionENS0_2d012exception_idE; +_ZTIN3tbb6detail2r114bad_last_allocE; +_ZTVN3tbb6detail2r114bad_last_allocE; +_ZTIN3tbb6detail2r112missing_waitE; +_ZTVN3tbb6detail2r112missing_waitE; +_ZTIN3tbb6detail2r110user_abortE; +_ZTVN3tbb6detail2r110user_abortE; +_ZTIN3tbb6detail2r111unsafe_waitE; +_ZTVN3tbb6detail2r111unsafe_waitE; + +/* RTM Mutex (rtm_mutex.cpp) */ +_ZN3tbb6detail2r17acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r17releaseERNS0_2d19rtm_mutex11scoped_lockE; +_ZN3tbb6detail2r111try_acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockE; + +/* RTM RW Mutex (rtm_rw_mutex.cpp) */ +_ZN3tbb6detail2r114acquire_readerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r114acquire_writerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r118try_acquire_readerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockE; +_ZN3tbb6detail2r118try_acquire_writerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockE; 
+_ZN3tbb6detail2r17releaseERNS0_2d112rtm_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r17upgradeERNS0_2d112rtm_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r19downgradeERNS0_2d112rtm_rw_mutex11scoped_lockE; + +/* Tasks and partitioners (task.cpp) */ +_ZN3tbb6detail2r17suspendEPFvPvPNS1_18suspend_point_typeEES2_; +_ZN3tbb6detail2r16resumeEPNS1_18suspend_point_typeE; +_ZN3tbb6detail2r121current_suspend_pointEv; +_ZN3tbb6detail2r114notify_waitersEj; +_ZN3tbb6detail2r127get_thread_reference_vertexEPNS0_2d126wait_tree_vertex_interfaceE; + +/* Task dispatcher (task_dispatcher.cpp) */ +_ZN3tbb6detail2r114execution_slotEPKNS0_2d114execution_dataE; +_ZN3tbb6detail2r14waitERNS0_2d112wait_contextERNS2_18task_group_contextE; +_ZN3tbb6detail2r15spawnERNS0_2d14taskERNS2_18task_group_contextE; +_ZN3tbb6detail2r15spawnERNS0_2d14taskERNS2_18task_group_contextEt; +_ZN3tbb6detail2r116execute_and_waitERNS0_2d14taskERNS2_18task_group_contextERNS2_12wait_contextES6_; +_ZN3tbb6detail2r16submitERNS0_2d14taskERNS2_18task_group_contextEPNS1_5arenaEj; +_ZN3tbb6detail2r115current_contextEv; + +/* Task group context (task_group_context.cpp) */ +_ZN3tbb6detail2r110initializeERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r122cancel_group_executionERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r128is_group_execution_cancelledERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r15resetERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r17destroyERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r119capture_fp_settingsERNS0_2d118task_group_contextE; + +/* Task arena (arena.cpp) */ +_ZN3tbb6detail2r115max_concurrencyEPKNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r110initializeERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r16attachERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r17executeERNS0_2d115task_arena_baseERNS2_13delegate_baseE; +_ZN3tbb6detail2r19terminateERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r120isolate_within_arenaERNS0_2d113delegate_baseEi; +_ZN3tbb6detail2r17enqueueERNS0_2d14taskEPNS2_15task_arena_baseE; +_ZN3tbb6detail2r17enqueueERNS0_2d14taskERNS2_18task_group_contextEPNS2_15task_arena_baseE; +_ZN3tbb6detail2r14waitERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r114execution_slotERKNS0_2d115task_arena_baseE; + +/* System topology parsing and threads pinning (governor.cpp) */ +_ZN3tbb6detail2r115numa_node_countEv; +_ZN3tbb6detail2r117fill_numa_indicesEPi; +_ZN3tbb6detail2r115core_type_countEi; +_ZN3tbb6detail2r122fill_core_type_indicesEPii; +_ZN3tbb6detail2r131constraints_default_concurrencyERKNS0_2d111constraintsEi; +_ZN3tbb6detail2r128constraints_threads_per_coreERKNS0_2d111constraintsEi; +_ZN3tbb6detail2r124numa_default_concurrencyEi; + +/* Observer (observer_proxy.cpp) */ +_ZN3tbb6detail2r17observeERNS0_2d123task_scheduler_observerEb; + +/* Queuing RW Mutex (queuing_rw_mutex.cpp) */ +_ZN3tbb6detail2r111try_acquireERNS0_2d116queuing_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r117upgrade_to_writerERNS0_2d116queuing_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r119downgrade_to_readerERNS0_2d116queuing_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r17acquireERNS0_2d116queuing_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r17releaseERNS0_2d116queuing_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r19constructERNS0_2d116queuing_rw_mutexE; +_ZN3tbb6detail2r19is_writerERKNS0_2d116queuing_rw_mutex11scoped_lockE; + +/* Global control (global_control.cpp) */ +_ZN3tbb6detail2r16createERNS0_2d114global_controlE; +_ZN3tbb6detail2r17destroyERNS0_2d114global_controlE; +_ZN3tbb6detail2r127global_control_active_valueEi; 
+_ZN3tbb6detail2r18finalizeERNS0_2d121task_scheduler_handleEi; +_ZN3tbb6detail2r13getERNS0_2d121task_scheduler_handleE; + +/* Parallel pipeline (parallel_pipeline.cpp) */ +_ZN3tbb6detail2r117parallel_pipelineERNS0_2d118task_group_contextEjRKNS2_11filter_nodeE; +_ZN3tbb6detail2r116set_end_of_inputERNS0_2d111base_filterE; + +/* Concurrent bounded queue (concurrent_bounded_queue.cpp) */ +_ZN3tbb6detail2r126allocate_bounded_queue_repEj; +_ZN3tbb6detail2r126wait_bounded_queue_monitorEPNS1_18concurrent_monitorEjiRNS0_2d113delegate_baseE; +_ZN3tbb6detail2r128abort_bounded_queue_monitorsEPNS1_18concurrent_monitorE; +_ZN3tbb6detail2r128deallocate_bounded_queue_repEPhj; +_ZN3tbb6detail2r128notify_bounded_queue_monitorEPNS1_18concurrent_monitorEjj; + +/* Concurrent monitor (address_waiter.cpp) */ +_ZN3tbb6detail2r115wait_on_addressEPvRNS0_2d113delegate_baseEj; +_ZN3tbb6detail2r117notify_by_addressEPvj; +_ZN3tbb6detail2r121notify_by_address_oneEPv; +_ZN3tbb6detail2r121notify_by_address_allEPv; + +/* Versioning (version.cpp) */ +TBB_runtime_interface_version; +TBB_runtime_version; + +local: +/* TODO: fill more precisely */ +*; +}; diff --git a/src/tbb/src/tbb/def/lin64-tbb.def b/src/tbb/src/tbb/def/lin64-tbb.def new file mode 100644 index 000000000..365ef6c35 --- /dev/null +++ b/src/tbb/src/tbb/def/lin64-tbb.def @@ -0,0 +1,183 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +{ +global: + +/* Needed for backwards compatibility */ +_ZNSt13runtime_errorD1Ev; +_ZTISt13runtime_error; +_ZTSSt13runtime_error; +_ZNSt16invalid_argumentD1Ev; +_ZTISt16invalid_argument; +_ZTSSt16invalid_argument; +_ZNSt11range_errorD1Ev; +_ZTISt11range_error; +_ZTSSt11range_error; +_ZNSt12length_errorD1Ev; +_ZTISt12length_error; +_ZTSSt12length_error; +_ZNSt12out_of_rangeD1Ev; +_ZTISt12out_of_range; +_ZTSSt12out_of_range; + +/* Needed by rstan */ +_ZN3tbb8internal26task_scheduler_observer_v37observeEb; + +/* Assertions (assert.cpp) */ +_ZN3tbb6detail2r117assertion_failureEPKciS3_S3_; + +/* ITT (profiling.cpp) */ +_ZN3tbb6detail2r112itt_task_endENS0_2d115itt_domain_enumE; +_ZN3tbb6detail2r114itt_region_endENS0_2d115itt_domain_enumEPvy; +_ZN3tbb6detail2r114itt_task_beginENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE; +_ZN3tbb6detail2r115call_itt_notifyEiPv; +_ZN3tbb6detail2r115create_itt_syncEPvPKcS4_; +_ZN3tbb6detail2r116itt_region_beginENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE; +_ZN3tbb6detail2r116itt_relation_addENS0_2d115itt_domain_enumEPvyNS0_2d012itt_relationES4_y; +_ZN3tbb6detail2r117itt_set_sync_nameEPvPKc; +_ZN3tbb6detail2r119itt_make_task_groupENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE; +_ZN3tbb6detail2r120itt_metadata_str_addENS0_2d115itt_domain_enumEPvyNS0_2d021string_resource_indexEPKc; +_ZN3tbb6detail2r120itt_metadata_ptr_addENS0_2d115itt_domain_enumEPvyNS0_2d021string_resource_indexES4_; + +/* Allocators (allocator.cpp) */ +_ZN3tbb6detail2r115allocate_memoryEm; +_ZN3tbb6detail2r117deallocate_memoryEPv; +_ZN3tbb6detail2r122cache_aligned_allocateEm; +_ZN3tbb6detail2r124cache_aligned_deallocateEPv; +_ZN3tbb6detail2r115cache_line_sizeEv; +_ZN3tbb6detail2r117is_tbbmalloc_usedEv; + +/* Small object pool (small_object_pool.cpp) */ +_ZN3tbb6detail2r18allocateERPNS0_2d117small_object_poolEm; +_ZN3tbb6detail2r18allocateERPNS0_2d117small_object_poolEmRKNS2_14execution_dataE; +_ZN3tbb6detail2r110deallocateERNS0_2d117small_object_poolEPvm; +_ZN3tbb6detail2r110deallocateERNS0_2d117small_object_poolEPvmRKNS2_14execution_dataE; + +/* Error handling (exception.cpp) */ +_ZN3tbb6detail2r115throw_exceptionENS0_2d012exception_idE; +_ZTIN3tbb6detail2r114bad_last_allocE; +_ZTVN3tbb6detail2r114bad_last_allocE; +_ZTIN3tbb6detail2r112missing_waitE; +_ZTVN3tbb6detail2r112missing_waitE; +_ZTIN3tbb6detail2r110user_abortE; +_ZTVN3tbb6detail2r110user_abortE; +_ZTIN3tbb6detail2r111unsafe_waitE; +_ZTVN3tbb6detail2r111unsafe_waitE; + +/* RTM Mutex (rtm_mutex.cpp) */ +_ZN3tbb6detail2r17acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r17releaseERNS0_2d19rtm_mutex11scoped_lockE; +_ZN3tbb6detail2r111try_acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockE; + +/* RTM RW Mutex (rtm_rw_mutex.cpp) */ +_ZN3tbb6detail2r114acquire_readerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r114acquire_writerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r118try_acquire_readerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockE; +_ZN3tbb6detail2r118try_acquire_writerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockE; +_ZN3tbb6detail2r17releaseERNS0_2d112rtm_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r17upgradeERNS0_2d112rtm_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r19downgradeERNS0_2d112rtm_rw_mutex11scoped_lockE; + +/* Tasks and partitioners (task.cpp) */ +_ZN3tbb6detail2r17suspendEPFvPvPNS1_18suspend_point_typeEES2_; +_ZN3tbb6detail2r16resumeEPNS1_18suspend_point_typeE; +_ZN3tbb6detail2r121current_suspend_pointEv; 
+_ZN3tbb6detail2r114notify_waitersEm; +_ZN3tbb6detail2r127get_thread_reference_vertexEPNS0_2d126wait_tree_vertex_interfaceE; + +/* Task dispatcher (task_dispatcher.cpp) */ +_ZN3tbb6detail2r114execution_slotEPKNS0_2d114execution_dataE; +_ZN3tbb6detail2r14waitERNS0_2d112wait_contextERNS2_18task_group_contextE; +_ZN3tbb6detail2r15spawnERNS0_2d14taskERNS2_18task_group_contextE; +_ZN3tbb6detail2r15spawnERNS0_2d14taskERNS2_18task_group_contextEt; +_ZN3tbb6detail2r116execute_and_waitERNS0_2d14taskERNS2_18task_group_contextERNS2_12wait_contextES6_; +_ZN3tbb6detail2r16submitERNS0_2d14taskERNS2_18task_group_contextEPNS1_5arenaEm; +_ZN3tbb6detail2r115current_contextEv; + +/* Task group context (task_group_context.cpp) */ +_ZN3tbb6detail2r110initializeERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r122cancel_group_executionERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r128is_group_execution_cancelledERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r15resetERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r17destroyERNS0_2d118task_group_contextE; +_ZN3tbb6detail2r119capture_fp_settingsERNS0_2d118task_group_contextE; + +/* Task arena (arena.cpp) */ +_ZN3tbb6detail2r115max_concurrencyEPKNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r110initializeERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r16attachERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r17executeERNS0_2d115task_arena_baseERNS2_13delegate_baseE; +_ZN3tbb6detail2r19terminateERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r120isolate_within_arenaERNS0_2d113delegate_baseEl; +_ZN3tbb6detail2r17enqueueERNS0_2d14taskEPNS2_15task_arena_baseE; +_ZN3tbb6detail2r17enqueueERNS0_2d14taskERNS2_18task_group_contextEPNS2_15task_arena_baseE; +_ZN3tbb6detail2r14waitERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r114execution_slotERKNS0_2d115task_arena_baseE; + +/* System topology parsing and threads pinning (governor.cpp) */ +_ZN3tbb6detail2r115numa_node_countEv; +_ZN3tbb6detail2r117fill_numa_indicesEPi; +_ZN3tbb6detail2r115core_type_countEl; +_ZN3tbb6detail2r122fill_core_type_indicesEPil; +_ZN3tbb6detail2r131constraints_default_concurrencyERKNS0_2d111constraintsEl; +_ZN3tbb6detail2r128constraints_threads_per_coreERKNS0_2d111constraintsEl; +_ZN3tbb6detail2r124numa_default_concurrencyEi; + +/* Observer (observer_proxy.cpp) */ +_ZN3tbb6detail2r17observeERNS0_2d123task_scheduler_observerEb; + +/* Queuing RW Mutex (queuing_rw_mutex.cpp) */ +_ZN3tbb6detail2r111try_acquireERNS0_2d116queuing_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r117upgrade_to_writerERNS0_2d116queuing_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r119downgrade_to_readerERNS0_2d116queuing_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r17acquireERNS0_2d116queuing_rw_mutexERNS3_11scoped_lockEb; +_ZN3tbb6detail2r17releaseERNS0_2d116queuing_rw_mutex11scoped_lockE; +_ZN3tbb6detail2r19constructERNS0_2d116queuing_rw_mutexE; +_ZN3tbb6detail2r19is_writerERKNS0_2d116queuing_rw_mutex11scoped_lockE; + +/* Global control (global_control.cpp) */ +_ZN3tbb6detail2r16createERNS0_2d114global_controlE; +_ZN3tbb6detail2r17destroyERNS0_2d114global_controlE; +_ZN3tbb6detail2r127global_control_active_valueEi; +_ZN3tbb6detail2r18finalizeERNS0_2d121task_scheduler_handleEl; +_ZN3tbb6detail2r13getERNS0_2d121task_scheduler_handleE; + +/* Parallel pipeline (parallel_pipeline.cpp) */ +_ZN3tbb6detail2r117parallel_pipelineERNS0_2d118task_group_contextEmRKNS2_11filter_nodeE; +_ZN3tbb6detail2r116set_end_of_inputERNS0_2d111base_filterE; + +/* Concurrent bounded queue (concurrent_bounded_queue.cpp) */ +_ZN3tbb6detail2r126allocate_bounded_queue_repEm; 
+_ZN3tbb6detail2r126wait_bounded_queue_monitorEPNS1_18concurrent_monitorEmlRNS0_2d113delegate_baseE; +_ZN3tbb6detail2r128abort_bounded_queue_monitorsEPNS1_18concurrent_monitorE; +_ZN3tbb6detail2r128deallocate_bounded_queue_repEPhm; +_ZN3tbb6detail2r128notify_bounded_queue_monitorEPNS1_18concurrent_monitorEmm; + +/* Concurrent monitor (address_waiter.cpp) */ +_ZN3tbb6detail2r115wait_on_addressEPvRNS0_2d113delegate_baseEm; +_ZN3tbb6detail2r117notify_by_addressEPvm; +_ZN3tbb6detail2r121notify_by_address_oneEPv; +_ZN3tbb6detail2r121notify_by_address_allEPv; + +/* Versioning (version.cpp) */ +TBB_runtime_interface_version; +TBB_runtime_version; + +local: +/* TODO: fill more precisely */ +*; +}; diff --git a/src/tbb/src/tbb/def/mac64-tbb.def b/src/tbb/src/tbb/def/mac64-tbb.def new file mode 100644 index 000000000..c98e886ff --- /dev/null +++ b/src/tbb/src/tbb/def/mac64-tbb.def @@ -0,0 +1,180 @@ +# Copyright (c) 2005-2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# TODO: check the legacy comment below; currently an extra leading underscore is used everywhere. +# Sometimes macOS* requires a leading underscore (e.g. in an export list file), but sometimes not +# (e.g. when searching for a symbol in a dynamic library via dlsym()). Symbols in this file SHOULD +# be listed WITHOUT a leading underscore. The __TBB_SYMBOL macro should add the underscore when +# necessary, depending on the intended usage. 
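The TODO above concerns Mach-O name decoration: an exported-symbols list spells each C-level symbol with its leading underscore (`_TBB_runtime_interface_version`), whereas `dlsym()` takes the undecorated name. A small sketch of looking up the two C entry points this file exports, assuming a hypothetical library path:

```cpp
#include <dlfcn.h>
#include <cstdio>

int main() {
    // Path is illustrative only.
    void* tbb = dlopen("libtbb.dylib", RTLD_NOW);
    if (!tbb) return 1;
    // Note: no leading underscore here, unlike the export list above.
    typedef int (*version_fn)();
    version_fn version = (version_fn) dlsym(tbb, "TBB_runtime_interface_version");
    if (version) std::printf("TBB interface version: %d\n", version());
    dlclose(tbb);
    return 0;
}
```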
+ +# Needed for backwards compatibility +__ZNSt13runtime_errorD1Ev +__ZTISt13runtime_error +__ZTSSt13runtime_error +__ZNSt16invalid_argumentD1Ev +__ZTISt16invalid_argument +__ZTSSt16invalid_argument +__ZNSt11range_errorD1Ev +__ZTISt11range_error +__ZTSSt11range_error +__ZNSt12length_errorD1Ev +__ZTISt12length_error +__ZTSSt12length_error +__ZNSt12out_of_rangeD1Ev +__ZTISt12out_of_range +__ZTSSt12out_of_range + +# Needed by rstan +__ZN3tbb8internal26task_scheduler_observer_v37observeEb + +# Assertions (assert.cpp) +__ZN3tbb6detail2r117assertion_failureEPKciS3_S3_ + +# ITT (profiling.cpp) +__ZN3tbb6detail2r112itt_task_endENS0_2d115itt_domain_enumE +__ZN3tbb6detail2r114itt_region_endENS0_2d115itt_domain_enumEPvy +__ZN3tbb6detail2r114itt_task_beginENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE +__ZN3tbb6detail2r115call_itt_notifyEiPv +__ZN3tbb6detail2r115create_itt_syncEPvPKcS4_ +__ZN3tbb6detail2r116itt_region_beginENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE +__ZN3tbb6detail2r116itt_relation_addENS0_2d115itt_domain_enumEPvyNS0_2d012itt_relationES4_y +__ZN3tbb6detail2r117itt_set_sync_nameEPvPKc +__ZN3tbb6detail2r119itt_make_task_groupENS0_2d115itt_domain_enumEPvyS4_yNS0_2d021string_resource_indexE +__ZN3tbb6detail2r120itt_metadata_str_addENS0_2d115itt_domain_enumEPvyNS0_2d021string_resource_indexEPKc +__ZN3tbb6detail2r120itt_metadata_ptr_addENS0_2d115itt_domain_enumEPvyNS0_2d021string_resource_indexES4_ + +# Allocators (allocator.cpp) +__ZN3tbb6detail2r115allocate_memoryEm +__ZN3tbb6detail2r117deallocate_memoryEPv +__ZN3tbb6detail2r122cache_aligned_allocateEm +__ZN3tbb6detail2r124cache_aligned_deallocateEPv +__ZN3tbb6detail2r115cache_line_sizeEv +__ZN3tbb6detail2r117is_tbbmalloc_usedEv + +# Small object pool (small_object_pool.cpp) +__ZN3tbb6detail2r18allocateERPNS0_2d117small_object_poolEm +__ZN3tbb6detail2r18allocateERPNS0_2d117small_object_poolEmRKNS2_14execution_dataE +__ZN3tbb6detail2r110deallocateERNS0_2d117small_object_poolEPvm +__ZN3tbb6detail2r110deallocateERNS0_2d117small_object_poolEPvmRKNS2_14execution_dataE + +# Error handling (exception.cpp) +__ZN3tbb6detail2r115throw_exceptionENS0_2d012exception_idE +__ZTIN3tbb6detail2r114bad_last_allocE +__ZTVN3tbb6detail2r114bad_last_allocE +__ZTIN3tbb6detail2r112missing_waitE +__ZTVN3tbb6detail2r112missing_waitE +__ZTIN3tbb6detail2r110user_abortE +__ZTVN3tbb6detail2r110user_abortE +__ZTIN3tbb6detail2r111unsafe_waitE +__ZTVN3tbb6detail2r111unsafe_waitE + +# RTM Mutex (rtm_mutex.cpp) +__ZN3tbb6detail2r17acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockEb +__ZN3tbb6detail2r17releaseERNS0_2d19rtm_mutex11scoped_lockE +__ZN3tbb6detail2r111try_acquireERNS0_2d19rtm_mutexERNS3_11scoped_lockE + +# RTM RW Mutex (rtm_rw_mutex.cpp) +__ZN3tbb6detail2r114acquire_readerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockEb +__ZN3tbb6detail2r114acquire_writerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockEb +__ZN3tbb6detail2r118try_acquire_readerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockE +__ZN3tbb6detail2r118try_acquire_writerERNS0_2d112rtm_rw_mutexERNS3_11scoped_lockE +__ZN3tbb6detail2r17releaseERNS0_2d112rtm_rw_mutex11scoped_lockE +__ZN3tbb6detail2r17upgradeERNS0_2d112rtm_rw_mutex11scoped_lockE +__ZN3tbb6detail2r19downgradeERNS0_2d112rtm_rw_mutex11scoped_lockE + +# Tasks and partitioners (task.cpp) +__ZN3tbb6detail2r17suspendEPFvPvPNS1_18suspend_point_typeEES2_ +__ZN3tbb6detail2r16resumeEPNS1_18suspend_point_typeE +__ZN3tbb6detail2r121current_suspend_pointEv +__ZN3tbb6detail2r114notify_waitersEm 
+__ZN3tbb6detail2r127get_thread_reference_vertexEPNS0_2d126wait_tree_vertex_interfaceE + +# Task dispatcher (task_dispatcher.cpp) +__ZN3tbb6detail2r114execution_slotEPKNS0_2d114execution_dataE +__ZN3tbb6detail2r14waitERNS0_2d112wait_contextERNS2_18task_group_contextE +__ZN3tbb6detail2r15spawnERNS0_2d14taskERNS2_18task_group_contextE +__ZN3tbb6detail2r15spawnERNS0_2d14taskERNS2_18task_group_contextEt +__ZN3tbb6detail2r116execute_and_waitERNS0_2d14taskERNS2_18task_group_contextERNS2_12wait_contextES6_ +__ZN3tbb6detail2r16submitERNS0_2d14taskERNS2_18task_group_contextEPNS1_5arenaEm +__ZN3tbb6detail2r115current_contextEv + +# Task group context (task_group_context.cpp) +__ZN3tbb6detail2r110initializeERNS0_2d118task_group_contextE +__ZN3tbb6detail2r122cancel_group_executionERNS0_2d118task_group_contextE +__ZN3tbb6detail2r128is_group_execution_cancelledERNS0_2d118task_group_contextE +__ZN3tbb6detail2r15resetERNS0_2d118task_group_contextE +__ZN3tbb6detail2r17destroyERNS0_2d118task_group_contextE +__ZN3tbb6detail2r119capture_fp_settingsERNS0_2d118task_group_contextE + +# Task arena (arena.cpp) +__ZN3tbb6detail2r115max_concurrencyEPKNS0_2d115task_arena_baseE +__ZN3tbb6detail2r110initializeERNS0_2d115task_arena_baseE +__ZN3tbb6detail2r16attachERNS0_2d115task_arena_baseE +__ZN3tbb6detail2r17executeERNS0_2d115task_arena_baseERNS2_13delegate_baseE +__ZN3tbb6detail2r19terminateERNS0_2d115task_arena_baseE +__ZN3tbb6detail2r120isolate_within_arenaERNS0_2d113delegate_baseEl +__ZN3tbb6detail2r17enqueueERNS0_2d14taskEPNS2_15task_arena_baseE +__ZN3tbb6detail2r17enqueueERNS0_2d14taskERNS2_18task_group_contextEPNS2_15task_arena_baseE +__ZN3tbb6detail2r14waitERNS0_2d115task_arena_baseE +__ZN3tbb6detail2r114execution_slotERKNS0_2d115task_arena_baseE + +# System topology parsing and threads pinning (governor.cpp) +__ZN3tbb6detail2r115numa_node_countEv +__ZN3tbb6detail2r117fill_numa_indicesEPi +__ZN3tbb6detail2r115core_type_countEl +__ZN3tbb6detail2r122fill_core_type_indicesEPil +__ZN3tbb6detail2r131constraints_default_concurrencyERKNS0_2d111constraintsEl +__ZN3tbb6detail2r128constraints_threads_per_coreERKNS0_2d111constraintsEl +__ZN3tbb6detail2r124numa_default_concurrencyEi + +# Observer (observer_proxy.cpp) +__ZN3tbb6detail2r17observeERNS0_2d123task_scheduler_observerEb + +# Queuing RW Mutex (queuing_rw_mutex.cpp) +__ZN3tbb6detail2r111try_acquireERNS0_2d116queuing_rw_mutexERNS3_11scoped_lockEb +__ZN3tbb6detail2r117upgrade_to_writerERNS0_2d116queuing_rw_mutex11scoped_lockE +__ZN3tbb6detail2r119downgrade_to_readerERNS0_2d116queuing_rw_mutex11scoped_lockE +__ZN3tbb6detail2r17acquireERNS0_2d116queuing_rw_mutexERNS3_11scoped_lockEb +__ZN3tbb6detail2r17releaseERNS0_2d116queuing_rw_mutex11scoped_lockE +__ZN3tbb6detail2r19constructERNS0_2d116queuing_rw_mutexE +__ZN3tbb6detail2r19is_writerERKNS0_2d116queuing_rw_mutex11scoped_lockE + +# Global control (global_control.cpp) +__ZN3tbb6detail2r16createERNS0_2d114global_controlE +__ZN3tbb6detail2r17destroyERNS0_2d114global_controlE +__ZN3tbb6detail2r127global_control_active_valueEi +__ZN3tbb6detail2r18finalizeERNS0_2d121task_scheduler_handleEl +__ZN3tbb6detail2r13getERNS0_2d121task_scheduler_handleE + +# Parallel pipeline (parallel_pipeline.cpp) +__ZN3tbb6detail2r117parallel_pipelineERNS0_2d118task_group_contextEmRKNS2_11filter_nodeE +__ZN3tbb6detail2r116set_end_of_inputERNS0_2d111base_filterE + +# Concurrent bounded queue (concurrent_bounded_queue.cpp) +__ZN3tbb6detail2r126allocate_bounded_queue_repEm 
+__ZN3tbb6detail2r126wait_bounded_queue_monitorEPNS1_18concurrent_monitorEmlRNS0_2d113delegate_baseE +__ZN3tbb6detail2r128abort_bounded_queue_monitorsEPNS1_18concurrent_monitorE +__ZN3tbb6detail2r128deallocate_bounded_queue_repEPhm +__ZN3tbb6detail2r128notify_bounded_queue_monitorEPNS1_18concurrent_monitorEmm + +# Concurrent monitor (address_waiter.cpp) +__ZN3tbb6detail2r115wait_on_addressEPvRNS0_2d113delegate_baseEm +__ZN3tbb6detail2r117notify_by_addressEPvm +__ZN3tbb6detail2r121notify_by_address_oneEPv +__ZN3tbb6detail2r121notify_by_address_allEPv + +# Versioning (version.cpp) +_TBB_runtime_interface_version +_TBB_runtime_version diff --git a/src/tbb/src/tbb/def/win32-tbb.def b/src/tbb/src/tbb/def/win32-tbb.def new file mode 100644 index 000000000..94b544170 --- /dev/null +++ b/src/tbb/src/tbb/def/win32-tbb.def @@ -0,0 +1,152 @@ +; Copyright (c) 2005-2024 Intel Corporation +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. + +; This file is organized with a section for each .cpp file. + +EXPORTS + +; Assertions (assert.cpp) +?assertion_failure@r1@detail@tbb@@YAXPBDH00@Z + +; ITT (tbb_profiling.cpp) +?call_itt_notify@r1@detail@tbb@@YAXHPAX@Z +?create_itt_sync@r1@detail@tbb@@YAXPAXPB_W1@Z +?itt_make_task_group@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_K12W4string_resource_index@d0@23@@Z +?itt_task_begin@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_K12W4string_resource_index@d0@23@@Z +?itt_task_end@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@@Z +?itt_metadata_str_add@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_KW4string_resource_index@d0@23@PBD@Z +?itt_relation_add@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_KW4itt_relation@d0@23@12@Z +?itt_region_begin@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_K12W4string_resource_index@d0@23@@Z +?itt_region_end@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_K@Z +?itt_set_sync_name@r1@detail@tbb@@YAXPAXPB_W@Z +?itt_metadata_ptr_add@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PAX_KW4string_resource_index@d0@23@1@Z + +; Allocators (tbb_allocator.cpp) +?cache_aligned_allocate@r1@detail@tbb@@YAPAXI@Z +?cache_aligned_deallocate@r1@detail@tbb@@YAXPAX@Z +?cache_line_size@r1@detail@tbb@@YAIXZ +?allocate_memory@r1@detail@tbb@@YAPAXI@Z +?deallocate_memory@r1@detail@tbb@@YAXPAX@Z +?is_tbbmalloc_used@r1@detail@tbb@@YA_NXZ + +; Small object pool (small_object_pool.cpp) +?allocate@r1@detail@tbb@@YAPAXAAPAVsmall_object_pool@d1@23@IABUexecution_data@523@@Z +?allocate@r1@detail@tbb@@YAPAXAAPAVsmall_object_pool@d1@23@I@Z +?deallocate@r1@detail@tbb@@YAXAAVsmall_object_pool@d1@23@PAXIABUexecution_data@523@@Z +?deallocate@r1@detail@tbb@@YAXAAVsmall_object_pool@d1@23@PAXI@Z + +; Error handling (exception.cpp) +?throw_exception@r1@detail@tbb@@YAXW4exception_id@d0@23@@Z +?what@bad_last_alloc@r1@detail@tbb@@UBEPBDXZ +?what@user_abort@r1@detail@tbb@@UBEPBDXZ +?what@missing_wait@r1@detail@tbb@@UBEPBDXZ + +; RTM Mutex (rtm_mutex.cpp) +?acquire@r1@detail@tbb@@YAXAAVrtm_mutex@d1@23@AAVscoped_lock@4523@_N@Z +?release@r1@detail@tbb@@YAXAAVscoped_lock@rtm_mutex@d1@23@@Z 
+?try_acquire@r1@detail@tbb@@YA_NAAVrtm_mutex@d1@23@AAVscoped_lock@4523@@Z + +; RTM RW Mutex (rtm_rw_mutex.cpp) +?acquire_reader@r1@detail@tbb@@YAXAAVrtm_rw_mutex@d1@23@AAVscoped_lock@4523@_N@Z +?acquire_writer@r1@detail@tbb@@YAXAAVrtm_rw_mutex@d1@23@AAVscoped_lock@4523@_N@Z +?downgrade@r1@detail@tbb@@YA_NAAVscoped_lock@rtm_rw_mutex@d1@23@@Z +?release@r1@detail@tbb@@YAXAAVscoped_lock@rtm_rw_mutex@d1@23@@Z +?try_acquire_reader@r1@detail@tbb@@YA_NAAVrtm_rw_mutex@d1@23@AAVscoped_lock@4523@@Z +?try_acquire_writer@r1@detail@tbb@@YA_NAAVrtm_rw_mutex@d1@23@AAVscoped_lock@4523@@Z +?upgrade@r1@detail@tbb@@YA_NAAVscoped_lock@rtm_rw_mutex@d1@23@@Z + +; Tasks and partitioners (task.cpp) +?current_suspend_point@r1@detail@tbb@@YAPAUsuspend_point_type@123@XZ +?resume@r1@detail@tbb@@YAXPAUsuspend_point_type@123@@Z +?suspend@r1@detail@tbb@@YAXP6AXPAXPAUsuspend_point_type@123@@Z0@Z +?notify_waiters@r1@detail@tbb@@YAXI@Z +?get_thread_reference_vertex@r1@detail@tbb@@YAPAVwait_tree_vertex_interface@d1@23@PAV4523@@Z + +; Task dispatcher (task_dispatcher.cpp) +?spawn@r1@detail@tbb@@YAXAAVtask@d1@23@AAVtask_group_context@523@G@Z +?spawn@r1@detail@tbb@@YAXAAVtask@d1@23@AAVtask_group_context@523@@Z +?execute_and_wait@r1@detail@tbb@@YAXAAVtask@d1@23@AAVtask_group_context@523@AAVwait_context@523@1@Z +?execution_slot@r1@detail@tbb@@YAGPBUexecution_data@d1@23@@Z +?wait@r1@detail@tbb@@YAXAAVwait_context@d1@23@AAVtask_group_context@523@@Z +?submit@r1@detail@tbb@@YAXAAVtask@d1@23@AAVtask_group_context@523@PAVarena@123@I@Z +?current_context@r1@detail@tbb@@YAPAVtask_group_context@d1@23@XZ + +; Task group context (task_group_context.cpp) +?cancel_group_execution@r1@detail@tbb@@YA_NAAVtask_group_context@d1@23@@Z +?capture_fp_settings@r1@detail@tbb@@YAXAAVtask_group_context@d1@23@@Z +?destroy@r1@detail@tbb@@YAXAAVtask_group_context@d1@23@@Z +?initialize@r1@detail@tbb@@YAXAAVtask_group_context@d1@23@@Z +?is_group_execution_cancelled@r1@detail@tbb@@YA_NAAVtask_group_context@d1@23@@Z +?reset@r1@detail@tbb@@YAXAAVtask_group_context@d1@23@@Z + +; Task arena (arena.cpp) +?attach@r1@detail@tbb@@YA_NAAVtask_arena_base@d1@23@@Z +?enqueue@r1@detail@tbb@@YAXAAVtask@d1@23@PAVtask_arena_base@523@@Z +?execute@r1@detail@tbb@@YAXAAVtask_arena_base@d1@23@AAVdelegate_base@523@@Z +?initialize@r1@detail@tbb@@YAXAAVtask_arena_base@d1@23@@Z +?isolate_within_arena@r1@detail@tbb@@YAXAAVdelegate_base@d1@23@H@Z +?max_concurrency@r1@detail@tbb@@YAHPBVtask_arena_base@d1@23@@Z +?terminate@r1@detail@tbb@@YAXAAVtask_arena_base@d1@23@@Z +?wait@r1@detail@tbb@@YAXAAVtask_arena_base@d1@23@@Z +?enqueue@r1@detail@tbb@@YAXAAVtask@d1@23@AAVtask_group_context@523@PAVtask_arena_base@523@@Z +?execution_slot@r1@detail@tbb@@YAGABVtask_arena_base@d1@23@@Z + +; System topology parsing and threads pinning (governor.cpp) +?numa_node_count@r1@detail@tbb@@YAIXZ +?fill_numa_indices@r1@detail@tbb@@YAXPAH@Z +?core_type_count@r1@detail@tbb@@YAIH@Z +?fill_core_type_indices@r1@detail@tbb@@YAXPAHH@Z +?numa_default_concurrency@r1@detail@tbb@@YAHH@Z +?constraints_default_concurrency@r1@detail@tbb@@YAHABUconstraints@d1@23@H@Z +?constraints_threads_per_core@r1@detail@tbb@@YAHABUconstraints@d1@23@H@Z + +; Observer (observer_proxy.cpp) +?observe@r1@detail@tbb@@YAXAAVtask_scheduler_observer@d1@23@_N@Z + +; Queuing RW Mutex (queuing_rw_mutex.cpp) +?acquire@r1@detail@tbb@@YAXAAVqueuing_rw_mutex@d1@23@AAVscoped_lock@4523@_N@Z +?construct@r1@detail@tbb@@YAXAAVqueuing_rw_mutex@d1@23@@Z +?downgrade_to_reader@r1@detail@tbb@@YA_NAAVscoped_lock@queuing_rw_mutex@d1@23@@Z 
+?release@r1@detail@tbb@@YAXAAVscoped_lock@queuing_rw_mutex@d1@23@@Z +?try_acquire@r1@detail@tbb@@YA_NAAVqueuing_rw_mutex@d1@23@AAVscoped_lock@4523@_N@Z +?upgrade_to_writer@r1@detail@tbb@@YA_NAAVscoped_lock@queuing_rw_mutex@d1@23@@Z +?is_writer@r1@detail@tbb@@YA_NABVscoped_lock@queuing_rw_mutex@d1@23@@Z + +; Global control (global_control.cpp) +?create@r1@detail@tbb@@YAXAAVglobal_control@d1@23@@Z +?destroy@r1@detail@tbb@@YAXAAVglobal_control@d1@23@@Z +?global_control_active_value@r1@detail@tbb@@YAIH@Z +?get@r1@detail@tbb@@YAXAAVtask_scheduler_handle@d1@23@@Z +?finalize@r1@detail@tbb@@YA_NAAVtask_scheduler_handle@d1@23@H@Z + +; Parallel pipeline (parallel_pipeline.cpp) +?parallel_pipeline@r1@detail@tbb@@YAXAAVtask_group_context@d1@23@IABVfilter_node@523@@Z +?set_end_of_input@r1@detail@tbb@@YAXAAVbase_filter@d1@23@@Z + +; Concurrent bounded queue (concurrent_bounded_queue.cpp) +?abort_bounded_queue_monitors@r1@detail@tbb@@YAXPAVconcurrent_monitor@123@@Z +?allocate_bounded_queue_rep@r1@detail@tbb@@YAPAEI@Z +?deallocate_bounded_queue_rep@r1@detail@tbb@@YAXPAEI@Z +?notify_bounded_queue_monitor@r1@detail@tbb@@YAXPAVconcurrent_monitor@123@II@Z +?wait_bounded_queue_monitor@r1@detail@tbb@@YAXPAVconcurrent_monitor@123@IHAAVdelegate_base@d1@23@@Z + +; Concurrent monitor (address_waiter.cpp) +?wait_on_address@r1@detail@tbb@@YAXPAXAAVdelegate_base@d1@23@I@Z +?notify_by_address@r1@detail@tbb@@YAXPAXI@Z +?notify_by_address_one@r1@detail@tbb@@YAXPAX@Z +?notify_by_address_all@r1@detail@tbb@@YAXPAX@Z + +;; Versioning (version.cpp) +TBB_runtime_interface_version +TBB_runtime_version diff --git a/src/tbb/src/tbb/def/win64-tbb.def b/src/tbb/src/tbb/def/win64-tbb.def new file mode 100644 index 000000000..96bafc016 --- /dev/null +++ b/src/tbb/src/tbb/def/win64-tbb.def @@ -0,0 +1,152 @@ +; Copyright (c) 2005-2024 Intel Corporation +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. + +; This file is organized with a section for each .cpp file. 
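As on the other platforms, every C++ entry point in this file must be listed under the exact name the compiler emits; with MSVC that decorated form encodes namespace, calling convention, return type, and parameter types, which is why the 32- and 64-bit .def files differ wherever a pointer-sized argument is involved. Only the two `extern "C"` versioning functions appear undecorated. A sketch of the declarations behind two of the entries below (the parameter name is a guess):

```cpp
#include <cstdint>

namespace tbb { namespace detail { namespace r1 {
// Listed below as ?notify_waiters@r1@detail@tbb@@YAX_K@Z
// ('X' encodes the void return, '_K' an unsigned __int64 parameter).
void notify_waiters(std::uint64_t wait_ctx_addr);
}}}

// extern "C" suppresses C++ decoration, so the .def entry is the bare name.
extern "C" int TBB_runtime_interface_version();
```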
+ +EXPORTS + +; Assertions (assert.cpp) +?assertion_failure@r1@detail@tbb@@YAXPEBDH00@Z + +; ITT (tbb_profiling.cpp) +?call_itt_notify@r1@detail@tbb@@YAXHPEAX@Z +?create_itt_sync@r1@detail@tbb@@YAXPEAXPEB_W1@Z +?itt_make_task_group@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_K12W4string_resource_index@d0@23@@Z +?itt_task_begin@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_K12W4string_resource_index@d0@23@@Z +?itt_task_end@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@@Z +?itt_set_sync_name@r1@detail@tbb@@YAXPEAXPEB_W@Z +?itt_metadata_str_add@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_KW4string_resource_index@d0@23@PEBD@Z +?itt_relation_add@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_KW4itt_relation@d0@23@12@Z +?itt_region_begin@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_K12W4string_resource_index@d0@23@@Z +?itt_region_end@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_K@Z +?itt_metadata_ptr_add@r1@detail@tbb@@YAXW4itt_domain_enum@d1@23@PEAX_KW4string_resource_index@d0@23@1@Z + +; Allocators (tbb_allocator.cpp) +?cache_aligned_allocate@r1@detail@tbb@@YAPEAX_K@Z +?cache_aligned_deallocate@r1@detail@tbb@@YAXPEAX@Z +?cache_line_size@r1@detail@tbb@@YA_KXZ +?allocate_memory@r1@detail@tbb@@YAPEAX_K@Z +?deallocate_memory@r1@detail@tbb@@YAXPEAX@Z +?is_tbbmalloc_used@r1@detail@tbb@@YA_NXZ + +; Small object pool (small_object_pool.cpp) +?allocate@r1@detail@tbb@@YAPEAXAEAPEAVsmall_object_pool@d1@23@_KAEBUexecution_data@523@@Z +?allocate@r1@detail@tbb@@YAPEAXAEAPEAVsmall_object_pool@d1@23@_K@Z +?deallocate@r1@detail@tbb@@YAXAEAVsmall_object_pool@d1@23@PEAX_KAEBUexecution_data@523@@Z +?deallocate@r1@detail@tbb@@YAXAEAVsmall_object_pool@d1@23@PEAX_K@Z + +; Error handling (exception.cpp) +?throw_exception@r1@detail@tbb@@YAXW4exception_id@d0@23@@Z +?what@bad_last_alloc@r1@detail@tbb@@UEBAPEBDXZ +?what@user_abort@r1@detail@tbb@@UEBAPEBDXZ +?what@missing_wait@r1@detail@tbb@@UEBAPEBDXZ + +; RTM Mutex (rtm_mutex.cpp) +?try_acquire@r1@detail@tbb@@YA_NAEAVrtm_mutex@d1@23@AEAVscoped_lock@4523@@Z +?acquire@r1@detail@tbb@@YAXAEAVrtm_mutex@d1@23@AEAVscoped_lock@4523@_N@Z +?release@r1@detail@tbb@@YAXAEAVscoped_lock@rtm_mutex@d1@23@@Z + +; RTM RW Mutex (rtm_rw_mutex.cpp) +?acquire_writer@r1@detail@tbb@@YAXAEAVrtm_rw_mutex@d1@23@AEAVscoped_lock@4523@_N@Z +?acquire_reader@r1@detail@tbb@@YAXAEAVrtm_rw_mutex@d1@23@AEAVscoped_lock@4523@_N@Z +?upgrade@r1@detail@tbb@@YA_NAEAVscoped_lock@rtm_rw_mutex@d1@23@@Z +?downgrade@r1@detail@tbb@@YA_NAEAVscoped_lock@rtm_rw_mutex@d1@23@@Z +?try_acquire_writer@r1@detail@tbb@@YA_NAEAVrtm_rw_mutex@d1@23@AEAVscoped_lock@4523@@Z +?try_acquire_reader@r1@detail@tbb@@YA_NAEAVrtm_rw_mutex@d1@23@AEAVscoped_lock@4523@@Z +?release@r1@detail@tbb@@YAXAEAVscoped_lock@rtm_rw_mutex@d1@23@@Z + +; Tasks and partitioners (task.cpp) +?suspend@r1@detail@tbb@@YAXP6AXPEAXPEAUsuspend_point_type@123@@Z0@Z +?resume@r1@detail@tbb@@YAXPEAUsuspend_point_type@123@@Z +?current_suspend_point@r1@detail@tbb@@YAPEAUsuspend_point_type@123@XZ +?notify_waiters@r1@detail@tbb@@YAX_K@Z +?get_thread_reference_vertex@r1@detail@tbb@@YAPEAVwait_tree_vertex_interface@d1@23@PEAV4523@@Z + +; Task dispatcher (task_dispatcher.cpp) +?spawn@r1@detail@tbb@@YAXAEAVtask@d1@23@AEAVtask_group_context@523@@Z +?spawn@r1@detail@tbb@@YAXAEAVtask@d1@23@AEAVtask_group_context@523@G@Z +?execute_and_wait@r1@detail@tbb@@YAXAEAVtask@d1@23@AEAVtask_group_context@523@AEAVwait_context@523@1@Z +?execution_slot@r1@detail@tbb@@YAGPEBUexecution_data@d1@23@@Z +?wait@r1@detail@tbb@@YAXAEAVwait_context@d1@23@AEAVtask_group_context@523@@Z 
+?submit@r1@detail@tbb@@YAXAEAVtask@d1@23@AEAVtask_group_context@523@PEAVarena@123@_K@Z +?current_context@r1@detail@tbb@@YAPEAVtask_group_context@d1@23@XZ + +; Task group context (task_group_context.cpp) +?initialize@r1@detail@tbb@@YAXAEAVtask_group_context@d1@23@@Z +?destroy@r1@detail@tbb@@YAXAEAVtask_group_context@d1@23@@Z +?is_group_execution_cancelled@r1@detail@tbb@@YA_NAEAVtask_group_context@d1@23@@Z +?reset@r1@detail@tbb@@YAXAEAVtask_group_context@d1@23@@Z +?cancel_group_execution@r1@detail@tbb@@YA_NAEAVtask_group_context@d1@23@@Z +?capture_fp_settings@r1@detail@tbb@@YAXAEAVtask_group_context@d1@23@@Z + +; Task arena (arena.cpp) +?max_concurrency@r1@detail@tbb@@YAHPEBVtask_arena_base@d1@23@@Z +?initialize@r1@detail@tbb@@YAXAEAVtask_arena_base@d1@23@@Z +?terminate@r1@detail@tbb@@YAXAEAVtask_arena_base@d1@23@@Z +?execute@r1@detail@tbb@@YAXAEAVtask_arena_base@d1@23@AEAVdelegate_base@523@@Z +?wait@r1@detail@tbb@@YAXAEAVtask_arena_base@d1@23@@Z +?attach@r1@detail@tbb@@YA_NAEAVtask_arena_base@d1@23@@Z +?isolate_within_arena@r1@detail@tbb@@YAXAEAVdelegate_base@d1@23@_J@Z +?enqueue@r1@detail@tbb@@YAXAEAVtask@d1@23@PEAVtask_arena_base@523@@Z +?enqueue@r1@detail@tbb@@YAXAEAVtask@d1@23@AEAVtask_group_context@523@PEAVtask_arena_base@523@@Z +?execution_slot@r1@detail@tbb@@YAGAEBVtask_arena_base@d1@23@@Z + +; System topology parsing and threads pinning (governor.cpp) +?numa_node_count@r1@detail@tbb@@YAIXZ +?fill_numa_indices@r1@detail@tbb@@YAXPEAH@Z +?core_type_count@r1@detail@tbb@@YAI_J@Z +?fill_core_type_indices@r1@detail@tbb@@YAXPEAH_J@Z +?numa_default_concurrency@r1@detail@tbb@@YAHH@Z +?constraints_default_concurrency@r1@detail@tbb@@YAHAEBUconstraints@d1@23@_J@Z +?constraints_threads_per_core@r1@detail@tbb@@YAHAEBUconstraints@d1@23@_J@Z + +; Observer (observer_proxy.cpp) +?observe@r1@detail@tbb@@YAXAEAVtask_scheduler_observer@d1@23@_N@Z + +; Queuing RW Mutex (queuing_rw_mutex.cpp) +?construct@r1@detail@tbb@@YAXAEAVqueuing_rw_mutex@d1@23@@Z +?try_acquire@r1@detail@tbb@@YA_NAEAVqueuing_rw_mutex@d1@23@AEAVscoped_lock@4523@_N@Z +?acquire@r1@detail@tbb@@YAXAEAVqueuing_rw_mutex@d1@23@AEAVscoped_lock@4523@_N@Z +?release@r1@detail@tbb@@YAXAEAVscoped_lock@queuing_rw_mutex@d1@23@@Z +?upgrade_to_writer@r1@detail@tbb@@YA_NAEAVscoped_lock@queuing_rw_mutex@d1@23@@Z +?downgrade_to_reader@r1@detail@tbb@@YA_NAEAVscoped_lock@queuing_rw_mutex@d1@23@@Z +?is_writer@r1@detail@tbb@@YA_NAEBVscoped_lock@queuing_rw_mutex@d1@23@@Z + +; Global control (global_control.cpp) +?global_control_active_value@r1@detail@tbb@@YA_KH@Z +?create@r1@detail@tbb@@YAXAEAVglobal_control@d1@23@@Z +?destroy@r1@detail@tbb@@YAXAEAVglobal_control@d1@23@@Z +?get@r1@detail@tbb@@YAXAEAVtask_scheduler_handle@d1@23@@Z +?finalize@r1@detail@tbb@@YA_NAEAVtask_scheduler_handle@d1@23@_J@Z + +; Parallel pipeline (parallel_pipeline.cpp) +?set_end_of_input@r1@detail@tbb@@YAXAEAVbase_filter@d1@23@@Z +?parallel_pipeline@r1@detail@tbb@@YAXAEAVtask_group_context@d1@23@_KAEBVfilter_node@523@@Z + +; Concurrent bounded queue (concurrent_bounded_queue.cpp) +?allocate_bounded_queue_rep@r1@detail@tbb@@YAPEAE_K@Z +?deallocate_bounded_queue_rep@r1@detail@tbb@@YAXPEAE_K@Z +?wait_bounded_queue_monitor@r1@detail@tbb@@YAXPEAVconcurrent_monitor@123@_K_JAEAVdelegate_base@d1@23@@Z +?abort_bounded_queue_monitors@r1@detail@tbb@@YAXPEAVconcurrent_monitor@123@@Z +?notify_bounded_queue_monitor@r1@detail@tbb@@YAXPEAVconcurrent_monitor@123@_K1@Z + +; Concurrent monitor (address_waiter.cpp) +?wait_on_address@r1@detail@tbb@@YAXPEAXAEAVdelegate_base@d1@23@_K@Z 
+?notify_by_address@r1@detail@tbb@@YAXPEAX_K@Z +?notify_by_address_one@r1@detail@tbb@@YAXPEAX@Z +?notify_by_address_all@r1@detail@tbb@@YAXPEAX@Z + +;; Versioning (version.cpp) +TBB_runtime_interface_version +TBB_runtime_version diff --git a/src/tbb/src/tbb/dynamic_link.cpp b/src/tbb/src/tbb/dynamic_link.cpp index 395268500..4911b97df 100644 --- a/src/tbb/src/tbb/dynamic_link.cpp +++ b/src/tbb/src/tbb/dynamic_link.cpp @@ -1,69 +1,63 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #include "dynamic_link.h" -#include "tbb/tbb_config.h" +#include "environment.h" + +#include "oneapi/tbb/detail/_template_helpers.h" +#include "oneapi/tbb/detail/_utils.h" /* This file is used by both TBB and OpenMP RTL. Do not use __TBB_ASSERT() macro and runtime_warning() function because they are not available in OpenMP. Use - LIBRARY_ASSERT and DYNAMIC_LINK_WARNING instead. + __TBB_ASSERT_EX and DYNAMIC_LINK_WARNING instead. */ #include <cstdarg> // va_list etc. +#include <cstring> // strrchr #if _WIN32 #include <malloc.h> // Unify system calls #define dlopen( name, flags ) LoadLibrary( name ) #define dlsym( handle, name ) GetProcAddress( handle, name ) - #define dlclose( handle ) ( ! FreeLibrary( handle ) ) + // FreeLibrary return bool value that is not used. + #define dlclose( handle ) (void)( ! 
FreeLibrary( handle ) ) #define dlerror() GetLastError() #ifndef PATH_MAX #define PATH_MAX MAX_PATH #endif #else /* _WIN32 */ #include <dlfcn.h> - #include <string.h> #include <unistd.h> - #include <limits.h> - #include <stdlib.h> + + #include <climits> + #include <cstdlib> #endif /* _WIN32 */ -#if __TBB_WEAK_SYMBOLS_PRESENT +#if __TBB_WEAK_SYMBOLS_PRESENT && !__TBB_DYNAMIC_LOAD_ENABLED //TODO: use function attribute for weak symbols instead of the pragma. #pragma weak dlopen #pragma weak dlsym #pragma weak dlclose - #pragma weak dlerror - #pragma weak dladdr -#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ +#endif /* __TBB_WEAK_SYMBOLS_PRESENT && !__TBB_DYNAMIC_LOAD_ENABLED */ -#include "tbb/tbb_misc.h" -#define __USE_TBB_ATOMICS ( !(__linux__&&__ia64__) || __TBB_BUILD ) -#define __USE_STATIC_DL_INIT (!__ANDROID__) +#define __USE_STATIC_DL_INIT ( !__ANDROID__ ) -#if !__USE_TBB_ATOMICS -#include <pthread.h> -#endif /* dynamic_link is a common interface for searching for required symbols in an @@ -72,7 +66,7 @@ executable and dynamic libraries. dynamic_link provides certain guarantees: 1. Either all or none of the requested symbols are resolved. Moreover, if symbols are not resolved, the dynamic_link_descriptor table is not modified; - 2. All returned symbols have secured life time: this means that none of them + 2. All returned symbols have secured lifetime: this means that none of them can be invalidated until dynamic_unlink is called; 3. Any loaded library is loaded only via the full path. The full path is that from which the runtime itself was loaded. (This is done to avoid security @@ -86,7 +80,7 @@ soon as all of the symbols have been resolved. library and if it succeeds it resolves the symbols via that handle. b. On Linux: dynamic_link tries to search for the symbols in the global scope via the main program handle. If the symbols are present in the global - scope their life time is not guaranteed (since dynamic_link does not know + scope their lifetime is not guaranteed (since dynamic_link does not know anything about the library from which they are exported). Therefore it tries to "pin" the symbols by obtaining the library name and reopening it. dlopen may fail to reopen the library in two cases: @@ -107,33 +101,35 @@ soon as all of the symbols have been resolved. 3. Weak symbols: if weak symbols are available they are returned. */ -OPEN_INTERNAL_NAMESPACE +namespace tbb { +namespace detail { +namespace r1 { #if __TBB_WEAK_SYMBOLS_PRESENT || __TBB_DYNAMIC_LOAD_ENABLED -#if !defined(DYNAMIC_LINK_WARNING) && !__TBB_WIN8UI_SUPPORT +#if !defined(DYNAMIC_LINK_WARNING) && !__TBB_WIN8UI_SUPPORT && __TBB_DYNAMIC_LOAD_ENABLED // Report runtime errors and continue. #define DYNAMIC_LINK_WARNING dynamic_link_warning static void dynamic_link_warning( dynamic_link_error_t code, ... 
) { - (void) code; + suppress_unused_warning(code); } // library_warning -#endif /* DYNAMIC_LINK_WARNING */ - static bool resolve_symbols( dynamic_link_handle module, const dynamic_link_descriptor descriptors[], size_t required ) +#endif /* !defined(DYNAMIC_LINK_WARNING) && !__TBB_WIN8UI_SUPPORT && __TBB_DYNAMIC_LOAD_ENABLED */ + + static bool resolve_symbols( dynamic_link_handle module, const dynamic_link_descriptor descriptors[], std::size_t required ) { - LIBRARY_ASSERT( module != NULL, "Module handle is NULL" ); - if ( module == NULL ) + if ( !module ) return false; - #if __TBB_WEAK_SYMBOLS_PRESENT + #if !__TBB_DYNAMIC_LOAD_ENABLED /* only __TBB_WEAK_SYMBOLS_PRESENT is defined */ if ( !dlsym ) return false; - #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ + #endif /* !__TBB_DYNAMIC_LOAD_ENABLED */ - const size_t n_desc=20; // Usually we don't have more than 20 descriptors per library - LIBRARY_ASSERT( required <= n_desc, "Too many descriptors is required" ); + const std::size_t n_desc=20; // Usually we don't have more than 20 descriptors per library + __TBB_ASSERT_EX( required <= n_desc, "Too many descriptors is required" ); if ( required > n_desc ) return false; pointer_to_handler h[n_desc]; - for ( size_t k = 0; k < required; ++k ) { + for ( std::size_t k = 0; k < required; ++k ) { dynamic_link_descriptor const & desc = descriptors[k]; pointer_to_handler addr = (pointer_to_handler)dlsym( module, desc.name ); if ( !addr ) { @@ -144,29 +140,28 @@ OPEN_INTERNAL_NAMESPACE // Commit the entry points. // Cannot use memset here, because the writes must be atomic. - for( size_t k = 0; k < required; ++k ) + for( std::size_t k = 0; k < required; ++k ) *descriptors[k].handler = h[k]; return true; } #if __TBB_WIN8UI_SUPPORT - bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], size_t required, dynamic_link_handle*, int flags ) { - dynamic_link_handle tmp_handle = NULL; + bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], std::size_t required, dynamic_link_handle*, int flags ) { + dynamic_link_handle tmp_handle = nullptr; TCHAR wlibrary[256]; if ( MultiByteToWideChar(CP_UTF8, 0, library, -1, wlibrary, 255) == 0 ) return false; if ( flags & DYNAMIC_LINK_LOAD ) tmp_handle = LoadPackagedLibrary( wlibrary, 0 ); - if (tmp_handle != NULL){ + if (tmp_handle != nullptr){ return resolve_symbols(tmp_handle, descriptors, required); }else{ return false; } } - void dynamic_unlink( dynamic_link_handle ) { - } - void dynamic_unlink_all() { - } + void dynamic_unlink( dynamic_link_handle ) {} + void dynamic_unlink_all() {} #else +#if __TBB_DYNAMIC_LOAD_ENABLED /* There is a security issue on Windows: LoadLibrary() may load and execute malicious code. See http://www.microsoft.com/technet/security/advisory/2269637.mspx for details. @@ -178,8 +173,8 @@ OPEN_INTERNAL_NAMESPACE current one, it is the directory tbb.dll loaded from. Example: - Let us assume "tbb.dll" is located in "c:\program files\common\intel\" directory, e. g. - absolute path of tbb library is "c:\program files\common\intel\tbb.dll". Absolute path for + Let us assume "tbb.dll" is located in "c:\program files\common\intel\" directory, e.g. + absolute path of the library is "c:\program files\common\intel\tbb.dll". Absolute path for "tbbmalloc.dll" would be "c:\program files\common\intel\tbbmalloc.dll". Absolute path for "malloc\tbbmalloc.dll" would be "c:\program files\common\intel\malloc\tbbmalloc.dll". */ @@ -191,55 +186,30 @@ OPEN_INTERNAL_NAMESPACE // the constructor is called. 
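+    // handles_t (below) records every module pinned by dynamic_link() when the caller
+    // does not keep its own handle, so that dynamic_unlink_all() can release them later.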
#define MAX_LOADED_MODULES 8 // The number of maximum possible modules which can be loaded - struct handle_storage { - #if __USE_TBB_ATOMICS - ::tbb::atomic<size_t> my_size; - #else - size_t my_size; - pthread_spinlock_t my_lock; - #endif + using atomic_incrementer = std::atomic<std::size_t>; + + static struct handles_t { + atomic_incrementer my_size; dynamic_link_handle my_handles[MAX_LOADED_MODULES]; - void add_handle(const dynamic_link_handle &handle) { - #if !__USE_TBB_ATOMICS - int res = pthread_spin_lock( &my_lock ); - LIBRARY_ASSERT( res==0, "pthread_spin_lock failed" ); - #endif - const size_t ind = my_size++; - #if !__USE_TBB_ATOMICS - res = pthread_spin_unlock( &my_lock ); - LIBRARY_ASSERT( res==0, "pthread_spin_unlock failed" ); - #endif - LIBRARY_ASSERT( ind < MAX_LOADED_MODULES, "Too many modules are loaded" ); + void add(const dynamic_link_handle &handle) { + const std::size_t ind = my_size++; + __TBB_ASSERT_EX( ind < MAX_LOADED_MODULES, "Too many modules are loaded" ); my_handles[ind] = handle; } - void free_handles() { - const size_t size = my_size; - for (size_t i=0; i<size; ++i) + void free() { + const std::size_t size = my_size; + for (std::size_t i=0; i<size; ++i) dynamic_unlink( my_handles[i] ); } - }; + } handles; - handle_storage handles; + static std::once_flag init_dl_data_state; -#if __USE_TBB_ATOMICS - static void atomic_once ( void (*func) (void), tbb::atomic< tbb::internal::do_once_state > &once_state ) { - tbb::internal::atomic_do_once( func, once_state ); - } -#define ATOMIC_ONCE_DECL( var ) tbb::atomic< tbb::internal::do_once_state > var -#else - static void atomic_once ( void (*func) (), pthread_once_t &once_state ) { - pthread_once( &once_state, func ); - } -#define ATOMIC_ONCE_DECL( var ) pthread_once_t var = PTHREAD_ONCE_INIT -#endif - - ATOMIC_ONCE_DECL( init_dl_data_state ); - - static struct _ap_data { + static struct ap_data_t { char _path[PATH_MAX+1]; - size_t _len; + std::size_t _len; } ap_data; static void init_ap_data() { @@ -268,20 +238,17 @@ OPEN_INTERNAL_NAMESPACE return; } // Find the position of the last backslash. - char *backslash = strrchr( ap_data._path, '\\' ); + char *backslash = std::strrchr( ap_data._path, '\\' ); if ( !backslash ) { // Backslash not found. - LIBRARY_ASSERT( backslash!=NULL, "Unbelievable."); + __TBB_ASSERT_EX( backslash != nullptr, "Unbelievable."); return; } - LIBRARY_ASSERT( backslash >= ap_data._path, "Unbelievable."); - ap_data._len = (size_t)(backslash - ap_data._path) + 1; + __TBB_ASSERT_EX( backslash >= ap_data._path, "Unbelievable."); + ap_data._len = (std::size_t)(backslash - ap_data._path) + 1; *(backslash+1) = 0; #else // Get the library path - #if __TBB_WEAK_SYMBOLS_PRESENT - if ( !dladdr || !dlerror ) return; - #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ Dl_info dlinfo; int res = dladdr( (void*)&dynamic_link, &dlinfo ); // any function inside the library can be used for the address if ( !res ) { @@ -289,17 +256,17 @@ OPEN_INTERNAL_NAMESPACE DYNAMIC_LINK_WARNING( dl_sys_fail, "dladdr", err ); return; } else { - LIBRARY_ASSERT( dlinfo.dli_fname!=NULL, "Unbelievable." ); + __TBB_ASSERT_EX( dlinfo.dli_fname!=nullptr, "Unbelievable." 
); } - char const *slash = strrchr( dlinfo.dli_fname, '/' ); - size_t fname_len=0; + char const *slash = std::strrchr( dlinfo.dli_fname, '/' ); + std::size_t fname_len=0; if ( slash ) { - LIBRARY_ASSERT( slash >= dlinfo.dli_fname, "Unbelievable."); - fname_len = (size_t)(slash - dlinfo.dli_fname) + 1; + __TBB_ASSERT_EX( slash >= dlinfo.dli_fname, "Unbelievable."); + fname_len = (std::size_t)(slash - dlinfo.dli_fname) + 1; } - size_t rc; + std::size_t rc; if ( dlinfo.dli_fname[0]=='/' ) { // The library path is absolute rc = 0; @@ -310,19 +277,19 @@ OPEN_INTERNAL_NAMESPACE DYNAMIC_LINK_WARNING( dl_buff_too_small ); return; } - ap_data._len = strlen( ap_data._path ); + ap_data._len = std::strlen( ap_data._path ); ap_data._path[ap_data._len++]='/'; rc = ap_data._len; } if ( fname_len>0 ) { + ap_data._len += fname_len; if ( ap_data._len>PATH_MAX ) { DYNAMIC_LINK_WARNING( dl_buff_too_small ); ap_data._len=0; return; } - strncpy( ap_data._path+rc, dlinfo.dli_fname, fname_len ); - ap_data._len += fname_len; + std::strncpy( ap_data._path+rc, dlinfo.dli_fname, fname_len ); ap_data._path[ap_data._len]=0; } #endif /* _WIN32 */ @@ -330,32 +297,8 @@ OPEN_INTERNAL_NAMESPACE static void init_dl_data() { init_ap_data(); - #if !__USE_TBB_ATOMICS - int res; - res = pthread_spin_init( &handles.my_lock, PTHREAD_PROCESS_SHARED ); - LIBRARY_ASSERT( res==0, "pthread_spin_init failed" ); - #endif } - // ap_data structure is initialized with current directory on Linux. - // So it should be initialized as soon as possible since the current directory may be changed. - // static_init_ap_data object provides this initialization during library loading. - static class _static_init_dl_data { - public: - _static_init_dl_data() { - #if __USE_STATIC_DL_INIT - atomic_once( &init_dl_data, init_dl_data_state ); - #endif - } - #if !__USE_TBB_ATOMICS - ~_static_init_dl_data() { - int res; - res = pthread_spin_destroy( &handles.my_lock ); - LIBRARY_ASSERT( res==0, "pthread_spin_destroy failed" ); - } - #endif - } static_init_dl_data; - /* The function constructs absolute path for given relative path. Important: Base directory is not current one, it is the directory libtbb.so loaded from. @@ -366,195 +309,196 @@ OPEN_INTERNAL_NAMESPACE in len -- Size of buffer. ret -- 0 -- Error occurred. > len -- Buffer too short, required size returned. - otherwise -- Ok, number of characters (not counting terminating null) written to - buffer. + otherwise -- Ok, number of characters (incl. terminating null) written to buffer. 
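+        Illustration (hypothetical values): if the library directory is "/opt/lib/"
+        (so ap_data._len == 9) and name is "libfoo.so" (9 characters), abs_path()
+        writes "/opt/lib/libfoo.so" into the buffer and returns 19 -- the 18
+        characters written plus the terminating null.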
*/ - #if __TBB_DYNAMIC_LOAD_ENABLED - static size_t abs_path( char const * name, char * path, size_t len ) { - atomic_once( &init_dl_data, init_dl_data_state ); - - if ( !ap_data._len ) + static std::size_t abs_path( char const * name, char * path, std::size_t len ) { + if ( ap_data._len == 0 ) return 0; - size_t name_len = strlen( name ); - size_t full_len = name_len+ap_data._len; + std::size_t name_len = std::strlen( name ); + std::size_t full_len = name_len+ap_data._len; if ( full_len < len ) { - strncpy( path, ap_data._path, ap_data._len ); - strncpy( path+ap_data._len, name, name_len ); - path[full_len] = 0; + __TBB_ASSERT( ap_data._path[ap_data._len] == 0, nullptr); + __TBB_ASSERT( std::strlen(ap_data._path) == ap_data._len, nullptr); + std::strncpy( path, ap_data._path, ap_data._len + 1 ); + __TBB_ASSERT( path[ap_data._len] == 0, nullptr); + std::strncat( path, name, len - ap_data._len ); + __TBB_ASSERT( std::strlen(path) == full_len, nullptr); } - return full_len; + return full_len+1; // +1 for null character + } +#endif // __TBB_DYNAMIC_LOAD_ENABLED + void init_dynamic_link_data() { + #if __TBB_DYNAMIC_LOAD_ENABLED + std::call_once( init_dl_data_state, init_dl_data ); + #endif } - #endif // __TBB_DYNAMIC_LOAD_ENABLED + + #if __USE_STATIC_DL_INIT + // ap_data structure is initialized with current directory on Linux. + // So it should be initialized as soon as possible since the current directory may be changed. + // static_init_ap_data object provides this initialization during library loading. + static struct static_init_dl_data_t { + static_init_dl_data_t() { + init_dynamic_link_data(); + } + } static_init_dl_data; + #endif #if __TBB_WEAK_SYMBOLS_PRESENT - static bool weak_symbol_link( const dynamic_link_descriptor descriptors[], size_t required ) + static bool weak_symbol_link( const dynamic_link_descriptor descriptors[], std::size_t required ) { // Check if the required entries are present in what was loaded into our process. - for ( size_t k = 0; k < required; ++k ) + for ( std::size_t k = 0; k < required; ++k ) if ( !descriptors[k].ptr ) return false; // Commit the entry points. - for ( size_t k = 0; k < required; ++k ) + for ( std::size_t k = 0; k < required; ++k ) *descriptors[k].handler = (pointer_to_handler) descriptors[k].ptr; return true; } #else - static bool weak_symbol_link( const dynamic_link_descriptor[], size_t ) { + static bool weak_symbol_link( const dynamic_link_descriptor[], std::size_t ) { return false; } #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ void dynamic_unlink( dynamic_link_handle handle ) { + #if !__TBB_DYNAMIC_LOAD_ENABLED /* only __TBB_WEAK_SYMBOLS_PRESENT is defined */ + if ( !dlclose ) return; + #endif if ( handle ) { - #if __TBB_WEAK_SYMBOLS_PRESENT - LIBRARY_ASSERT( dlclose != NULL, "dlopen is present but dlclose is NOT present!?" 
); - #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - #if __TBB_DYNAMIC_LOAD_ENABLED dlclose( handle ); - #endif /* __TBB_DYNAMIC_LOAD_ENABLED */ } } void dynamic_unlink_all() { - handles.free_handles(); + #if __TBB_DYNAMIC_LOAD_ENABLED + handles.free(); + #endif } - #if _WIN32 - static dynamic_link_handle global_symbols_link( const char* library, const dynamic_link_descriptor descriptors[], size_t required ) { - dynamic_link_handle library_handle; - if ( GetModuleHandleEx( 0, library, &library_handle ) ) { - if ( resolve_symbols( library_handle, descriptors, required ) ) - return library_handle; - else - FreeLibrary( library_handle ); - } - return 0; - } - #else /* _WIN32 */ - // It is supposed that all symbols are from the only one library - static dynamic_link_handle pin_symbols( dynamic_link_descriptor desc, const dynamic_link_descriptor descriptors[], size_t required ) { - // The library has been loaded by another module and contains at least one requested symbol. - // But after we obtained the symbol the library can be unloaded by another thread - // invalidating our symbol. Therefore we need to pin the library in memory. - dynamic_link_handle library_handle; - Dl_info info; - // Get library's name from earlier found symbol - if ( dladdr( (void*)*desc.handler, &info ) ) { - // Pin the library - library_handle = dlopen( info.dli_fname, RTLD_LAZY ); - if ( library_handle ) { - // If original library was unloaded before we pinned it - // and then another module loaded in its place, the earlier - // found symbol would become invalid. So revalidate them. - if ( !resolve_symbols( library_handle, descriptors, required ) ) { - // Wrong library. - dynamic_unlink(library_handle); - library_handle = 0; - } - } else { - char const * err = dlerror(); - DYNAMIC_LINK_WARNING( dl_lib_not_found, info.dli_fname, err ); + static dynamic_link_handle global_symbols_link( const char* library, const dynamic_link_descriptor descriptors[], std::size_t required ) { + dynamic_link_handle library_handle{}; +#if _WIN32 + auto res = GetModuleHandleEx(0, library, &library_handle); + __TBB_ASSERT_EX((res && library_handle) || (!res && !library_handle), nullptr); +#else /* _WIN32 */ + #if !__TBB_DYNAMIC_LOAD_ENABLED /* only __TBB_WEAK_SYMBOLS_PRESENT is defined */ + if ( !dlopen ) return 0; + #endif /* !__TBB_DYNAMIC_LOAD_ENABLED */ + // RTLD_GLOBAL - to guarantee that old TBB will find the loaded library + // RTLD_NOLOAD - not to load the library without the full path + library_handle = dlopen(library, RTLD_LAZY | RTLD_GLOBAL | RTLD_NOLOAD); +#endif /* _WIN32 */ + if (library_handle) { + if (!resolve_symbols(library_handle, descriptors, required)) { + dynamic_unlink(library_handle); + library_handle = nullptr; } } - else { - // The library have been unloaded by another thread - library_handle = 0; - } return library_handle; } - static dynamic_link_handle global_symbols_link( const char*, const dynamic_link_descriptor descriptors[], size_t required ) { - #if __TBB_WEAK_SYMBOLS_PRESENT - if ( !dlopen ) return 0; - #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - dynamic_link_handle library_handle = dlopen( NULL, RTLD_LAZY ); - #if __ANDROID__ - // On Android dlopen( NULL ) returns NULL if it is called during dynamic module initialization. 
- if ( !library_handle ) - return 0; - #endif - // Check existence of only the first symbol, then use it to find the library and load all necessary symbols - pointer_to_handler handler; - dynamic_link_descriptor desc = { descriptors[0].name, &handler }; - if ( resolve_symbols( library_handle, &desc, 1 ) ) - return pin_symbols( desc, descriptors, required ); - return 0; - } - #endif /* _WIN32 */ - static void save_library_handle( dynamic_link_handle src, dynamic_link_handle *dst ) { + __TBB_ASSERT_EX( src, "The library handle to store must be non-zero" ); if ( dst ) *dst = src; + #if __TBB_DYNAMIC_LOAD_ENABLED else - handles.add_handle( src ); + handles.add( src ); + #endif /* __TBB_DYNAMIC_LOAD_ENABLED */ } - dynamic_link_handle dynamic_load( const char* library, const dynamic_link_descriptor descriptors[], size_t required ) { - #if __TBB_DYNAMIC_LOAD_ENABLED - #if _XBOX - return LoadLibrary (library); - #else /* _XBOX */ - size_t const len = PATH_MAX + 1; +#if !_WIN32 + int loading_flags(bool local_binding) { + int flags = RTLD_NOW; + if (local_binding) { + flags = flags | RTLD_LOCAL; +#if (__linux__ && __GLIBC__) && !__TBB_USE_SANITIZERS + if( !GetBoolEnvironmentVariable("TBB_ENABLE_SANITIZERS") ) { + flags = flags | RTLD_DEEPBIND; + } +#endif + } else { + flags = flags | RTLD_GLOBAL; + } + return flags; + } +#endif + + dynamic_link_handle dynamic_load( const char* library, const dynamic_link_descriptor descriptors[], std::size_t required, bool local_binding ) { + ::tbb::detail::suppress_unused_warning( library, descriptors, required, local_binding ); +#if __TBB_DYNAMIC_LOAD_ENABLED + std::size_t const len = PATH_MAX + 1; char path[ len ]; - size_t rc = abs_path( library, path, len ); - if ( 0 < rc && rc < len ) { - #if _WIN32 + std::size_t rc = abs_path( library, path, len ); + if ( 0 < rc && rc <= len ) { +#if _WIN32 // Prevent Windows from displaying silly message boxes if it fails to load library // (e.g. because of MS runtime problems - one of those crazy manifest related ones) UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS); - #endif /* _WIN32 */ - #if __TBB_WEAK_SYMBOLS_PRESENT - if ( !dlopen ) return 0; - #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ - dynamic_link_handle library_handle = dlopen( path, RTLD_LAZY ); - #if _WIN32 +#endif /* _WIN32 */ + // The second argument (loading_flags) is ignored on Windows + dynamic_link_handle library_handle = dlopen( path, loading_flags(local_binding) ); +#if _WIN32 SetErrorMode (prev_mode); - #endif /* _WIN32 */ +#endif /* _WIN32 */ if( library_handle ) { if( !resolve_symbols( library_handle, descriptors, required ) ) { // The loaded library does not contain all the expected entry points dynamic_unlink( library_handle ); - library_handle = NULL; + library_handle = nullptr; } } else DYNAMIC_LINK_WARNING( dl_lib_not_found, path, dlerror() ); return library_handle; - } else if ( rc>=len ) + } else if ( rc>len ) DYNAMIC_LINK_WARNING( dl_buff_too_small ); // rc == 0 means failing of init_ap_data so the warning has already been issued. - #endif /* _XBOX */ - #endif /* __TBB_DYNAMIC_LOAD_ENABLED */ - return 0; + +#endif /* __TBB_DYNAMIC_LOAD_ENABLED */ + return nullptr; } - bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], size_t required, dynamic_link_handle *handle, int flags ) { + bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], std::size_t required, dynamic_link_handle *handle, int flags ) { + init_dynamic_link_data(); + // TODO: May global_symbols_link find weak symbols? 
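+        // Resolution proceeds in flag order: DYNAMIC_LINK_GLOBAL first (symbols already
+        // present in the process), then DYNAMIC_LINK_LOAD (dlopen/LoadLibrary from the
+        // directory this library itself was loaded from), then DYNAMIC_LINK_WEAK (weak
+        // symbols linked into this binary). The first mechanism resolving all 'required'
+        // descriptors wins.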
- dynamic_link_handle library_handle = ( flags & DYNAMIC_LINK_GLOBAL ) ? global_symbols_link( library, descriptors, required ) : 0; + dynamic_link_handle library_handle = ( flags & DYNAMIC_LINK_GLOBAL ) ? global_symbols_link( library, descriptors, required ) : nullptr; +#if defined(_MSC_VER) && _MSC_VER <= 1900 +// #pragma warning (push) +// MSVC 2015 warning: 'int': forcing value to bool 'true' or 'false' +// #pragma warning (disable: 4800) +#endif if ( !library_handle && ( flags & DYNAMIC_LINK_LOAD ) ) - library_handle = dynamic_load( library, descriptors, required ); + library_handle = dynamic_load( library, descriptors, required, flags & DYNAMIC_LINK_LOCAL ); +#if defined(_MSC_VER) && _MSC_VER <= 1900 +// #pragma warning (pop) +#endif if ( !library_handle && ( flags & DYNAMIC_LINK_WEAK ) ) return weak_symbol_link( descriptors, required ); - save_library_handle( library_handle, handle ); - return true; + if ( library_handle ) { + save_library_handle( library_handle, handle ); + return true; + } + return false; } #endif /*__TBB_WIN8UI_SUPPORT*/ #else /* __TBB_WEAK_SYMBOLS_PRESENT || __TBB_DYNAMIC_LOAD_ENABLED */ - bool dynamic_link( const char*, const dynamic_link_descriptor*, size_t, dynamic_link_handle *handle, int ) { + bool dynamic_link( const char*, const dynamic_link_descriptor*, std::size_t, dynamic_link_handle *handle, int ) { if ( handle ) *handle=0; return false; } - - void dynamic_unlink( dynamic_link_handle ) { - } - - void dynamic_unlink_all() { - } + void dynamic_unlink( dynamic_link_handle ) {} + void dynamic_unlink_all() {} #endif /* __TBB_WEAK_SYMBOLS_PRESENT || __TBB_DYNAMIC_LOAD_ENABLED */ -CLOSE_INTERNAL_NAMESPACE +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/dynamic_link.h b/src/tbb/src/tbb/dynamic_link.h index d5931f8b6..f07750b66 100644 --- a/src/tbb/src/tbb/dynamic_link.h +++ b/src/tbb/src/tbb/dynamic_link.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_dynamic_link @@ -23,29 +19,23 @@ // Support for dynamic loading entry points from other shared libraries. -#include "tbb/tbb_stddef.h" +#include "oneapi/tbb/detail/_config.h" -#ifdef LIBRARY_ASSERT - #undef __TBB_ASSERT - #define __TBB_ASSERT(x,y) LIBRARY_ASSERT(x,y) -#else - #define LIBRARY_ASSERT(x,y) __TBB_ASSERT_EX(x,y) -#endif /* !LIBRARY_ASSERT */ +#include <atomic> +#include <mutex> /** By default, symbols declared and defined here go into namespace tbb::internal. To put them in other namespace, define macros OPEN_INTERNAL_NAMESPACE and CLOSE_INTERNAL_NAMESPACE to override the following default definitions. **/ -#ifndef OPEN_INTERNAL_NAMESPACE -#define OPEN_INTERNAL_NAMESPACE namespace tbb { namespace internal { -#define CLOSE_INTERNAL_NAMESPACE }} -#endif /* OPEN_INTERNAL_NAMESPACE */ -#include <stddef.h> -#if _WIN32 -#include "tbb/machine/windows_api.h" +#include <cstddef> +#ifdef _WIN32 +#include <windows.h> #endif /* _WIN32 */ -OPEN_INTERNAL_NAMESPACE +namespace tbb { +namespace detail { +namespace r1 { //! Type definition for a pointer to a void somefunc(void) typedef void (*pointer_to_handler)(); @@ -55,8 +45,10 @@ typedef void (*pointer_to_handler)(); // prevent warnings from some compilers (g++ 4.1) #if __TBB_WEAK_SYMBOLS_PRESENT #define DLD(s,h) {#s, (pointer_to_handler*)(void*)(&h), (pointer_to_handler)&s} -#else +#define DLD_NOWEAK(s,h) {#s, (pointer_to_handler*)(void*)(&h), nullptr} +#else #define DLD(s,h) {#s, (pointer_to_handler*)(void*)(&h)} +#define DLD_NOWEAK(s,h) DLD(s,h) #endif /* __TBB_WEAK_SYMBOLS_PRESENT */ //! Association between a handler name and location of pointer to it. struct dynamic_link_descriptor { @@ -71,15 +63,18 @@ struct dynamic_link_descriptor { }; #if _WIN32 -typedef HMODULE dynamic_link_handle; +using dynamic_link_handle = HMODULE; #else -typedef void* dynamic_link_handle; +using dynamic_link_handle = void*; #endif /* _WIN32 */ -const int DYNAMIC_LINK_GLOBAL = 0x01; -const int DYNAMIC_LINK_LOAD = 0x02; -const int DYNAMIC_LINK_WEAK = 0x04; -const int DYNAMIC_LINK_ALL = DYNAMIC_LINK_GLOBAL | DYNAMIC_LINK_LOAD | DYNAMIC_LINK_WEAK; +const int DYNAMIC_LINK_GLOBAL = 0x01; +const int DYNAMIC_LINK_LOAD = 0x02; +const int DYNAMIC_LINK_WEAK = 0x04; +const int DYNAMIC_LINK_LOCAL = 0x08; + +const int DYNAMIC_LINK_LOCAL_BINDING = DYNAMIC_LINK_LOCAL | DYNAMIC_LINK_LOAD; +const int DYNAMIC_LINK_DEFAULT = DYNAMIC_LINK_GLOBAL | DYNAMIC_LINK_LOAD | DYNAMIC_LINK_WEAK; //! Fill in dynamically linked handlers. /** 'library' is the name of the requested library. 
It should not contain a full @@ -99,9 +94,9 @@ const int DYNAMIC_LINK_ALL = DYNAMIC_LINK_GLOBAL | DYNAMIC_LINK_LOAD | DYNAMI **/ bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], - size_t required, - dynamic_link_handle* handle = 0, - int flags = DYNAMIC_LINK_ALL ); + std::size_t required, + dynamic_link_handle* handle = nullptr, + int flags = DYNAMIC_LINK_DEFAULT ); void dynamic_unlink( dynamic_link_handle handle ); @@ -111,11 +106,13 @@ enum dynamic_link_error_t { dl_success = 0, dl_lib_not_found, // char const * lib, dlerr_t err dl_sym_not_found, // char const * sym, dlerr_t err - // Note: dlerr_t depends on OS: it is char const * on Linux* and OS X*, int on Windows*. + // Note: dlerr_t depends on OS: it is char const * on Linux* and macOS*, int on Windows*. dl_sys_fail, // char const * func, int err dl_buff_too_small // none }; // dynamic_link_error_t -CLOSE_INTERNAL_NAMESPACE +} // namespace r1 +} // namespace detail +} // namespace tbb #endif /* __TBB_dynamic_link */ diff --git a/src/tbb/src/tbb/environment.h b/src/tbb/src/tbb/environment.h new file mode 100644 index 000000000..eac6f2023 --- /dev/null +++ b/src/tbb/src/tbb/environment.h @@ -0,0 +1,81 @@ +/* + Copyright (c) 2018-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_environment_H +#define __TBB_tbb_environment_H + +#include <cstdlib> +#include <cstring> +#include <cerrno> +#include <cctype> + +namespace tbb { +namespace detail { +namespace r1 { + +#if __TBB_WIN8UI_SUPPORT +static inline bool GetBoolEnvironmentVariable( const char * ) { + return false; +} + +static inline long GetIntegralEnvironmentVariable( const char * ) { + return -1; +} +#else /* __TBB_WIN8UI_SUPPORT */ +static inline bool GetBoolEnvironmentVariable( const char * name ) { + if ( const char* s = std::getenv(name) ) { + // The result is defined as true only if the environment variable contains + // no characters except one '1' character and an arbitrary number of spaces + // (including the absence of spaces). 
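+            // For example: "1", "1 ", "  1  " parse as true; "", "0", "01", "11", "1 1" as false.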
+ size_t index = std::strspn(s, " "); + if (s[index] != '1') return false; + index++; + // Memory access after incrementing is safe, since the getenv() returns a + // null-terminated string, and even if the character getting by index is '1', + // and this character is the end of string, after incrementing we will get + // an index of character, that contains '\0' + index += std::strspn(&s[index], " "); + return !s[index]; + } + return false; +} + +static inline long GetIntegralEnvironmentVariable( const char * name ) { + if ( const char* s = std::getenv(name) ) { + char* end = nullptr; + errno = 0; + long value = std::strtol(s, &end, 10); + + // We have exceeded the range, value is negative or string is incovertable + if ( errno == ERANGE || value < 0 || end==s ) { + return -1; + } + for ( ; *end != '\0'; end++ ) { + if ( !std::isspace(*end) ) { + return -1; + } + } + return value; + } + return -1; +} +#endif /* __TBB_WIN8UI_SUPPORT */ + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_tbb_environment_H diff --git a/src/tbb/src/tbb/exception.cpp b/src/tbb/src/tbb/exception.cpp new file mode 100644 index 000000000..efc9b2a4d --- /dev/null +++ b/src/tbb/src/tbb/exception.cpp @@ -0,0 +1,166 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_exception.h" +#include "oneapi/tbb/detail/_assert.h" +#include "oneapi/tbb/detail/_template_helpers.h" + +#include <cstring> +#include <cstdio> +#include <stdexcept> // std::runtime_error +#include <new> +#include <stdexcept> + +#define __TBB_STD_RETHROW_EXCEPTION_POSSIBLY_BROKEN \ + (__GLIBCXX__ && __TBB_GLIBCXX_VERSION>=40700 && __TBB_GLIBCXX_VERSION<60000 && TBB_USE_EXCEPTIONS) + +#if __TBB_STD_RETHROW_EXCEPTION_POSSIBLY_BROKEN +// GCC ABI declarations necessary for a workaround +#include <cxxabi.h> +#endif + +namespace tbb { +namespace detail { +namespace r1 { + +const char* bad_last_alloc::what() const noexcept(true) { return "bad allocation in previous or concurrent attempt"; } +const char* user_abort::what() const noexcept(true) { return "User-initiated abort has terminated this operation"; } +const char* missing_wait::what() const noexcept(true) { return "wait() was not called on the structured_task_group"; } + +#if TBB_USE_EXCEPTIONS + template <typename F> + /*[[noreturn]]*/ void do_throw_noexcept(F throw_func) noexcept { + throw_func(); + } + + /*[[noreturn]]*/ void do_throw_noexcept(void (*throw_func)()) noexcept { + throw_func(); +#if __GNUC__ == 7 + // In release, GCC 7 loses noexcept attribute during tail call optimization. + // The following statement prevents tail call optimization. 
+ volatile bool reach_this_point = true; + suppress_unused_warning(reach_this_point); +#endif + } + + bool terminate_on_exception(); // defined in global_control.cpp and ipc_server.cpp + + template <typename F> + /*[[noreturn]]*/ void do_throw(F throw_func) { + if (terminate_on_exception()) { + do_throw_noexcept(throw_func); + } + throw_func(); + } + + #define DO_THROW(exc, init_args) do_throw( []{ throw exc init_args; } ); +#else /* !TBB_USE_EXCEPTIONS */ + #define PRINT_ERROR_AND_ABORT(exc_name, msg) \ + std::fprintf (stderr, "Exception %s with message %s would have been thrown, " \ + "if exception handling had not been disabled. Aborting.\n", exc_name, msg); \ + std::fflush(stderr); \ + std::abort(); + #define DO_THROW(exc, init_args) PRINT_ERROR_AND_ABORT(#exc, #init_args) +#endif /* !TBB_USE_EXCEPTIONS */ + +void throw_exception ( exception_id eid ) { + switch ( eid ) { + case exception_id::bad_alloc: DO_THROW(std::bad_alloc, ()); break; + case exception_id::bad_last_alloc: DO_THROW(bad_last_alloc, ()); break; + case exception_id::user_abort: DO_THROW( user_abort, () ); break; + case exception_id::nonpositive_step: DO_THROW(std::invalid_argument, ("Step must be positive") ); break; + case exception_id::out_of_range: DO_THROW(std::out_of_range, ("Index out of requested size range")); break; + case exception_id::reservation_length_error: DO_THROW(std::length_error, ("Attempt to exceed implementation defined length limits")); break; + case exception_id::missing_wait: DO_THROW(missing_wait, ()); break; + case exception_id::invalid_load_factor: DO_THROW(std::out_of_range, ("Invalid hash load factor")); break; + case exception_id::invalid_key: DO_THROW(std::out_of_range, ("invalid key")); break; + case exception_id::bad_tagged_msg_cast: DO_THROW(std::runtime_error, ("Illegal tagged_msg cast")); break; + case exception_id::unsafe_wait: DO_THROW(unsafe_wait, ("Unsafe to wait further")); break; + default: __TBB_ASSERT ( false, "Unknown exception ID" ); + } + __TBB_ASSERT(false, "Unreachable code"); +} + +/* The "what" should be fairly short, not more than about 128 characters. + Because we control all the call sites to handle_perror, it is pointless + to bullet-proof it for very long strings. + + Design note: ADR put this routine off to the side in tbb_misc.cpp instead of + Task.cpp because the throw generates a pathetic lot of code, and ADR wanted + this large chunk of code to be placed on a cold page. */ +void handle_perror( int error_code, const char* what ) { + const int BUF_SIZE = 255; + char buf[BUF_SIZE + 1] = { 0 }; + std::strncat(buf, what, BUF_SIZE); + std::size_t buf_len = std::strlen(buf); + if (error_code) { + std::strncat(buf, ": ", BUF_SIZE - buf_len); + buf_len = std::strlen(buf); + std::strncat(buf, std::strerror(error_code), BUF_SIZE - buf_len); + buf_len = std::strlen(buf); + } + __TBB_ASSERT(buf_len <= BUF_SIZE && buf[buf_len] == 0, nullptr); +#if TBB_USE_EXCEPTIONS + do_throw([&buf] { throw std::runtime_error(buf); }); +#else + PRINT_ERROR_AND_ABORT( "runtime_error", buf); +#endif /* !TBB_USE_EXCEPTIONS */ +} + +#if __TBB_STD_RETHROW_EXCEPTION_POSSIBLY_BROKEN +// Runtime detection and workaround for the GCC bug 62258. +// The problem is that std::rethrow_exception() does not increment a counter +// of active exceptions, causing std::uncaught_exception() to return a wrong value. 
+// The code is created after, and roughly reflects, the workaround +// at https://gcc.gnu.org/bugzilla/attachment.cgi?id=34683 + +void fix_broken_rethrow() { + struct gcc_eh_data { + void * caughtExceptions; + unsigned int uncaughtExceptions; + }; + gcc_eh_data* eh_data = punned_cast<gcc_eh_data*>( abi::__cxa_get_globals() ); + ++eh_data->uncaughtExceptions; +} + +bool gcc_rethrow_exception_broken() { + bool is_broken; + __TBB_ASSERT( !std::uncaught_exception(), + "gcc_rethrow_exception_broken() must not be called when an exception is active" ); + try { + // Throw, catch, and rethrow an exception + try { + throw __TBB_GLIBCXX_VERSION; + } catch(...) { + std::rethrow_exception( std::current_exception() ); + } + } catch(...) { + // Check the bug presence + is_broken = std::uncaught_exception(); + } + if( is_broken ) fix_broken_rethrow(); + __TBB_ASSERT( !std::uncaught_exception(), nullptr); + return is_broken; +} +#else +void fix_broken_rethrow() {} +bool gcc_rethrow_exception_broken() { return false; } +#endif /* __TBB_STD_RETHROW_EXCEPTION_POSSIBLY_BROKEN */ + +} // namespace r1 +} // namespace detail +} // namespace tbb + diff --git a/src/tbb/src/tbb/global_control.cpp b/src/tbb/src/tbb/global_control.cpp new file mode 100644 index 000000000..f45c66b87 --- /dev/null +++ b/src/tbb/src/tbb/global_control.cpp @@ -0,0 +1,285 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_template_helpers.h" + +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/global_control.h" +#include "oneapi/tbb/tbb_allocator.h" +#include "oneapi/tbb/spin_mutex.h" + +#include "governor.h" +#include "threading_control.h" +#include "market.h" +#include "misc.h" + +#include <atomic> +#include <set> + +namespace tbb { +namespace detail { +namespace r1 { + +//! Comparator for a set of global_control objects +struct control_storage_comparator { + bool operator()(const d1::global_control* lhs, const d1::global_control* rhs) const; +}; + +class control_storage { + friend struct global_control_impl; + friend std::size_t global_control_active_value(int); + friend void global_control_lock(); + friend void global_control_unlock(); + friend std::size_t global_control_active_value_unsafe(d1::global_control::parameter); +protected: + std::size_t my_active_value{0}; + std::set<d1::global_control*, control_storage_comparator, tbb_allocator<d1::global_control*>> my_list{}; + spin_mutex my_list_mutex{}; +public: + virtual ~control_storage() = default; + virtual std::size_t default_value() const = 0; + virtual void apply_active(std::size_t new_active) { + my_active_value = new_active; + } + virtual bool is_first_arg_preferred(std::size_t a, std::size_t b) const { + return a>b; // prefer max by default + } + virtual std::size_t active_value() { + spin_mutex::scoped_lock lock(my_list_mutex); // protect my_list.empty() call + return !my_list.empty() ? 
my_active_value : default_value(); + } + + std::size_t active_value_unsafe() { + return !my_list.empty() ? my_active_value : default_value(); + } +}; + +class alignas(max_nfs_size) allowed_parallelism_control : public control_storage { + std::size_t default_value() const override { + return max(1U, governor::default_num_threads()); + } + bool is_first_arg_preferred(std::size_t a, std::size_t b) const override { + return a<b; // prefer min allowed parallelism + } + void apply_active(std::size_t new_active) override { + control_storage::apply_active(new_active); + __TBB_ASSERT(my_active_value >= 1, nullptr); + // -1 to take external thread into account + threading_control::set_active_num_workers(my_active_value - 1); + } + std::size_t active_value() override { + spin_mutex::scoped_lock lock(my_list_mutex); // protect my_list.empty() call + if (my_list.empty()) { + return default_value(); + } + + // non-zero, if market is active + const std::size_t workers = threading_control::max_num_workers(); + // We can't exceed market's maximal number of workers. + // +1 to take external thread into account + return workers ? min(workers + 1, my_active_value) : my_active_value; + } +}; + +class alignas(max_nfs_size) stack_size_control : public control_storage { + std::size_t default_value() const override { +#if _WIN32_WINNT >= 0x0602 /* _WIN32_WINNT_WIN8 */ + static auto ThreadStackSizeDefault = [] { + ULONG_PTR hi, lo; + GetCurrentThreadStackLimits(&lo, &hi); + return hi - lo; + }(); + return ThreadStackSizeDefault; +#elif defined(EMSCRIPTEN) + return __TBB_EMSCRIPTEN_STACK_SIZE; +#else + return ThreadStackSize; +#endif + } + void apply_active(std::size_t new_active) override { + control_storage::apply_active(new_active); +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + __TBB_ASSERT( false, "For Windows 8 Store* apps we must not set stack size" ); +#endif + } +}; + +class alignas(max_nfs_size) terminate_on_exception_control : public control_storage { + std::size_t default_value() const override { + return 0; + } +}; + +class alignas(max_nfs_size) lifetime_control : public control_storage { + bool is_first_arg_preferred(std::size_t, std::size_t) const override { + return false; // not interested + } + std::size_t default_value() const override { + return 0; + } + void apply_active(std::size_t new_active) override { + if (new_active == 1) { + // reserve the market reference + threading_control::register_lifetime_control(); + } else if (new_active == 0) { // new_active == 0 + threading_control::unregister_lifetime_control(/*blocking_terminate*/ false); + } + control_storage::apply_active(new_active); + } +}; + +static control_storage* controls[] = {nullptr, nullptr, nullptr, nullptr}; + +void global_control_acquire() { + controls[0] = new (cache_aligned_allocate(sizeof(allowed_parallelism_control))) allowed_parallelism_control{}; + controls[1] = new (cache_aligned_allocate(sizeof(stack_size_control))) stack_size_control{}; + controls[2] = new (cache_aligned_allocate(sizeof(terminate_on_exception_control))) terminate_on_exception_control{}; + controls[3] = new (cache_aligned_allocate(sizeof(lifetime_control))) lifetime_control{}; +} + +void global_control_release() { + for (auto& ptr : controls) { + ptr->~control_storage(); + cache_aligned_deallocate(ptr); + ptr = nullptr; + } +} + +void global_control_lock() { + for (auto& ctl : controls) { + ctl->my_list_mutex.lock(); + } +} + +void global_control_unlock() { + int N = std::distance(std::begin(controls), std::end(controls)); + for (int i = N - 1; i 
>= 0; --i) { + controls[i]->my_list_mutex.unlock(); + } +} + +std::size_t global_control_active_value_unsafe(d1::global_control::parameter param) { + __TBB_ASSERT_RELEASE(param < d1::global_control::parameter_max, nullptr); + return controls[param]->active_value_unsafe(); +} + +//! Comparator for a set of global_control objects +inline bool control_storage_comparator::operator()(const d1::global_control* lhs, const d1::global_control* rhs) const { + __TBB_ASSERT_RELEASE(lhs->my_param < d1::global_control::parameter_max , nullptr); + return lhs->my_value < rhs->my_value || (lhs->my_value == rhs->my_value && lhs < rhs); +} + +bool terminate_on_exception() { + return d1::global_control::active_value(d1::global_control::terminate_on_exception) == 1; +} + +struct global_control_impl { +private: + static bool erase_if_present(control_storage* const c, d1::global_control& gc) { + auto it = c->my_list.find(&gc); + if (it != c->my_list.end()) { + c->my_list.erase(it); + return true; + } + return false; + } + +public: + + static void create(d1::global_control& gc) { + __TBB_ASSERT_RELEASE(gc.my_param < d1::global_control::parameter_max, nullptr); + control_storage* const c = controls[gc.my_param]; + + spin_mutex::scoped_lock lock(c->my_list_mutex); + if (c->my_list.empty() || c->is_first_arg_preferred(gc.my_value, c->my_active_value)) { + // to guarantee that apply_active() is called with current active value, + // calls it here and in internal_destroy() under my_list_mutex + c->apply_active(gc.my_value); + } + c->my_list.insert(&gc); + } + + static void destroy(d1::global_control& gc) { + __TBB_ASSERT_RELEASE(gc.my_param < d1::global_control::parameter_max, nullptr); + control_storage* const c = controls[gc.my_param]; + // Concurrent reading and changing global parameter is possible. 
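+        // Every read-modify-write of my_list and my_active_value therefore happens
+        // under my_list_mutex (see also create() above and active_value()).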
+ spin_mutex::scoped_lock lock(c->my_list_mutex); + __TBB_ASSERT(gc.my_param == d1::global_control::scheduler_handle || !c->my_list.empty(), nullptr); + std::size_t new_active = (std::size_t)(-1), old_active = c->my_active_value; + + if (!erase_if_present(c, gc)) { + __TBB_ASSERT(gc.my_param == d1::global_control::scheduler_handle , nullptr); + return; + } + if (c->my_list.empty()) { + __TBB_ASSERT(new_active == (std::size_t) - 1, nullptr); + new_active = c->default_value(); + } else { + new_active = (*c->my_list.begin())->my_value; + } + if (new_active != old_active) { + c->apply_active(new_active); + } + } + + static bool remove_and_check_if_empty(d1::global_control& gc) { + __TBB_ASSERT_RELEASE(gc.my_param < d1::global_control::parameter_max, nullptr); + control_storage* const c = controls[gc.my_param]; + + spin_mutex::scoped_lock lock(c->my_list_mutex); + __TBB_ASSERT(!c->my_list.empty(), nullptr); + erase_if_present(c, gc); + return c->my_list.empty(); + } +#if TBB_USE_ASSERT + static bool is_present(d1::global_control& gc) { + __TBB_ASSERT_RELEASE(gc.my_param < d1::global_control::parameter_max, nullptr); + control_storage* const c = controls[gc.my_param]; + + spin_mutex::scoped_lock lock(c->my_list_mutex); + auto it = c->my_list.find(&gc); + if (it != c->my_list.end()) { + return true; + } + return false; + } +#endif // TBB_USE_ASSERT +}; + +void __TBB_EXPORTED_FUNC create(d1::global_control& gc) { + global_control_impl::create(gc); +} +void __TBB_EXPORTED_FUNC destroy(d1::global_control& gc) { + global_control_impl::destroy(gc); +} + +bool remove_and_check_if_empty(d1::global_control& gc) { + return global_control_impl::remove_and_check_if_empty(gc); +} +#if TBB_USE_ASSERT +bool is_present(d1::global_control& gc) { + return global_control_impl::is_present(gc); +} +#endif // TBB_USE_ASSERT +std::size_t __TBB_EXPORTED_FUNC global_control_active_value(int param) { + __TBB_ASSERT_RELEASE(param < d1::global_control::parameter_max, nullptr); + return controls[param]->active_value(); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/governor.cpp b/src/tbb/src/tbb/governor.cpp index 99649c6b4..218a2bc53 100644 --- a/src/tbb/src/tbb/governor.cpp +++ b/src/tbb/src/tbb/governor.cpp @@ -1,98 +1,112 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -#include <stdio.h> -#include <stdlib.h> #include "governor.h" -#include "tbb_main.h" -#include "scheduler.h" +#include "threading_control.h" +#include "main.h" +#include "thread_data.h" #include "market.h" #include "arena.h" - -#include "tbb/task_scheduler_init.h" - #include "dynamic_link.h" +#include "concurrent_monitor.h" +#include "thread_dispatcher.h" -namespace tbb { -namespace internal { +#include "oneapi/tbb/task_group.h" +#include "oneapi/tbb/global_control.h" +#include "oneapi/tbb/tbb_allocator.h" +#include "oneapi/tbb/info.h" -//------------------------------------------------------------------------ -// governor -//------------------------------------------------------------------------ +#include "task_dispatcher.h" -#if __TBB_SURVIVE_THREAD_SWITCH -// Support for interoperability with Intel(R) Cilk(TM) Plus. +#include <cstdio> +#include <cstdlib> +#include <cstring> +#include <atomic> +#include <algorithm> -#if _WIN32 -#define CILKLIB_NAME "cilkrts20.dll" -#else -#define CILKLIB_NAME "libcilkrts.so" +#ifdef EMSCRIPTEN +#include <emscripten/stack.h> #endif -//! Handler for interoperation with cilkrts library. -static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u, - struct __cilk_tbb_stack_op_thunk o); +namespace tbb { +namespace detail { +namespace r1 { -//! Table describing how to link the handlers. -static const dynamic_link_descriptor CilkLinkTable[] = { - { "__cilkrts_watch_stack", (pointer_to_handler*)(void*)(&watch_stack_handler) } -}; +#if TBB_USE_ASSERT +std::atomic<int> the_observer_proxy_count; +#endif /* TBB_USE_ASSERT */ -static atomic<do_once_state> cilkrts_load_state; +void clear_address_waiter_table(); +void global_control_acquire(); +void global_control_release(); -bool initialize_cilk_interop() { - // Pinning can fail. This is a normal situation, and means that the current - // thread does not use cilkrts and consequently does not need interop. - return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1, /*handle=*/0, DYNAMIC_LINK_GLOBAL ); -} -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ +//! 
global_control.cpp contains definition +bool remove_and_check_if_empty(d1::global_control& gc); +bool is_present(d1::global_control& gc); namespace rml { - tbb_server* make_private_server( tbb_client& client ); +tbb_server* make_private_server( tbb_client& client ); +} // namespace rml + +namespace system_topology { + void destroy(); } +//------------------------------------------------------------------------ +// governor +//------------------------------------------------------------------------ + void governor::acquire_resources () { -#if USE_PTHREAD + global_control_acquire(); +#if __TBB_USE_POSIX int status = theTLS.create(auto_terminate); #else int status = theTLS.create(); #endif if( status ) handle_perror(status, "TBB failed to initialize task scheduler TLS\n"); - is_speculation_enabled = cpu_has_speculation(); + detect_cpu_features(cpu_features); + + is_rethrow_broken = gcc_rethrow_exception_broken(); } void governor::release_resources () { theRMLServerFactory.close(); -#if TBB_USE_ASSERT - if( __TBB_InitOnce::initialization_done() && theTLS.get() ) - runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" ); -#endif + destroy_process_mask(); + + __TBB_ASSERT(!(__TBB_InitOnce::initialization_done() && theTLS.get()), "TBB is unloaded while thread data still alive?"); + int status = theTLS.destroy(); if( status ) - runtime_warning("failed to destroy task scheduler TLS: %s", strerror(status)); + runtime_warning("failed to destroy task scheduler TLS: %s", std::strerror(status)); + clear_address_waiter_table(); + +#if TBB_USE_ASSERT + if (the_observer_proxy_count != 0) { + runtime_warning("Leaked %ld observer_proxy objects\n", long(the_observer_proxy_count)); + } +#endif /* TBB_USE_ASSERT */ + + system_topology::destroy(); dynamic_unlink_all(); + global_control_release(); } rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) { - rml::tbb_server* server = NULL; + rml::tbb_server* server = nullptr; if( !UsePrivateRML ) { ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client ); if( status != ::rml::factory::st_success ) { @@ -101,246 +115,492 @@ rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) { } } if ( !server ) { - __TBB_ASSERT( UsePrivateRML, NULL ); + __TBB_ASSERT( UsePrivateRML, nullptr); server = rml::make_private_server( client ); } __TBB_ASSERT( server, "Failed to create RML server" ); return server; } -void governor::sign_on(generic_scheduler* s) { - __TBB_ASSERT( !theTLS.get(), NULL ); - theTLS.set(s); -#if __TBB_SURVIVE_THREAD_SWITCH - if( watch_stack_handler ) { - __cilk_tbb_stack_op_thunk o; - o.routine = &stack_op_handler; - o.data = s; - if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) { - // Failed to register with cilkrts, make sure we are clean - s->my_cilk_unwatch_thunk.routine = NULL; - } -#if TBB_USE_ASSERT - else - s->my_cilk_state = generic_scheduler::cs_running; -#endif /* TBB_USE_ASSERT */ +void governor::one_time_init() { + if ( !__TBB_InitOnce::initialization_done() ) { + DoOneTimeInitialization(); } -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} - -void governor::sign_off(generic_scheduler* s) { - suppress_unused_warning(s); - __TBB_ASSERT( theTLS.get()==s, "attempt to unregister a wrong scheduler instance" ); - theTLS.set(NULL); -#if __TBB_SURVIVE_THREAD_SWITCH - __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk; - if ( ut.routine ) - (*ut.routine)(ut.data); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} - -void 
governor::setBlockingTerminate(const task_scheduler_init *tsi) { - __TBB_ASSERT(!IsBlockingTerminationInProgress, "It's impossible to create task_scheduler_init while blocking termination is in progress."); - if (BlockingTSI) - throw_exception(eid_blocking_sch_init); - BlockingTSI = tsi; -} - -generic_scheduler* governor::init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init ) { - if( !__TBB_InitOnce::initialization_done() ) - DoOneTimeInitializations(); - generic_scheduler* s = theTLS.get(); - if( s ) { - s->my_ref_count += 1; - return s; - } -#if __TBB_SURVIVE_THREAD_SWITCH - atomic_do_once( &initialize_cilk_interop, cilkrts_load_state ); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - if( (int)num_threads == task_scheduler_init::automatic ) - num_threads = default_num_threads(); - s = generic_scheduler::create_master( - market::create_arena( num_threads - 1, stack_size ? stack_size : ThreadStackSize ) ); - __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed"); - s->my_auto_initialized = auto_init; - return s; -} - -void governor::terminate_scheduler( generic_scheduler* s, const task_scheduler_init* tsi_ptr ) { - __TBB_ASSERT( s == theTLS.get(), "Attempt to terminate non-local scheduler instance" ); - if (--(s->my_ref_count)) { - if (BlockingTSI && BlockingTSI==tsi_ptr) { - // can't throw exception, because this is on dtor's call chain - fprintf(stderr, "Attempt to terminate nested scheduler in blocking mode\n"); - exit(1); +} + +bool governor::does_client_join_workers(const rml::tbb_client &client) { + return ((const thread_dispatcher&)client).must_join_workers(); +} + +/* + There is no portable way to get stack base address in Posix, however the modern + Linux versions provide pthread_attr_np API that can be used to obtain thread's + stack size and base address. Unfortunately even this function does not provide + enough information for the main thread on IA-64 architecture (RSE spill area + and memory stack are allocated as two separate discontinuous chunks of memory), + and there is no portable way to discern the main and the secondary threads. + Thus for macOS* and IA-64 architecture for Linux* OS we use the TBB worker stack size for + all threads and use the current stack top as the stack base. This simplified + approach is based on the following assumptions: + 1) If the default stack size is insufficient for the user app needs, the + required amount will be explicitly specified by the user at the point of the + TBB scheduler initialization (as an argument to tbb::task_scheduler_init + constructor). + 2) When an external thread initializes the scheduler, it has enough space on its + stack. Here "enough" means "at least as much as worker threads have". + 3) If the user app strives to conserve the memory by cutting stack size, it + should do this for TBB workers too (as in the #1). +*/ +static std::uintptr_t get_stack_base(std::size_t stack_size) { + // Stacks are growing top-down. Highest address is called "stack base", + // and the lowest is "stack limit". 
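+    // Illustrative relationship between the quantities involved (a sketch, not
+    // code from this file; the divisor in the threshold is an assumption):
+    //
+    //   std::uintptr_t base      = get_stack_base(stack_size); // highest address
+    //   std::uintptr_t limit     = base - stack_size;          // lowest address
+    //   std::uintptr_t threshold = base - stack_size / 2;      // stealing threshold
+    //
+    // The caller feeds (base, stack_size) into calculate_stealing_threshold() so
+    // that task stealing stops before the thread risks overflowing its stack.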
+#if __TBB_USE_WINAPI + suppress_unused_warning(stack_size); + NT_TIB* pteb = (NT_TIB*)NtCurrentTeb(); + __TBB_ASSERT(&pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB"); + return reinterpret_cast<std::uintptr_t>(pteb->StackBase); +#elif defined(EMSCRIPTEN) + suppress_unused_warning(stack_size); + return reinterpret_cast<std::uintptr_t>(emscripten_stack_get_base()); +#else + // There is no portable way to get stack base address in Posix, so we use + // non-portable method (on all modern Linux) or the simplified approach + // based on the common sense assumptions. The most important assumption + // is that the main thread's stack size is not less than that of other threads. + + // Points to the lowest addressable byte of a stack. + void* stack_limit = nullptr; +#if __linux__ && !__bg__ + size_t np_stack_size = 0; + pthread_attr_t np_attr_stack; + if (0 == pthread_getattr_np(pthread_self(), &np_attr_stack)) { + if (0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size)) { + __TBB_ASSERT( &stack_limit > stack_limit, "stack size must be positive" ); } + pthread_attr_destroy(&np_attr_stack); + } +#endif /* __linux__ */ + std::uintptr_t stack_base{}; + if (stack_limit) { + stack_base = reinterpret_cast<std::uintptr_t>(stack_limit) + stack_size; } else { -#if TBB_USE_ASSERT - if (BlockingTSI) { - __TBB_ASSERT( BlockingTSI == tsi_ptr, "For blocking termination last terminate_scheduler must be blocking." ); - IsBlockingTerminationInProgress = true; - } -#endif - s->cleanup_master(); - BlockingTSI = NULL; -#if TBB_USE_ASSERT - IsBlockingTerminationInProgress = false; -#endif + // Use an anchor as a base stack address. + int anchor{}; + stack_base = reinterpret_cast<std::uintptr_t>(&anchor); } + return stack_base; +#endif /* __TBB_USE_WINAPI */ } -void governor::auto_terminate(void* arg){ - generic_scheduler* s = static_cast<generic_scheduler*>(arg); - if( s && s->my_auto_initialized ) { - if( !--(s->my_ref_count) ) { - __TBB_ASSERT( !BlockingTSI, "Blocking auto-terminate is not supported." ); - // If the TLS slot is already cleared by OS or underlying concurrency - // runtime, restore its value. 
-            if ( !theTLS.get() )
-                theTLS.set(s);
-            else __TBB_ASSERT( s == theTLS.get(), NULL );
-            s->cleanup_master();
-            __TBB_ASSERT( !theTLS.get(), "cleanup_master has not cleared its TLS slot" );
+#if (_WIN32||_WIN64) && !__TBB_DYNAMIC_LOAD_ENABLED
+static void register_external_thread_destructor() {
+    struct thread_destructor {
+        ~thread_destructor() {
+            governor::terminate_external_thread();
         }
-    }
+    };
+    // ~thread_destructor() will be called during the calling thread's termination
+    static thread_local thread_destructor thr_destructor;
+}
+#endif // (_WIN32||_WIN64) && !__TBB_DYNAMIC_LOAD_ENABLED
+
+void governor::init_external_thread() {
+    one_time_init();
+    // Create new scheduler instance with arena
+    int num_slots = default_num_threads();
+    // TODO_REVAMP: support an external thread without an implicit arena
+    int num_reserved_slots = 1;
+    unsigned arena_priority_level = 1; // corresponds to tbb::task_arena::priority::normal
+    std::size_t stack_size = 0;
+    threading_control* thr_control = threading_control::register_public_reference();
+    arena& a = arena::create(thr_control, num_slots, num_reserved_slots, arena_priority_level);
+    // External thread always occupies the first slot
+    thread_data& td = *new(cache_aligned_allocate(sizeof(thread_data))) thread_data(0, false);
+    td.attach_arena(a, /*slot index*/ 0);
+    __TBB_ASSERT(td.my_inbox.is_idle_state(false), nullptr);
+
+    stack_size = a.my_threading_control->worker_stack_size();
+    std::uintptr_t stack_base = get_stack_base(stack_size);
+    task_dispatcher& task_disp = td.my_arena_slot->default_task_dispatcher();
+    td.enter_task_dispatcher(task_disp, calculate_stealing_threshold(stack_base, stack_size));
+
+    td.my_arena_slot->occupy();
+    thr_control->register_thread(td);
+    set_thread_data(td);
+#if (_WIN32||_WIN64) && !__TBB_DYNAMIC_LOAD_ENABLED
+    // The external thread destructor is called from dllMain but it is not available with a static build.
+    // Therefore, we need to register the current thread to call the destructor during thread termination.
+    register_external_thread_destructor();
+#endif
 }

-void governor::print_version_info () {
-    if ( UsePrivateRML )
-        PrintExtraVersionInfo( "RML", "private" );
-    else {
-        PrintExtraVersionInfo( "RML", "shared" );
-        theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
+void governor::auto_terminate(void* tls) {
+    __TBB_ASSERT(get_thread_data_if_initialized() == nullptr ||
+                 get_thread_data_if_initialized() == tls, nullptr);
+    if (tls) {
+        thread_data* td = static_cast<thread_data*>(tls);
+
+        auto clear_tls = [td] {
+            td->~thread_data();
+            cache_aligned_deallocate(td);
+            clear_thread_data();
+        };
+
+        // Only external thread can be inside an arena during termination.
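+        // td->my_arena_slot being set means this external thread still occupies
+        // an arena slot, so it must detach from the arena before its thread_data
+        // can be freed.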
+ if (td->my_arena_slot) { + arena* a = td->my_arena; + threading_control* thr_control = a->my_threading_control; + + // If the TLS slot is already cleared by OS or underlying concurrency + // runtime, restore its value to properly clean up arena + if (!is_thread_data_set(td)) { + set_thread_data(*td); + } + + a->my_observers.notify_exit_observers(td->my_last_observer, td->my_is_worker); + + td->leave_task_dispatcher(); + td->my_arena_slot->release(); + // Release an arena + a->on_thread_leaving(arena::ref_external); + + thr_control->unregister_thread(*td); + + // The tls should be cleared before market::release because + // market can destroy the tls key if we keep the last reference + clear_tls(); + + // If there was an associated arena, it added a public market reference + thr_control->unregister_public_reference(/* blocking terminate =*/ false); + } else { + clear_tls(); + } } -#if __TBB_SURVIVE_THREAD_SWITCH - if( watch_stack_handler ) - PrintExtraVersionInfo( "CILK", CILKLIB_NAME ); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ + __TBB_ASSERT(get_thread_data_if_initialized() == nullptr, nullptr); } void governor::initialize_rml_factory () { - ::rml::factory::status_type res = theRMLServerFactory.open(); + ::rml::factory::status_type res = theRMLServerFactory.open(); UsePrivateRML = res != ::rml::factory::st_success; } -#if __TBB_SURVIVE_THREAD_SWITCH -__cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) { - __TBB_ASSERT(data,NULL); - generic_scheduler* s = static_cast<generic_scheduler*>(data); -#if TBB_USE_ASSERT - void* current = theTLS.get(); -#if _WIN32||_WIN64 - uintptr_t thread_id = GetCurrentThreadId(); -#else - uintptr_t thread_id = uintptr_t(pthread_self()); -#endif +void __TBB_EXPORTED_FUNC get(d1::task_scheduler_handle& handle) { + handle.m_ctl = new(allocate_memory(sizeof(global_control))) global_control(global_control::scheduler_handle, 1); +} -#endif /* TBB_USE_ASSERT */ - switch( op ) { - default: - __TBB_ASSERT( 0, "invalid op" ); - case CILK_TBB_STACK_ADOPT: { - __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || - current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" ); -#if TBB_USE_ASSERT - if( current==s ) - runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id ); - s->my_cilk_state = generic_scheduler::cs_running; -#endif /* TBB_USE_ASSERT */ - theTLS.set(s); - break; +void release_impl(d1::task_scheduler_handle& handle) { + if (handle.m_ctl != nullptr) { + handle.m_ctl->~global_control(); + deallocate_memory(handle.m_ctl); + handle.m_ctl = nullptr; + } +} + +bool finalize_impl(d1::task_scheduler_handle& handle) { + __TBB_ASSERT_RELEASE(handle, "trying to finalize with null handle"); + __TBB_ASSERT(is_present(*handle.m_ctl), "finalize or release was already called on this object"); + + bool ok = true; // ok if threading_control does not exist yet + if (threading_control::is_present()) { + thread_data* td = governor::get_thread_data_if_initialized(); + if (td) { + task_dispatcher* task_disp = td->my_task_dispatcher; + __TBB_ASSERT(task_disp, nullptr); + if (task_disp->m_properties.outermost && !td->my_is_worker) { // is not inside a parallel region + governor::auto_terminate(td); + } } - case CILK_TBB_STACK_ORPHAN: { - __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" ); -#if TBB_USE_ASSERT - s->my_cilk_state = generic_scheduler::cs_limbo; -#endif /* TBB_USE_ASSERT */ - theTLS.set(NULL); - break; + + if 
(remove_and_check_if_empty(*handle.m_ctl)) { + ok = threading_control::unregister_lifetime_control(/*blocking_terminate*/ true); + } else { + ok = false; } - case CILK_TBB_STACK_RELEASE: { - __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || - current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" ); -#if TBB_USE_ASSERT - s->my_cilk_state = generic_scheduler::cs_freed; -#endif /* TBB_USE_ASSERT */ - s->my_cilk_unwatch_thunk.routine = NULL; - auto_terminate( s ); - } } - return 0; + + return ok; } -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} // namespace internal +bool __TBB_EXPORTED_FUNC finalize(d1::task_scheduler_handle& handle, std::intptr_t mode) { + if (mode == d1::release_nothrowing) { + release_impl(handle); + return true; + } else { + bool ok = finalize_impl(handle); + // TODO: it is unsafe when finalize is called concurrently and further library unload + release_impl(handle); + if (mode == d1::finalize_throwing && !ok) { + throw_exception(exception_id::unsafe_wait); + } + return ok; + } +} -//------------------------------------------------------------------------ -// task_scheduler_init -//------------------------------------------------------------------------ +#if __TBB_ARENA_BINDING + +#if __TBB_WEAK_SYMBOLS_PRESENT +#pragma weak __TBB_internal_initialize_system_topology +#pragma weak __TBB_internal_destroy_system_topology +#pragma weak __TBB_internal_allocate_binding_handler +#pragma weak __TBB_internal_deallocate_binding_handler +#pragma weak __TBB_internal_apply_affinity +#pragma weak __TBB_internal_restore_affinity +#pragma weak __TBB_internal_get_default_concurrency + +extern "C" { +void __TBB_internal_initialize_system_topology( + size_t groups_num, + int& numa_nodes_count, int*& numa_indexes_list, + int& core_types_count, int*& core_types_indexes_list +); +void __TBB_internal_destroy_system_topology( ); + +//TODO: consider renaming to `create_binding_handler` and `destroy_binding_handler` +binding_handler* __TBB_internal_allocate_binding_handler( int slot_num, int numa_id, int core_type_id, int max_threads_per_core ); +void __TBB_internal_deallocate_binding_handler( binding_handler* handler_ptr ); + +void __TBB_internal_apply_affinity( binding_handler* handler_ptr, int slot_num ); +void __TBB_internal_restore_affinity( binding_handler* handler_ptr, int slot_num ); + +int __TBB_internal_get_default_concurrency( int numa_id, int core_type_id, int max_threads_per_core ); +} +#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ + +// Stubs that will be used if TBBbind library is unavailable. 
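+// (The pattern: each TBBbind entry point is reached through a function pointer
+// that initially targets one of these dummies or nullptr; dynamic_link() rebinds
+// the pointers to the real symbols when a tbbbind shared object is found, so
+// callers never have to test for availability themselves.)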
+static void dummy_destroy_system_topology ( ) { } +static binding_handler* dummy_allocate_binding_handler ( int, int, int, int ) { return nullptr; } +static void dummy_deallocate_binding_handler ( binding_handler* ) { } +static void dummy_apply_affinity ( binding_handler*, int ) { } +static void dummy_restore_affinity ( binding_handler*, int ) { } +static int dummy_get_default_concurrency( int, int, int ) { return governor::default_num_threads(); } + +// Handlers for communication with TBBbind +static void (*initialize_system_topology_ptr)( + size_t groups_num, + int& numa_nodes_count, int*& numa_indexes_list, + int& core_types_count, int*& core_types_indexes_list +) = nullptr; +static void (*destroy_system_topology_ptr)( ) = dummy_destroy_system_topology; + +static binding_handler* (*allocate_binding_handler_ptr)( int slot_num, int numa_id, int core_type_id, int max_threads_per_core ) + = dummy_allocate_binding_handler; +static void (*deallocate_binding_handler_ptr)( binding_handler* handler_ptr ) + = dummy_deallocate_binding_handler; +static void (*apply_affinity_ptr)( binding_handler* handler_ptr, int slot_num ) + = dummy_apply_affinity; +static void (*restore_affinity_ptr)( binding_handler* handler_ptr, int slot_num ) + = dummy_restore_affinity; +int (*get_default_concurrency_ptr)( int numa_id, int core_type_id, int max_threads_per_core ) + = dummy_get_default_concurrency; + +#if _WIN32 || _WIN64 || __unix__ || __APPLE__ + +// Table describing how to link the handlers. +static const dynamic_link_descriptor TbbBindLinkTable[] = { + DLD(__TBB_internal_initialize_system_topology, initialize_system_topology_ptr), + DLD(__TBB_internal_destroy_system_topology, destroy_system_topology_ptr), +#if __TBB_CPUBIND_PRESENT + DLD(__TBB_internal_allocate_binding_handler, allocate_binding_handler_ptr), + DLD(__TBB_internal_deallocate_binding_handler, deallocate_binding_handler_ptr), + DLD(__TBB_internal_apply_affinity, apply_affinity_ptr), + DLD(__TBB_internal_restore_affinity, restore_affinity_ptr), +#endif + DLD(__TBB_internal_get_default_concurrency, get_default_concurrency_ptr) +}; -using namespace internal; +static const unsigned LinkTableSize = sizeof(TbbBindLinkTable) / sizeof(dynamic_link_descriptor); -/** Left out-of-line for the sake of the backward binary compatibility **/ -void task_scheduler_init::initialize( int number_of_threads ) { - initialize( number_of_threads, 0 ); +#if TBB_USE_DEBUG +#define DEBUG_SUFFIX "_debug" +#else +#define DEBUG_SUFFIX +#endif /* TBB_USE_DEBUG */ + +#if _WIN32 || _WIN64 +#define LIBRARY_EXTENSION ".dll" +#define LIBRARY_PREFIX +#elif __APPLE__ +#define LIBRARY_EXTENSION __TBB_STRING(.3.dylib) +#define LIBRARY_PREFIX "lib" +#elif __unix__ +#define LIBRARY_EXTENSION __TBB_STRING(.so.3) +#define LIBRARY_PREFIX "lib" +#endif /* __unix__ */ + +#define TBBBIND_NAME LIBRARY_PREFIX "tbbbind" DEBUG_SUFFIX LIBRARY_EXTENSION +#define TBBBIND_2_0_NAME LIBRARY_PREFIX "tbbbind_2_0" DEBUG_SUFFIX LIBRARY_EXTENSION + +#define TBBBIND_2_5_NAME LIBRARY_PREFIX "tbbbind_2_5" DEBUG_SUFFIX LIBRARY_EXTENSION +#endif /* _WIN32 || _WIN64 || __unix__ */ + +// Representation of system hardware topology information on the TBB side. +// System topology may be initialized by third-party component (e.g. hwloc) +// or just filled in with default stubs. 
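+// Illustrative client-side view (a sketch assuming these exports back the
+// public tbb::info / task_arena::constraints APIs; not code from this file):
+//
+//   std::vector<tbb::numa_node_id> nodes = tbb::info::numa_nodes();
+//   for (tbb::numa_node_id id : nodes) {
+//       // constrain an arena to a single NUMA node
+//       tbb::task_arena arena(tbb::task_arena::constraints{}.set_numa_id(id));
+//   }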
+namespace system_topology {
+
+constexpr int automatic = -1;
+
+static std::atomic<do_once_state> initialization_state;
+
+namespace {
+int numa_nodes_count = 0;
+int* numa_nodes_indexes = nullptr;
+
+int core_types_count = 0;
+int* core_types_indexes = nullptr;
+
+const char* load_tbbbind_shared_object() {
+#if _WIN32 || _WIN64 || __unix__ || __APPLE__
+#if _WIN32 && !_WIN64
+    // For 32-bit Windows applications, process affinity masks can only support up to 32 logical CPUs.
+    SYSTEM_INFO si;
+    GetNativeSystemInfo(&si);
+    if (si.dwNumberOfProcessors > 32) return nullptr;
+#endif /* _WIN32 && !_WIN64 */
+    for (const auto& tbbbind_version : {TBBBIND_2_5_NAME, TBBBIND_2_0_NAME, TBBBIND_NAME}) {
+        if (dynamic_link(tbbbind_version, TbbBindLinkTable, LinkTableSize, nullptr, DYNAMIC_LINK_LOCAL_BINDING)) {
+            return tbbbind_version;
+        }
+    }
+#endif /* _WIN32 || _WIN64 || __unix__ || __APPLE__ */
+    return nullptr;
 }

-void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
-#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
-    uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
+int processor_groups_num() {
+#if _WIN32
+    return NumberOfProcessorGroups();
+#else
+    // Stub to improve code readability by reducing number of the compile-time conditions
+    return 1;
 #endif
-    thread_stack_size &= ~(stack_size_type)propagation_mode_mask;
-    if( number_of_threads!=deferred ) {
-        bool blocking_terminate = false;
-        if (my_scheduler == (scheduler*)wait_workers_in_terminate_flag) {
-            blocking_terminate = true;
-            my_scheduler = NULL;
-        }
-        __TBB_ASSERT( !my_scheduler, "task_scheduler_init already initialized" );
-        __TBB_ASSERT( number_of_threads==-1 || number_of_threads>=1,
-            "number_of_threads for task_scheduler_init must be -1 or positive" );
-        if (blocking_terminate)
-            governor::setBlockingTerminate(this);
-        internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );
-#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
-        if ( s->master_outermost_level() ) {
-            uintptr_t &vt = s->default_context()->my_version_and_traits;
-            uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
-            vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
-                : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
-            // Use least significant bit of the scheduler pointer to store previous mode.
-            // This is necessary when components compiled with different compilers and/or
-            // TBB versions initialize the
-            my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
-        }
-        else
-#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
-            my_scheduler = s;
-    } else {
-        __TBB_ASSERT( !thread_stack_size, "deferred initialization ignores stack size setting" );
+}
+} // internal namespace
+
+// Tries to load the TBBbind library API; on success, gets NUMA topology information
+// from it, otherwise fills the NUMA topology with stubs.
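+// (When only the stubs are available, the topology degenerates to a single NUMA
+// node and a single core type, both reported with index `automatic` (-1), as set
+// up in the fallback branch below.)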
+void initialization_impl() { + governor::one_time_init(); + + if (const char* tbbbind_name = load_tbbbind_shared_object()) { + initialize_system_topology_ptr( + processor_groups_num(), + numa_nodes_count, numa_nodes_indexes, + core_types_count, core_types_indexes + ); + + PrintExtraVersionInfo("TBBBIND", tbbbind_name); + return; } + + static int dummy_index = automatic; + + numa_nodes_count = 1; + numa_nodes_indexes = &dummy_index; + + core_types_count = 1; + core_types_indexes = &dummy_index; + + PrintExtraVersionInfo("TBBBIND", "UNAVAILABLE"); +} + +void initialize() { + atomic_do_once(initialization_impl, initialization_state); +} + +void destroy() { + destroy_system_topology_ptr(); +} +} // namespace system_topology + +binding_handler* construct_binding_handler(int slot_num, int numa_id, int core_type_id, int max_threads_per_core) { + system_topology::initialize(); + return allocate_binding_handler_ptr(slot_num, numa_id, core_type_id, max_threads_per_core); +} + +void destroy_binding_handler(binding_handler* handler_ptr) { + __TBB_ASSERT(deallocate_binding_handler_ptr, "tbbbind loading was not performed"); + deallocate_binding_handler_ptr(handler_ptr); +} + +void apply_affinity_mask(binding_handler* handler_ptr, int slot_index) { + __TBB_ASSERT(slot_index >= 0, "Negative thread index"); + __TBB_ASSERT(apply_affinity_ptr, "tbbbind loading was not performed"); + apply_affinity_ptr(handler_ptr, slot_index); +} + +void restore_affinity_mask(binding_handler* handler_ptr, int slot_index) { + __TBB_ASSERT(slot_index >= 0, "Negative thread index"); + __TBB_ASSERT(restore_affinity_ptr, "tbbbind loading was not performed"); + restore_affinity_ptr(handler_ptr, slot_index); } -void task_scheduler_init::terminate() { -#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS - uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact; - my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact); -#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */ - generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler); - my_scheduler = NULL; - __TBB_ASSERT( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()"); -#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS - if ( s->master_outermost_level() ) { - uintptr_t &vt = s->default_context()->my_version_and_traits; - vt = prev_mode & propagation_mode_exact ? 
vt | task_group_context::exact_exception - : vt & ~task_group_context::exact_exception; +unsigned __TBB_EXPORTED_FUNC numa_node_count() { + system_topology::initialize(); + return system_topology::numa_nodes_count; +} + +void __TBB_EXPORTED_FUNC fill_numa_indices(int* index_array) { + system_topology::initialize(); + std::memcpy(index_array, system_topology::numa_nodes_indexes, system_topology::numa_nodes_count * sizeof(int)); +} + +int __TBB_EXPORTED_FUNC numa_default_concurrency(int node_id) { + if (node_id >= 0) { + system_topology::initialize(); + int result = get_default_concurrency_ptr( + node_id, + /*core_type*/system_topology::automatic, + /*threads_per_core*/system_topology::automatic + ); + if (result > 0) return result; } -#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */ - governor::terminate_scheduler(s, this); + return governor::default_num_threads(); +} + +unsigned __TBB_EXPORTED_FUNC core_type_count(intptr_t /*reserved*/) { + system_topology::initialize(); + return system_topology::core_types_count; } -int task_scheduler_init::default_num_threads() { +void __TBB_EXPORTED_FUNC fill_core_type_indices(int* index_array, intptr_t /*reserved*/) { + system_topology::initialize(); + std::memcpy(index_array, system_topology::core_types_indexes, system_topology::core_types_count * sizeof(int)); +} + +void constraints_assertion(d1::constraints c) { + bool is_topology_initialized = system_topology::initialization_state == do_once_state::initialized; + __TBB_ASSERT_RELEASE(c.max_threads_per_core == system_topology::automatic || c.max_threads_per_core > 0, + "Wrong max_threads_per_core constraints field value."); + + auto numa_nodes_begin = system_topology::numa_nodes_indexes; + auto numa_nodes_end = system_topology::numa_nodes_indexes + system_topology::numa_nodes_count; + __TBB_ASSERT_RELEASE( + c.numa_id == system_topology::automatic || + (is_topology_initialized && std::find(numa_nodes_begin, numa_nodes_end, c.numa_id) != numa_nodes_end), + "The constraints::numa_id value is not known to the library. Use tbb::info::numa_nodes() to get the list of possible values."); + + int* core_types_begin = system_topology::core_types_indexes; + int* core_types_end = system_topology::core_types_indexes + system_topology::core_types_count; + __TBB_ASSERT_RELEASE(c.core_type == system_topology::automatic || + (is_topology_initialized && std::find(core_types_begin, core_types_end, c.core_type) != core_types_end), + "The constraints::core_type value is not known to the library. Use tbb::info::core_types() to get the list of possible values."); +} + +int __TBB_EXPORTED_FUNC constraints_default_concurrency(const d1::constraints& c, intptr_t /*reserved*/) { + constraints_assertion(c); + + if (c.numa_id >= 0 || c.core_type >= 0 || c.max_threads_per_core > 0) { + system_topology::initialize(); + return get_default_concurrency_ptr(c.numa_id, c.core_type, c.max_threads_per_core); + } return governor::default_num_threads(); } +int __TBB_EXPORTED_FUNC constraints_threads_per_core(const d1::constraints&, intptr_t /*reserved*/) { + return system_topology::automatic; +} +#endif /* __TBB_ARENA_BINDING */ + +} // namespace r1 +} // namespace detail } // namespace tbb diff --git a/src/tbb/src/tbb/governor.h b/src/tbb/src/tbb/governor.h index d51cfc589..573443d72 100644 --- a/src/tbb/src/tbb/governor.h +++ b/src/tbb/src/tbb/governor.h @@ -1,43 +1,42 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_governor_H #define _TBB_governor_H -#include "tbb/task_scheduler_init.h" -#include "../rml/include/rml_tbb.h" +#include "rml_tbb.h" -#include "tbb_misc.h" // for AvailableHwConcurrency and ThreadStackSize +#include "misc.h" // for AvailableHwConcurrency #include "tls.h" -#if __TBB_SURVIVE_THREAD_SWITCH -#include "cilk-tbb-interop.h" -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - namespace tbb { -namespace internal { +namespace detail { +namespace r1 { class market; -class generic_scheduler; +class thread_data; class __TBB_InitOnce; +#if __TBB_USE_ITT_NOTIFY +//! Defined in profiling.cpp +extern bool ITT_Present; +#endif + +typedef std::size_t stack_size_type; + //------------------------------------------------------------------------ // Class governor //------------------------------------------------------------------------ @@ -46,27 +45,23 @@ class __TBB_InitOnce; /** It also supports automatic on-demand initialization of the TBB scheduler. The class contains only static data members and methods.*/ class governor { +private: friend class __TBB_InitOnce; - friend class market; + friend class thread_dispatcher; + friend class threading_control_impl; + // TODO: consider using thread_local (measure performance and side effects) //! TLS for scheduler instances associated with individual threads - static basic_tls<generic_scheduler*> theTLS; - - //! Caches the maximal level of parallelism supported by the hardware - static unsigned DefaultNumberOfThreads; + static basic_tls<thread_data*> theTLS; + // TODO (TBB_REVAMP_TODO): reconsider constant names static rml::tbb_factory theRMLServerFactory; static bool UsePrivateRML; - //! Instance of task_scheduler_init that requested blocking termination. 
- static const task_scheduler_init *BlockingTSI; - -#if TBB_USE_ASSERT - static bool IsBlockingTerminationInProgress; -#endif - - static bool is_speculation_enabled; + // Flags for runtime-specific conditions + static cpu_features_type cpu_features; + static bool is_rethrow_broken; //! Create key for thread-local storage and initialize RML. static void acquire_resources (); @@ -76,71 +71,88 @@ class governor { static rml::tbb_server* create_rml_server ( rml::tbb_client& ); - //! The internal routine to undo automatic initialization. - /** The signature is written with void* so that the routine - can be the destructor argument to pthread_key_create. */ - static void auto_terminate(void* scheduler); - public: static unsigned default_num_threads () { - // No memory fence required, because at worst each invoking thread calls AvailableHwConcurrency once. - return DefaultNumberOfThreads ? DefaultNumberOfThreads : - DefaultNumberOfThreads = AvailableHwConcurrency(); + // Caches the maximal level of parallelism supported by the hardware + static unsigned num_threads = AvailableHwConcurrency(); + return num_threads; + } + static std::size_t default_page_size () { + // Caches the size of OS regular memory page + static std::size_t page_size = DefaultSystemPageSize(); + return page_size; } - //! Processes scheduler initialization request (possibly nested) in a master thread + static void one_time_init(); + //! Processes scheduler initialization request (possibly nested) in an external thread /** If necessary creates new instance of arena and/or local scheduler. The auto_init argument specifies if the call is due to automatic initialization. **/ - static generic_scheduler* init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init = false ); - - //! Processes scheduler termination request (possibly nested) in a master thread - static void terminate_scheduler( generic_scheduler* s, const task_scheduler_init *tsi_ptr ); - - //! Register TBB scheduler instance in thread-local storage. - static void sign_on(generic_scheduler* s); + static void init_external_thread(); - //! Unregister TBB scheduler instance from thread-local storage. - static void sign_off(generic_scheduler* s); - - //! Used to check validity of the local scheduler TLS contents. - static bool is_set ( generic_scheduler* s ) { return theTLS.get() == s; } - - //! Temporarily set TLS slot to the given scheduler - static void assume_scheduler( generic_scheduler* s ) { theTLS.set( s ); } + //! The routine to undo automatic initialization. + /** The signature is written with void* so that the routine + can be the destructor argument to pthread_key_create. */ + static void auto_terminate(void* tls); - //! Obtain the thread-local instance of the TBB scheduler. + //! Obtain the thread-local instance of the thread data. /** If the scheduler has not been initialized yet, initialization is done automatically. Note that auto-initialized scheduler instance is destroyed only when its thread terminates. **/ - static generic_scheduler* local_scheduler () { - generic_scheduler* s = theTLS.get(); - return s ? 
s : init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true ); + static thread_data* get_thread_data() { + thread_data* td = theTLS.get(); + if (td) { + return td; + } + init_external_thread(); + td = theTLS.get(); + __TBB_ASSERT(td, nullptr); + return td; + } + + static void set_thread_data(thread_data& td) { + theTLS.set(&td); + } + + static void clear_thread_data() { + theTLS.set(nullptr); } - static generic_scheduler* local_scheduler_if_initialized () { + static thread_data* get_thread_data_if_initialized () { return theTLS.get(); } - //! Undo automatic initialization if necessary; call when a thread exits. - static void terminate_auto_initialized_scheduler() { - auto_terminate( theTLS.get() ); + static bool is_thread_data_set(thread_data* td) { + return theTLS.get() == td; } - static void print_version_info (); + //! Undo automatic initialization if necessary; call when a thread exits. + static void terminate_external_thread() { + auto_terminate(get_thread_data_if_initialized()); + } static void initialize_rml_factory (); - static bool needsWaitWorkers () { return BlockingTSI!=NULL; } + static bool does_client_join_workers (const rml::tbb_client &client); + + static bool speculation_enabled() { return cpu_features.rtm_enabled; } - //! Must be called before init_scheduler - static void setBlockingTerminate(const task_scheduler_init *tsi); +#if __TBB_WAITPKG_INTRINSICS_PRESENT + static bool wait_package_enabled() { return cpu_features.waitpkg_enabled; } +#endif + + static bool hybrid_cpu() { return cpu_features.hybrid; } -#if __TBB_SURVIVE_THREAD_SWITCH - static __cilk_tbb_retcode stack_op_handler( __cilk_tbb_stack_op op, void* ); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - static bool speculation_enabled() { return is_speculation_enabled; } + static bool rethrow_exception_broken() { return is_rethrow_broken; } + static bool is_itt_present() { +#if __TBB_USE_ITT_NOTIFY + return ITT_Present; +#else + return false; +#endif + } }; // class governor -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_governor_H */ diff --git a/src/tbb/src/tbb/ia32-masm/atomic_support.asm b/src/tbb/src/tbb/ia32-masm/atomic_support.asm deleted file mode 100644 index bb7224c72..000000000 --- a/src/tbb/src/tbb/ia32-masm/atomic_support.asm +++ /dev/null @@ -1,188 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. 
This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -.686 -.model flat,c -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd1 -__TBB_machine_fetchadd1: - mov edx,4[esp] - mov eax,8[esp] - lock xadd [edx],al - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore1 -__TBB_machine_fetchstore1: - mov edx,4[esp] - mov eax,8[esp] - lock xchg [edx],al - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp1 -__TBB_machine_cmpswp1: - mov edx,4[esp] - mov ecx,8[esp] - mov eax,12[esp] - lock cmpxchg [edx],cl - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd2 -__TBB_machine_fetchadd2: - mov edx,4[esp] - mov eax,8[esp] - lock xadd [edx],ax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore2 -__TBB_machine_fetchstore2: - mov edx,4[esp] - mov eax,8[esp] - lock xchg [edx],ax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp2 -__TBB_machine_cmpswp2: - mov edx,4[esp] - mov ecx,8[esp] - mov eax,12[esp] - lock cmpxchg [edx],cx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd4 -__TBB_machine_fetchadd4: - mov edx,4[esp] - mov eax,8[esp] - lock xadd [edx],eax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore4 -__TBB_machine_fetchstore4: - mov edx,4[esp] - mov eax,8[esp] - lock xchg [edx],eax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp4 -__TBB_machine_cmpswp4: - mov edx,4[esp] - mov ecx,8[esp] - mov eax,12[esp] - lock cmpxchg [edx],ecx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd8 -__TBB_machine_fetchadd8: - push ebx - push edi - mov edi,12[esp] - mov eax,[edi] - mov edx,4[edi] -__TBB_machine_fetchadd8_loop: - mov ebx,16[esp] - mov ecx,20[esp] - add ebx,eax - adc ecx,edx - lock cmpxchg8b qword ptr [edi] - jnz __TBB_machine_fetchadd8_loop - pop edi - pop ebx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore8 -__TBB_machine_fetchstore8: - push ebx - push edi - mov edi,12[esp] - mov ebx,16[esp] - mov ecx,20[esp] - mov eax,[edi] - mov edx,4[edi] -__TBB_machine_fetchstore8_loop: - lock cmpxchg8b qword ptr [edi] - jnz __TBB_machine_fetchstore8_loop - pop edi - pop ebx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp8 -__TBB_machine_cmpswp8: - push ebx - push edi - mov edi,12[esp] - mov ebx,16[esp] - mov ecx,20[esp] - mov eax,24[esp] - mov edx,28[esp] - lock cmpxchg8b qword ptr [edi] - pop edi - pop ebx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_load8 -__TBB_machine_Load8: - ; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check. - mov ecx,4[esp] - test ecx,7 - jne load_slow - ; Load within a cache line - sub esp,12 - fild qword ptr [ecx] - fistp qword ptr [esp] - mov eax,[esp] - mov edx,4[esp] - add esp,12 - ret -load_slow: - ; Load is misaligned. Use cmpxchg8b. - push ebx - push edi - mov edi,ecx - xor eax,eax - xor ebx,ebx - xor ecx,ecx - xor edx,edx - lock cmpxchg8b qword ptr [edi] - pop edi - pop ebx - ret -EXTRN __TBB_machine_store8_slow:PROC -.code - ALIGN 4 - PUBLIC c __TBB_machine_store8 -__TBB_machine_Store8: - ; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check. 
- mov ecx,4[esp] - test ecx,7 - jne __TBB_machine_store8_slow ;; tail call to tbb_misc.cpp - fild qword ptr 8[esp] - fistp qword ptr [ecx] - ret -end diff --git a/src/tbb/src/tbb/ia32-masm/itsx.asm b/src/tbb/src/tbb/ia32-masm/itsx.asm deleted file mode 100644 index 10a00854b..000000000 --- a/src/tbb/src/tbb/ia32-masm/itsx.asm +++ /dev/null @@ -1,80 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -.686 -.model flat,c -.code - ALIGN 4 - PUBLIC c __TBB_machine_try_lock_elided -__TBB_machine_try_lock_elided: - mov ecx, 4[esp] - xor eax, eax - mov al, 1 - BYTE 0F2H - xchg al, byte ptr [ecx] - xor al, 1 - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_unlock_elided -__TBB_machine_unlock_elided: - mov ecx, 4[esp] - BYTE 0F3H - mov byte ptr [ecx], 0 - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_begin_transaction -__TBB_machine_begin_transaction: - mov eax, -1 - BYTE 0C7H - BYTE 0F8H - BYTE 000H - BYTE 000H - BYTE 000H - BYTE 000H - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_end_transaction -__TBB_machine_end_transaction: - BYTE 00FH - BYTE 001H - BYTE 0D5H - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_transaction_conflict_abort -__TBB_machine_transaction_conflict_abort: - BYTE 0C6H - BYTE 0F8H - BYTE 0FFH ; 12.4.5 Abort argument: lock not free when tested - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_is_in_transaction -__TBB_machine_is_in_transaction: - xor eax, eax - BYTE 00FH - BYTE 001H - BYTE 0D6H - JZ rset - MOV al,1 -rset: - RET -end diff --git a/src/tbb/src/tbb/ia32-masm/lock_byte.asm b/src/tbb/src/tbb/ia32-masm/lock_byte.asm deleted file mode 100644 index 9acd675cd..000000000 --- a/src/tbb/src/tbb/ia32-masm/lock_byte.asm +++ /dev/null @@ -1,38 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE -.686 -.model flat,c -.code - ALIGN 4 - PUBLIC c __TBB_machine_trylockbyte -__TBB_machine_trylockbyte: - mov edx,4[esp] - mov al,[edx] - mov cl,1 - test al,1 - jnz __TBB_machine_trylockbyte_contended - lock cmpxchg [edx],cl - jne __TBB_machine_trylockbyte_contended - mov eax,1 - ret -__TBB_machine_trylockbyte_contended: - xor eax,eax - ret -end diff --git a/src/tbb/src/tbb/ia64-gas/atomic_support.s b/src/tbb/src/tbb/ia64-gas/atomic_support.s deleted file mode 100644 index e01044d2b..000000000 --- a/src/tbb/src/tbb/ia64-gas/atomic_support.s +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. 
- -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd1__TBB_full_fence# - .global __TBB_machine_fetchadd1__TBB_full_fence# -__TBB_machine_fetchadd1__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd1acquire -} - .endp __TBB_machine_fetchadd1__TBB_full_fence# - - .proc __TBB_machine_fetchadd1acquire# - .global __TBB_machine_fetchadd1acquire# -__TBB_machine_fetchadd1acquire: - - - - - - - - ld1 r9=[r32] -;; -Retry_1acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg1.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_1acquire - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd1acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore1__TBB_full_fence# - .global __TBB_machine_fetchstore1__TBB_full_fence# -__TBB_machine_fetchstore1__TBB_full_fence: - mf -;; - xchg1 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore1__TBB_full_fence# - - - .proc __TBB_machine_fetchstore1acquire# - .global __TBB_machine_fetchstore1acquire# -__TBB_machine_fetchstore1acquire: - xchg1 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore1acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp1__TBB_full_fence# - .global __TBB_machine_cmpswp1__TBB_full_fence# -__TBB_machine_cmpswp1__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp1acquire -} - .endp __TBB_machine_cmpswp1__TBB_full_fence# - - .proc __TBB_machine_cmpswp1acquire# - .global __TBB_machine_cmpswp1acquire# -__TBB_machine_cmpswp1acquire: - - zxt1 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg1.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp1acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd2__TBB_full_fence# - .global __TBB_machine_fetchadd2__TBB_full_fence# -__TBB_machine_fetchadd2__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd2acquire -} - .endp __TBB_machine_fetchadd2__TBB_full_fence# - - .proc __TBB_machine_fetchadd2acquire# - .global __TBB_machine_fetchadd2acquire# -__TBB_machine_fetchadd2acquire: - - - - - - - - ld2 r9=[r32] -;; -Retry_2acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg2.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_2acquire - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd2acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore2__TBB_full_fence# - .global __TBB_machine_fetchstore2__TBB_full_fence# -__TBB_machine_fetchstore2__TBB_full_fence: - mf -;; - xchg2 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore2__TBB_full_fence# - - - .proc __TBB_machine_fetchstore2acquire# - .global __TBB_machine_fetchstore2acquire# -__TBB_machine_fetchstore2acquire: - xchg2 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore2acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp2__TBB_full_fence# - .global __TBB_machine_cmpswp2__TBB_full_fence# -__TBB_machine_cmpswp2__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp2acquire -} - .endp __TBB_machine_cmpswp2__TBB_full_fence# - - .proc __TBB_machine_cmpswp2acquire# - .global __TBB_machine_cmpswp2acquire# 
-__TBB_machine_cmpswp2acquire: - - zxt2 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg2.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp2acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd4__TBB_full_fence# - .global __TBB_machine_fetchadd4__TBB_full_fence# -__TBB_machine_fetchadd4__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd4acquire -} - .endp __TBB_machine_fetchadd4__TBB_full_fence# - - .proc __TBB_machine_fetchadd4acquire# - .global __TBB_machine_fetchadd4acquire# -__TBB_machine_fetchadd4acquire: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_4acquire - (p8) br.cond.dpnt Dec_4acquire -;; - - ld4 r9=[r32] -;; -Retry_4acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg4.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_4acquire - br.ret.sptk.many b0 - -Inc_4acquire: - fetchadd4.acq r8=[r32],1 - br.ret.sptk.many b0 -Dec_4acquire: - fetchadd4.acq r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd4acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore4__TBB_full_fence# - .global __TBB_machine_fetchstore4__TBB_full_fence# -__TBB_machine_fetchstore4__TBB_full_fence: - mf -;; - xchg4 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore4__TBB_full_fence# - - - .proc __TBB_machine_fetchstore4acquire# - .global __TBB_machine_fetchstore4acquire# -__TBB_machine_fetchstore4acquire: - xchg4 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore4acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp4__TBB_full_fence# - .global __TBB_machine_cmpswp4__TBB_full_fence# -__TBB_machine_cmpswp4__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp4acquire -} - .endp __TBB_machine_cmpswp4__TBB_full_fence# - - .proc __TBB_machine_cmpswp4acquire# - .global __TBB_machine_cmpswp4acquire# -__TBB_machine_cmpswp4acquire: - - zxt4 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg4.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp4acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd8__TBB_full_fence# - .global __TBB_machine_fetchadd8__TBB_full_fence# -__TBB_machine_fetchadd8__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd8acquire -} - .endp __TBB_machine_fetchadd8__TBB_full_fence# - - .proc __TBB_machine_fetchadd8acquire# - .global __TBB_machine_fetchadd8acquire# -__TBB_machine_fetchadd8acquire: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_8acquire - (p8) br.cond.dpnt Dec_8acquire -;; - - ld8 r9=[r32] -;; -Retry_8acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg8.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_8acquire - br.ret.sptk.many b0 - -Inc_8acquire: - fetchadd8.acq r8=[r32],1 - br.ret.sptk.many b0 -Dec_8acquire: - fetchadd8.acq r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd8acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore8__TBB_full_fence# - .global __TBB_machine_fetchstore8__TBB_full_fence# -__TBB_machine_fetchstore8__TBB_full_fence: - mf -;; - xchg8 r8=[r32],r33 - br.ret.sptk.many b0 - .endp 
__TBB_machine_fetchstore8__TBB_full_fence# - - - .proc __TBB_machine_fetchstore8acquire# - .global __TBB_machine_fetchstore8acquire# -__TBB_machine_fetchstore8acquire: - xchg8 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore8acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp8__TBB_full_fence# - .global __TBB_machine_cmpswp8__TBB_full_fence# -__TBB_machine_cmpswp8__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp8acquire -} - .endp __TBB_machine_cmpswp8__TBB_full_fence# - - .proc __TBB_machine_cmpswp8acquire# - .global __TBB_machine_cmpswp8acquire# -__TBB_machine_cmpswp8acquire: - - - - - mov ar.ccv=r34 -;; - cmpxchg8.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp8acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd1release# - .global __TBB_machine_fetchadd1release# -__TBB_machine_fetchadd1release: - - - - - - - - ld1 r9=[r32] -;; -Retry_1release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg1.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_1release - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd1release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore1release# - .global __TBB_machine_fetchstore1release# -__TBB_machine_fetchstore1release: - mf -;; - xchg1 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore1release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp1release# - .global __TBB_machine_cmpswp1release# -__TBB_machine_cmpswp1release: - - zxt1 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg1.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp1release# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd2release# - .global __TBB_machine_fetchadd2release# -__TBB_machine_fetchadd2release: - - - - - - - - ld2 r9=[r32] -;; -Retry_2release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg2.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_2release - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd2release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore2release# - .global __TBB_machine_fetchstore2release# -__TBB_machine_fetchstore2release: - mf -;; - xchg2 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore2release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp2release# - .global __TBB_machine_cmpswp2release# -__TBB_machine_cmpswp2release: - - zxt2 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg2.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp2release# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd4release# - .global __TBB_machine_fetchadd4release# -__TBB_machine_fetchadd4release: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_4release - (p8) br.cond.dpnt Dec_4release -;; - - ld4 
r9=[r32] -;; -Retry_4release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg4.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_4release - br.ret.sptk.many b0 - -Inc_4release: - fetchadd4.rel r8=[r32],1 - br.ret.sptk.many b0 -Dec_4release: - fetchadd4.rel r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd4release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore4release# - .global __TBB_machine_fetchstore4release# -__TBB_machine_fetchstore4release: - mf -;; - xchg4 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore4release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp4release# - .global __TBB_machine_cmpswp4release# -__TBB_machine_cmpswp4release: - - zxt4 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg4.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp4release# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd8release# - .global __TBB_machine_fetchadd8release# -__TBB_machine_fetchadd8release: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_8release - (p8) br.cond.dpnt Dec_8release -;; - - ld8 r9=[r32] -;; -Retry_8release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg8.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_8release - br.ret.sptk.many b0 - -Inc_8release: - fetchadd8.rel r8=[r32],1 - br.ret.sptk.many b0 -Dec_8release: - fetchadd8.rel r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd8release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore8release# - .global __TBB_machine_fetchstore8release# -__TBB_machine_fetchstore8release: - mf -;; - xchg8 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore8release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp8release# - .global __TBB_machine_cmpswp8release# -__TBB_machine_cmpswp8release: - - - - - mov ar.ccv=r34 -;; - cmpxchg8.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp8release# diff --git a/src/tbb/src/tbb/ia64-gas/ia64_misc.s b/src/tbb/src/tbb/ia64-gas/ia64_misc.s deleted file mode 100644 index 22ecfee30..000000000 --- a/src/tbb/src/tbb/ia64-gas/ia64_misc.s +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. 
Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - - // RSE backing store pointer retrieval - .section .text - .align 16 - .proc __TBB_get_bsp# - .global __TBB_get_bsp# -__TBB_get_bsp: - mov r8=ar.bsp - br.ret.sptk.many b0 - .endp __TBB_get_bsp# - - .section .text - .align 16 - .proc __TBB_machine_load8_relaxed# - .global __TBB_machine_load8_relaxed# -__TBB_machine_load8_relaxed: - ld8 r8=[r32] - br.ret.sptk.many b0 - .endp __TBB_machine_load8_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_store8_relaxed# - .global __TBB_machine_store8_relaxed# -__TBB_machine_store8_relaxed: - st8 [r32]=r33 - br.ret.sptk.many b0 - .endp __TBB_machine_store8_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_load4_relaxed# - .global __TBB_machine_load4_relaxed# -__TBB_machine_load4_relaxed: - ld4 r8=[r32] - br.ret.sptk.many b0 - .endp __TBB_machine_load4_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_store4_relaxed# - .global __TBB_machine_store4_relaxed# -__TBB_machine_store4_relaxed: - st4 [r32]=r33 - br.ret.sptk.many b0 - .endp __TBB_machine_store4_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_load2_relaxed# - .global __TBB_machine_load2_relaxed# -__TBB_machine_load2_relaxed: - ld2 r8=[r32] - br.ret.sptk.many b0 - .endp __TBB_machine_load2_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_store2_relaxed# - .global __TBB_machine_store2_relaxed# -__TBB_machine_store2_relaxed: - st2 [r32]=r33 - br.ret.sptk.many b0 - .endp __TBB_machine_store2_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_load1_relaxed# - .global __TBB_machine_load1_relaxed# -__TBB_machine_load1_relaxed: - ld1 r8=[r32] - br.ret.sptk.many b0 - .endp __TBB_machine_load1_relaxed# - - .section .text - .align 16 - .proc __TBB_machine_store1_relaxed# - .global __TBB_machine_store1_relaxed# -__TBB_machine_store1_relaxed: - st1 [r32]=r33 - br.ret.sptk.many b0 - .endp __TBB_machine_store1_relaxed# diff --git a/src/tbb/src/tbb/ia64-gas/lock_byte.s b/src/tbb/src/tbb/ia64-gas/lock_byte.s deleted file mode 100644 index 7b78a5a7a..000000000 --- a/src/tbb/src/tbb/ia64-gas/lock_byte.s +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. 
Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - - // Support for class TinyLock - .section .text - .align 16 - // unsigned int __TBB_machine_trylockbyte( byte& flag ); - // r32 = address of flag - .proc __TBB_machine_trylockbyte# - .global __TBB_machine_trylockbyte# -ADDRESS_OF_FLAG=r32 -RETCODE=r8 -FLAG=r9 -BUSY=r10 -SCRATCH=r11 -__TBB_machine_trylockbyte: - ld1.acq FLAG=[ADDRESS_OF_FLAG] - mov BUSY=1 - mov RETCODE=0 -;; - cmp.ne p6,p0=0,FLAG - mov ar.ccv=r0 -(p6) br.ret.sptk.many b0 -;; - cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock -;; - cmp.eq p6,p0=0,SCRATCH -;; -(p6) mov RETCODE=1 - br.ret.sptk.many b0 - .endp __TBB_machine_trylockbyte# diff --git a/src/tbb/src/tbb/ia64-gas/log2.s b/src/tbb/src/tbb/ia64-gas/log2.s deleted file mode 100644 index 742c0a2aa..000000000 --- a/src/tbb/src/tbb/ia64-gas/log2.s +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. 
- - .section .text - .align 16 - // unsigned long __TBB_machine_lg( unsigned long x ); - // r32 = x - .proc __TBB_machine_lg# - .global __TBB_machine_lg# -__TBB_machine_lg: - shr r16=r32,1 // .x -;; - shr r17=r32,2 // ..x - or r32=r32,r16 // xx -;; - shr r16=r32,3 // ...xx - or r32=r32,r17 // xxx -;; - shr r17=r32,5 // .....xxx - or r32=r32,r16 // xxxxx -;; - shr r16=r32,8 // ........xxxxx - or r32=r32,r17 // xxxxxxxx -;; - shr r17=r32,13 - or r32=r32,r16 // 13x -;; - shr r16=r32,21 - or r32=r32,r17 // 21x -;; - shr r17=r32,34 - or r32=r32,r16 // 34x -;; - shr r16=r32,55 - or r32=r32,r17 // 55x -;; - or r32=r32,r16 // 64x -;; - popcnt r8=r32 -;; - add r8=-1,r8 - br.ret.sptk.many b0 - .endp __TBB_machine_lg# diff --git a/src/tbb/src/tbb/ia64-gas/pause.s b/src/tbb/src/tbb/ia64-gas/pause.s deleted file mode 100644 index b48149581..000000000 --- a/src/tbb/src/tbb/ia64-gas/pause.s +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - - .section .text - .align 16 - // void __TBB_machine_pause( long count ); - // r32 = count - .proc __TBB_machine_pause# - .global __TBB_machine_pause# -count = r32 -__TBB_machine_pause: - hint.m 0 - add count=-1,count -;; - cmp.eq p6,p7=0,count -(p7) br.cond.dpnt __TBB_machine_pause -(p6) br.ret.sptk.many b0 - .endp __TBB_machine_pause# diff --git a/src/tbb/src/tbb/ibm_aix51/atomic_support.c b/src/tbb/src/tbb/ibm_aix51/atomic_support.c deleted file mode 100644 index 3cba3f0d0..000000000 --- a/src/tbb/src/tbb/ibm_aix51/atomic_support.c +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <stdint.h> -#include <sys/atomic_op.h> - -/* This file must be compiled with gcc. The IBM compiler doesn't seem to - support inline assembly statements (October 2007). */ - -#ifdef __GNUC__ - -int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand) { - __asm__ __volatile__ ("sync\n"); /* memory release operation */ - compare_and_swap ((atomic_p) ptr, &comparand, value); - __asm__ __volatile__ ("isync\n"); /* memory acquire operation */ - return comparand; -} - -int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand) { - __asm__ __volatile__ ("sync\n"); /* memory release operation */ - compare_and_swaplp ((atomic_l) ptr, &comparand, value); - __asm__ __volatile__ ("isync\n"); /* memory acquire operation */ - return comparand; -} - -void __TBB_machine_flush () { - __asm__ __volatile__ ("sync\n"); -} - -void __TBB_machine_lwsync () { - __asm__ __volatile__ ("lwsync\n"); -} - -void __TBB_machine_isync () { - __asm__ __volatile__ ("isync\n"); -} - -#endif /* __GNUC__ */ diff --git a/src/tbb/src/tbb/index.html b/src/tbb/src/tbb/index.html deleted file mode 100644 index 2280d6ef4..000000000 --- a/src/tbb/src/tbb/index.html +++ /dev/null @@ -1,31 +0,0 @@ -<HTML> -<BODY> - -<H2>Overview</H2> -This directory contains the source code of the TBB core components. - -<H2>Directories</H2> -<DL> -<DT><A HREF="tools_api">tools_api</A> -<DD>Source code of the interface components provided by the Intel® Parallel Studio tools. -<DT><A HREF="intel64-masm">intel64-masm</A> -<DD>Assembly code for the Intel® 64 architecture. -<DT><A HREF="ia32-masm">ia32-masm</A> -<DD>Assembly code for IA32 architecture. -<DT><A HREF="ia64-gas">ia64-gas</A> -<DD>Assembly code for IA-64 architecture. -<DT><A HREF="ibm_aix51">ibm_aix51</A> -<DD>Assembly code for AIX 5.1 port. -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2014 Intel Corporation. All Rights Reserved. -<P></P> -Intel is a registered trademark or trademark of Intel Corporation -or its subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> diff --git a/src/tbb/src/tbb/intel64-masm/atomic_support.asm b/src/tbb/src/tbb/intel64-masm/atomic_support.asm deleted file mode 100644 index 33f2bc9b2..000000000 --- a/src/tbb/src/tbb/intel64-masm/atomic_support.asm +++ /dev/null @@ -1,72 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. 
Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchadd1 -__TBB_machine_fetchadd1: - mov rax,rdx - lock xadd [rcx],al - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchstore1 -__TBB_machine_fetchstore1: - mov rax,rdx - lock xchg [rcx],al - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_cmpswp1 -__TBB_machine_cmpswp1: - mov rax,r8 - lock cmpxchg [rcx],dl - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchadd2 -__TBB_machine_fetchadd2: - mov rax,rdx - lock xadd [rcx],ax - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchstore2 -__TBB_machine_fetchstore2: - mov rax,rdx - lock xchg [rcx],ax - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_cmpswp2 -__TBB_machine_cmpswp2: - mov rax,r8 - lock cmpxchg [rcx],dx - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_pause -__TBB_machine_pause: -L1: - dw 090f3H; pause - add ecx,-1 - jne L1 - ret -end - diff --git a/src/tbb/src/tbb/intel64-masm/intel64_misc.asm b/src/tbb/src/tbb/intel64-masm/intel64_misc.asm deleted file mode 100644 index ef1f2436b..000000000 --- a/src/tbb/src/tbb/intel64-masm/intel64_misc.asm +++ /dev/null @@ -1,33 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. 
- -.code - ALIGN 8 - PUBLIC __TBB_get_cpu_ctl_env -__TBB_get_cpu_ctl_env: - stmxcsr [rcx] - fstcw [rcx+4] - ret -.code - ALIGN 8 - PUBLIC __TBB_set_cpu_ctl_env -__TBB_set_cpu_ctl_env: - ldmxcsr [rcx] - fldcw [rcx+4] - ret -end diff --git a/src/tbb/src/tbb/intel64-masm/itsx.asm b/src/tbb/src/tbb/intel64-masm/itsx.asm deleted file mode 100644 index c3bf2f24e..000000000 --- a/src/tbb/src/tbb/intel64-masm/itsx.asm +++ /dev/null @@ -1,76 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -.code - ALIGN 8 - PUBLIC __TBB_machine_try_lock_elided -__TBB_machine_try_lock_elided: - xor rax, rax - mov al, 1 - BYTE 0F2H - xchg al, byte ptr [rcx] - xor al, 1 - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_unlock_elided -__TBB_machine_unlock_elided: - BYTE 0F3H - mov byte ptr [rcx], 0 - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_begin_transaction -__TBB_machine_begin_transaction: - mov eax, -1 - BYTE 0C7H - BYTE 0F8H - BYTE 000H - BYTE 000H - BYTE 000H - BYTE 000H - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_end_transaction -__TBB_machine_end_transaction: - BYTE 00FH - BYTE 001H - BYTE 0D5H - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_transaction_conflict_abort -__TBB_machine_transaction_conflict_abort: - BYTE 0C6H - BYTE 0F8H - BYTE 0FFH ; 12.4.5 Abort argument: lock not free when tested - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_is_in_transaction -__TBB_machine_is_in_transaction: - xor eax, eax - BYTE 00FH ; _xtest sets or clears ZF - BYTE 001H - BYTE 0D6H - jz rset - mov al,1 -rset: - ret -end diff --git a/src/tbb/src/tbb/intrusive_list.h b/src/tbb/src/tbb/intrusive_list.h index 78f625e0c..d317f5554 100644 --- a/src/tbb/src/tbb/intrusive_list.h +++ b/src/tbb/src/tbb/intrusive_list.h @@ -1,43 +1,29 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_intrusive_list_H #define _TBB_intrusive_list_H -#include "tbb/tbb_stddef.h" +#include "oneapi/tbb/detail/_intrusive_list_node.h" namespace tbb { -namespace internal { - -//! Data structure to be inherited by the types that can form intrusive lists. -/** Intrusive list is formed by means of the member_intrusive_list<T> template class. - Note that type T must derive from intrusive_list_node either publicly or - declare instantiation member_intrusive_list<T> as a friend. - This class implements a limited subset of std::list interface. **/ -struct intrusive_list_node { - intrusive_list_node *my_prev_node, - *my_next_node; -#if TBB_USE_ASSERT - intrusive_list_node () { my_prev_node = my_next_node = this; } -#endif /* TBB_USE_ASSERT */ -}; +namespace detail { +namespace r1 { + +using d1::intrusive_list_node; //! List of element of type T, where T is derived from intrusive_list_node /** The class is not thread safe. **/ @@ -47,75 +33,76 @@ class intrusive_list_base { intrusive_list_node my_head; //! Number of list elements - size_t my_size; + std::size_t my_size; static intrusive_list_node& node ( T& item ) { return List::node(item); } static T& item ( intrusive_list_node* node ) { return List::item(node); } - template<class Iterator> - class iterator_impl { - Iterator& self () { return *static_cast<Iterator*>(this); } - - //! 
Node the iterator points to at the moment - intrusive_list_node *my_pos; + static const T& item( const intrusive_list_node* node ) { return List::item(node); } - protected: - iterator_impl (intrusive_list_node* pos ) - : my_pos(pos) - {} + template <typename DereferenceType> + class iterator_impl { + static_assert(std::is_same<DereferenceType, T>::value || + std::is_same<DereferenceType, const T>::value, + "Incorrect DereferenceType in iterator_impl"); - T& item () const { - return intrusive_list_base::item(my_pos); - } + using pointer_type = typename std::conditional<std::is_same<DereferenceType, T>::value, + intrusive_list_node*, + const intrusive_list_node*>::type; public: - iterator_impl () : my_pos(NULL) {} + iterator_impl() : my_pos(nullptr) {} - Iterator& operator = ( const Iterator& it ) { - return my_pos = it.my_pos; + iterator_impl( pointer_type pos ) : my_pos(pos) {} + + iterator_impl& operator++() { + my_pos = my_pos->my_next_node; + return *this; } - Iterator& operator = ( const T& val ) { - return my_pos = &node(val); + iterator_impl operator++( int ) { + iterator_impl it(*this); + ++*this; + return it; } - bool operator == ( const Iterator& it ) const { - return my_pos == it.my_pos; + iterator_impl& operator--() { + my_pos = my_pos->my_prev_node; + return *this; } - bool operator != ( const Iterator& it ) const { - return my_pos != it.my_pos; + iterator_impl operator--( int ) { + iterator_impl it(*this); + --*this; + return it; } - Iterator& operator++ () { - my_pos = my_pos->my_next_node; - return self(); + bool operator==( const iterator_impl& rhs ) const { + return my_pos == rhs.my_pos; } - Iterator& operator-- () { - my_pos = my_pos->my_prev_node; - return self(); + bool operator!=( const iterator_impl& rhs ) const { + return my_pos != rhs.my_pos; } - Iterator operator++ ( int ) { - Iterator result = self(); - ++(*this); - return result; + DereferenceType& operator*() const { + return intrusive_list_base::item(my_pos); } - Iterator operator-- ( int ) { - Iterator result = self(); - --(*this); - return result; + DereferenceType* operator->() const { + return &intrusive_list_base::item(my_pos); } - }; // intrusive_list_base::iterator_impl + private: + // Node the iterator points to at the moment + pointer_type my_pos; + }; // class iterator_impl void assert_ok () const { - __TBB_ASSERT( (my_head.my_prev_node == &my_head && !my_size) || + __TBB_ASSERT( (my_head.my_prev_node == &my_head && !my_size) || (my_head.my_next_node != &my_head && my_size >0), "intrusive_list_base corrupted" ); #if TBB_USE_ASSERT >= 2 - size_t i = 0; + std::size_t i = 0; for ( intrusive_list_node *n = my_head.my_next_node; n != &my_head; n = n->my_next_node ) ++i; __TBB_ASSERT( my_size == i, "Wrong size" ); @@ -123,31 +110,8 @@ class intrusive_list_base { } public: - class iterator : public iterator_impl<iterator> { - template <class U, class V> friend class intrusive_list_base; - public: - iterator (intrusive_list_node* pos ) - : iterator_impl<iterator>(pos ) - {} - iterator () {} - - T* operator-> () const { return &this->item(); } - - T& operator* () const { return this->item(); } - }; // class iterator - - class const_iterator : public iterator_impl<const_iterator> { - template <class U, class V> friend class intrusive_list_base; - public: - const_iterator (const intrusive_list_node* pos ) - : iterator_impl<const_iterator>(const_cast<intrusive_list_node*>(pos) ) - {} - const_iterator () {} - - const T* operator-> () const { return &this->item(); } - - const T& operator* () const { return 
this->item(); } - }; // class iterator + using iterator = iterator_impl<T>; + using const_iterator = iterator_impl<const T>; intrusive_list_base () : my_size(0) { my_head.my_prev_node = &my_head; @@ -156,7 +120,7 @@ class intrusive_list_base { bool empty () const { return my_head.my_next_node == &my_head; } - size_t size () const { return my_size; } + std::size_t size () const { return my_size; } iterator begin () { return iterator(my_head.my_next_node); } @@ -167,9 +131,9 @@ class intrusive_list_base { const_iterator end () const { return const_iterator(&my_head); } void push_front ( T& val ) { - __TBB_ASSERT( node(val).my_prev_node == &node(val) && node(val).my_next_node == &node(val), + __TBB_ASSERT( node(val).my_prev_node == &node(val) && node(val).my_next_node == &node(val), "Object with intrusive list node can be part of only one intrusive list simultaneously" ); - // An object can be part of only one intrusive list at the given moment via the given node member + // An object can be part of only one intrusive list at the given moment via the given node member node(val).my_prev_node = &my_head; node(val).my_next_node = my_head.my_next_node; my_head.my_next_node->my_prev_node = &node(val); @@ -199,13 +163,28 @@ class intrusive_list_base { }; // intrusive_list_base +#if __TBB_TODO +// With standard compliant compilers memptr_intrusive_list could be named simply intrusive_list, +// and inheritance based intrusive_list version would become its partial specialization. +// Here are the corresponding declarations: + +struct dummy_intrusive_list_item { intrusive_list_node my_node; }; + +template <class T, class U = dummy_intrusive_list_item, intrusive_list_node U::*NodePtr = &dummy_intrusive_list_item::my_node> +class intrusive_list : public intrusive_list_base<intrusive_list<T, U, NodePtr>, T>; + +template <class T> +class intrusive_list<T, dummy_intrusive_list_item, &dummy_intrusive_list_item::my_node> + : public intrusive_list_base<intrusive_list<T>, T>; + +#endif /* __TBB_TODO */ //! Double linked list of items of type T containing a member of type intrusive_list_node. -/** NodePtr is a member pointer to the node data field. Class U is either T or +/** NodePtr is a member pointer to the node data field. Class U is either T or a base class of T containing the node member. Default values exist for the sake of a partial specialization working with inheritance case. - The list does not have ownership of its items. Its purpose is to avoid dynamic + The list does not have ownership of its items. Its purpose is to avoid dynamic memory allocation when forming lists of existing objects. The class is not thread safe. **/ @@ -217,16 +196,21 @@ class memptr_intrusive_list : public intrusive_list_base<memptr_intrusive_list<T static intrusive_list_node& node ( T& val ) { return val.*NodePtr; } static T& item ( intrusive_list_node* node ) { - // Cannot use __TBB_offsetof (and consequently __TBB_get_object_ref) macro + // Cannot use __TBB_offsetof (and consequently __TBB_get_object_ref) macro // with *NodePtr argument because gcc refuses to interpret pasted "->" and "*" - // as member pointer dereferencing operator, and explicit usage of ## in + // as member pointer dereferencing operator, and explicit usage of ## in // __TBB_offsetof implementation breaks operations with normal member names. 
return *reinterpret_cast<T*>((char*)node - ((ptrdiff_t)&(reinterpret_cast<T*>(0x1000)->*NodePtr) - 0x1000)); } + + static const T& item( const intrusive_list_node* node ) { + return item(const_cast<intrusive_list_node*>(node)); + } + }; // intrusive_list<T, U, NodePtr> //! Double linked list of items of type T that is derived from intrusive_list_node class. -/** The list does not have ownership of its items. Its purpose is to avoid dynamic +/** The list does not have ownership of its items. Its purpose is to avoid dynamic memory allocation when forming lists of existing objects. The class is not thread safe. **/ @@ -238,9 +222,12 @@ class intrusive_list : public intrusive_list_base<intrusive_list<T>, T> static intrusive_list_node& node ( T& val ) { return val; } static T& item ( intrusive_list_node* node ) { return *static_cast<T*>(node); } + + static const T& item( const intrusive_list_node* node ) { return *static_cast<const T*>(node); } }; // intrusive_list<T> -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_intrusive_list_H */ diff --git a/src/tbb/src/tbb/itt_notify.cpp b/src/tbb/src/tbb/itt_notify.cpp index 26ce2081e..eda5e6ad5 100644 --- a/src/tbb/src/tbb/itt_notify.cpp +++ b/src/tbb/src/tbb/itt_notify.cpp @@ -1,24 +1,20 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2022 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ -#if DO_ITT_NOTIFY +#if __TBB_USE_ITT_NOTIFY #if _WIN32||_WIN64 #ifndef UNICODE @@ -47,46 +43,27 @@ extern "C" void MallocInitializeITT(); #include "tools_api/ittnotify_static.c" namespace tbb { -namespace internal { +namespace detail { +namespace r1 { + +/** This extra proxy method is necessary since __itt_init_lib is declared as static **/ int __TBB_load_ittnotify() { - return __itt_init_ittlib(NULL, // groups for: +#if !(_WIN32||_WIN64) + // tool_api crashes without dlopen, check that it's present. Common case + // for lack of dlopen is static binaries, i.e. ones build with -static. + if (dlopen == nullptr) + return 0; +#endif + return __itt_init_ittlib(nullptr, // groups for: (__itt_group_id)(__itt_group_sync // prepare/cancel/acquired/releasing | __itt_group_thread // name threads | __itt_group_stitch // stack stitching -#if __TBB_CPF_BUILD | __itt_group_structure -#endif )); } -}} // namespaces - -#endif /* DO_ITT_NOTIFY */ - -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#include "itt_notify.h" - -namespace tbb { - -#if DO_ITT_NOTIFY - const tchar - *SyncType_GlobalLock = _T("TbbGlobalLock"), - *SyncType_Scheduler = _T("%Constant") - ; - const tchar - *SyncObj_SchedulerInitialization = _T("TbbSchedulerInitialization"), - *SyncObj_SchedulersList = _T("TbbSchedulersList"), - *SyncObj_WorkerLifeCycleMgmt = _T("TBB Scheduler"), - *SyncObj_TaskStealingLoop = _T("TBB Scheduler"), - *SyncObj_WorkerTaskPool = _T("TBB Scheduler"), - *SyncObj_MasterTaskPool = _T("TBB Scheduler"), - *SyncObj_TaskPoolSpinning = _T("TBB Scheduler"), - *SyncObj_Mailbox = _T("TBB Scheduler"), - *SyncObj_TaskReturnList = _T("TBB Scheduler"), - *SyncObj_TaskStream = _T("TBB Scheduler"), - *SyncObj_ContextsList = _T("TBB Scheduler") - ; -#endif /* DO_ITT_NOTIFY */ - +} //namespace r1 +} //namespace detail } // namespace tbb +#endif /* __TBB_USE_ITT_NOTIFY */ diff --git a/src/tbb/src/tbb/itt_notify.h b/src/tbb/src/tbb/itt_notify.h index 37fe5c421..48ddc5cae 100644 --- a/src/tbb/src/tbb/itt_notify.h +++ b/src/tbb/src/tbb/itt_notify.h @@ -1,29 +1,25 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_ITT_NOTIFY #define _TBB_ITT_NOTIFY -#include "tbb/tbb_stddef.h" +#include "oneapi/tbb/detail/_config.h" -#if DO_ITT_NOTIFY +#if __TBB_USE_ITT_NOTIFY #if _WIN32||_WIN64 #ifndef UNICODE @@ -38,91 +34,84 @@ #include "tools_api/ittnotify.h" #include "tools_api/legacy/ittnotify.h" extern "C" void __itt_fini_ittlib(void); +extern "C" void __itt_release_resources(void); #if _WIN32||_WIN64 #undef _T - #undef __itt_event_create - #define __itt_event_create __itt_event_createA #endif /* WIN */ - -#endif /* DO_ITT_NOTIFY */ +#endif /* __TBB_USE_ITT_NOTIFY */ #if !ITT_CALLER_NULL #define ITT_CALLER_NULL ((__itt_caller)0) #endif namespace tbb { +namespace detail { +namespace r1 { + //! Unicode support -#if (_WIN32||_WIN64) && !__MINGW32__ +#if (_WIN32||_WIN64) //! Unicode character type. Always wchar_t on Windows. /** We do not use typedefs from Windows TCHAR family to keep consistence of TBB coding style. **/ - typedef wchar_t tchar; - //! Standard Windows macro to markup the string literals. + using tchar = wchar_t; + //! Standard Windows macro to markup the string literals. #define _T(string_literal) L ## string_literal #else /* !WIN */ - typedef char tchar; + using tchar = char; //! Standard Windows style macro to markup the string literals. #define _T(string_literal) string_literal #endif /* !WIN */ -} // namespace tbb - -#if DO_ITT_NOTIFY -namespace tbb { - //! Display names of internal synchronization types - extern const tchar - *SyncType_GlobalLock, - *SyncType_Scheduler; - //! Display names of internal synchronization components/scenarios - extern const tchar - *SyncObj_SchedulerInitialization, - *SyncObj_SchedulersList, - *SyncObj_WorkerLifeCycleMgmt, - *SyncObj_TaskStealingLoop, - *SyncObj_WorkerTaskPool, - *SyncObj_MasterTaskPool, - *SyncObj_TaskPoolSpinning, - *SyncObj_Mailbox, - *SyncObj_TaskReturnList, - *SyncObj_TaskStream, - *SyncObj_ContextsList - ; - - namespace internal { - void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void* obj, const tchar* name); - - } // namespace internal -} // namespace tbb +//! Display names of internal synchronization types +extern const tchar + *SyncType_Scheduler; +//! Display names of internal synchronization components/scenarios +extern const tchar + *SyncObj_ContextsList + ; +#if __TBB_USE_ITT_NOTIFY // const_cast<void*>() is necessary to cast off volatility -#define ITT_NOTIFY(name,obj) __itt_notify_##name(const_cast<void*>(static_cast<volatile void*>(obj))) +#define ITT_NOTIFY(name,obj) __itt_##name(const_cast<void*>(static_cast<volatile void*>(obj))) #define ITT_THREAD_SET_NAME(name) __itt_thread_set_name(name) #define ITT_FINI_ITTLIB() __itt_fini_ittlib() +#define ITT_RELEASE_RESOURCES() __itt_release_resources() #define ITT_SYNC_CREATE(obj, type, name) __itt_sync_create((void*)(obj), type, name, 2) -#define ITT_SYNC_RENAME(obj, name) __itt_sync_rename(obj, name) #define ITT_STACK_CREATE(obj) obj = __itt_stack_caller_create() -#if __TBB_TASK_GROUP_CONTEXT -#define ITT_STACK(precond, name, obj) (precond) ? 
__itt_stack_##name(obj) : ((void)0); -#else -#define ITT_STACK(precond, name, obj) ((void)0) -#endif /* !__TBB_TASK_GROUP_CONTEXT */ +#define ITT_STACK_DESTROY(obj) (obj!=nullptr) ? __itt_stack_caller_destroy(static_cast<__itt_caller>(obj)) : ((void)0) +#define ITT_CALLEE_ENTER(cond, t, obj) if(cond) {\ + __itt_stack_callee_enter(static_cast<__itt_caller>(obj));\ + __itt_sync_acquired(t);\ + } +#define ITT_CALLEE_LEAVE(cond, obj) (cond) ? __itt_stack_callee_leave(static_cast<__itt_caller>(obj)) : ((void)0) + +#define ITT_TASK_GROUP(obj,name,parent) r1::itt_make_task_group(d1::ITT_DOMAIN_MAIN,(void*)(obj),ALGORITHM,(void*)(parent),(parent!=nullptr) ? ALGORITHM : FLOW_NULL,name) +#define ITT_TASK_BEGIN(obj,name,id) r1::itt_task_begin(d1::ITT_DOMAIN_MAIN,(void*)(id),ALGORITHM,(void*)(obj),ALGORITHM,name) +#define ITT_TASK_END r1::itt_task_end(d1::ITT_DOMAIN_MAIN) + -#else /* !DO_ITT_NOTIFY */ +#else /* !__TBB_USE_ITT_NOTIFY */ #define ITT_NOTIFY(name,obj) ((void)0) #define ITT_THREAD_SET_NAME(name) ((void)0) #define ITT_FINI_ITTLIB() ((void)0) +#define ITT_RELEASE_RESOURCES() ((void)0) #define ITT_SYNC_CREATE(obj, type, name) ((void)0) -#define ITT_SYNC_RENAME(obj, name) ((void)0) #define ITT_STACK_CREATE(obj) ((void)0) -#define ITT_STACK(precond, name, obj) ((void)0) +#define ITT_STACK_DESTROY(obj) ((void)0) +#define ITT_CALLEE_ENTER(cond, t, obj) ((void)0) +#define ITT_CALLEE_LEAVE(cond, obj) ((void)0) +#define ITT_TASK_GROUP(type,name,parent) ((void)0) +#define ITT_TASK_BEGIN(type,name,id) ((void)0) +#define ITT_TASK_END ((void)0) -#endif /* !DO_ITT_NOTIFY */ +#endif /* !__TBB_USE_ITT_NOTIFY */ -namespace tbb { -namespace internal { int __TBB_load_ittnotify(); -}} + +} // namespace r1 +} // namespace detail +} // namespace tbb #endif /* _TBB_ITT_NOTIFY */ diff --git a/src/tbb/src/tbb/lin32-tbb-export.def b/src/tbb/src/tbb/lin32-tbb-export.def deleted file mode 100644 index e6a6856d7..000000000 --- a/src/tbb/src/tbb/lin32-tbb-export.def +++ /dev/null @@ -1,49 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: - -#define __TBB_SYMBOL( sym ) sym; -#include "lin32-tbb-export.lst" - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* ITT symbols */ -__itt_*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_memcpy_largest_cachelinesize; -get_memcpy_largest_cache_size; -get_mem_ops_method; -init_mem_ops_method; -irc__get_msg; -irc__print; -override_mem_ops_method; -set_memcpy_largest_cachelinesize; -set_memcpy_largest_cache_size; - -}; diff --git a/src/tbb/src/tbb/lin32-tbb-export.lst b/src/tbb/src/tbb/lin32-tbb-export.lst deleted file mode 100644 index e959c0d4e..000000000 --- a/src/tbb/src/tbb/lin32-tbb-export.lst +++ /dev/null @@ -1,391 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_config.h" - -/* cache_aligned_allocator.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEjjPv ) -__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv ) -__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv ) -__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Ej ) -__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev ) - -/* task.cpp v3 */ -__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt ) -__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi ) -__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv ) -__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE ) -__TBB_SYMBOL( _ZN3tbb4task4selfEv ) -__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEj ) -__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEj ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEj ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEj ) -__TBB_SYMBOL( _ZTIN3tbb4taskE ) -__TBB_SYMBOL( _ZTSN3tbb4taskE ) -__TBB_SYMBOL( _ZTVN3tbb4taskE ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEij ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb ) -#endif /* __TBB_SCHEDULER_OBSERVER */ -__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev ) -__TBB_SYMBOL( _ZTIN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTSN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTVN3tbb10empty_taskE ) - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEi ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ ) -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEj ) -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv 
) -__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv ) -__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev ) -__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev ) -__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev ) -__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTSN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTVN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev ) -__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev ) -__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTSN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTVN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev ) -__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev ) -__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTSN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTVN3tbb10user_abortE ) - -/* tbb_misc.cpp */ -__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ ) -__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E ) -__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv ) -__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc ) -__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz ) -#if __TBB_x86_32 -__TBB_SYMBOL( __TBB_machine_store8_slow_perf_warning ) -__TBB_SYMBOL( __TBB_machine_store8_slow ) -#endif -__TBB_SYMBOL( TBB_runtime_interface_version ) - -/* tbb_main.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv ) -__TBB_SYMBOL( 
_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv ) -__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv ) -#if __TBB_ITT_STRUCTURE_API -__TBB_SYMBOL( _ZN3tbb8internal22itt_make_task_group_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE ) -__TBB_SYMBOL( _ZN3tbb8internal23itt_metadata_str_add_v7ENS0_15itt_domain_enumEPvyNS0_12string_indexEPKc ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_relation_add_v7ENS0_15itt_domain_enumEPvyNS0_12itt_relationES2_y ) -__TBB_SYMBOL( _ZN3tbb8internal17itt_task_begin_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE ) -__TBB_SYMBOL( _ZN3tbb8internal15itt_task_end_v7ENS0_15itt_domain_enumE ) -#endif - -/* pipeline.cpp */ -__TBB_SYMBOL( _ZTIN3tbb6filterE ) -__TBB_SYMBOL( _ZTSN3tbb6filterE ) -__TBB_SYMBOL( _ZTVN3tbb6filterE ) -__TBB_SYMBOL( _ZN3tbb6filterD2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline3runEj ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZN3tbb8pipeline3runEjRNS_18task_group_contextE ) -#endif -__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv ) -__TBB_SYMBOL( _ZTIN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTSN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTVN3tbb8pipelineE ) -__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev ) -__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv ) - -/* queuing_rw_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b ) - -/* reader_writer_lock.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv ) - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ ) -__TBB_SYMBOL( 
_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ ) -#endif - -/* spin_rw_mutex v3 */ -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv ) - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE ) - -/* spin_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ ) - -/* mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv ) - -/* recursive_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv ) - -/* QueuingMutex.cpp */ -__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ ) - -/* critical_section.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv ) - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv ) - -/* concurrent_queue.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEij ) -__TBB_SYMBOL( 
_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Ej ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev ) -__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv ) -#endif - -/* concurrent_queue v3 */ -/* constructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Ej ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ej ) -/* destructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev ) -/* typeinfo */ -__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E ) -/* vtable */ -__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E ) -/* methods */ -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEij ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ ) - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_jPFvPvPKvjE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvjEb ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEjjPFvPvjE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEjjj ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEjRj ) -__TBB_SYMBOL( 
_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEjjPFvPvjE ) -__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv ) -#endif - -/* concurrent_vector v3 */ -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_jPFvPvPKvjE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvjE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEjjPFvPvPKvjES4_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEjjj ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEjRj ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEjjPFvPvPKvjES4_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEjPvPFvS2_jEPFvS2_PKvjE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEj ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEjjjPKvPFvPvjEPFvS4_S3_jE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEjjPFvPvPKvjES4_ ) - -/* tbb_thread */ -#if __MINGW32__ -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_ ) -#else -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ ) -#endif -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv ) -__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Ej ) -__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ ) - -#if __MINGW32__ -/* condition_variable */ -__TBB_SYMBOL( _ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE ) -#endif - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/lin64-tbb-export.def b/src/tbb/src/tbb/lin64-tbb-export.def deleted file mode 100644 index edfeca428..000000000 --- a/src/tbb/src/tbb/lin64-tbb-export.def +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -{ -global: - -#define __TBB_SYMBOL( sym ) sym; -#include "lin64-tbb-export.lst" - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* ITT symbols */ -__itt_*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_msg_buf; -get_text_buf; -message_catalog; -print_buf; -irc__get_msg; -irc__print; - -}; diff --git a/src/tbb/src/tbb/lin64-tbb-export.lst b/src/tbb/src/tbb/lin64-tbb-export.lst deleted file mode 100644 index c81fadaf4..000000000 --- a/src/tbb/src/tbb/lin64-tbb-export.lst +++ /dev/null @@ -1,373 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_config.h" - -/* cache_aligned_allocator.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv ) -__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv ) -__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv ) -__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev ) - -/* task.cpp v3 */ -__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt ) -__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi ) -__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv ) -__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE ) -__TBB_SYMBOL( _ZN3tbb4task4selfEv ) -__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm ) -__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm ) -__TBB_SYMBOL( _ZTIN3tbb4taskE ) -__TBB_SYMBOL( _ZTSN3tbb4taskE ) -__TBB_SYMBOL( _ZTVN3tbb4taskE ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb ) -#endif /* __TBB_SCHEDULER_OBSERVER */ -__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev ) -__TBB_SYMBOL( _ZTIN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTSN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTVN3tbb10empty_taskE ) - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ ) -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv 
) -__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv ) -__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev ) -__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev ) -__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev ) -__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTSN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTVN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev ) -__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev ) -__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTSN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTVN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev ) -__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev ) -__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTSN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTVN3tbb10user_abortE ) -/* tbb_misc.cpp */ -__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ ) -__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E ) -__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv ) -__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc ) -__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -/* tbb_main.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv ) -__TBB_SYMBOL( 
_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv ) -#if __TBB_ITT_STRUCTURE_API -__TBB_SYMBOL( _ZN3tbb8internal23itt_metadata_str_add_v7ENS0_15itt_domain_enumEPvyNS0_12string_indexEPKc ) -__TBB_SYMBOL( _ZN3tbb8internal22itt_make_task_group_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE ) -__TBB_SYMBOL( _ZN3tbb8internal17itt_task_begin_v7ENS0_15itt_domain_enumEPvyS2_yNS0_12string_indexE ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_relation_add_v7ENS0_15itt_domain_enumEPvyNS0_12itt_relationES2_y ) -__TBB_SYMBOL( _ZN3tbb8internal15itt_task_end_v7ENS0_15itt_domain_enumE ) -#endif - -/* pipeline.cpp */ -__TBB_SYMBOL( _ZTIN3tbb6filterE ) -__TBB_SYMBOL( _ZTSN3tbb6filterE ) -__TBB_SYMBOL( _ZTVN3tbb6filterE ) -__TBB_SYMBOL( _ZN3tbb6filterD2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline3runEm ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE ) -#endif -__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv ) -__TBB_SYMBOL( _ZTIN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTSN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTVN3tbb8pipelineE ) -__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev ) -__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv ) - -/* queuing_rw_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b ) - -/* reader_writer_lock.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv ) - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ ) 
-__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ ) -#endif - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE ) - -/* spin_rw_mutex v3 */ -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv ) - -/* spin_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv ) - -/* mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv ) - -/* recursive_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv ) - -/* QueuingMutex.cpp */ -__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ ) - -/* critical_section.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv ) - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv ) - -/* concurrent_queue.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv ) 
-__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev ) -__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv ) -#endif - -/* concurrent_queue v3 */ -/* constructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em ) -/* destructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev ) -/* typeinfo */ -__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E ) -/* vtable */ -__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E ) -/* methods */ -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ ) - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE ) -__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv ) -#endif - -/* concurrent_vector v3 */ -__TBB_SYMBOL( 
_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ ) - -/* tbb_thread */ -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv ) -__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/lin64ipf-tbb-export.def b/src/tbb/src/tbb/lin64ipf-tbb-export.def deleted file mode 100644 index 337d416fb..000000000 --- a/src/tbb/src/tbb/lin64ipf-tbb-export.def +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: - -#define __TBB_SYMBOL( sym ) sym; -#include "lin64ipf-tbb-export.lst" - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* ITT symbols */ -__itt_*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -?0_memcopyA; -?0_memcopyDu; -?0_memcpyD; -?1__memcpy; -?1__memmove; -?1__serial_memmove; -memcpy; -memset; - -}; diff --git a/src/tbb/src/tbb/lin64ipf-tbb-export.lst b/src/tbb/src/tbb/lin64ipf-tbb-export.lst deleted file mode 100644 index ca1c88a8f..000000000 --- a/src/tbb/src/tbb/lin64ipf-tbb-export.lst +++ /dev/null @@ -1,408 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_config.h" - -/* cache_aligned_allocator.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv ) -__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv ) -__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv ) -__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev ) - -/* task.cpp v3 */ -__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt ) -__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi ) -__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv ) -__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE ) -__TBB_SYMBOL( _ZN3tbb4task4selfEv ) -__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm ) -__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm ) -__TBB_SYMBOL( _ZTIN3tbb4taskE ) -__TBB_SYMBOL( _ZTSN3tbb4taskE ) -__TBB_SYMBOL( _ZTVN3tbb4taskE ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb ) -#endif /* __TBB_SCHEDULER_OBSERVER */ -__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev ) -__TBB_SYMBOL( _ZTIN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTSN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTVN3tbb10empty_taskE ) - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ ) -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv 
) -__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv ) -__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev ) -__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev ) -__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev ) -__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTSN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTVN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev ) -__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev ) -__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTSN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTVN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev ) -__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev ) -__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTSN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTVN3tbb10user_abortE ) - -/* tbb_misc.cpp */ -__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ ) -__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E ) -__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv ) -__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc ) -__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -/* tbb_main.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv ) -__TBB_SYMBOL( 
_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv ) - -/* pipeline.cpp */ -__TBB_SYMBOL( _ZTIN3tbb6filterE ) -__TBB_SYMBOL( _ZTSN3tbb6filterE ) -__TBB_SYMBOL( _ZTVN3tbb6filterE ) -__TBB_SYMBOL( _ZN3tbb6filterD2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline3runEm ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE ) -#endif -__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv ) -__TBB_SYMBOL( _ZTIN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTSN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTVN3tbb8pipelineE ) -__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev ) -__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv ) - -/* queuing_rw_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b ) - -/* reader_writer_lock.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv ) - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ ) -#endif - -/* spin_rw_mutex v3 */ -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv ) -__TBB_SYMBOL( 
_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv ) - -/* spin_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ ) - -/* mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv ) - -/* recursive_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv ) - -/* QueuingMutex.cpp */ -__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ ) - -/* critical_section.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv ) - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv ) - -/* concurrent_queue.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev ) -__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv ) -#endif - -/* concurrent_queue v3 */ -/* constructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em ) -/* destructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev ) -/* typeinfo */ -__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E ) -__TBB_SYMBOL( 
_ZTSN3tbb8internal24concurrent_queue_base_v3E ) -/* vtable */ -__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E ) -/* methods */ -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ ) - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE ) -__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv ) -#endif - -/* concurrent_vector v3 */ -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ ) - -/* 
tbb_thread */ -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv ) -__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE ) - -/* asm functions */ -__TBB_SYMBOL( __TBB_machine_fetchadd1__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchadd2__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchadd4__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchadd8__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchstore1__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchstore2__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchstore4__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchstore8__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_fetchadd1acquire ) -__TBB_SYMBOL( __TBB_machine_fetchadd1release ) -__TBB_SYMBOL( __TBB_machine_fetchadd2acquire ) -__TBB_SYMBOL( __TBB_machine_fetchadd2release ) -__TBB_SYMBOL( __TBB_machine_fetchadd4acquire ) -__TBB_SYMBOL( __TBB_machine_fetchadd4release ) -__TBB_SYMBOL( __TBB_machine_fetchadd8acquire ) -__TBB_SYMBOL( __TBB_machine_fetchadd8release ) -__TBB_SYMBOL( __TBB_machine_fetchstore1acquire ) -__TBB_SYMBOL( __TBB_machine_fetchstore1release ) -__TBB_SYMBOL( __TBB_machine_fetchstore2acquire ) -__TBB_SYMBOL( __TBB_machine_fetchstore2release ) -__TBB_SYMBOL( __TBB_machine_fetchstore4acquire ) -__TBB_SYMBOL( __TBB_machine_fetchstore4release ) -__TBB_SYMBOL( __TBB_machine_fetchstore8acquire ) -__TBB_SYMBOL( __TBB_machine_fetchstore8release ) -__TBB_SYMBOL( __TBB_machine_cmpswp1acquire ) -__TBB_SYMBOL( __TBB_machine_cmpswp1release ) -__TBB_SYMBOL( __TBB_machine_cmpswp1__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_cmpswp2acquire ) -__TBB_SYMBOL( __TBB_machine_cmpswp2release ) -__TBB_SYMBOL( __TBB_machine_cmpswp2__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_cmpswp4acquire ) -__TBB_SYMBOL( __TBB_machine_cmpswp4release ) -__TBB_SYMBOL( __TBB_machine_cmpswp4__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_cmpswp8acquire ) -__TBB_SYMBOL( __TBB_machine_cmpswp8release ) -__TBB_SYMBOL( __TBB_machine_cmpswp8__TBB_full_fence ) -__TBB_SYMBOL( __TBB_machine_lg ) -__TBB_SYMBOL( __TBB_machine_lockbyte ) -__TBB_SYMBOL( __TBB_machine_pause ) -__TBB_SYMBOL( __TBB_machine_trylockbyte ) -__TBB_SYMBOL( __TBB_machine_load8_relaxed ) -__TBB_SYMBOL( __TBB_machine_store8_relaxed ) -__TBB_SYMBOL( __TBB_machine_load4_relaxed ) -__TBB_SYMBOL( __TBB_machine_store4_relaxed ) -__TBB_SYMBOL( __TBB_machine_load2_relaxed ) -__TBB_SYMBOL( __TBB_machine_store2_relaxed ) -__TBB_SYMBOL( __TBB_machine_load1_relaxed ) -__TBB_SYMBOL( __TBB_machine_store1_relaxed ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/mac32-tbb-export.def b/src/tbb/src/tbb/mac32-tbb-export.def deleted file mode 100644 index fe994b835..000000000 --- a/src/tbb/src/tbb/mac32-tbb-export.def +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define __TBB_SYMBOL( sym ) _##sym -#include "mac32-tbb-export.lst" - diff --git a/src/tbb/src/tbb/mac32-tbb-export.lst b/src/tbb/src/tbb/mac32-tbb-export.lst deleted file mode 100644 index 3c36c2490..000000000 --- a/src/tbb/src/tbb/mac32-tbb-export.lst +++ /dev/null @@ -1,392 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -/* - Sometimes OS X* requires leading underscore (e. g. in export list file), but sometimes not - (e. g. when searching symbol in a dynamic library via dlsym()). Symbols in this file SHOULD - be listed WITHOUT one leading underscore. __TBB_SYMBOL macro should add underscore when - necessary, depending on the indended usage. 
-*/ - -// cache_aligned_allocator.cpp -__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv ) -__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv ) -__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv ) -__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev ) - -// task.cpp v3 -__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt ) -__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi ) -__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv ) -__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE ) -__TBB_SYMBOL( _ZN3tbb4task4selfEv ) -__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm ) -__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj ) -__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm ) -__TBB_SYMBOL( _ZTIN3tbb4taskE ) -__TBB_SYMBOL( _ZTSN3tbb4taskE ) -__TBB_SYMBOL( _ZTVN3tbb4taskE ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb ) -#endif /* __TBB_SCHEDULER_OBSERVER */ -__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev ) -__TBB_SYMBOL( _ZTIN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTSN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTVN3tbb10empty_taskE ) - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -// task_v2.cpp -__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ ) -#endif - -// Exception handling in task scheduler -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv ) -__TBB_SYMBOL( 
_ZN3tbb18task_group_context5resetEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv ) -__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// Symbols for exceptions thrown from TBB -__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev ) -__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE ) -__TBB_SYMBOL( _ZNSt13runtime_errorD1Ev ) -__TBB_SYMBOL( _ZTISt13runtime_error ) -__TBB_SYMBOL( _ZTSSt13runtime_error ) -__TBB_SYMBOL( _ZNSt16invalid_argumentD1Ev ) -__TBB_SYMBOL( _ZTISt16invalid_argument ) -__TBB_SYMBOL( _ZTSSt16invalid_argument ) -__TBB_SYMBOL( _ZNSt11range_errorD1Ev ) -__TBB_SYMBOL( _ZTISt11range_error ) -__TBB_SYMBOL( _ZTSSt11range_error ) -__TBB_SYMBOL( _ZNSt12length_errorD1Ev ) -__TBB_SYMBOL( _ZTISt12length_error ) -__TBB_SYMBOL( _ZTSSt12length_error ) -__TBB_SYMBOL( _ZNSt12out_of_rangeD1Ev ) -__TBB_SYMBOL( _ZTISt12out_of_range ) -__TBB_SYMBOL( _ZTSSt12out_of_range ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev ) -__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev ) -__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTSN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTVN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev ) -__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev ) -__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTSN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTVN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev ) -__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev ) -__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTSN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTVN3tbb10user_abortE ) - -// tbb_misc.cpp 
-__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ ) -__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E ) -__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc ) -__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz ) -#if __TBB_x86_32 -__TBB_SYMBOL( __TBB_machine_store8_slow_perf_warning ) -__TBB_SYMBOL( __TBB_machine_store8_slow ) -#endif -__TBB_SYMBOL( TBB_runtime_interface_version ) - -// tbb_main.cpp -__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc ) - -// pipeline.cpp -__TBB_SYMBOL( _ZTIN3tbb6filterE ) -__TBB_SYMBOL( _ZTSN3tbb6filterE ) -__TBB_SYMBOL( _ZTVN3tbb6filterE ) -__TBB_SYMBOL( _ZN3tbb6filterD2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline3runEm ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE ) -#endif -__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv ) -__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTSN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTVN3tbb8pipelineE ) -__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv ) - -// queuing_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv ) - -// reader_writer_lock.cpp -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv ) - -#if !TBB_NO_LEGACY -// spin_rw_mutex.cpp v2 -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ ) -__TBB_SYMBOL( 
_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ ) -#endif - -// spin_rw_mutex v3 -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv ) - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE ) - -// spin_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv ) - -// mutex.cpp -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv ) - -// recursive_mutex.cpp -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv ) - -// queuing_mutex.cpp -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv ) - -// critical_section.cpp -__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv ) - -#if !TBB_NO_LEGACY -// concurrent_hash_map -__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv ) - -// concurrent_queue.cpp v2 -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEim ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv 
) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Em ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev ) -__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv ) -#endif - -// concurrent_queue v3 -// constructors -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em ) -// destructors -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev ) -// typeinfo -__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E ) -// vtable -__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E ) -// methods -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEim ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ ) - -#if !TBB_NO_LEGACY -// concurrent_vector.cpp v2 -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE ) -__TBB_SYMBOL( 
_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv ) -#endif - -// concurrent_vector v3 -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ ) - -// tbb_thread -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv ) -__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/mac64-tbb-export.def b/src/tbb/src/tbb/mac64-tbb-export.def deleted file mode 100644 index c530d81fd..000000000 --- a/src/tbb/src/tbb/mac64-tbb-export.def +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define __TBB_SYMBOL( sym ) _##sym -#include "mac64-tbb-export.lst" - diff --git a/src/tbb/src/tbb/mac64-tbb-export.lst b/src/tbb/src/tbb/mac64-tbb-export.lst deleted file mode 100644 index 3f1f3fddd..000000000 --- a/src/tbb/src/tbb/mac64-tbb-export.lst +++ /dev/null @@ -1,389 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -/* - Sometimes OS X* requires leading underscore (e. g. in export list file), but sometimes not - (e. g. when searching symbol in a dynamic library via dlsym()). Symbols in this file SHOULD - be listed WITHOUT one leading underscore. __TBB_SYMBOL macro should add underscore when - necessary, depending on the indended usage. 
-*/ - -// cache_aligned_allocator.cpp -__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEmmPv ) -__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv ) -__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv ) -__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev ) - -// task.cpp v3 -__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt ) -__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi ) -__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv ) -__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE ) -__TBB_SYMBOL( _ZN3tbb4task4selfEv ) -__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEm ) -__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj ) -__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm ) -__TBB_SYMBOL( _ZTIN3tbb4taskE ) -__TBB_SYMBOL( _ZTSN3tbb4taskE ) -__TBB_SYMBOL( _ZTVN3tbb4taskE ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEim ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb ) -#endif /* __TBB_SCHEDULER_OBSERVER */ -__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev ) -__TBB_SYMBOL( _ZTIN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTSN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTVN3tbb10empty_taskE ) - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEl ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -// task_v2.cpp -__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ ) -#endif - -// Exception handling in task scheduler -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm ) -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv ) -__TBB_SYMBOL( 
_ZN3tbb18task_group_context5resetEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv ) -__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// Symbols for exceptions thrown from TBB -__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev ) -__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE ) -__TBB_SYMBOL( _ZNSt13runtime_errorD1Ev ) -__TBB_SYMBOL( _ZTISt13runtime_error ) -__TBB_SYMBOL( _ZTSSt13runtime_error ) -__TBB_SYMBOL( _ZNSt16invalid_argumentD1Ev ) -__TBB_SYMBOL( _ZTISt16invalid_argument ) -__TBB_SYMBOL( _ZTSSt16invalid_argument ) -__TBB_SYMBOL( _ZNSt11range_errorD1Ev ) -__TBB_SYMBOL( _ZTISt11range_error ) -__TBB_SYMBOL( _ZTSSt11range_error ) -__TBB_SYMBOL( _ZNSt12length_errorD1Ev ) -__TBB_SYMBOL( _ZTISt12length_error ) -__TBB_SYMBOL( _ZTSSt12length_error ) -__TBB_SYMBOL( _ZNSt12out_of_rangeD1Ev ) -__TBB_SYMBOL( _ZTISt12out_of_range ) -__TBB_SYMBOL( _ZTSSt12out_of_range ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev ) -__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev ) -__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTSN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTVN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev ) -__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev ) -__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTSN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTVN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev ) -__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev ) -__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTSN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTVN3tbb10user_abortE ) - - -// tbb_misc.cpp 
-__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ ) -__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E ) -__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc ) -__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -// tbb_main.cpp -__TBB_SYMBOL( _ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc ) - -// pipeline.cpp -__TBB_SYMBOL( _ZTIN3tbb6filterE ) -__TBB_SYMBOL( _ZTSN3tbb6filterE ) -__TBB_SYMBOL( _ZTVN3tbb6filterE ) -__TBB_SYMBOL( _ZN3tbb6filterD2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline3runEm ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZN3tbb8pipeline3runEmRNS_18task_group_contextE ) -#endif -__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv ) -__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTSN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTVN3tbb8pipelineE ) -__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv ) - -// queuing_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv ) - -// reader_writer_lock.cpp -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv ) - -#if !TBB_NO_LEGACY -// spin_rw_mutex.cpp v2 -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ ) -__TBB_SYMBOL( 
_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ ) -#endif - -// spin_rw_mutex v3 -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv ) - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE ) - -// spin_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv ) - -// mutex.cpp -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv ) - -// recursive_mutex.cpp -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv ) - -// queuing_mutex.cpp -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv ) - -// critical_section.cpp -__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv ) - -#if !TBB_NO_LEGACY -// concurrent_hash_map -__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv ) - -// concurrent_queue.cpp v2 -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( 
_ZN3tbb8internal21concurrent_queue_baseC2Em ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev ) -__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv ) -#endif - -// concurrent_queue v3 -// constructors -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Em ) -// destructors -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev ) -// typeinfo -__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E ) -// vtable -__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E ) -// methods -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ ) - -#if !TBB_NO_LEGACY -// concurrent_vector.cpp v2 -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE ) -__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv ) -#endif - -// concurrent_vector v3 -__TBB_SYMBOL( 
_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ ) - -// tbb_thread -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv ) -__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Em ) -__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/mailbox.h b/src/tbb/src/tbb/mailbox.h index 111e8b01a..d9166c121 100644 --- a/src/tbb/src/tbb/mailbox.h +++ b/src/tbb/src/tbb/mailbox.h @@ -1,36 +1,34 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
+ Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_mailbox_H #define _TBB_mailbox_H -#include "tbb/tbb_stddef.h" -#include "tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/detail/_small_object_pool.h" #include "scheduler_common.h" -#include "tbb/atomic.h" + +#include <atomic> namespace tbb { -namespace internal { +namespace detail { +namespace r1 { -struct task_proxy : public task { +struct task_proxy : public d1::task { static const intptr_t pool_bit = 1<<0; static const intptr_t mailbox_bit = 1<<1; static const intptr_t location_mask = pool_bit | mailbox_bit; @@ -38,29 +36,34 @@ struct task_proxy : public task { Two low-order bits mean: 1 = proxy is/was/will be in task pool 2 = proxy is/was/will be in mailbox */ - intptr_t task_and_tag; + std::atomic<intptr_t> task_and_tag; //! Pointer to next task_proxy in a mailbox - task_proxy *__TBB_atomic next_in_mailbox; + std::atomic<task_proxy*> next_in_mailbox; //! Mailbox to which this was mailed. mail_outbox* outbox; + //! Task affinity id which is referenced + d1::slot_id slot; + + d1::small_object_allocator allocator; + //! True if the proxy is stored both in its sender's pool and in the destination mailbox. static bool is_shared ( intptr_t tat ) { return (tat & location_mask) == location_mask; } - //! Returns a pointer to the encapsulated task or NULL. + //! Returns a pointer to the encapsulated task or nullptr. static task* task_ptr ( intptr_t tat ) { return (task*)(tat & ~location_mask); } - //! Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary. + //! Returns a pointer to the encapsulated task or nullptr, and frees proxy if necessary. template<intptr_t from_bit> inline task* extract_task () { - __TBB_ASSERT( prefix().extra_state == es_task_proxy, "Normal task misinterpreted as a proxy?" ); - intptr_t tat = __TBB_load_with_acquire(task_and_tag); + // __TBB_ASSERT( prefix().extra_state == es_task_proxy, "Normal task misinterpreted as a proxy?" ); + intptr_t tat = task_and_tag.load(std::memory_order_acquire); __TBB_ASSERT( tat == from_bit || (is_shared(tat) && task_ptr(tat)), "Proxy's tag cannot specify both locations if the proxy " "was retrieved from one of its original locations" ); @@ -69,82 +72,104 @@ struct task_proxy : public task { // Attempt to transition the proxy to the "empty" state with // cleaner_bit specifying entity responsible for its eventual freeing. // Explicit cast to void* is to work around a seeming ICC 11.1 bug. - if ( as_atomic(task_and_tag).compare_and_swap(cleaner_bit, tat) == tat ) { + if ( task_and_tag.compare_exchange_strong(tat, cleaner_bit) ) { // Successfully grabbed the task, and left new owner with the job of freeing the proxy return task_ptr(tat); } } // Proxied task has already been claimed from another proxy location. 
- __TBB_ASSERT( task_and_tag == from_bit, "Empty proxy cannot contain non-zero task pointer" ); - poison_pointer(outbox); - poison_pointer(next_in_mailbox); - poison_value(task_and_tag); - return NULL; + __TBB_ASSERT( task_and_tag.load(std::memory_order_relaxed) == from_bit, "Empty proxy cannot contain non-zero task pointer" ); + return nullptr; + } + + task* execute(d1::execution_data&) override { + __TBB_ASSERT_RELEASE(false, nullptr); + return nullptr; + } + task* cancel(d1::execution_data&) override { + __TBB_ASSERT_RELEASE(false, nullptr); + return nullptr; } }; // struct task_proxy //! Internal representation of mail_outbox, without padding. class unpadded_mail_outbox { protected: - typedef task_proxy*__TBB_atomic proxy_ptr; + typedef std::atomic<task_proxy*> atomic_proxy_ptr; - //! Pointer to first task_proxy in mailbox, or NULL if box is empty. - proxy_ptr my_first; + //! Pointer to first task_proxy in mailbox, or nullptr if box is empty. + atomic_proxy_ptr my_first; - //! Pointer to pointer that will point to next item in the queue. Never NULL. - proxy_ptr* __TBB_atomic my_last; + //! Pointer to pointer that will point to next item in the queue. Never nullptr. + std::atomic<atomic_proxy_ptr*> my_last; //! Owner of mailbox is not executing a task, and has drained its own task pool. - bool my_is_idle; + std::atomic<bool> my_is_idle; }; +// TODO: - consider moving to arena slot //! Class representing where mail is put. /** Padded to occupy a cache line. */ class mail_outbox : padded<unpadded_mail_outbox> { - task_proxy* internal_pop() { - task_proxy* const first = __TBB_load_relaxed(my_first); - if( !first ) - return NULL; - __TBB_control_consistency_helper(); // on my_first + task_proxy* internal_pop( isolation_type isolation ) { + task_proxy* curr = my_first.load(std::memory_order_acquire); + if ( !curr ) + return nullptr; + atomic_proxy_ptr* prev_ptr = &my_first; + if ( isolation != no_isolation ) { + while ( task_accessor::isolation(*curr) != isolation ) { + prev_ptr = &curr->next_in_mailbox; + // The next_in_mailbox should be read with acquire to guarantee (*curr) consistency. + curr = curr->next_in_mailbox.load(std::memory_order_acquire); + if ( !curr ) + return nullptr; + } + } // There is a first item in the mailbox. See if there is a second. - if( task_proxy* second = first->next_in_mailbox ) { + // The next_in_mailbox should be read with acquire to guarantee (*second) consistency. + if ( task_proxy* second = curr->next_in_mailbox.load(std::memory_order_acquire) ) { // There are at least two items, so first item can be popped easily. - my_first = second; + prev_ptr->store(second, std::memory_order_relaxed); } else { - // There is only one item. Some care is required to pop it. - my_first = NULL; - if( as_atomic(my_last).compare_and_swap(&my_first,&first->next_in_mailbox) == &first->next_in_mailbox ) - { + // There is only one item. Some care is required to pop it. + + prev_ptr->store(nullptr, std::memory_order_relaxed); + atomic_proxy_ptr* expected = &curr->next_in_mailbox; + if ( my_last.compare_exchange_strong( expected, prev_ptr ) ) { // Successfully transitioned mailbox from having one item to having none. - __TBB_ASSERT(!first->next_in_mailbox,NULL); + __TBB_ASSERT( !curr->next_in_mailbox.load(std::memory_order_relaxed), nullptr); } else { // Some other thread updated my_last but has not filled in first->next_in_mailbox // Wait until first item points to second item. 
atomic_backoff backoff; - while( !(second = first->next_in_mailbox) ) backoff.pause(); - my_first = second; + // The next_in_mailbox should be read with acquire to guarantee (*second) consistency. + while ( !(second = curr->next_in_mailbox.load(std::memory_order_acquire)) ) backoff.pause(); + prev_ptr->store( second, std::memory_order_relaxed); } } - return first; + assert_pointer_valid(curr); + return curr; } public: friend class mail_inbox; //! Push task_proxy onto the mailbox queue of another thread. /** Implementation is wait-free. */ - void push( task_proxy& t ) { - __TBB_ASSERT(&t, NULL); - t.next_in_mailbox = NULL; - proxy_ptr * const link = (proxy_ptr *)__TBB_FetchAndStoreW(&my_last,(intptr_t)&t.next_in_mailbox); - // No release fence required for the next store, because there are no memory operations - // between the previous fully fenced atomic operation and the store. - __TBB_store_relaxed(*link, &t); + void push( task_proxy* t ) { + assert_pointer_valid(t); + t->next_in_mailbox.store(nullptr, std::memory_order_relaxed); + atomic_proxy_ptr* const link = my_last.exchange(&t->next_in_mailbox); + // Logically, the release fence is not required because the exchange above provides the + // release-acquire semantic that guarantees that (*t) will be consistent when another thread + // loads the link atomic. However, the C++11 memory model guarantees consistency of (*t) only + // when the same atomic is used for synchronization. + link->store(t, std::memory_order_release); } //! Return true if mailbox is empty bool empty() { - return __TBB_load_relaxed(my_first) == NULL; + return my_first.load(std::memory_order_relaxed) == nullptr; } //! Construct *this as a mailbox from zeroed memory. @@ -152,28 +177,26 @@ class mail_outbox : padded<unpadded_mail_outbox> { This method is provided instead of a full constructor since we know the object will be constructed in zeroed memory. */ void construct() { - __TBB_ASSERT( sizeof(*this)==NFS_MaxLineSize, NULL ); - __TBB_ASSERT( !my_first, NULL ); - __TBB_ASSERT( !my_last, NULL ); - __TBB_ASSERT( !my_is_idle, NULL ); - my_last=&my_first; + __TBB_ASSERT( sizeof(*this)==max_nfs_size, nullptr ); + __TBB_ASSERT( !my_first.load(std::memory_order_relaxed), nullptr ); + __TBB_ASSERT( !my_last.load(std::memory_order_relaxed), nullptr ); + __TBB_ASSERT( !my_is_idle.load(std::memory_order_relaxed), nullptr ); + my_last = &my_first; suppress_unused_warning(pad); } - //! Drain the mailbox - intptr_t drain() { - intptr_t k = 0; + //! Drain the mailbox + void drain() { // No fences here because other threads have already quit. - for( ; task_proxy* t = my_first; ++k ) { - my_first = t->next_in_mailbox; - NFS_Free((char*)t - task_prefix_reservation_size); + for( ; task_proxy* t = my_first.load(std::memory_order_relaxed); ) { + my_first.store(t->next_in_mailbox.load(std::memory_order_relaxed), std::memory_order_relaxed); + t->allocator.delete_object(t); } - return k; } //! True if thread that owns this mailbox is looking for work. bool recipient_is_idle() { - return my_is_idle; + return my_is_idle.load(std::memory_order_relaxed); } }; // class mail_outbox @@ -183,21 +206,20 @@ class mail_inbox { mail_outbox* my_putter; public: //! Construct unattached inbox - mail_inbox() : my_putter(NULL) {} + mail_inbox() : my_putter(nullptr) {} - //! Attach inbox to a corresponding outbox. + //! Attach inbox to a corresponding outbox. void attach( mail_outbox& putter ) { - __TBB_ASSERT(!my_putter,"already attached"); my_putter = &putter; } //!
Detach inbox from its outbox void detach() { __TBB_ASSERT(my_putter,"not attached"); - my_putter = NULL; + my_putter = nullptr; } - //! Get next piece of mail, or NULL if mailbox is empty. - task_proxy* pop() { - return my_putter->internal_pop(); + //! Get next piece of mail, or nullptr if mailbox is empty. + task_proxy* pop( isolation_type isolation ) { + return my_putter->internal_pop( isolation ); } //! Return true if mailbox is empty bool empty() { @@ -207,22 +229,18 @@ class mail_inbox { /** Raises assertion failure if mailbox is redundantly marked as not idle. */ void set_is_idle( bool value ) { if( my_putter ) { - __TBB_ASSERT( my_putter->my_is_idle || value, "attempt to redundantly mark mailbox as not idle" ); - my_putter->my_is_idle = value; + __TBB_ASSERT( my_putter->my_is_idle.load(std::memory_order_relaxed) || value, "attempt to redundantly mark mailbox as not idle" ); + my_putter->my_is_idle.store(value, std::memory_order_relaxed); } } //! Indicate whether thread that reads this mailbox is idle. bool is_idle_state ( bool value ) const { - return !my_putter || my_putter->my_is_idle == value; + return !my_putter || my_putter->my_is_idle.load(std::memory_order_relaxed) == value; } - -#if DO_ITT_NOTIFY - //! Get pointer to corresponding outbox used for ITT_NOTIFY calls. - void* outbox() const {return my_putter;} -#endif /* DO_ITT_NOTIFY */ }; // class mail_inbox -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_mailbox_H */ diff --git a/src/tbb/src/tbb/main.cpp b/src/tbb/src/tbb/main.cpp new file mode 100644 index 000000000..f43c33f5b --- /dev/null +++ b/src/tbb/src/tbb/main.cpp @@ -0,0 +1,161 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_config.h" + +#include "main.h" +#include "governor.h" +#include "threading_control.h" +#include "environment.h" +#include "market.h" +#include "tcm_adaptor.h" +#include "misc.h" +#include "itt_notify.h" + +namespace tbb { +namespace detail { +namespace r1 { + +//------------------------------------------------------------------------ +// Begin shared data layout. +// The following global data items are mostly read-only after initialization. 
+//------------------------------------------------------------------------ + +//------------------------------------------------------------------------ +// governor data +basic_tls<thread_data*> governor::theTLS; +rml::tbb_factory governor::theRMLServerFactory; +bool governor::UsePrivateRML; +bool governor::is_rethrow_broken; + +//------------------------------------------------------------------------ +// threading_control data +threading_control* threading_control::g_threading_control; +threading_control::global_mutex_type threading_control::g_threading_control_mutex; + +//------------------------------------------------------------------------ +// context propagation data +context_state_propagation_mutex_type the_context_state_propagation_mutex; +std::atomic<uintptr_t> the_context_state_propagation_epoch{}; + +//------------------------------------------------------------------------ +// One time initialization data + +//! Counter of references to global shared resources such as TLS. +std::atomic<int> __TBB_InitOnce::count{}; + +std::atomic_flag __TBB_InitOnce::InitializationLock = ATOMIC_FLAG_INIT; + +//! Flag that is set to true after one-time initializations are done. +std::atomic<bool> __TBB_InitOnce::InitializationDone{}; + +#if __TBB_USE_ITT_NOTIFY +//! Defined in profiling.cpp +extern bool ITT_Present; +void ITT_DoUnsafeOneTimeInitialization(); +#endif + +#if !(_WIN32||_WIN64) || __TBB_SOURCE_DIRECTLY_INCLUDED +static __TBB_InitOnce __TBB_InitOnceHiddenInstance; +#endif + +//------------------------------------------------------------------------ +// __TBB_InitOnce +//------------------------------------------------------------------------ + +void __TBB_InitOnce::add_ref() { + if (++count == 1) { + governor::acquire_resources(); + tcm_adaptor::initialize(); + } +} + +void __TBB_InitOnce::remove_ref() { + int k = --count; + __TBB_ASSERT(k>=0,"removed __TBB_InitOnce ref that was not added?"); + if( k==0 ) { + governor::release_resources(); + ITT_FINI_ITTLIB(); + ITT_RELEASE_RESOURCES(); + } +} + +//------------------------------------------------------------------------ +// One-time Initializations +//------------------------------------------------------------------------ + +//! Defined in cache_aligned_allocator.cpp +void initialize_cache_aligned_allocator(); + +//! Performs thread-safe lazy one-time general TBB initialization. +void DoOneTimeInitialization() { + __TBB_InitOnce::lock(); + // No fence required for load of InitializationDone, because we are inside a critical section. + if( !__TBB_InitOnce::InitializationDone ) { + __TBB_InitOnce::add_ref(); + if( GetBoolEnvironmentVariable("TBB_VERSION") ) { + PrintVersion(); + tcm_adaptor::print_version(); + } + bool itt_present = false; +#if __TBB_USE_ITT_NOTIFY + ITT_DoUnsafeOneTimeInitialization(); + itt_present = ITT_Present; +#endif /* __TBB_USE_ITT_NOTIFY */ + initialize_cache_aligned_allocator(); + governor::initialize_rml_factory(); + // Force processor groups support detection + governor::default_num_threads(); + // Force OS regular page size detection + governor::default_page_size(); + PrintExtraVersionInfo( "TOOLS SUPPORT", itt_present ? "enabled" : "disabled" ); + __TBB_InitOnce::InitializationDone = true; + } + __TBB_InitOnce::unlock(); +} + +#if (_WIN32||_WIN64) && !__TBB_SOURCE_DIRECTLY_INCLUDED +//! Windows "DllMain" that handles startup and shutdown of dynamic library. 
+extern "C" bool WINAPI DllMain( HANDLE /*hinstDLL*/, DWORD reason, LPVOID lpvReserved ) { + switch( reason ) { + case DLL_PROCESS_ATTACH: + __TBB_InitOnce::add_ref(); + break; + case DLL_PROCESS_DETACH: + // Since THREAD_DETACH is not called for the main thread, call auto-termination + // here as well - but not during process shutdown (due to risk of a deadlock). + if ( lpvReserved == nullptr ) { // library unload + governor::terminate_external_thread(); + } + __TBB_InitOnce::remove_ref(); + // It is assumed that InitializationDone is not set after DLL_PROCESS_DETACH, + // and thus no race on InitializationDone is possible. + if ( __TBB_InitOnce::initialization_done() ) { + // Remove reference that we added in DoOneTimeInitialization. + __TBB_InitOnce::remove_ref(); + } + break; + case DLL_THREAD_DETACH: + governor::terminate_external_thread(); + break; + } + return true; +} +#endif /* (_WIN32||_WIN64) && !__TBB_SOURCE_DIRECTLY_INCLUDED */ + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/main.h b/src/tbb/src/tbb/main.h new file mode 100644 index 000000000..c6f54bb47 --- /dev/null +++ b/src/tbb/src/tbb/main.h @@ -0,0 +1,99 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_main_H +#define _TBB_main_H + +#include "governor.h" + +#include <atomic> + +namespace tbb { +namespace detail { +namespace r1 { + +void DoOneTimeInitialization(); + +//------------------------------------------------------------------------ +// __TBB_InitOnce +//------------------------------------------------------------------------ + +// TODO (TBB_REVAMP_TODO): consider better names +//! Class that supports TBB initialization. +/** It handles acquisition and release of global resources (e.g. TLS) during startup and shutdown, + as well as synchronization for DoOneTimeInitialization. */ +class __TBB_InitOnce { + friend void DoOneTimeInitialization(); + friend void ITT_DoUnsafeOneTimeInitialization(); + + static std::atomic<int> count; + + //! Platform specific code to acquire resources. + static void acquire_resources(); + + //! Platform specific code to release resources. + static void release_resources(); + + //! Specifies if the one-time initializations has been done. + static std::atomic<bool> InitializationDone; + + //! Global initialization lock + /** Scenarios are possible when tools interop has to be initialized before the + TBB itself. This imposes a requirement that the global initialization lock + has to support valid static initialization, and does not issue any tool + notifications in any build mode. **/ + static std::atomic_flag InitializationLock; + +public: + static void lock() { + tbb::detail::atomic_backoff backoff; + while( InitializationLock.test_and_set() ) backoff.pause(); + } + + static void unlock() { InitializationLock.clear(std::memory_order_release); } + + static bool initialization_done() { return InitializationDone.load(std::memory_order_acquire); } + + //! Add initial reference to resources. 
+ /** We assume that dynamic loading of the library prevents any other threads + from entering the library until this constructor has finished running. **/ + __TBB_InitOnce() { add_ref(); } + + //! Remove the initial reference to resources. + /** This is not necessarily the last reference if other threads are still running. **/ + ~__TBB_InitOnce() { + governor::terminate_external_thread(); // TLS dtor not called for the main thread + remove_ref(); + // We assume that InitializationDone is not set after file-scope destructors + // start running, and thus no race on InitializationDone is possible. + if ( initialization_done() ) { + // Remove an extra reference that was added in DoOneTimeInitialization. + remove_ref(); + } + } + //! Add reference to resources. If first reference added, acquire the resources. + static void add_ref(); + + //! Remove reference to resources. If last reference removed, release the resources. + static void remove_ref(); + +}; // class __TBB_InitOnce + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif /* _TBB_main_H */ diff --git a/src/tbb/src/tbb/market.cpp b/src/tbb/src/tbb/market.cpp index c1ddc7bcf..ae3fadd47 100644 --- a/src/tbb/src/tbb/market.cpp +++ b/src/tbb/src/tbb/market.cpp @@ -1,658 +1,143 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2023 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ + http://www.apache.org/licenses/LICENSE-2.0 -#include "tbb/tbb_stddef.h" + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +#include "arena.h" #include "market.h" -#include "tbb_main.h" -#include "governor.h" -#include "scheduler.h" -#include "itt_notify.h" + +#include <algorithm> // std::find namespace tbb { -namespace internal { +namespace detail { +namespace r1 { -void market::insert_arena_into_list ( arena& a ) { -#if __TBB_TASK_PRIORITY - arena_list_type &arenas = my_priority_levels[a.my_top_priority].arenas; - arena *&next = my_priority_levels[a.my_top_priority].next_arena; -#else /* !__TBB_TASK_PRIORITY */ - arena_list_type &arenas = my_arenas; - arena *&next = my_next_arena; -#endif /* !__TBB_TASK_PRIORITY */ - arenas.push_front( a ); - if ( arenas.size() == 1 ) - next = &*arenas.begin(); -} -void market::remove_arena_from_list ( arena& a ) { -#if __TBB_TASK_PRIORITY - arena_list_type &arenas = my_priority_levels[a.my_top_priority].arenas; - arena *&next = my_priority_levels[a.my_top_priority].next_arena; -#else /* !__TBB_TASK_PRIORITY */ - arena_list_type &arenas = my_arenas; - arena *&next = my_next_arena; -#endif /* !__TBB_TASK_PRIORITY */ - arena_list_type::iterator it = next; - __TBB_ASSERT( it != arenas.end(), NULL ); - if ( next == &a ) { - if ( ++it == arenas.end() && arenas.size() > 1 ) - it = arenas.begin(); - next = &*it; +class tbb_permit_manager_client : public pm_client { +public: + tbb_permit_manager_client(arena& a) : pm_client(a) {} + + void register_thread() override {} + + void unregister_thread() override {} + + void set_allotment(unsigned allotment) { + my_arena.set_allotment(allotment); } - arenas.remove( a ); -} +}; //------------------------------------------------------------------------ // market //------------------------------------------------------------------------ -market::market ( unsigned max_num_workers, size_t stack_size ) - : my_ref_count(1) - , my_stack_size(stack_size) - , my_max_num_workers(max_num_workers) -#if __TBB_TASK_PRIORITY - , my_global_top_priority(normalized_normal_priority) - , my_global_bottom_priority(normalized_normal_priority) -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - , my_lowest_populated_level(normalized_normal_priority) -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ -#endif /* __TBB_TASK_PRIORITY */ -{ -#if __TBB_TASK_PRIORITY - __TBB_ASSERT( my_global_reload_epoch == 0, NULL ); - my_priority_levels[normalized_normal_priority].workers_available = max_num_workers; -#endif /* __TBB_TASK_PRIORITY */ +market::market(unsigned workers_soft_limit) + : my_num_workers_soft_limit(workers_soft_limit) +{} - // Once created RML server will start initializing workers that will need - // global market instance to get worker stack size - my_server = governor::create_rml_server( *this ); - __TBB_ASSERT( my_server, "Failed to create RML server" ); +pm_client* market::create_client(arena& a) { + return new (cache_aligned_allocate(sizeof(tbb_permit_manager_client))) tbb_permit_manager_client(a); } - -market& market::global_market ( unsigned max_num_workers, size_t stack_size ) { - global_market_mutex_type::scoped_lock lock( theMarketMutex ); - market *m = theMarket; - if ( m ) { - ++m->my_ref_count; - if ( m->my_stack_size < stack_size ) - runtime_warning( "Newer master request for larger stack cannot be satisfied\n" ); - } - else { - max_num_workers = max( governor::default_num_threads() - 1, max_num_workers ); - // at least 1 worker is required to support starvation resistant tasks - if( max_num_workers==0 ) max_num_workers = 1; - // Create the global market instance - size_t size = sizeof(market); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( 
__TBB_offsetof(market, my_workers) + sizeof(generic_scheduler*) == sizeof(market), - "my_workers must be the last data field of the market class"); - size += sizeof(generic_scheduler*) * (max_num_workers - 1); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - __TBB_InitOnce::add_ref(); - void* storage = NFS_Allocate(1, size, NULL); - memset( storage, 0, size ); - // Initialize and publish global market - m = new (storage) market( max_num_workers, stack_size ); - theMarket = m; - } - return *m; +void market::register_client(pm_client* c, d1::constraints&) { + mutex_type::scoped_lock lock(my_mutex); + my_clients[c->priority_level()].push_back(c); } -void market::destroy () { -#if __TBB_COUNT_TASK_NODES - if ( my_task_node_count ) - runtime_warning( "Leaked %ld task objects\n", (long)my_task_node_count ); -#endif /* __TBB_COUNT_TASK_NODES */ - this->~market(); - NFS_Free( this ); - __TBB_InitOnce::remove_ref(); -} - -void market::release () { - __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" ); - bool do_release = false; +void market::unregister_and_destroy_client(pm_client& c) { { - global_market_mutex_type::scoped_lock lock(theMarketMutex); - if ( --my_ref_count == 0 ) { - do_release = true; - theMarket = NULL; - } - } - if( do_release ) - my_server->request_close_connection(); -} - -void market::wait_workers () { - // usable for this kind of scheduler only - __TBB_ASSERT(governor::needsWaitWorkers(), NULL); - // wait till terminating last worker decresed my_ref_count - while (__TBB_load_with_acquire(my_ref_count) > 1) - __TBB_Yield(); - __TBB_ASSERT(1 == my_ref_count, NULL); - release(); -} - -arena& market::create_arena ( unsigned max_num_workers, size_t stack_size ) { - market &m = global_market( max_num_workers, stack_size ); // increases market's ref count -#if __TBB_TASK_ARENA - // Prevent cutting an extra slot for task_arena(p,0) with default market (p-1 workers). - // This is a temporary workaround for 1968 until (TODO:) master slot reservation is reworked - arena& a = arena::allocate_arena( m, min(max_num_workers, m.my_max_num_workers+1) ); -#else - arena& a = arena::allocate_arena( m, min(max_num_workers, m.my_max_num_workers) ); -#endif - // Add newly created arena into the existing market's list. - arenas_list_mutex_type::scoped_lock lock(m.my_arenas_list_mutex); - m.insert_arena_into_list(a); - return a; -} - -/** This method must be invoked under my_arenas_list_mutex. **/ -void market::detach_arena ( arena& a ) { - __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" 
); -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - __TBB_ASSERT( !a.my_num_workers_present, NULL ); -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - __TBB_ASSERT( !a.my_slots[0].my_scheduler, NULL ); - remove_arena_from_list(a); - if ( a.my_aba_epoch == my_arenas_aba_epoch ) - ++my_arenas_aba_epoch; -} - -void market::try_destroy_arena ( arena* a, uintptr_t aba_epoch ) { - __TBB_ASSERT ( a, NULL ); - arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex); - assert_market_valid(); -#if __TBB_TASK_PRIORITY - for ( int p = my_global_top_priority; p >= my_global_bottom_priority; --p ) { - priority_level_info &pl = my_priority_levels[p]; - arena_list_type &my_arenas = pl.arenas; -#endif /* __TBB_TASK_PRIORITY */ - arena_list_type::iterator it = my_arenas.begin(); - for ( ; it != my_arenas.end(); ++it ) { - if ( a == &*it ) { - if ( it->my_aba_epoch == aba_epoch ) { - // Arena is alive - if ( !a->my_num_workers_requested && !a->my_references ) { - __TBB_ASSERT( !a->my_num_workers_allotted && (a->my_pool_state == arena::SNAPSHOT_EMPTY || !a->my_max_num_workers), "Inconsistent arena state" ); - // Arena is abandoned. Destroy it. - detach_arena( *a ); - lock.release(); - a->free_arena(); - } - } - return; - } - } -#if __TBB_TASK_PRIORITY + mutex_type::scoped_lock lock(my_mutex); + auto& clients = my_clients[c.priority_level()]; + auto it = std::find(clients.begin(), clients.end(), &c); + __TBB_ASSERT(it != clients.end(), "Destroying of an unregistered client"); + clients.erase(it); } -#endif /* __TBB_TASK_PRIORITY */ -} -void market::try_destroy_arena ( market* m, arena* a, uintptr_t aba_epoch, bool master ) { - // Arena may have been orphaned. Or it may have been destroyed. - // Thus we cannot dereference the pointer to it until its liveness is verified. - // Arena is alive if it is found in the market's list. - - if ( m != theMarket ) { - // The market has already been emptied. - return; - } - else if ( master ) { - // If this is a master thread, market can be destroyed at any moment. - // So protect it with an extra refcount. - global_market_mutex_type::scoped_lock lock(theMarketMutex); - if ( m != theMarket ) - return; - ++m->my_ref_count; - } - m->try_destroy_arena( a, aba_epoch ); - if ( master ) - m->release(); + auto client = static_cast<tbb_permit_manager_client*>(&c); + client->~tbb_permit_manager_client(); + cache_aligned_deallocate(client); } -/** This method must be invoked under my_arenas_list_mutex. **/ -arena* market::arena_in_need ( arena_list_type &arenas, arena *&next ) { - if ( arenas.empty() ) - return NULL; - arena_list_type::iterator it = next; - __TBB_ASSERT( it != arenas.end(), NULL ); - do { - arena& a = *it; - if ( ++it == arenas.end() ) - it = arenas.begin(); - if ( a.num_workers_active() < a.my_num_workers_allotted ) { - a.my_references += 2; // add a worker -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - ++a.my_num_workers_present; - ++my_priority_levels[a.my_top_priority].workers_present; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - as_atomic(next) = &*it; // a subject for innocent data race under the reader lock - // TODO: rework global round robin policy to local or random to avoid this write - return &a; - } - } while ( it != next ); - return NULL; -} +void market::update_allotment() { + int effective_soft_limit = my_mandatory_num_requested > 0 && my_num_workers_soft_limit == 0 ? 
1 : my_num_workers_soft_limit; + int max_workers = min(my_total_demand, effective_soft_limit); + __TBB_ASSERT(max_workers >= 0, nullptr); -void market::update_allotment ( arena_list_type& arenas, int workers_demand, int max_workers ) { - __TBB_ASSERT( workers_demand, NULL ); - max_workers = min(workers_demand, max_workers); - int carry = 0; -#if TBB_USE_ASSERT + int unassigned_workers = max_workers; int assigned = 0; -#endif /* TBB_USE_ASSERT */ - arena_list_type::iterator it = arenas.begin(); - for ( ; it != arenas.end(); ++it ) { - arena& a = *it; - if ( a.my_num_workers_requested <= 0 ) { - __TBB_ASSERT( !a.my_num_workers_allotted, NULL ); - continue; - } - int tmp = a.my_num_workers_requested * max_workers + carry; - int allotted = tmp / workers_demand; - carry = tmp % workers_demand; - // a.my_num_workers_requested may temporarily exceed a.my_max_num_workers - a.my_num_workers_allotted = min( allotted, (int)a.my_max_num_workers ); -#if TBB_USE_ASSERT - assigned += a.my_num_workers_allotted; -#endif /* TBB_USE_ASSERT */ - } - __TBB_ASSERT( assigned <= workers_demand, NULL ); -} - -#if __TBB_TASK_PRIORITY -inline void market::update_global_top_priority ( intptr_t newPriority ) { - GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.market_prio_switches ); - my_global_top_priority = newPriority; - my_priority_levels[newPriority].workers_available = my_max_num_workers; - advance_global_reload_epoch(); -} - -inline void market::reset_global_priority () { - my_global_bottom_priority = normalized_normal_priority; - update_global_top_priority(normalized_normal_priority); -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - my_lowest_populated_level = normalized_normal_priority; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ -} - -arena* market::arena_in_need ( arena* prev_arena ) -{ - if( !has_any_demand() ) - return NULL; - arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false); - assert_market_valid(); -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - if ( prev_arena ) { - priority_level_info &pl = my_priority_levels[prev_arena->my_top_priority]; - --prev_arena->my_num_workers_present; - --pl.workers_present; - if ( !--prev_arena->my_references && !prev_arena->my_num_workers_requested ) { - detach_arena( *a ); - lock.release(); - a->free_arena(); - lock.acquire(); - } - } -#else - suppress_unused_warning(prev_arena); -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - int p = my_global_top_priority; - arena *a = NULL; - do { - priority_level_info &pl = my_priority_levels[p]; -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - __TBB_ASSERT( p >= my_lowest_populated_level, NULL ); - if ( pl.workers_present >= pl.workers_requested ) - continue; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - a = arena_in_need( pl.arenas, pl.next_arena ); - } while ( !a && --p >= my_global_bottom_priority ); - return a; -} - -void market::update_allotment ( intptr_t highest_affected_priority ) { - intptr_t i = highest_affected_priority; - int available = my_priority_levels[i].workers_available; -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - my_lowest_populated_level = my_global_bottom_priority; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - for ( ; i >= my_global_bottom_priority; --i ) { - priority_level_info &pl = my_priority_levels[i]; - pl.workers_available = available; - if ( pl.workers_requested ) { - update_allotment( pl.arenas, pl.workers_requested, available ); - available -= pl.workers_requested; - if ( available < 0 ) { - available = 0; -#if 
__TBB_TRACK_PRIORITY_LEVEL_SATURATION - my_lowest_populated_level = i; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - break; + int carry = 0; + unsigned max_priority_level = num_priority_levels; + for (unsigned list_idx = 0; list_idx < num_priority_levels; ++list_idx ) { + int assigned_per_priority = min(my_priority_level_demand[list_idx], unassigned_workers); + unassigned_workers -= assigned_per_priority; + // We use reverse iterator there to serve last added clients first + for (auto it = my_clients[list_idx].rbegin(); it != my_clients[list_idx].rend(); ++it) { + tbb_permit_manager_client& client = static_cast<tbb_permit_manager_client&>(**it); + if (client.max_workers() == 0) { + client.set_allotment(0); + continue; } - } - } - __TBB_ASSERT( i <= my_global_bottom_priority || !available, NULL ); - for ( --i; i >= my_global_bottom_priority; --i ) { - priority_level_info &pl = my_priority_levels[i]; - pl.workers_available = 0; - arena_list_type::iterator it = pl.arenas.begin(); - for ( ; it != pl.arenas.end(); ++it ) { - __TBB_ASSERT( it->my_num_workers_requested || !it->my_num_workers_allotted, NULL ); - it->my_num_workers_allotted = 0; - } - } -} -#endif /* __TBB_TASK_PRIORITY */ -void market::adjust_demand ( arena& a, int delta ) { - __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" ); - if ( !delta ) - return; - my_arenas_list_mutex.lock(); - int prev_req = a.my_num_workers_requested; - a.my_num_workers_requested += delta; - if ( a.my_num_workers_requested <= 0 ) { - a.my_num_workers_allotted = 0; - if ( prev_req <= 0 ) { - my_arenas_list_mutex.unlock(); - return; - } - delta = -prev_req; - } -#if __TBB_TASK_ARENA - else if ( prev_req < 0 ) { - delta = a.my_num_workers_requested; - } -#else /* __TBB_TASK_ARENA */ - __TBB_ASSERT( prev_req >= 0, "Part-size request to RML?" 
); -#endif /* __TBB_TASK_ARENA */ -#if __TBB_TASK_PRIORITY - intptr_t p = a.my_top_priority; - priority_level_info &pl = my_priority_levels[p]; - pl.workers_requested += delta; - __TBB_ASSERT( pl.workers_requested >= 0, NULL ); -#if !__TBB_TASK_ARENA - __TBB_ASSERT( a.my_num_workers_requested >= 0, NULL ); -#else - //TODO: understand the assertion and modify -#endif - if ( a.my_num_workers_requested <= 0 ) { - if ( a.my_top_priority != normalized_normal_priority ) { - GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_resets ); - update_arena_top_priority( a, normalized_normal_priority ); - } - a.my_bottom_priority = normalized_normal_priority; - } - if ( p == my_global_top_priority ) { - if ( !pl.workers_requested ) { - while ( --p >= my_global_bottom_priority && !my_priority_levels[p].workers_requested ) - continue; - if ( p < my_global_bottom_priority ) - reset_global_priority(); - else - update_global_top_priority(p); - } - update_allotment( my_global_top_priority ); - } - else if ( p > my_global_top_priority ) { -#if !__TBB_TASK_ARENA - __TBB_ASSERT( pl.workers_requested > 0, NULL ); -#else - //TODO: understand the assertion and modify -#endif - update_global_top_priority(p); - a.my_num_workers_allotted = min( (int)my_max_num_workers, a.my_num_workers_requested ); - my_priority_levels[p - 1].workers_available = my_max_num_workers - a.my_num_workers_allotted; - update_allotment( p - 1 ); - } - else if ( p == my_global_bottom_priority ) { - if ( !pl.workers_requested ) { - while ( ++p <= my_global_top_priority && !my_priority_levels[p].workers_requested ) - continue; - if ( p > my_global_top_priority ) - reset_global_priority(); - else { - my_global_bottom_priority = p; -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - my_lowest_populated_level = max( my_lowest_populated_level, p ); -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ + if (max_priority_level == num_priority_levels) { + max_priority_level = list_idx; } - } - else - update_allotment( p ); - } - else if ( p < my_global_bottom_priority ) { - __TBB_ASSERT( a.my_num_workers_requested > 0, NULL ); - int prev_bottom = my_global_bottom_priority; - my_global_bottom_priority = p; - update_allotment( prev_bottom ); - } - else { - __TBB_ASSERT( my_global_bottom_priority < p && p < my_global_top_priority, NULL ); - update_allotment( p ); - } - __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested<=0, NULL ); - assert_market_valid(); -#else /* !__TBB_TASK_PRIORITY */ - my_total_demand += delta; - update_allotment(); -#endif /* !__TBB_TASK_PRIORITY */ - my_arenas_list_mutex.unlock(); - // Must be called outside of any locks - my_server->adjust_job_count_estimate( delta ); - GATHER_STATISTIC( governor::local_scheduler_if_initialized() ? 
++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 ); -} -void market::process( job& j ) { - generic_scheduler& s = static_cast<generic_scheduler&>(j); - arena *a = NULL; - __TBB_ASSERT( governor::is_set(&s), NULL ); -#if !__TBB_SLEEP_PERMISSION - while ( (a = arena_in_need(a)) ) - a->process(s); -#else//__TBB_SLEEP_PERMISSION - enum { - query_interval = 1000, - first_interval = 1, - pause_time = 100 // similar to PauseTime used for the stealing loop - }; - for(int i = first_interval; ; i--) { - while ( (a = arena_in_need(a)) ) - { - a->process(s); - i = first_interval; + int allotted = 0; + if (my_num_workers_soft_limit == 0) { + __TBB_ASSERT(max_workers == 0 || max_workers == 1, nullptr); + allotted = client.min_workers() > 0 && assigned < max_workers ? 1 : 0; + } else { + int tmp = client.max_workers() * assigned_per_priority + carry; + allotted = tmp / my_priority_level_demand[list_idx]; + carry = tmp % my_priority_level_demand[list_idx]; + __TBB_ASSERT(allotted <= client.max_workers(), nullptr); + } + client.set_allotment(allotted); + client.set_top_priority(list_idx == max_priority_level); + assigned += allotted; } - if( i == 0 ) { -#if __TBB_TASK_PRIORITY - arena_list_type &al = my_priority_levels[my_global_top_priority].arenas; -#else /* __TBB_TASK_PRIORITY */ - arena_list_type &al = my_arenas; -#endif /* __TBB_TASK_PRIORITY */ - if( al.empty() ) // races if any are innocent TODO: replace by an RML query interface - break; // no arenas left, perhaps going to shut down - if( the_global_observer_list.ask_permission_to_leave() ) - break; // go sleep - __TBB_Yield(); - i = query_interval; - } else __TBB_Pause(pause_time); - } -#endif//__TBB_SLEEP_PERMISSION - GATHER_STATISTIC( ++s.my_counters.market_roundtrips ); -} - -void market::cleanup( job& j ) { - __TBB_ASSERT( theMarket != this, NULL ); - generic_scheduler& s = static_cast<generic_scheduler&>(j); - generic_scheduler* mine = governor::local_scheduler_if_initialized(); - __TBB_ASSERT( !mine || mine->my_arena_index!=0, NULL ); - if( mine!=&s ) { - governor::assume_scheduler( &s ); - generic_scheduler::cleanup_worker( &s, mine!=NULL ); - governor::assume_scheduler( mine ); - } else { - generic_scheduler::cleanup_worker( &s, true ); } + __TBB_ASSERT(assigned == max_workers, nullptr); } -void market::acknowledge_close_connection() { - destroy(); -} - -::rml::job* market::create_one_job() { - unsigned index = ++my_num_workers; - __TBB_ASSERT( index > 0, NULL ); - ITT_THREAD_SET_NAME(_T("TBB Worker Thread")); - // index serves as a hint decreasing conflicts between workers when they migrate between arenas - generic_scheduler* s = generic_scheduler::create_worker( *this, index ); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( index <= my_max_num_workers, NULL ); - __TBB_ASSERT( !my_workers[index - 1], NULL ); - my_workers[index - 1] = s; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - governor::sign_on(s); - return s; -} - -#if __TBB_TASK_PRIORITY -void market::update_arena_top_priority ( arena& a, intptr_t new_priority ) { - GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_switches ); - __TBB_ASSERT( a.my_top_priority != new_priority, NULL ); - priority_level_info &prev_level = my_priority_levels[a.my_top_priority], - &new_level = my_priority_levels[new_priority]; - remove_arena_from_list(a); - a.my_top_priority = new_priority; - insert_arena_into_list(a); - ++a.my_reload_epoch; // TODO: synch with global reload epoch in order to optimize usage of local reload epoch -#if 
__TBB_TRACK_PRIORITY_LEVEL_SATURATION - // Arena's my_num_workers_present may remain positive for some time after its - // my_num_workers_requested becomes zero. Thus the following two lines are - // executed unconditionally. - prev_level.workers_present -= a.my_num_workers_present; - new_level.workers_present += a.my_num_workers_present; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - prev_level.workers_requested -= a.my_num_workers_requested; - new_level.workers_requested += a.my_num_workers_requested; - __TBB_ASSERT( prev_level.workers_requested >= 0 && new_level.workers_requested >= 0, NULL ); -} - -bool market::lower_arena_priority ( arena& a, intptr_t new_priority, uintptr_t old_reload_epoch ) { - // TODO: replace the lock with a try_lock loop which performs a double check of the epoch - arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex); - if ( a.my_reload_epoch != old_reload_epoch ) { - assert_market_valid(); - return false; - } - __TBB_ASSERT( a.my_top_priority > new_priority, NULL ); - __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL ); - - intptr_t p = a.my_top_priority; - update_arena_top_priority( a, new_priority ); - if ( a.my_num_workers_requested > 0 ) { - if ( my_global_bottom_priority > new_priority ) { - my_global_bottom_priority = new_priority; - } - if ( p == my_global_top_priority && !my_priority_levels[p].workers_requested ) { - // Global top level became empty - for ( --p; !my_priority_levels[p].workers_requested; --p ) continue; - __TBB_ASSERT( p >= my_global_bottom_priority, NULL ); - update_global_top_priority(p); - } - update_allotment( p ); +void market::set_active_num_workers(int soft_limit) { + mutex_type::scoped_lock lock(my_mutex); + if (my_num_workers_soft_limit != soft_limit) { + my_num_workers_soft_limit = soft_limit; + update_allotment(); } - - __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL ); - assert_market_valid(); - return true; } -bool market::update_arena_priority ( arena& a, intptr_t new_priority ) { - arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex); +void market::adjust_demand(pm_client& c, int mandatory_delta, int workers_delta) { + __TBB_ASSERT(-1 <= mandatory_delta && mandatory_delta <= 1, nullptr); - __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested <= 0, NULL ); - assert_market_valid(); - if ( a.my_top_priority == new_priority ) { - return false; - } - else if ( a.my_top_priority > new_priority ) { - if ( a.my_bottom_priority > new_priority ) - a.my_bottom_priority = new_priority; - return false; - } - else if ( a.my_num_workers_requested <= 0 ) { - return false; - } - - __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL ); + int delta{}; + { + mutex_type::scoped_lock lock(my_mutex); + // Update client's state + delta = c.update_request(mandatory_delta, workers_delta); - intptr_t p = a.my_top_priority; - intptr_t highest_affected_level = max(p, new_priority); - update_arena_top_priority( a, new_priority ); + // Update market's state + my_total_demand += delta; + my_priority_level_demand[c.priority_level()] += delta; + my_mandatory_num_requested += mandatory_delta; - if ( my_global_top_priority < new_priority ) { - update_global_top_priority(new_priority); - } - else if ( my_global_top_priority == new_priority ) { - advance_global_reload_epoch(); + update_allotment(); } - else { - __TBB_ASSERT( new_priority < my_global_top_priority, NULL ); - __TBB_ASSERT( new_priority > my_global_bottom_priority, NULL ); - if ( p == 
my_global_top_priority && !my_priority_levels[p].workers_requested ) { - // Global top level became empty - __TBB_ASSERT( my_global_bottom_priority < p, NULL ); - for ( --p; !my_priority_levels[p].workers_requested; --p ) continue; - __TBB_ASSERT( p >= new_priority, NULL ); - update_global_top_priority(p); - highest_affected_level = p; - } - } - if ( p == my_global_bottom_priority ) { - // Arena priority was increased from the global bottom level. - __TBB_ASSERT( p < new_priority, NULL ); // n - __TBB_ASSERT( new_priority <= my_global_top_priority, NULL ); - while ( !my_priority_levels[my_global_bottom_priority].workers_requested ) - ++my_global_bottom_priority; - __TBB_ASSERT( my_global_bottom_priority <= new_priority, NULL ); - __TBB_ASSERT( my_priority_levels[my_global_bottom_priority].workers_requested > 0, NULL ); - } - update_allotment( highest_affected_level ); - - __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL ); - assert_market_valid(); - return true; -} -#endif /* __TBB_TASK_PRIORITY */ -#if __TBB_COUNT_TASK_NODES -intptr_t market::workers_task_node_count() { - intptr_t result = 0; - ForEachArena(a) { - result += a.workers_task_node_count(); - } EndForEach(); - return result; + notify_thread_request(delta); } -#endif /* __TBB_COUNT_TASK_NODES */ -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb diff --git a/src/tbb/src/tbb/market.h b/src/tbb/src/tbb/market.h index 682683b48..85532ff10 100644 --- a/src/tbb/src/tbb/market.h +++ b/src/tbb/src/tbb/market.h @@ -1,368 +1,78 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2023 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #ifndef _TBB_market_H #define _TBB_market_H -#include "tbb/tbb_stddef.h" - -#include "scheduler_common.h" -#include "tbb/atomic.h" -#include "tbb/spin_rw_mutex.h" -#include "../rml/include/rml_tbb.h" +#include "oneapi/tbb/rw_mutex.h" +#include "oneapi/tbb/tbb_allocator.h" +#include "oneapi/tbb/task_arena.h" -#include "intrusive_list.h" +#include "permit_manager.h" +#include "pm_client.h" -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4244) -#endif +#include <atomic> +#include <vector> namespace tbb { +namespace detail { +namespace r1 { -class task_group_context; - -namespace internal { - -//------------------------------------------------------------------------ -// Class market -//------------------------------------------------------------------------ - -class market : no_copy, rml::tbb_client { - friend class generic_scheduler; - friend class arena; - template<typename SchedulerTraits> friend class custom_scheduler; - friend class tbb::task_group_context; -private: - friend void ITT_DoUnsafeOneTimeInitialization (); - - typedef intrusive_list<arena> arena_list_type; - - //! Currently active global market - static market* theMarket; - - typedef scheduler_mutex_type global_market_mutex_type; - - //! Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas, and cancellation propagation - static global_market_mutex_type theMarketMutex; - - //! Reference count controlling market object lifetime - intptr_t my_ref_count; - - //! Lightweight mutex guarding accounting operations with arenas list - typedef spin_rw_mutex arenas_list_mutex_type; - arenas_list_mutex_type my_arenas_list_mutex; - - //! Pointer to the RML server object that services this TBB instance. - rml::tbb_server* my_server; - - //! Stack size of worker threads - size_t my_stack_size; - - //! Number of workers requested from the underlying resource manager - unsigned my_max_num_workers; - - //! Number of workers that have been delivered by RML - /** Used to assign indices to the new workers coming from RML, and busy part - of my_workers array. **/ - atomic<unsigned> my_num_workers; - -#if __TBB_TASK_PRIORITY - //! Highest priority among active arenas in the market. - /** Arena priority level is its tasks highest priority (specified by arena's - my_top_priority member). - Arena is active when it has outstanding request for workers. Note that - inactive arena may have workers lingering there for some time. **/ - intptr_t my_global_top_priority; - - //! Lowest priority among active arenas in the market. - /** See also my_global_top_priority **/ - intptr_t my_global_bottom_priority; - - //! Tracks events that may bring tasks in offload areas to the top priority level. - /** Incremented when global top priority is decremented or a task group priority - is elevated to the current top level. **/ - uintptr_t my_global_reload_epoch; - - //! Information about arenas at a particular priority level - struct priority_level_info { - //! List of arenas at this priority level - arena_list_type arenas; - - //! The first arena to be checked when idle worker seeks for an arena to enter - /** The check happens in round-robin fashion. **/ - arena *next_arena; - - //! Total amount of workers requested by arenas at this priority level. - int workers_requested; - - //! Maximal amount of workers the market can tell off to this priority level. 
- int workers_available; - -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - //! Total amount of workers that are in arenas at this priority level. - int workers_present; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - }; // struct priority_level_info - - //! Information about arenas at different priority levels - priority_level_info my_priority_levels[num_priority_levels]; - -#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION - //! Lowest priority level having workers available. - intptr_t my_lowest_populated_level; -#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */ - -#else /* !__TBB_TASK_PRIORITY */ - - //! List of registered arenas - arena_list_type my_arenas; - - //! The first arena to be checked when idle worker seeks for an arena to enter - /** The check happens in round-robin fashion. **/ - arena *my_next_arena; - - //! Number of workers that were requested by all arenas - int my_total_demand; -#endif /* !__TBB_TASK_PRIORITY */ - - //! ABA prevention marker to assign to newly created arenas - uintptr_t my_arenas_aba_epoch; - -#if __TBB_COUNT_TASK_NODES - //! Net number of nodes that have been allocated from heap. - /** Updated each time a scheduler or arena is destroyed. */ - atomic<intptr_t> my_task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - - //! Constructor - market ( unsigned max_num_workers, size_t stack_size ); - - //! Factory method creating new market object - static market& global_market ( unsigned max_num_workers, size_t stack_size ); - - //! Destroys and deallocates market object created by market::create() - void destroy (); - - void try_destroy_arena ( arena*, uintptr_t aba_epoch ); - -#if __TBB_TASK_PRIORITY - //! Returns next arena that needs more workers, or NULL. - arena* arena_in_need ( arena* prev_arena ); - - //! Recalculates the number of workers assigned to each arena at and below the specified priority. - /** The actual number of workers servicing a particular arena may temporarily - deviate from the calculated value. **/ - void update_allotment ( intptr_t highest_affected_priority ); - - //! Changes arena's top priority and updates affected priority levels info in the market. - void update_arena_top_priority ( arena& a, intptr_t newPriority ); - - //! Changes market's global top priority and related settings. - inline void update_global_top_priority ( intptr_t newPriority ); - - //! Resets empty market's global top and bottom priority to the normal level. - inline void reset_global_priority (); - - inline void advance_global_reload_epoch () { - __TBB_store_with_release( my_global_reload_epoch, my_global_reload_epoch + 1 ); - } - - void assert_market_valid () const { - __TBB_ASSERT( (my_priority_levels[my_global_top_priority].workers_requested > 0 - && !my_priority_levels[my_global_top_priority].arenas.empty()) - || (my_global_top_priority == my_global_bottom_priority && - my_global_top_priority == normalized_normal_priority), NULL ); - } - - bool has_any_demand() const { - for(int p = 0; p < num_priority_levels; p++) - if( __TBB_load_with_acquire(my_priority_levels[p].workers_requested) > 0 ) // TODO: use as_atomic here and below - return true; - return false; - } - -#else /* !__TBB_TASK_PRIORITY */ - - //! Recalculates the number of workers assigned to each arena in the list. - /** The actual number of workers servicing a particular arena may temporarily - deviate from the calculated value. **/ - void update_allotment () { - if ( my_total_demand ) - update_allotment( my_arenas, my_total_demand, (int)my_max_num_workers ); - } - - //! 
Returns next arena that needs more workers, or NULL. - arena* arena_in_need (arena*) { - if(__TBB_load_with_acquire(my_total_demand) <= 0) - return NULL; - arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false); - return arena_in_need(my_arenas, my_next_arena); - } - void assert_market_valid () const {} -#endif /* !__TBB_TASK_PRIORITY */ - - //! Returns number of masters doing computational (CPU-intensive) work - int num_active_masters () { return 1; } // APM TODO: replace with a real mechanism - - - //////////////////////////////////////////////////////////////////////////////// - // Helpers to unify code branches dependent on priority feature presence - - void insert_arena_into_list ( arena& a ); - - void remove_arena_from_list ( arena& a ); - - arena* arena_in_need ( arena_list_type &arenas, arena *&next ); - - static void update_allotment ( arena_list_type& arenas, int total_demand, int max_workers ); - - - //////////////////////////////////////////////////////////////////////////////// - // Implementation of rml::tbb_client interface methods - - /*override*/ version_type version () const { return 0; } - - /*override*/ unsigned max_job_count () const { return my_max_num_workers; } - - /*override*/ size_t min_stack_size () const { return worker_stack_size(); } - - /*override*/ policy_type policy () const { return throughput; } - - /*override*/ job* create_one_job (); - - /*override*/ void cleanup( job& j ); - - /*override*/ void acknowledge_close_connection (); - - /*override*/ void process( job& j ); - +class market : public permit_manager { public: - //! Creates an arena object - /** If necessary, also creates global market instance, and boosts its ref count. - Each call to create_arena() must be matched by the call to arena::free_arena(). **/ - static arena& create_arena ( unsigned max_num_workers, size_t stack_size ); - - //! Removes the arena from the market's list - static void try_destroy_arena ( market*, arena*, uintptr_t aba_epoch, bool master ); - - //! Removes the arena from the market's list - void detach_arena ( arena& ); + market(unsigned soft_limit); - //! Decrements market's refcount and destroys it in the end - void release (); + pm_client* create_client(arena& a) override; + void register_client(pm_client* client, d1::constraints&) override; + void unregister_and_destroy_client(pm_client& c) override; //! Request that arena's need in workers should be adjusted. - /** Concurrent invocations are possible only on behalf of different arenas. **/ - void adjust_demand ( arena&, int delta ); - - //! Guarantee that request_close_connection() is called by master, not some worker - /** Must be called before arena::on_thread_leaving() **/ - void prepare_wait_workers() { ++my_ref_count; } - - //! Wait workers termination - void wait_workers (); + void adjust_demand(pm_client&, int mandatory_delta, int workers_delta) override; - //! Returns the requested stack size of worker threads. - size_t worker_stack_size () const { return my_stack_size; } - -#if _WIN32||_WIN64 - //! register master with the resource manager - void register_master( ::rml::server::execution_resource_t& rsc_handle ) { - __TBB_ASSERT( my_server, "RML server not defined?" ); - // the server may ignore registration and set master_exec_resource to NULL. - my_server->register_master( rsc_handle ); - } - - //! 
unregister master with the resource manager - void unregister_master( ::rml::server::execution_resource_t& rsc_handle ) const { - my_server->unregister_master( rsc_handle ); - } -#endif /* WIN */ - -#if __TBB_TASK_GROUP_CONTEXT - //! Finds all contexts affected by the state change and propagates the new state to them. - template <typename T> - bool propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ); -#endif /* __TBB_TASK_GROUP_CONTEXT */ + //! Set number of active workers + void set_active_num_workers(int soft_limit) override; +private: + //! Recalculates the number of workers assigned to each arena in the list. + void update_allotment(); -#if __TBB_TASK_PRIORITY - //! Lowers arena's priority is not higher than newPriority - /** Returns true if arena priority was actually elevated. **/ - bool lower_arena_priority ( arena& a, intptr_t new_priority, uintptr_t old_reload_epoch ); + //! Keys for the arena map array. The lower the value the higher priority of the arena list. + static constexpr unsigned num_priority_levels = d1::num_priority_levels; - //! Makes sure arena's priority is not lower than newPriority - /** Returns true if arena priority was elevated. Also updates arena's bottom - priority boundary if necessary. + using mutex_type = d1::rw_mutex; + mutex_type my_mutex; - This method is called whenever a user changes priority, because whether - it was hiked or sunk can be determined for sure only under the lock used - by this function. **/ - bool update_arena_priority ( arena& a, intptr_t new_priority ); -#endif /* __TBB_TASK_PRIORITY */ + //! Current application-imposed limit on the number of workers + int my_num_workers_soft_limit; -#if __TBB_COUNT_TASK_NODES - //! Returns the number of task objects "living" in worker threads - intptr_t workers_task_node_count(); + //! Number of workers that were requested by all arenas on all priority levels + int my_total_demand{0}; - //! Net number of nodes that have been allocated from heap. - /** Updated each time a scheduler or arena is destroyed. */ - void update_task_node_count( intptr_t delta ) { my_task_node_count += delta; } -#endif /* __TBB_COUNT_TASK_NODES */ + //! Number of workers that were requested by arenas per single priority list item + int my_priority_level_demand[num_priority_levels] = {0}; -#if __TBB_TASK_GROUP_CONTEXT - //! Array of pointers to the registered workers - /** Used by cancellation propagation mechanism. - Must be the last data member of the class market. **/ - generic_scheduler* my_workers[1]; -#endif /* __TBB_TASK_GROUP_CONTEXT */ + //! How many times mandatory concurrency was requested from the market + int my_mandatory_num_requested{0}; + //! 
Per priority list of registered arenas + using clients_container_type = std::vector<pm_client*, tbb::tbb_allocator<pm_client*>>; + clients_container_type my_clients[num_priority_levels]; }; // class market -#if __TBB_TASK_PRIORITY - #define BeginForEachArena(a) \ - arenas_list_mutex_type::scoped_lock arena_list_lock(my_arenas_list_mutex); \ - for ( intptr_t i = my_global_top_priority; i >= my_global_bottom_priority; --i ) { \ - /*arenas_list_mutex_type::scoped_lock arena_list_lock(my_priority_levels[i].my_arenas_list_mutex);*/ \ - arena_list_type &arenas = my_priority_levels[i].arenas; -#else /* !__TBB_TASK_PRIORITY */ - #define BeginForEachArena(a) \ - arena_list_type &arenas = my_arenas; { -#endif /* !__TBB_TASK_PRIORITY */ - -#define ForEachArena(a) \ - BeginForEachArena(a) \ - arena_list_type::iterator it = arenas.begin(); \ - for ( ; it != arenas.end(); ++it ) { \ - arena &a = *it; - -#define EndForEach() }} - - -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (pop) -#endif // warning 4244 is back - #endif /* _TBB_market_H */ diff --git a/src/tbb/src/tbb/market_concurrent_monitor.h b/src/tbb/src/tbb/market_concurrent_monitor.h new file mode 100644 index 000000000..37927617b --- /dev/null +++ b/src/tbb/src/tbb/market_concurrent_monitor.h @@ -0,0 +1,116 @@ +/* + Copyright (c) 2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#ifndef __TBB_market_concurrent_monitor_H
+#define __TBB_market_concurrent_monitor_H
+
+#include "concurrent_monitor.h"
+#include "scheduler_common.h"
+
+#include <atomic>
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+struct market_context {
+    market_context() = default;
+
+    market_context(std::uintptr_t first_addr, arena* a) :
+        my_uniq_addr(first_addr), my_arena_addr(a)
+    {}
+
+    std::uintptr_t my_uniq_addr{0};
+    arena* my_arena_addr{nullptr};
+};
+
+#if __TBB_RESUMABLE_TASKS
+class resume_node : public wait_node<market_context> {
+    using base_type = wait_node<market_context>;
+public:
+    resume_node(market_context ctx, execution_data_ext& ed_ext, task_dispatcher& target)
+        : base_type(ctx), my_curr_dispatcher(ed_ext.task_disp), my_target_dispatcher(&target)
+        , my_suspend_point(my_curr_dispatcher->get_suspend_point())
+    {}
+
+    ~resume_node() override {
+        if (this->my_skipped_wakeup) {
+            spin_wait_until_eq(this->my_notify_calls, 1);
+        }
+
+        poison_pointer(my_curr_dispatcher);
+        poison_pointer(my_target_dispatcher);
+        poison_pointer(my_suspend_point);
+    }
+
+    void init() override {
+        base_type::init();
+    }
+
+    void wait() override {
+        my_curr_dispatcher->resume(*my_target_dispatcher);
+        __TBB_ASSERT(!this->my_is_in_list.load(std::memory_order_relaxed), "Still in the queue?");
+    }
+
+    void reset() override {
+        base_type::reset();
+        spin_wait_until_eq(this->my_notify_calls, 1);
+        my_notify_calls.store(0, std::memory_order_relaxed);
+    }
+
+    // notify is called (possibly concurrently) twice, from:
+    //  - concurrent_monitor::notify
+    //  - post_resume_action::register_waiter
+    // The second notify is called after the thread switches its stack
+    // (because we cannot call resume while the stack is still occupied).
+    // We must call resume only once both notifications have been performed.
+    void notify() override {
+        if (++my_notify_calls == 2) {
+            r1::resume(my_suspend_point);
+        }
+    }
+
+private:
+    friend class thread_data;
+    friend struct suspend_point_type::resume_task;
+    task_dispatcher* my_curr_dispatcher;
+    task_dispatcher* my_target_dispatcher;
+    suspend_point_type* my_suspend_point;
+    std::atomic<int> my_notify_calls{0};
+};
+#endif // __TBB_RESUMABLE_TASKS
+
+class market_concurrent_monitor : public concurrent_monitor_base<market_context> {
+    using base_type = concurrent_monitor_base<market_context>;
+public:
+    using base_type::base_type;
+
+    ~market_concurrent_monitor() {
+        destroy();
+    }
+
+    /** per-thread descriptor for concurrent_monitor */
+    using thread_context = sleep_node<market_context>;
+#if __TBB_RESUMABLE_TASKS
+    using resume_context = resume_node;
+#endif
+};
+
+} // namespace r1
+} // namespace detail
+} // namespace tbb
+
+#endif // __TBB_market_concurrent_monitor_H
diff --git a/src/tbb/src/tbb/misc.cpp b/src/tbb/src/tbb/misc.cpp
new file mode 100644
index 000000000..115a5f388
--- /dev/null
+++ b/src/tbb/src/tbb/misc.cpp
@@ -0,0 +1,156 @@
+/*
+    Copyright (c) 2005-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/ + +// Source file for miscellaneous entities that are infrequently referenced by +// an executing program. + +#include "oneapi/tbb/detail/_exception.h" +#include "oneapi/tbb/detail/_machine.h" + +#include "oneapi/tbb/version.h" + +#include "misc.h" +#include "governor.h" +#include "assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here. +#include "concurrent_monitor_mutex.h" + +#include <cstdio> +#include <cstdlib> +#include <stdexcept> +#include <cstring> +#include <cstdarg> + +#if _WIN32||_WIN64 +#include <windows.h> +#endif + +#if !_WIN32 +#include <unistd.h> // sysconf(_SC_PAGESIZE) +#endif + +namespace tbb { +namespace detail { +namespace r1 { + +//------------------------------------------------------------------------ +// governor data +//------------------------------------------------------------------------ +cpu_features_type governor::cpu_features; + +//------------------------------------------------------------------------ +// concurrent_monitor_mutex data +//------------------------------------------------------------------------ +#if !__TBB_USE_FUTEX +std::mutex concurrent_monitor_mutex::my_init_mutex; +#endif + + +size_t DefaultSystemPageSize() { +#if _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + return sysconf(_SC_PAGESIZE); +#endif +} + +/** The leading "\0" is here so that applying "strings" to the binary delivers a clean result. */ +static const char VersionString[] = "\0" TBB_VERSION_STRINGS; + +static bool PrintVersionFlag = false; + +void PrintVersion() { + PrintVersionFlag = true; + std::fputs(VersionString+1,stderr); +} + +void PrintExtraVersionInfo( const char* category, const char* format, ... ) { + if( PrintVersionFlag ) { + char str[1024]; std::memset(str, 0, 1024); + va_list args; va_start(args, format); + // Note: correct vsnprintf definition obtained from tbb_assert_impl.h + std::vsnprintf( str, 1024-1, format, args); + va_end(args); + std::fprintf(stderr, "oneTBB: %s\t%s\n", category, str ); + } +} + +//! check for transaction support. +#if _MSC_VER +#include <intrin.h> // for __cpuid +#elif __APPLE__ +#include <sys/sysctl.h> +#endif + +#if __TBB_x86_32 || __TBB_x86_64 +void check_cpuid(int leaf, int sub_leaf, int registers[4]) { +#if _MSC_VER + __cpuidex(registers, leaf, sub_leaf); +#else + int reg_eax = 0; + int reg_ebx = 0; + int reg_ecx = 0; + int reg_edx = 0; +#if __TBB_x86_32 && __PIC__ + // On 32-bit systems with position-independent code GCC fails to work around the stuff in EBX + // register. We help it using backup and restore. 
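
// Aside: the hand-written asm below exists only to preserve EBX under 32-bit
// position-independent code. On GCC/Clang the same query can be made with the
// compiler-provided <cpuid.h> helpers, which handle that clobber internally.
// A minimal sketch, assuming a GCC-compatible compiler; the bit masks mirror
// the constants used by detect_cpu_features() further down:
#include <cpuid.h>
#include <cstdio>

int main() {
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    // CPUID leaf 7, sub-leaf 0 carries the RTM / WAITPKG / HYBRID feature bits.
    if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        std::printf("RTM:     %s\n", (ebx & (1u << 11)) ? "yes" : "no");
        std::printf("WAITPKG: %s\n", (ecx & (1u << 5))  ? "yes" : "no");
        std::printf("HYBRID:  %s\n", (edx & (1u << 15)) ? "yes" : "no");
    }
    return 0;
}
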
+ __asm__("mov %%ebx, %%esi\n\t" + "cpuid\n\t" + "xchg %%ebx, %%esi" + : "=a"(reg_eax), "=S"(reg_ebx), "=c"(reg_ecx), "=d"(reg_edx) + : "0"(leaf), "2"(sub_leaf) // read value from eax and ecx + ); +#else + __asm__("cpuid" + : "=a"(reg_eax), "=b"(reg_ebx), "=c"(reg_ecx), "=d"(reg_edx) + : "0"(leaf), "2"(sub_leaf) // read value from eax and ecx + ); +#endif + registers[0] = reg_eax; + registers[1] = reg_ebx; + registers[2] = reg_ecx; + registers[3] = reg_edx; +#endif +} +#endif + +void detect_cpu_features(cpu_features_type& cpu_features) { + suppress_unused_warning(cpu_features); +#if __TBB_x86_32 || __TBB_x86_64 + const int rtm_ebx_mask = 1 << 11; + const int waitpkg_ecx_mask = 1 << 5; + const int hybrid_edx_mask = 1 << 15; + int registers[4] = {0}; + + // Check RTM, WAITPKG, HYBRID + check_cpuid(7, 0, registers); + cpu_features.rtm_enabled = (registers[1] & rtm_ebx_mask) != 0; + cpu_features.waitpkg_enabled = (registers[2] & waitpkg_ecx_mask) != 0; + cpu_features.hybrid = (registers[3] & hybrid_edx_mask) != 0; +#elif __APPLE__ + // Check HYBRID (hw.nperflevels > 1) + uint64_t nperflevels = 0; + size_t nperflevels_size = sizeof(nperflevels); + if (!sysctlbyname("hw.nperflevels", &nperflevels, &nperflevels_size, nullptr, 0)) { + cpu_features.hybrid = (nperflevels > 1); + } +#endif +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/misc.h b/src/tbb/src/tbb/misc.h new file mode 100644 index 000000000..988c29b17 --- /dev/null +++ b/src/tbb/src/tbb/misc.h @@ -0,0 +1,287 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_tbb_misc_H +#define _TBB_tbb_misc_H + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_assert.h" +#include "oneapi/tbb/detail/_utils.h" + +#if __TBB_ARENA_BINDING +#include "oneapi/tbb/info.h" +#endif /*__TBB_ARENA_BINDING*/ + +#if __unix__ +#include <sys/param.h> // __FreeBSD_version +#if __FreeBSD_version >= 701000 +#include <sys/cpuset.h> +#endif +#endif + +#include <atomic> + +// Does the operating system have a system call to pin a thread to a set of OS processors? +#define __TBB_OS_AFFINITY_SYSCALL_PRESENT ((__linux__ && !__ANDROID__) || (__FreeBSD_version >= 701000)) +// On IBM* Blue Gene* CNK nodes, the affinity API has restrictions that prevent its usability for TBB, +// and also sysconf(_SC_NPROCESSORS_ONLN) already takes process affinity into account. +#define __TBB_USE_OS_AFFINITY_SYSCALL (__TBB_OS_AFFINITY_SYSCALL_PRESENT && !__bg__) + +namespace tbb { +namespace detail { +namespace r1 { + +void runtime_warning(const char* format, ... 
); + +#if __TBB_ARENA_BINDING +class task_arena; +class task_scheduler_observer; +#endif /*__TBB_ARENA_BINDING*/ + +const std::size_t MByte = 1024*1024; + +#if __TBB_USE_WINAPI +// The Microsoft Documentation about Thread Stack Size states that +// "The default stack reservation size used by the linker is 1 MB" +const std::size_t ThreadStackSize = 1*MByte; +#else +const std::size_t ThreadStackSize = (sizeof(uintptr_t) <= 4 ? 2 : 4 )*MByte; +#endif + +#ifndef __TBB_HardwareConcurrency + +//! Returns maximal parallelism level supported by the current OS configuration. +int AvailableHwConcurrency(); + +#else + +inline int AvailableHwConcurrency() { + int n = __TBB_HardwareConcurrency(); + return n > 0 ? n : 1; // Fail safety strap +} +#endif /* __TBB_HardwareConcurrency */ + +//! Returns OS regular memory page size +size_t DefaultSystemPageSize(); + +//! Returns number of processor groups in the current OS configuration. +/** AvailableHwConcurrency must be called at least once before calling this method. **/ +int NumberOfProcessorGroups(); + +#if _WIN32||_WIN64 + +//! Retrieves index of processor group containing processor with the given index +int FindProcessorGroupIndex ( int processorIndex ); + +//! Affinitizes the thread to the specified processor group +void MoveThreadIntoProcessorGroup( void* hThread, int groupIndex ); + +#endif /* _WIN32||_WIN64 */ + +//! Prints TBB version information on stderr +void PrintVersion(); + +//! Prints arbitrary extra TBB version information on stderr +void PrintExtraVersionInfo( const char* category, const char* format, ... ); + +//! A callback routine to print RML version information on stderr +void PrintRMLVersionInfo( void* arg, const char* server_info ); + +// For TBB compilation only; not to be used in public headers +#if defined(min) || defined(max) +#undef min +#undef max +#endif + +//! Utility template function returning lesser of the two values. +/** Provided here to avoid including not strict safe <algorithm>.\n + In case operands cause signed/unsigned or size mismatch warnings it is caller's + responsibility to do the appropriate cast before calling the function. **/ +template<typename T> +T min ( const T& val1, const T& val2 ) { + return val1 < val2 ? val1 : val2; +} + +//! Utility template function returning greater of the two values. +/** Provided here to avoid including not strict safe <algorithm>.\n + In case operands cause signed/unsigned or size mismatch warnings it is caller's + responsibility to do the appropriate cast before calling the function. **/ +template<typename T> +T max ( const T& val1, const T& val2 ) { + return val1 < val2 ? val2 : val1; +} + +//! Utility helper structure to ease overload resolution +template<int > struct int_to_type {}; + +//------------------------------------------------------------------------ +// FastRandom +//------------------------------------------------------------------------ + +//! A fast random number generator. +/** Uses linear congruential method. */ +class FastRandom { +private: + unsigned x, c; + static const unsigned a = 0x9e3779b1; // a big prime number +public: + //! Get a random number. + unsigned short get() { + return get(x); + } + //! Get a random number for the given seed; update the seed for next use. + unsigned short get( unsigned& seed ) { + unsigned short r = (unsigned short)(seed>>16); + __TBB_ASSERT(c&1, "c must be odd for big rng period"); + seed = seed*a+c; + return r; + } + //! Construct a random number generator. 
+    FastRandom( void* unique_ptr ) { init(uintptr_t(unique_ptr)); }
+
+    template <typename T>
+    void init( T seed ) {
+        init(seed,int_to_type<sizeof(seed)>());
+    }
+    void init( uint64_t seed , int_to_type<8> ) {
+        init(uint32_t((seed>>32)+seed), int_to_type<4>());
+    }
+    void init( uint32_t seed, int_to_type<4> ) {
+        // threads use different seeds for unique sequences
+        c = (seed|1)*0xba5703f5; // c must be odd, shuffle by a prime number
+        x = c^(seed>>1); // also shuffle x for the first get() invocation
+    }
+};
+
+//------------------------------------------------------------------------
+// Atomic extensions
+//------------------------------------------------------------------------
+
+//! Atomically replaces value of dst with newValue if they satisfy condition of compare predicate
+/** Return value semantics is the same as for CAS. **/
+template<typename T1, class Pred>
+T1 atomic_update(std::atomic<T1>& dst, T1 newValue, Pred compare) {
+    T1 oldValue = dst.load(std::memory_order_acquire);
+    while ( compare(oldValue, newValue) ) {
+        if ( dst.compare_exchange_strong(oldValue, newValue) )
+            break;
+    }
+    return oldValue;
+}
+
+#if __TBB_USE_OS_AFFINITY_SYSCALL
+    #if __linux__
+        typedef cpu_set_t basic_mask_t;
+    #elif __FreeBSD_version >= 701000
+        typedef cpuset_t basic_mask_t;
+    #else
+        #error affinity_helper is not implemented in this OS
+    #endif
+    class affinity_helper : no_copy {
+        basic_mask_t* threadMask;
+        int is_changed;
+    public:
+        affinity_helper() : threadMask(nullptr), is_changed(0) {}
+        ~affinity_helper();
+        void protect_affinity_mask( bool restore_process_mask );
+        void dismiss();
+    };
+    void destroy_process_mask();
+#else
+    class affinity_helper : no_copy {
+    public:
+        void protect_affinity_mask( bool ) {}
+    };
+    inline void destroy_process_mask(){}
+#endif /* __TBB_USE_OS_AFFINITY_SYSCALL */
+
+struct cpu_features_type {
+    bool rtm_enabled{false};
+    bool waitpkg_enabled{false};
+    bool hybrid{false};
+};
+
+void detect_cpu_features(cpu_features_type& cpu_features);
+
+#if __TBB_ARENA_BINDING
+class binding_handler;
+
+binding_handler* construct_binding_handler(int slot_num, int numa_id, int core_type_id, int max_threads_per_core);
+void destroy_binding_handler(binding_handler* handler_ptr);
+void apply_affinity_mask(binding_handler* handler_ptr, int slot_num);
+void restore_affinity_mask(binding_handler* handler_ptr, int slot_num);
+
+#endif /*__TBB_ARENA_BINDING*/
+
+// RTM specific section
+// abort code for mutexes that detect a conflict with another thread.
+enum {
+    speculation_not_supported = 0x00,
+    speculation_transaction_aborted = 0x01,
+    speculation_can_retry = 0x02,
+    speculation_memadd_conflict = 0x04,
+    speculation_buffer_overflow = 0x08,
+    speculation_breakpoint_hit = 0x10,
+    speculation_nested_abort = 0x20,
+    speculation_xabort_mask = 0xFF000000,
+    speculation_xabort_shift = 24,
+    speculation_xabort_not_free = 0xFF, // The value (0xFF) below comes from the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual 12.4.5 lock not free
+    speculation_successful_begin = 0xFFFFFFFF,
+    speculation_retry = speculation_transaction_aborted
+                        | speculation_can_retry
+                        | speculation_memadd_conflict
+};
+
+// We suppose that successful transactions are sequentially ordered and
+// do not require additional memory fences around them.
+// Technically it can be achieved only if xbegin has implicit
+// acquire memory semantics and xend/xabort has release memory semantics on compiler and hardware level.
+// See the article: https://arxiv.org/pdf/1710.04839.pdf +static inline unsigned int begin_transaction() { +#if __TBB_TSX_INTRINSICS_PRESENT + return _xbegin(); +#else + return speculation_not_supported; // return unsuccessful code +#endif +} + +static inline void end_transaction() { +#if __TBB_TSX_INTRINSICS_PRESENT + _xend(); +#endif +} + +static inline void abort_transaction() { +#if __TBB_TSX_INTRINSICS_PRESENT + _xabort(speculation_xabort_not_free); +#endif +} + +#if TBB_USE_ASSERT +static inline unsigned char is_in_transaction() { +#if __TBB_TSX_INTRINSICS_PRESENT + return _xtest(); +#else + return 0; +#endif +} +#endif // TBB_USE_ASSERT + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif /* _TBB_tbb_misc_H */ diff --git a/src/tbb/src/tbb/tbb_misc_ex.cpp b/src/tbb/src/tbb/misc_ex.cpp similarity index 52% rename from src/tbb/src/tbb/tbb_misc_ex.cpp rename to src/tbb/src/tbb/misc_ex.cpp index 82a8eb008..13b7b04fb 100644 --- a/src/tbb/src/tbb/tbb_misc_ex.cpp +++ b/src/tbb/src/tbb/misc_ex.cpp @@ -1,27 +1,23 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -// Source file for miscellaneous entities that are infrequently referenced by +// Source file for miscellaneous entities that are infrequently referenced by // an executing program, and implementation of which requires dynamic linking. 
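
// The atomic_update() helper defined in misc.h above is a classic CAS loop:
// keep trying to install newValue while the predicate holds for the currently
// stored value. A self-contained sketch of the same pattern, here used to
// maintain a running maximum (names are illustrative, not from the patch):
#include <atomic>
#include <functional>
#include <cstdio>

template <typename T, typename Pred>
T atomic_update(std::atomic<T>& dst, T new_value, Pred compare) {
    T old_value = dst.load(std::memory_order_acquire);
    while (compare(old_value, new_value)) {
        // On failure, compare_exchange_strong refreshes old_value; re-test and retry.
        if (dst.compare_exchange_strong(old_value, new_value))
            break;
    }
    return old_value;
}

int main() {
    std::atomic<int> peak{0};
    atomic_update(peak, 42, std::less<int>{}); // stored 0 < 42, so 42 is installed
    atomic_update(peak, 7,  std::less<int>{}); // 42 < 7 is false, so no change
    std::printf("peak = %d\n", peak.load());   // prints 42
}
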
-#include "tbb_misc.h" +#include "misc.h" #if !defined(__TBB_HardwareConcurrency) @@ -30,84 +26,107 @@ #include <limits.h> #if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" +#include <windows.h> #if __TBB_WIN8UI_SUPPORT #include <thread> #endif #else #include <unistd.h> +#if __unix__ #if __linux__ #include <sys/sysinfo.h> -#include <string.h> +#endif +#include <cstring> #include <sched.h> -#include <errno.h> +#include <cerrno> #elif __sun #include <sys/sysinfo.h> #elif __FreeBSD__ -#include <errno.h> -#include <string.h> +#include <cerrno> +#include <cstring> #include <sys/param.h> // Required by <sys/cpuset.h> #include <sys/cpuset.h> #endif #endif namespace tbb { -namespace internal { +namespace detail { +namespace r1 { #if __TBB_USE_OS_AFFINITY_SYSCALL -static void set_affinity_mask( size_t maskSize, const basic_mask_t* threadMask ) { -#if __linux__ - if( sched_setaffinity( 0, maskSize, threadMask ) ) -#else /* FreeBSD */ +#if __unix__ +// Handlers for interoperation with libiomp +static int (*libiomp_try_restoring_original_mask)(); +// Table for mapping to libiomp entry points +static const dynamic_link_descriptor iompLinkTable[] = { + DLD_NOWEAK( kmp_set_thread_affinity_mask_initial, libiomp_try_restoring_original_mask ) +}; +#endif + +static void set_thread_affinity_mask( std::size_t maskSize, const basic_mask_t* threadMask ) { +#if __FreeBSD__ || __NetBSD__ || __OpenBSD__ if( cpuset_setaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, maskSize, threadMask ) ) +#else /* __unix__ */ + if( sched_setaffinity( 0, maskSize, threadMask ) ) #endif + // Here and below the error severity is lowered from critical level + // because it may happen during TBB library unload because of not + // waiting for workers to complete (current RML policy, to be fixed). + // handle_perror( errno, "setaffinity syscall" ); runtime_warning( "setaffinity syscall failed" ); } -static void get_affinity_mask( size_t maskSize, basic_mask_t* threadMask ) { -#if __linux__ - if( sched_getaffinity( 0, maskSize, threadMask ) ) -#else /* FreeBSD */ +static void get_thread_affinity_mask( std::size_t maskSize, basic_mask_t* threadMask ) { +#if __FreeBSD__ || __NetBSD__ || __OpenBSD__ if( cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, maskSize, threadMask ) ) +#else /* __unix__ */ + if( sched_getaffinity( 0, maskSize, threadMask ) ) #endif - runtime_warning( "getaffinity syscall failed" ); + runtime_warning( "getaffinity syscall failed" ); } static basic_mask_t* process_mask; static int num_masks; -struct process_mask_cleanup_helper { - ~process_mask_cleanup_helper() { - if( process_mask ) { - delete [] process_mask; - } - } -}; -static process_mask_cleanup_helper process_mask_cleanup; + +void destroy_process_mask() { + delete [] process_mask; + process_mask = nullptr; +} #define curMaskSize sizeof(basic_mask_t) * num_masks affinity_helper::~affinity_helper() { if( threadMask ) { if( is_changed ) { - set_affinity_mask( curMaskSize, threadMask ); + set_thread_affinity_mask( curMaskSize, threadMask ); } delete [] threadMask; } } -void affinity_helper::protect_affinity_mask() { - if( threadMask == NULL && num_masks && process_mask ) { +void affinity_helper::protect_affinity_mask( bool restore_process_mask ) { + if( threadMask == nullptr && num_masks ) { // TODO: assert num_masks validity? 
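
// For orientation: affinity_helper is an RAII guard that snapshots the calling
// thread's affinity mask and restores it on destruction if it was changed.
// A simplified single-mask sketch of the same idea, assuming Linux and at most
// CPU_SETSIZE processors (the real code sizes the mask dynamically, as the
// next hunk shows):
#include <sched.h>

class scoped_affinity_guard {
    cpu_set_t saved_{};
    bool restore_ = false;
public:
    scoped_affinity_guard() {
        // Capture the current thread's mask now...
        if (sched_getaffinity(0, sizeof(saved_), &saved_) == 0)
            restore_ = true;
    }
    ~scoped_affinity_guard() {
        // ...and put it back when leaving scope, unless dismissed.
        if (restore_)
            sched_setaffinity(0, sizeof(saved_), &saved_);
    }
    void dismiss() { restore_ = false; } // keep whatever mask is current
};
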
threadMask = new basic_mask_t [num_masks]; - memset( threadMask, 0, curMaskSize ); - get_affinity_mask( curMaskSize, threadMask ); - is_changed = memcmp( process_mask, threadMask, curMaskSize ); - if( is_changed ) { - set_affinity_mask( curMaskSize, process_mask ); + std::memset( threadMask, 0, curMaskSize ); + get_thread_affinity_mask( curMaskSize, threadMask ); + if( restore_process_mask ) { + __TBB_ASSERT( process_mask, "A process mask is requested but not yet stored" ); + is_changed = memcmp( process_mask, threadMask, curMaskSize ); + if( is_changed ) + set_thread_affinity_mask( curMaskSize, process_mask ); + } else { + // Assume that the mask will be changed by the caller. + is_changed = 1; } } } +void affinity_helper::dismiss() { + delete [] threadMask; + threadMask = nullptr; + is_changed = 0; +} #undef curMaskSize -static atomic<do_once_state> hardware_concurrency_info; +static std::atomic<do_once_state> hardware_concurrency_info; static int theNumProcs; @@ -115,62 +134,64 @@ static void initialize_hardware_concurrency_info () { int err; int availableProcs = 0; int numMasks = 1; -#if __linux__ -#if __TBB_MAIN_THREAD_AFFINITY_BROKEN - int maxProcs = INT_MAX; // To check the entire mask. - int pid = 0; // Get the mask of the calling thread. -#else - int maxProcs = sysconf(_SC_NPROCESSORS_ONLN); - int pid = getpid(); -#endif - cpu_set_t *processMask; - const size_t BasicMaskSize = sizeof(cpu_set_t); - for (;;) { - int curMaskSize = BasicMaskSize * numMasks; - processMask = new cpu_set_t[numMasks]; - memset( processMask, 0, curMaskSize ); - err = sched_getaffinity( pid, curMaskSize, processMask ); - if ( !err || errno != EINVAL || curMaskSize * CHAR_BIT >= 256 * 1024 ) - break; - delete[] processMask; - numMasks <<= 1; - } -#else /* FreeBSD >= 7.1 */ int maxProcs = sysconf(_SC_NPROCESSORS_ONLN); - cpuset_t *processMask; - const size_t BasicMaskSize = sizeof(cpuset_t); + basic_mask_t* processMask; + const std::size_t BasicMaskSize = sizeof(basic_mask_t); for (;;) { - int curMaskSize = BasicMaskSize * numMasks; - processMask = new cpuset_t[numMasks]; - memset( processMask, 0, curMaskSize ); + const int curMaskSize = BasicMaskSize * numMasks; + processMask = new basic_mask_t[numMasks]; + std::memset( processMask, 0, curMaskSize ); +#if __FreeBSD__ || __NetBSD__ || __OpenBSD__ // CPU_LEVEL_WHICH - anonymous (current) mask, CPU_LEVEL_CPUSET - assigned mask -#if __TBB_MAIN_THREAD_AFFINITY_BROKEN - err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, curMaskSize, processMask ); -#else err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, curMaskSize, processMask ); -#endif if ( !err || errno != ERANGE || curMaskSize * CHAR_BIT >= 16 * 1024 ) break; +#else /* __unix__ */ + int pid = getpid(); + err = sched_getaffinity( pid, curMaskSize, processMask ); + if ( !err || errno != EINVAL || curMaskSize * CHAR_BIT >= 256 * 1024 ) + break; +#endif delete[] processMask; numMasks <<= 1; } -#endif /* FreeBSD >= 7.1 */ if ( !err ) { + // We have found the mask size and captured the process affinity mask into processMask. + num_masks = numMasks; // do here because it's needed for affinity_helper to work +#if __unix__ + // For better coexistence with libiomp which might have changed the mask already, + // check for its presence and ask it to restore the mask. + dynamic_link_handle libhandle; + if ( dynamic_link( "libiomp5.so", iompLinkTable, 1, &libhandle, DYNAMIC_LINK_GLOBAL ) ) { + // We have found the symbol provided by libiomp5 for restoring original thread affinity. 
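
// The loop above sizes the affinity mask by trial: start with one basic_mask_t
// and, while the syscall rejects the buffer as too small (EINVAL on Linux,
// ERANGE on the BSDs), double it and retry. A standalone sketch of that
// pattern, assuming Linux/glibc:
#include <sched.h>
#include <unistd.h>
#include <cerrno>
#include <climits>
#include <cstddef>
#include <vector>

int count_allowed_cpus() {
    for (int num_masks = 1; num_masks <= 1024; num_masks <<= 1) {
        std::vector<cpu_set_t> mask(num_masks);
        const std::size_t bytes = sizeof(cpu_set_t) * num_masks;
        if (sched_getaffinity(0, bytes, mask.data()) == 0) {
            int n = 0; // count the bits set across all chunks of the mask
            for (int m = 0; m < num_masks; ++m)
                for (std::size_t i = 0; i < sizeof(cpu_set_t) * CHAR_BIT; ++i)
                    if (CPU_ISSET(i, &mask[m]))
                        ++n;
            return n > 0 ? n : 1;
        }
        if (errno != EINVAL)
            break; // unexpected failure: fall through to the fail-safe
    }
    long n = sysconf(_SC_NPROCESSORS_ONLN); // fail-safe, as in the patch
    return n > 0 ? static_cast<int>(n) : 1;
}
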
+ affinity_helper affhelp; + affhelp.protect_affinity_mask( /*restore_process_mask=*/false ); + if ( libiomp_try_restoring_original_mask()==0 ) { + // Now we have the right mask to capture, restored by libiomp. + const int curMaskSize = BasicMaskSize * numMasks; + std::memset( processMask, 0, curMaskSize ); + get_thread_affinity_mask( curMaskSize, processMask ); + } else + affhelp.dismiss(); // thread mask has not changed + dynamic_unlink( libhandle ); + // Destructor of affinity_helper restores the thread mask (unless dismissed). + } +#endif for ( int m = 0; availableProcs < maxProcs && m < numMasks; ++m ) { - for ( size_t i = 0; (availableProcs < maxProcs) && (i < BasicMaskSize * CHAR_BIT); ++i ) { + for ( std::size_t i = 0; (availableProcs < maxProcs) && (i < BasicMaskSize * CHAR_BIT); ++i ) { if ( CPU_ISSET( i, processMask + m ) ) ++availableProcs; } } - num_masks = numMasks; process_mask = processMask; } else { + // Failed to get the process affinity mask; assume the whole machine can be used. availableProcs = (maxProcs == INT_MAX) ? sysconf(_SC_NPROCESSORS_ONLN) : maxProcs; delete[] processMask; } theNumProcs = availableProcs > 0 ? availableProcs : 1; // Fail safety strap - __TBB_ASSERT( theNumProcs <= sysconf(_SC_NPROCESSORS_ONLN), NULL ); + __TBB_ASSERT( theNumProcs <= sysconf(_SC_NPROCESSORS_ONLN), nullptr); } int AvailableHwConcurrency() { @@ -178,12 +199,14 @@ int AvailableHwConcurrency() { return theNumProcs; } +/* End of __TBB_USE_OS_AFFINITY_SYSCALL implementation */ #elif __ANDROID__ + // Work-around for Android that reads the correct number of available CPUs since system calls are unreliable. // Format of "present" file is: ([<int>-<int>|<int>],)+ int AvailableHwConcurrency() { FILE *fp = fopen("/sys/devices/system/cpu/present", "r"); - if (fp == NULL) return 1; + if (fp == nullptr) return 1; int num_args, lower, upper, num_cpus=0; while ((num_args = fscanf(fp, "%u-%u", &lower, &upper)) != EOF) { switch(num_args) { @@ -192,10 +215,12 @@ int AvailableHwConcurrency() { } fscanf(fp, ","); } + fclose(fp); return (num_cpus > 0) ? num_cpus : 1; } #elif defined(_SC_NPROCESSORS_ONLN) + int AvailableHwConcurrency() { int n = sysconf(_SC_NPROCESSORS_ONLN); return (n > 0) ? n : 1; @@ -203,7 +228,7 @@ int AvailableHwConcurrency() { #elif _WIN32||_WIN64 -static atomic<do_once_state> hardware_concurrency_info; +static std::atomic<do_once_state> hardware_concurrency_info; static const WORD TBB_ALL_PROCESSOR_GROUPS = 0xffff; @@ -217,33 +242,33 @@ struct ProcessorGroupInfo { int numProcsRunningTotal; ///< Subtotal of processors in this and preceding groups //! Total number of processor groups in the system - static int NumGroups; + static int NumGroups; - //! Index of the group with a slot reserved for the first master thread + //! Index of the group with a slot reserved for the first external thread /** In the context of multiple processor groups support current implementation - defines "the first master thread" as the first thread to invoke - AvailableHwConcurrency(). + defines "the first external thread" as the first thread to invoke + AvailableHwConcurrency(). TODO: Implement a dynamic scheme remapping workers depending on the pending - master threads affinity. **/ + external threads affinity. 
**/ static int HoleIndex; }; int ProcessorGroupInfo::NumGroups = 1; int ProcessorGroupInfo::HoleIndex = 0; - ProcessorGroupInfo theProcessorGroups[MaxProcessorGroups]; - +int calculate_numa[MaxProcessorGroups]; //Array needed for FindProcessorGroupIndex to calculate Processor Group when number of threads > number of cores to distribute threads evenly between processor groups +int numaSum; struct TBB_GROUP_AFFINITY { DWORD_PTR Mask; WORD Group; WORD Reserved[3]; }; -static DWORD (WINAPI *TBB_GetActiveProcessorCount)( WORD groupIndex ) = NULL; -static WORD (WINAPI *TBB_GetActiveProcessorGroupCount)() = NULL; -static BOOL (WINAPI *TBB_SetThreadGroupAffinity)( HANDLE hThread, +static DWORD (WINAPI *TBB_GetActiveProcessorCount)( WORD groupIndex ) = nullptr; +static WORD (WINAPI *TBB_GetActiveProcessorGroupCount)() = nullptr; +static BOOL (WINAPI *TBB_SetThreadGroupAffinity)( HANDLE hThread, const TBB_GROUP_AFFINITY* newAff, TBB_GROUP_AFFINITY *prevAff ); static BOOL (WINAPI *TBB_GetThreadGroupAffinity)( HANDLE hThread, TBB_GROUP_AFFINITY* ); @@ -255,6 +280,7 @@ static const dynamic_link_descriptor ProcessorGroupsApiLinkTable[] = { }; static void initialize_hardware_concurrency_info () { + suppress_unused_warning(TBB_ALL_PROCESSOR_GROUPS); #if __TBB_WIN8UI_SUPPORT // For these applications processor groups info is unavailable // Setting up a number of processors for one processor group @@ -267,18 +293,18 @@ static void initialize_hardware_concurrency_info () { DWORD_PTR pam, sam, m = 1; GetProcessAffinityMask( GetCurrentProcess(), &pam, &sam ); int nproc = 0; - for ( size_t i = 0; i < sizeof(DWORD_PTR) * CHAR_BIT; ++i, m <<= 1 ) { + for ( std::size_t i = 0; i < sizeof(DWORD_PTR) * CHAR_BIT; ++i, m <<= 1 ) { if ( pam & m ) ++nproc; } - __TBB_ASSERT( nproc <= (int)si.dwNumberOfProcessors, NULL ); + __TBB_ASSERT( nproc <= (int)si.dwNumberOfProcessors, nullptr); // By default setting up a number of processors for one processor group theProcessorGroups[0].numProcs = theProcessorGroups[0].numProcsRunningTotal = nproc; // Setting up processor groups in case the process does not restrict affinity mask and more than one processor group is present if ( nproc == (int)si.dwNumberOfProcessors && TBB_GetActiveProcessorCount ) { // The process does not have restricting affinity mask and multiple processor groups are possible ProcessorGroupInfo::NumGroups = (int)TBB_GetActiveProcessorGroupCount(); - __TBB_ASSERT( ProcessorGroupInfo::NumGroups <= MaxProcessorGroups, NULL ); + __TBB_ASSERT( ProcessorGroupInfo::NumGroups <= MaxProcessorGroups, nullptr); // Fail safety bootstrap. Release versions will limit available concurrency // level, while debug ones would assert. if ( ProcessorGroupInfo::NumGroups > MaxProcessorGroups ) @@ -288,15 +314,27 @@ static void initialize_hardware_concurrency_info () { if ( TBB_GetThreadGroupAffinity( GetCurrentThread(), &ga ) ) ProcessorGroupInfo::HoleIndex = ga.Group; int nprocs = 0; + int min_procs = INT_MAX; for ( WORD i = 0; i < ProcessorGroupInfo::NumGroups; ++i ) { ProcessorGroupInfo &pgi = theProcessorGroups[i]; pgi.numProcs = (int)TBB_GetActiveProcessorCount(i); - __TBB_ASSERT( pgi.numProcs <= (int)sizeof(DWORD_PTR) * CHAR_BIT, NULL ); + if (pgi.numProcs < min_procs) min_procs = pgi.numProcs; //Finding the minimum number of processors in the Processor Groups + calculate_numa[i] = pgi.numProcs; + __TBB_ASSERT( pgi.numProcs <= (int)sizeof(DWORD_PTR) * CHAR_BIT, nullptr); pgi.mask = pgi.numProcs == sizeof(DWORD_PTR) * CHAR_BIT ? 
~(DWORD_PTR)0 : (DWORD_PTR(1) << pgi.numProcs) - 1; pgi.numProcsRunningTotal = nprocs += pgi.numProcs; } - __TBB_ASSERT( nprocs == (int)TBB_GetActiveProcessorCount( TBB_ALL_PROCESSOR_GROUPS ), NULL ); + __TBB_ASSERT( nprocs == (int)TBB_GetActiveProcessorCount( TBB_ALL_PROCESSOR_GROUPS ), nullptr); + + calculate_numa[0] = (calculate_numa[0] / min_procs)-1; + for (WORD i = 1; i < ProcessorGroupInfo::NumGroups; ++i) { + calculate_numa[i] = calculate_numa[i-1] + (calculate_numa[i] / min_procs); + } + + numaSum = calculate_numa[ProcessorGroupInfo::NumGroups - 1]; + } + } #endif /* __TBB_WIN8UI_SUPPORT */ @@ -306,63 +344,56 @@ static void initialize_hardware_concurrency_info () { PrintExtraVersionInfo( "----- Group", "%d: size %d", i, theProcessorGroups[i].numProcs); } -int AvailableHwConcurrency() { - atomic_do_once( &initialize_hardware_concurrency_info, hardware_concurrency_info ); - return theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal; -} - int NumberOfProcessorGroups() { - __TBB_ASSERT( hardware_concurrency_info == initialization_complete, "NumberOfProcessorGroups is used before AvailableHwConcurrency" ); + __TBB_ASSERT( hardware_concurrency_info == do_once_state::initialized, "NumberOfProcessorGroups is used before AvailableHwConcurrency" ); return ProcessorGroupInfo::NumGroups; } -// Offset for the slot reserved for the first master thread -#define HoleAdjusted(procIdx, grpIdx) (procIdx + (holeIdx <= grpIdx)) - int FindProcessorGroupIndex ( int procIdx ) { - // In case of oversubscription spread extra workers in a round robin manner - int holeIdx; - const int numProcs = theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal; - if ( procIdx >= numProcs - 1 ) { - holeIdx = INT_MAX; - procIdx = (procIdx - numProcs + 1) % numProcs; + int current_grp_idx = ProcessorGroupInfo::HoleIndex; + if (procIdx >= theProcessorGroups[current_grp_idx].numProcs && procIdx < theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal) { + procIdx = procIdx - theProcessorGroups[current_grp_idx].numProcs; + do { + current_grp_idx = (current_grp_idx + 1) % (ProcessorGroupInfo::NumGroups); + procIdx = procIdx - theProcessorGroups[current_grp_idx].numProcs; + + } while (procIdx >= 0); } - else - holeIdx = ProcessorGroupInfo::HoleIndex; - __TBB_ASSERT( hardware_concurrency_info == initialization_complete, "FindProcessorGroupIndex is used before AvailableHwConcurrency" ); - // Approximate the likely group index assuming all groups are of the same size - int i = procIdx / theProcessorGroups[0].numProcs; - // Make sure the approximation is a valid group index - if (i >= ProcessorGroupInfo::NumGroups) i = ProcessorGroupInfo::NumGroups-1; - // Now adjust the approximation up or down - if ( theProcessorGroups[i].numProcsRunningTotal > HoleAdjusted(procIdx, i) ) { - while ( theProcessorGroups[i].numProcsRunningTotal - theProcessorGroups[i].numProcs > HoleAdjusted(procIdx, i) ) { - __TBB_ASSERT( i > 0, NULL ); - --i; + else if (procIdx >= theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal) { + int temp_grp_index = 0; + procIdx = procIdx - theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal; + procIdx = procIdx % (numaSum+1); //ProcIdx to stay between 0 and numaSum + + while (procIdx - calculate_numa[temp_grp_index] > 0) { + temp_grp_index = (temp_grp_index + 1) % ProcessorGroupInfo::NumGroups; } + current_grp_idx = temp_grp_index; } - else { - do { - ++i; - } while ( theProcessorGroups[i].numProcsRunningTotal <= 
HoleAdjusted(procIdx, i) ); - } - __TBB_ASSERT( i < ProcessorGroupInfo::NumGroups, NULL ); - return i; + __TBB_ASSERT(current_grp_idx < ProcessorGroupInfo::NumGroups, nullptr); + + return current_grp_idx; } void MoveThreadIntoProcessorGroup( void* hThread, int groupIndex ) { - __TBB_ASSERT( hardware_concurrency_info == initialization_complete, "MoveThreadIntoProcessorGroup is used before AvailableHwConcurrency" ); + __TBB_ASSERT( hardware_concurrency_info == do_once_state::initialized, "MoveThreadIntoProcessorGroup is used before AvailableHwConcurrency" ); if ( !TBB_SetThreadGroupAffinity ) return; TBB_GROUP_AFFINITY ga = { theProcessorGroups[groupIndex].mask, (WORD)groupIndex, {0,0,0} }; - TBB_SetThreadGroupAffinity( hThread, &ga, NULL ); + TBB_SetThreadGroupAffinity( hThread, &ga, nullptr); +} + +int AvailableHwConcurrency() { + atomic_do_once( &initialize_hardware_concurrency_info, hardware_concurrency_info ); + return theProcessorGroups[ProcessorGroupInfo::NumGroups - 1].numProcsRunningTotal; } +/* End of _WIN32||_WIN64 implementation */ #else - #error AvailableHwConcurrency is not implemented in this OS -#endif /* OS */ + #error AvailableHwConcurrency is not implemented for this OS +#endif -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* !__TBB_HardwareConcurrency */ diff --git a/src/tbb/src/tbb/mutex.cpp b/src/tbb/src/tbb/mutex.cpp deleted file mode 100644 index 72319a57b..000000000 --- a/src/tbb/src/tbb/mutex.cpp +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if _WIN32||_WIN64 -#include <errno.h> // EDEADLK -#endif -#include "tbb/mutex.h" -#include "itt_notify.h" - -namespace tbb { - void mutex::scoped_lock::internal_acquire( mutex& m ) { - -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - case HELD: - EnterCriticalSection( &m.impl ); - // If a thread comes here, and another thread holds the lock, it will block - // in EnterCriticalSection. When it returns from EnterCriticalSection, - // m.state must be set to INITIALIZED. If the same thread tries to acquire a lock it - // aleady holds, the lock is in HELD state, thus will cause throwing the exception. 
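
// The removed tbb::mutex above detected relock deadlocks by hand on Windows
// (via the HELD state). On POSIX the same diagnostic comes built into the
// error-checking mutex type; a small illustrative sketch, not part of the patch:
#include <pthread.h>
#include <cerrno>
#include <cstdio>

int main() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);

    pthread_mutex_t m;
    pthread_mutex_init(&m, &attr);
    pthread_mutexattr_destroy(&attr);

    pthread_mutex_lock(&m);
    // An error-checking mutex reports EDEADLK on relock instead of hanging.
    int rc = pthread_mutex_lock(&m);
    std::printf("relock returned %d (EDEADLK = %d)\n", rc, EDEADLK);

    pthread_mutex_unlock(&m);
    pthread_mutex_destroy(&m);
    return 0;
}
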
- if (m.state==HELD) - tbb::internal::handle_perror(EDEADLK,"mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex"); - m.state = HELD; - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_lock(&m.impl); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex::scoped_lock: pthread_mutex_lock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = &m; - } - -void mutex::scoped_lock::internal_release() { - __TBB_ASSERT( my_mutex, "mutex::scoped_lock: not holding a mutex" ); -#if _WIN32||_WIN64 - switch( my_mutex->state ) { - case INITIALIZED: - __TBB_ASSERT(false,"mutex::scoped_lock: try to release the lock without acquisition"); - break; - case HELD: - my_mutex->state = INITIALIZED; - LeaveCriticalSection(&my_mutex->impl); - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_unlock(&my_mutex->impl); - __TBB_ASSERT_EX(!error_code, "mutex::scoped_lock: pthread_mutex_unlock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = NULL; -} - -bool mutex::scoped_lock::internal_try_acquire( mutex& m ) { -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - case HELD: - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); - break; - } -#endif /* _WIN32||_WIN64 */ - - bool result; -#if _WIN32||_WIN64 - result = TryEnterCriticalSection(&m.impl)!=0; - if( result ) { - __TBB_ASSERT(m.state!=HELD, "mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex"); - m.state = HELD; - } -#else - result = pthread_mutex_trylock(&m.impl)==0; -#endif /* _WIN32||_WIN64 */ - if( result ) - my_mutex = &m; - return result; -} - -void mutex::internal_construct() { -#if _WIN32||_WIN64 - InitializeCriticalSectionEx(&impl, 4000, 0); - state = INITIALIZED; -#else - int error_code = pthread_mutex_init(&impl,NULL); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); -#endif /* _WIN32||_WIN64*/ - ITT_SYNC_CREATE(&impl, _T("tbb::mutex"), _T("")); -} - -void mutex::internal_destroy() { -#if _WIN32||_WIN64 - switch( state ) { - case INITIALIZED: - DeleteCriticalSection(&impl); - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex: already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex: illegal state for destruction"); - break; - } - state = DESTROYED; -#else - int error_code = pthread_mutex_destroy(&impl); - __TBB_ASSERT_EX(!error_code,"mutex: pthread_mutex_destroy failed"); -#endif /* _WIN32||_WIN64 */ -} - -} // namespace tbb diff --git a/src/tbb/src/tbb/observer_proxy.cpp b/src/tbb/src/tbb/observer_proxy.cpp index 9c4a0d552..012c9e4a2 100644 --- a/src/tbb/src/tbb/observer_proxy.cpp +++ b/src/tbb/src/tbb/observer_proxy.cpp @@ -1,211 +1,174 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2022 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ + http://www.apache.org/licenses/LICENSE-2.0 -#include "tbb/tbb_config.h" -#if !__TBB_ARENA_OBSERVER - #error __TBB_ARENA_OBSERVER must be defined -#endif + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#if __TBB_SCHEDULER_OBSERVER +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_utils.h" #include "observer_proxy.h" -#include "tbb_main.h" -#include "governor.h" -#include "scheduler.h" #include "arena.h" +#include "main.h" +#include "thread_data.h" -namespace tbb { -namespace internal { +#include <atomic> -padded<observer_list> the_global_observer_list; +namespace tbb { +namespace detail { +namespace r1 { #if TBB_USE_ASSERT -static atomic<int> observer_proxy_count; - -struct check_observer_proxy_count { - ~check_observer_proxy_count() { - if( observer_proxy_count!=0 ) { - runtime_warning( "Leaked %ld observer_proxy objects\n", long(observer_proxy_count) ); - } - } -}; - -static check_observer_proxy_count the_check_observer_proxy_count; +extern std::atomic<int> the_observer_proxy_count; #endif /* TBB_USE_ASSERT */ -interface6::task_scheduler_observer* observer_proxy::get_v6_observer() { - if(my_version != 6) return NULL; - return static_cast<interface6::task_scheduler_observer*>(my_observer); -} - -bool observer_proxy::is_global() { - return !get_v6_observer() || get_v6_observer()->my_context_tag == interface6::task_scheduler_observer::global_tag; -} - -observer_proxy::observer_proxy( task_scheduler_observer_v3& tso ) - : my_list(NULL), my_next(NULL), my_prev(NULL), my_observer(&tso) +observer_proxy::observer_proxy( d1::task_scheduler_observer& tso ) + : my_ref_count(1), my_list(nullptr), my_next(nullptr), my_prev(nullptr), my_observer(&tso) { #if TBB_USE_ASSERT - ++observer_proxy_count; + ++the_observer_proxy_count; #endif /* TBB_USE_ASSERT */ - // 1 for observer - my_ref_count = 1; - my_version = load<relaxed>(my_observer->my_busy_count) - == interface6::task_scheduler_observer::v6_trait ? 
6 : 0; - __TBB_ASSERT( my_version >= 6 || !load<relaxed>(my_observer->my_busy_count), NULL ); } -#if TBB_USE_ASSERT -observer_proxy::~observer_proxy () { +observer_proxy::~observer_proxy() { __TBB_ASSERT( !my_ref_count, "Attempt to destroy proxy still in use" ); poison_value(my_ref_count); poison_pointer(my_prev); poison_pointer(my_next); - --observer_proxy_count; -} +#if TBB_USE_ASSERT + --the_observer_proxy_count; #endif /* TBB_USE_ASSERT */ - -template<memory_semantics M, class T, class V> -T atomic_fetch_and_store ( T* addr, const V& val ) { - return (T)atomic_traits<sizeof(T), M>::fetch_and_store( addr, (T)val ); } -void observer_list::clear () { - __TBB_ASSERT( this != &the_global_observer_list, "Method clear() cannot be used on the list of global observers" ); - // Though the method will work fine for the empty list, we require the caller - // to check for the list emptiness before invoking it to avoid extra overhead. - __TBB_ASSERT( !empty(), NULL ); +void observer_list::clear() { { scoped_lock lock(mutex(), /*is_writer=*/true); - observer_proxy *next = my_head; + observer_proxy *next = my_head.load(std::memory_order_relaxed); while ( observer_proxy *p = next ) { - __TBB_ASSERT( p->my_version >= 6, NULL ); next = p->my_next; // Both proxy p and observer p->my_observer (if non-null) are guaranteed // to be alive while the list is locked. - task_scheduler_observer_v3 *obs = p->my_observer; + d1::task_scheduler_observer *obs = p->my_observer; // Make sure that possible concurrent observer destruction does not // conflict with the proxy list cleanup. - if ( !obs || !(p = (observer_proxy*)__TBB_FetchAndStoreW(&obs->my_proxy, 0)) ) + if (!obs || !(p = obs->my_proxy.exchange(nullptr))) { continue; + } // accessing 'obs' after detaching of obs->my_proxy leads to the race with observer destruction - __TBB_ASSERT( !next || p == next->my_prev, NULL ); - __TBB_ASSERT( is_alive(p->my_ref_count), "Observer's proxy died prematurely" ); - __TBB_ASSERT( p->my_ref_count == 1, "Reference for observer is missing" ); -#if TBB_USE_ASSERT - p->my_observer = NULL; - p->my_ref_count = 0; -#endif /* TBB_USE_ASSERT */ + __TBB_ASSERT(!next || p == next->my_prev, nullptr); + __TBB_ASSERT(is_alive(p->my_ref_count), "Observer's proxy died prematurely"); + __TBB_ASSERT(p->my_ref_count.load(std::memory_order_relaxed) == 1, "Reference for observer is missing"); + poison_pointer(p->my_observer); remove(p); + --p->my_ref_count; delete p; } } - while( my_head ) - __TBB_Yield(); + + // If observe(false) is called concurrently with the destruction of the arena, + // need to wait until all proxies are removed. 
+ for (atomic_backoff backoff; ; backoff.pause()) { + scoped_lock lock(mutex(), /*is_writer=*/false); + if (my_head.load(std::memory_order_relaxed) == nullptr) { + break; + } + } + + __TBB_ASSERT(my_head.load(std::memory_order_relaxed) == nullptr && my_tail.load(std::memory_order_relaxed) == nullptr, nullptr); } -void observer_list::insert ( observer_proxy* p ) { +void observer_list::insert( observer_proxy* p ) { scoped_lock lock(mutex(), /*is_writer=*/true); - if ( my_head ) { - p->my_prev = my_tail; - my_tail->my_next = p; + if (my_head.load(std::memory_order_relaxed)) { + p->my_prev = my_tail.load(std::memory_order_relaxed); + my_tail.load(std::memory_order_relaxed)->my_next = p; + } else { + my_head.store(p, std::memory_order_relaxed); } - else - my_head = p; - my_tail = p; + my_tail.store(p, std::memory_order_relaxed); } -void observer_list::remove ( observer_proxy* p ) { - __TBB_ASSERT( my_head, "Attempt to remove an item from an empty list" ); - __TBB_ASSERT( !my_tail->my_next, "Last item's my_next must be NULL" ); - if( p == my_tail ) { - __TBB_ASSERT( !p->my_next, NULL ); - my_tail = p->my_prev; - } - else { - __TBB_ASSERT( p->my_next, NULL ); +void observer_list::remove(observer_proxy* p) { + __TBB_ASSERT(my_head.load(std::memory_order_relaxed), "Attempt to remove an item from an empty list"); + __TBB_ASSERT(!my_tail.load(std::memory_order_relaxed)->my_next, "Last item's my_next must be nullptr"); + if (p == my_tail.load(std::memory_order_relaxed)) { + __TBB_ASSERT(!p->my_next, nullptr); + my_tail.store(p->my_prev, std::memory_order_relaxed); + } else { + __TBB_ASSERT(p->my_next, nullptr); p->my_next->my_prev = p->my_prev; } - if ( p == my_head ) { - __TBB_ASSERT( !p->my_prev, NULL ); - my_head = p->my_next; - } - else { - __TBB_ASSERT( p->my_prev, NULL ); + if (p == my_head.load(std::memory_order_relaxed)) { + __TBB_ASSERT(!p->my_prev, nullptr); + my_head.store(p->my_next, std::memory_order_relaxed); + } else { + __TBB_ASSERT(p->my_prev, nullptr); p->my_prev->my_next = p->my_next; } - __TBB_ASSERT( (my_head && my_tail) || (!my_head && !my_tail), NULL ); + __TBB_ASSERT((my_head.load(std::memory_order_relaxed) && my_tail.load(std::memory_order_relaxed)) || + (!my_head.load(std::memory_order_relaxed) && !my_tail.load(std::memory_order_relaxed)), nullptr); } -void observer_list::remove_ref( observer_proxy* p ) { - int r = p->my_ref_count; - __TBB_ASSERT( is_alive(r), NULL ); - while(r>1) { - __TBB_ASSERT( r!=0, NULL ); - int r_old = p->my_ref_count.compare_and_swap(r-1,r); - if( r_old==r ) { - // Successfully decremented count. +void observer_list::remove_ref(observer_proxy* p) { + std::uintptr_t r = p->my_ref_count.load(std::memory_order_acquire); + __TBB_ASSERT(is_alive(r), nullptr); + while (r > 1) { + if (p->my_ref_count.compare_exchange_strong(r, r - 1)) { return; } - r = r_old; } - __TBB_ASSERT( r==1, NULL ); + __TBB_ASSERT(r == 1, nullptr); // Reference count might go to zero { // Use lock to avoid resurrection by a thread concurrently walking the list observer_list::scoped_lock lock(mutex(), /*is_writer=*/true); r = --p->my_ref_count; - if( !r ) + if (!r) { remove(p); + } } - __TBB_ASSERT( r || !p->my_ref_count, NULL ); - if( !r ) + __TBB_ASSERT(r || !p->my_ref_count, nullptr); + if (!r) { delete p; + } } -void observer_list::do_notify_entry_observers( observer_proxy*& last, bool worker ) { +void observer_list::do_notify_entry_observers(observer_proxy*& last, bool worker) { // Pointer p marches though the list from last (exclusively) to the end. 
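
// Note on remove_ref() above: it releases references in two tiers. While the
// count stays above one it decrements lock-free with compare_exchange_strong;
// only the final release takes the list's writer lock, so a concurrent list
// walker cannot resurrect the proxy mid-deletion. The lock-free tier in
// isolation (illustrative only):
#include <atomic>
#include <cstdint>

// Returns true when the caller hit the final reference and must finish the
// release under the list lock, as observer_list::remove_ref does.
bool release_fast_path(std::atomic<std::uintptr_t>& ref_count) {
    std::uintptr_t r = ref_count.load(std::memory_order_acquire);
    while (r > 1) {
        // On failure, r is refreshed with the current count and we retry.
        if (ref_count.compare_exchange_strong(r, r - 1))
            return false; // other references remain; nothing more to do
    }
    return true; // r == 1: this caller owns the last reference
}
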
- observer_proxy *p = last, *prev = p; - for(;;) { - task_scheduler_observer_v3* tso=NULL; + observer_proxy* p = last, * prev = p; + for (;;) { + d1::task_scheduler_observer* tso = nullptr; // Hold lock on list only long enough to advance to the next proxy in the list. { scoped_lock lock(mutex(), /*is_writer=*/false); do { - if( p ) { + if (p) { // We were already processing the list. - if( observer_proxy* q = p->my_next ) { - if( p == prev ) - remove_ref_fast(prev); // sets prev to NULL if successful + if (observer_proxy* q = p->my_next) { + if (p == prev) { + remove_ref_fast(prev); // sets prev to nullptr if successful + } p = q; - } - else { + } else { // Reached the end of the list. - if( p == prev ) { + if (p == prev) { // Keep the reference as we store the 'last' pointer in scheduler - __TBB_ASSERT(p->my_ref_count >= 1 + (p->my_observer?1:0), NULL); + __TBB_ASSERT(int(p->my_ref_count.load(std::memory_order_relaxed)) >= 1 + (p->my_observer ? 1 : 0), nullptr); } else { // The last few proxies were empty - __TBB_ASSERT(p->my_ref_count, NULL); + __TBB_ASSERT(int(p->my_ref_count.load(std::memory_order_relaxed)), nullptr); ++p->my_ref_count; - if( prev ) { + if (prev) { lock.release(); remove_ref(prev); } @@ -215,206 +178,153 @@ void observer_list::do_notify_entry_observers( observer_proxy*& last, bool worke } } else { // Starting pass through the list - p = my_head; - if( !p ) + p = my_head.load(std::memory_order_relaxed); + if (!p) { return; + } } tso = p->my_observer; - } while( !tso ); + } while (!tso); ++p->my_ref_count; ++tso->my_busy_count; } - __TBB_ASSERT( !prev || p!=prev, NULL ); + __TBB_ASSERT(!prev || p != prev, nullptr); // Release the proxy pinned before p - if( prev ) + if (prev) { remove_ref(prev); + } // Do not hold any locks on the list while calling user's code. // Do not intercept any exceptions that may escape the callback so that // they are either handled by the TBB scheduler or passed to the debugger. tso->on_scheduler_entry(worker); - __TBB_ASSERT(p->my_ref_count, NULL); + __TBB_ASSERT(p->my_ref_count.load(std::memory_order_relaxed), nullptr); intptr_t bc = --tso->my_busy_count; - __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" ); + __TBB_ASSERT_EX(bc >= 0, "my_busy_count underflowed"); prev = p; } } -void observer_list::do_notify_exit_observers( observer_proxy* last, bool worker ) { +void observer_list::do_notify_exit_observers(observer_proxy* last, bool worker) { // Pointer p marches though the list from the beginning to last (inclusively). - observer_proxy *p = NULL, *prev = NULL; - for(;;) { - task_scheduler_observer_v3* tso=NULL; + observer_proxy* p = nullptr, * prev = nullptr; + for (;;) { + d1::task_scheduler_observer* tso = nullptr; // Hold lock on list only long enough to advance to the next proxy in the list. { scoped_lock lock(mutex(), /*is_writer=*/false); do { - if( p ) { + if (p) { // We were already processing the list. 
- if( p != last ) { - __TBB_ASSERT( p->my_next, "List items before 'last' must have valid my_next pointer" ); - if( p == prev ) - remove_ref_fast(prev); // sets prev to NULL if successful + if (p != last) { + __TBB_ASSERT(p->my_next, "List items before 'last' must have valid my_next pointer"); + if (p == prev) + remove_ref_fast(prev); // sets prev to nullptr if successful p = p->my_next; } else { // remove the reference from the last item remove_ref_fast(p); - if( p ) { + if (p) { lock.release(); + if (p != prev && prev) { + remove_ref(prev); + } remove_ref(p); } return; } } else { // Starting pass through the list - p = my_head; - __TBB_ASSERT( p, "Nonzero 'last' must guarantee that the global list is non-empty" ); + p = my_head.load(std::memory_order_relaxed); + __TBB_ASSERT(p, "Nonzero 'last' must guarantee that the global list is non-empty"); } tso = p->my_observer; - } while( !tso ); + } while (!tso); // The item is already refcounted - if ( p != last ) // the last is already referenced since entry notification + if (p != last) // the last is already referenced since entry notification ++p->my_ref_count; ++tso->my_busy_count; } - __TBB_ASSERT( !prev || p!=prev, NULL ); - if( prev ) + __TBB_ASSERT(!prev || p != prev, nullptr); + if (prev) remove_ref(prev); // Do not hold any locks on the list while calling user's code. // Do not intercept any exceptions that may escape the callback so that // they are either handled by the TBB scheduler or passed to the debugger. tso->on_scheduler_exit(worker); - __TBB_ASSERT(p->my_ref_count || p == last, NULL); - intptr_t bc = --tso->my_busy_count; - __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" ); - prev = p; - } -} - -#if __TBB_SLEEP_PERMISSION -bool observer_list::ask_permission_to_leave() { - __TBB_ASSERT( this == &the_global_observer_list, "This method cannot be used on lists of arena observers" ); - if( !my_head ) return true; - // Pointer p marches though the list - observer_proxy *p = NULL, *prev = NULL; - bool result = true; - while( result ) { - task_scheduler_observer* tso = NULL; - // Hold lock on list only long enough to advance to the next proxy in the list. - { - scoped_lock lock(mutex(), /*is_writer=*/false); - do { - if( p ) { - // We were already processing the list. - observer_proxy* q = p->my_next; - // read next, remove the previous reference - if( p == prev ) - remove_ref_fast(prev); // sets prev to NULL if successful - if( q ) p = q; - else { - // Reached the end of the list. - if( prev ) { - lock.release(); - remove_ref(prev); - } - return result; - } - } else { - // Starting pass through the list - p = my_head; - if( !p ) - return result; - } - tso = p->get_v6_observer(); - } while( !tso ); - ++p->my_ref_count; - ++tso->my_busy_count; - } - __TBB_ASSERT( !prev || p!=prev, NULL ); - // Release the proxy pinned before p - if( prev ) - remove_ref(prev); - // Do not hold any locks on the list while calling user's code. - // Do not intercept any exceptions that may escape the callback so that - // they are either handled by the TBB scheduler or passed to the debugger. 
- result = tso->may_sleep(); - __TBB_ASSERT(p->my_ref_count, NULL); + __TBB_ASSERT(p->my_ref_count || p == last, nullptr); intptr_t bc = --tso->my_busy_count; - __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" ); + __TBB_ASSERT_EX(bc >= 0, "my_busy_count underflowed"); prev = p; } - if( prev ) - remove_ref(prev); - return result; } -#endif//__TBB_SLEEP_PERMISSION -void task_scheduler_observer_v3::observe( bool enable ) { +void __TBB_EXPORTED_FUNC observe(d1::task_scheduler_observer &tso, bool enable) { if( enable ) { - if( !my_proxy ) { - my_proxy = new observer_proxy( *this ); - my_busy_count = 0; // proxy stores versioning information, clear it - if ( !my_proxy->is_global() ) { - // Local observer activation - generic_scheduler* s = governor::local_scheduler_if_initialized(); -#if __TBB_TASK_ARENA - __TBB_ASSERT( my_proxy->get_v6_observer(), NULL ); - intptr_t tag = my_proxy->get_v6_observer()->my_context_tag; - if( tag != interface6::task_scheduler_observer::implicit_tag ) { // explicit arena - task_arena *a = reinterpret_cast<task_arena*>(tag); - a->initialize(); - my_proxy->my_list = &a->my_arena->my_observers; - } else -#endif - { - if( !s ) s = governor::init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true ); - __TBB_ASSERT( __TBB_InitOnce::initialization_done(), NULL ); - __TBB_ASSERT( s && s->my_arena, NULL ); - my_proxy->my_list = &s->my_arena->my_observers; + if( !tso.my_proxy.load(std::memory_order_relaxed) ) { + observer_proxy* p = new observer_proxy(tso); + tso.my_proxy.store(p, std::memory_order_relaxed); + tso.my_busy_count.store(0, std::memory_order_relaxed); + + thread_data* td = governor::get_thread_data_if_initialized(); + if (p->my_observer->my_task_arena == nullptr) { + if (!(td && td->my_arena)) { + td = governor::get_thread_data(); } - my_proxy->my_list->insert(my_proxy); - // Notify newly activated observer and other pending ones if it belongs to current arena - if(s && &s->my_arena->my_observers == my_proxy->my_list ) - my_proxy->my_list->notify_entry_observers( s->my_last_local_observer, s->is_worker() ); + __TBB_ASSERT(__TBB_InitOnce::initialization_done(), nullptr); + __TBB_ASSERT(td && td->my_arena, nullptr); + p->my_list = &td->my_arena->my_observers; } else { - // Obsolete. Global observer activation - if( !__TBB_InitOnce::initialization_done() ) - DoOneTimeInitializations(); - my_proxy->my_list = &the_global_observer_list; - my_proxy->my_list->insert(my_proxy); - if( generic_scheduler* s = governor::local_scheduler_if_initialized() ) { - // Notify newly created observer of its own thread. - // Any other pending observers are notified too. - the_global_observer_list.notify_entry_observers( s->my_last_global_observer, s->is_worker() ); + d1::task_arena* ta = p->my_observer->my_task_arena; + arena* a = ta->my_arena.load(std::memory_order_acquire); + if (a == nullptr) { // Avoid recursion during arena initialization + ta->initialize(); + a = ta->my_arena.load(std::memory_order_relaxed); } + __TBB_ASSERT(a != nullptr, nullptr); + p->my_list = &a->my_observers; + } + p->my_list->insert(p); + // Notify newly activated observer and other pending ones if it belongs to current arena + if (td && td->my_arena && &td->my_arena->my_observers == p->my_list) { + p->my_list->notify_entry_observers(td->my_last_observer, td->my_is_worker); } } } else { // Make sure that possible concurrent proxy list cleanup does not conflict // with the observer destruction here. 
- if ( observer_proxy* proxy = (observer_proxy*)__TBB_FetchAndStoreW(&my_proxy, 0) ) { + if ( observer_proxy* proxy = tso.my_proxy.exchange(nullptr) ) { // List destruction should not touch this proxy after we've won the above interlocked exchange. - __TBB_ASSERT( proxy->my_observer == this, NULL ); - __TBB_ASSERT( is_alive(proxy->my_ref_count), "Observer's proxy died prematurely" ); - __TBB_ASSERT( proxy->my_ref_count >= 1, "reference for observer missing" ); + __TBB_ASSERT( proxy->my_observer == &tso, nullptr); + __TBB_ASSERT( is_alive(proxy->my_ref_count.load(std::memory_order_relaxed)), "Observer's proxy died prematurely" ); + __TBB_ASSERT( proxy->my_ref_count.load(std::memory_order_relaxed) >= 1, "reference for observer missing" ); observer_list &list = *proxy->my_list; { // Ensure that none of the list walkers relies on observer pointer validity observer_list::scoped_lock lock(list.mutex(), /*is_writer=*/true); - proxy->my_observer = NULL; + proxy->my_observer = nullptr; // Proxy may still be held by other threads (to track the last notified observer) if( !--proxy->my_ref_count ) {// nobody can increase it under exclusive lock list.remove(proxy); - __TBB_ASSERT( !proxy->my_ref_count, NULL ); + __TBB_ASSERT( !proxy->my_ref_count, nullptr); delete proxy; } } - while( my_busy_count ) // other threads are still accessing the callback - __TBB_Yield(); + spin_wait_until_eq(tso.my_busy_count, 0); // other threads are still accessing the callback } } } -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb -#endif /* __TBB_SCHEDULER_OBSERVER */ +namespace tbb { +namespace internal { + +void __TBB_EXPORTED_FUNC task_scheduler_observer_v3::observe( bool enable ) { + auto* tso = (tbb::detail::d1::task_scheduler_observer*) (this); + tbb::detail::r1::observe(*tso, enable); +} + +} // namespace internal +} // namespace tbb \ No newline at end of file diff --git a/src/tbb/src/tbb/observer_proxy.h b/src/tbb/src/tbb/observer_proxy.h index 17af4f7a5..0ca2839d1 100644 --- a/src/tbb/src/tbb/observer_proxy.h +++ b/src/tbb/src/tbb/observer_proxy.h @@ -1,35 +1,31 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
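The disable path above is the half of the protocol that user code hits when an observer is destroyed: the proxy is atomically detached, unlinked under the write lock, and the caller spins until my_busy_count drains. For orientation, a minimal sketch of the public oneTBB API that drives this machinery (illustrative user code, not part of this patch):

    #include <oneapi/tbb/task_scheduler_observer.h>
    #include <oneapi/tbb/parallel_for.h>
    #include <atomic>

    // Counts threads currently participating in the TBB scheduler.
    class thread_counter : public tbb::task_scheduler_observer {
        std::atomic<int> my_live{0};
    public:
        thread_counter()  { observe(true);  }  // inserts a proxy into the observer list
        ~thread_counter() { observe(false); }  // exchanges the proxy out, waits for callbacks to drain
        void on_scheduler_entry(bool /*is_worker*/) override { ++my_live; }
        void on_scheduler_exit(bool /*is_worker*/) override  { --my_live; }
    };

    int main() {
        thread_counter counter;
        tbb::parallel_for(0, 1000, [](int) { /* work observed by counter */ });
    }

observe(true) lands in observer_list::insert() shown earlier; observe(false) is the exchange-and-spin branch directly above.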
diff --git a/src/tbb/src/tbb/observer_proxy.h b/src/tbb/src/tbb/observer_proxy.h
index 17af4f7a5..0ca2839d1 100644
--- a/src/tbb/src/tbb/observer_proxy.h
+++ b/src/tbb/src/tbb/observer_proxy.h
@@ -1,35 +1,31 @@
 /*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
+    Copyright (c) 2005-2022 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
 */

-#ifndef _TBB_observer_proxy_H
-#define _TBB_observer_proxy_H
+#ifndef __TBB_observer_proxy_H
+#define __TBB_observer_proxy_H

-#if __TBB_SCHEDULER_OBSERVER
+#include "oneapi/tbb/detail/_config.h"
+#include "oneapi/tbb/detail/_aligned_space.h"

-#include "scheduler_common.h" // to include task.h
-#include "tbb/task_scheduler_observer.h"
-#include "tbb/spin_rw_mutex.h"
-#include "tbb/aligned_space.h"
+#include "oneapi/tbb/task_scheduler_observer.h"
+#include "oneapi/tbb/spin_rw_mutex.h"

 namespace tbb {
-namespace internal {
+namespace detail {
+namespace r1 {

 class observer_list {
     friend class arena;
@@ -39,10 +35,10 @@ class observer_list {
     typedef aligned_space<spin_rw_mutex> my_mutex_type;

     //! Pointer to the head of this list.
-    observer_proxy* my_head;
+    std::atomic<observer_proxy*> my_head{nullptr};

     //! Pointer to the tail of this list.
-    observer_proxy* my_tail;
+    std::atomic<observer_proxy*> my_tail{nullptr};

     //! Mutex protecting this list.
     my_mutex_type my_mutex;
@@ -51,7 +47,7 @@ class observer_list {
     arena* my_arena;

     //! Decrement refcount of the proxy p if there are other outstanding references.
-    /** In case of success sets p to NULL. Must be invoked from under the list lock. **/
+    /** In case of success sets p to nullptr. Must be invoked from under the list lock. **/
     inline static void remove_ref_fast( observer_proxy*& p );

     //! Implements notify_entry_observers functionality.
@@ -61,7 +57,7 @@ class observer_list {
     void do_notify_exit_observers( observer_proxy* last, bool worker );

 public:
-    observer_list () : my_head(NULL), my_tail(NULL) {}
+    observer_list () = default;

     //! Removes and destroys all observer proxies from the list.
     /** Cannot be used concurrently with other methods. **/
@@ -83,18 +79,13 @@ class observer_list {
     //! Accessor to the reader-writer mutex associated with the list.
     spin_rw_mutex& mutex () { return my_mutex.begin()[0]; }

-    bool empty () const { return my_head == NULL; }
-
     //! Call entry notifications on observers added after last was notified.
     /** Updates last to become the last notified observer proxy (in the global list)
-        or leaves it to be NULL. The proxy has its refcount incremented. **/
+        or leaves it to be nullptr. The proxy has its refcount incremented. **/
     inline void notify_entry_observers( observer_proxy*& last, bool worker );

     //! Call exit notifications on last and observers added before it.
     inline void notify_exit_observers( observer_proxy*& last, bool worker );
-
-    //! Call may_sleep callbacks to ask for permission for a worker thread to leave market
-    bool ask_permission_to_leave();
 }; // class observer_list

 //! Wrapper for an observer object
@@ -102,71 +93,60 @@ class observer_list {
     object into a proxy so that a list item remained valid even after the corresponding
     proxy object is destroyed by the user code.
 **/
 class observer_proxy {
-    friend class task_scheduler_observer_v3;
+    friend class d1::task_scheduler_observer;
     friend class observer_list;
+    friend void observe(d1::task_scheduler_observer&, bool);
     //! Reference count used for garbage collection.
     /** 1 for reference from my task_scheduler_observer.
-        1 for each task dispatcher's last observer pointer.
+        1 for each task dispatcher's last observer pointer.
         No accounting for neighbors in the shared list. */
-    atomic<int> my_ref_count;
+    std::atomic<std::uintptr_t> my_ref_count;
     //! Reference to the list this observer belongs to.
     observer_list* my_list;
     //! Pointer to next observer in the list specified by my_head.
-    /** NULL for the last item in the list. **/
+    /** nullptr for the last item in the list. **/
     observer_proxy* my_next;
     //! Pointer to the previous observer in the list specified by my_head.
     /** For the head of the list points to the last item. **/
     observer_proxy* my_prev;
     //! Associated observer
-    task_scheduler_observer_v3* my_observer;
-    //! Version
-    char my_version;
-
-    interface6::task_scheduler_observer* get_v6_observer();
-    bool is_global(); //TODO: move them back inline when un-CPF'ing
+    d1::task_scheduler_observer* my_observer;

     //! Constructs proxy for the given observer and adds it to the specified list.
-    observer_proxy( task_scheduler_observer_v3& );
+    observer_proxy( d1::task_scheduler_observer& );

-#if TBB_USE_ASSERT
     ~observer_proxy();
-#endif /* TBB_USE_ASSERT */
-
-    //! Shut up the warning
-    observer_proxy& operator = ( const observer_proxy& );
 }; // class observer_proxy

-inline void observer_list::remove_ref_fast( observer_proxy*& p ) {
+void observer_list::remove_ref_fast( observer_proxy*& p ) {
     if( p->my_observer ) {
         // Can decrement refcount quickly, as it cannot drop to zero while under the lock.
-        int r = --p->my_ref_count;
-        __TBB_ASSERT_EX( r, NULL );
-        p = NULL;
+        std::uintptr_t r = --p->my_ref_count;
+        __TBB_ASSERT_EX( r, nullptr);
+        p = nullptr;
     } else {
         // Use slow form of refcount decrementing, after the lock is released.
     }
 }

-inline void observer_list::notify_entry_observers( observer_proxy*& last, bool worker ) {
-    if ( last == my_tail )
+void observer_list::notify_entry_observers(observer_proxy*& last, bool worker) {
+    if (last == my_tail.load(std::memory_order_relaxed))
         return;
-    do_notify_entry_observers( last, worker );
+    do_notify_entry_observers(last, worker);
 }

-inline void observer_list::notify_exit_observers( observer_proxy*& last, bool worker ) {
-    if ( !last )
+void observer_list::notify_exit_observers( observer_proxy*& last, bool worker ) {
+    if (last == nullptr) {
         return;
-    __TBB_ASSERT(is_alive((uintptr_t)last), NULL);
+    }
+    __TBB_ASSERT(!is_poisoned(last), nullptr);
     do_notify_exit_observers( last, worker );
-    __TBB_ASSERT(last, NULL);
-    poison_value(last);
+    __TBB_ASSERT(last != nullptr, nullptr);
+    poison_pointer(last);
 }

-extern padded<observer_list> the_global_observer_list;
-
-} // namespace internal
+} // namespace r1
+} // namespace detail
 } // namespace tbb

-#endif /* __TBB_SCHEDULER_OBSERVER */
-
-#endif /* _TBB_observer_proxy_H */
+#endif /* __TBB_observer_proxy_H */
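The proxy's reference count (one reference held by the task_scheduler_observer itself, plus one per task dispatcher that remembers it as the last notified observer) is what lets list walkers outlive the observer object. A stripped-down model of remove_ref()'s decrement discipline, with illustrative names rather than TBB's types:

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    struct node {
        std::atomic<std::uintptr_t> refs{1};
    };

    std::mutex list_mutex;  // stands in for the list's spin_rw_mutex

    void release(node* n) {
        std::uintptr_t r = n->refs.load(std::memory_order_acquire);
        while (r > 1) {
            // Fast path: other references remain, so a CAS decrement suffices.
            // On failure, compare_exchange_strong reloads r and we retry.
            if (n->refs.compare_exchange_strong(r, r - 1))
                return;
        }
        // Slow path: the final decrement happens under the list lock so that a
        // concurrent walker cannot resurrect the node while it is being freed.
        std::lock_guard<std::mutex> guard(list_mutex);
        if (--n->refs == 0)
            delete n;  // the real code also unlinks the proxy from its list here
    }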
diff --git a/src/tbb/src/tbb/parallel_pipeline.cpp b/src/tbb/src/tbb/parallel_pipeline.cpp
new file mode 100644
index 000000000..bb8587b92
--- /dev/null
+++ b/src/tbb/src/tbb/parallel_pipeline.cpp
@@ -0,0 +1,471 @@
+/*
+    Copyright (c) 2005-2022 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "oneapi/tbb/parallel_pipeline.h"
+#include "oneapi/tbb/spin_mutex.h"
+#include "oneapi/tbb/tbb_allocator.h"
+#include "oneapi/tbb/cache_aligned_allocator.h"
+#include "itt_notify.h"
+#include "tls.h"
+#include "oneapi/tbb/detail/_exception.h"
+#include "oneapi/tbb/detail/_small_object_pool.h"
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+void handle_perror(int error_code, const char* aux_info);
+
+using Token = unsigned long;
+
+//! A processing pipeline that applies filters to items.
+/** @ingroup algorithms */
+class pipeline {
+    friend void parallel_pipeline(d1::task_group_context&, std::size_t, const d1::filter_node&);
+public:
+
+    //! Construct empty pipeline.
+    pipeline(d1::task_group_context& cxt, std::size_t max_token) :
+        my_context(cxt),
+        first_filter(nullptr),
+        last_filter(nullptr),
+        input_tokens(Token(max_token)),
+        end_of_input(false),
+        wait_ctx(0) {
+        __TBB_ASSERT( max_token>0, "pipeline::run must have at least one token" );
+    }
+
+    ~pipeline();
+
+    //! Add filter to end of pipeline.
+    void add_filter( d1::base_filter& );
+
+    //! Traverse the tree of filter-nodes in-order and add a filter for each leaf
+    void fill_pipeline(const d1::filter_node& root) {
+        if( root.left && root.right ) {
+            fill_pipeline(*root.left);
+            fill_pipeline(*root.right);
+        }
+        else {
+            __TBB_ASSERT(!root.left && !root.right, "tree should be full");
+            add_filter(*root.create_filter());
+        }
+    }
+
+private:
+    friend class stage_task;
+    friend class base_filter;
+    friend void set_end_of_input(d1::base_filter& bf);
+
+    task_group_context& my_context;
+
+    //! Pointer to first filter in the pipeline.
+    d1::base_filter* first_filter;
+
+    //! Pointer to last filter in the pipeline.
+    d1::base_filter* last_filter;
+
+    //! Number of idle tokens waiting for input stage.
+    std::atomic<Token> input_tokens;
+
+    //! False until flow_control::stop() is called.
+    std::atomic<bool> end_of_input;
+
+    d1::wait_context wait_ctx;
+};
+
+//! This structure is used to store task information in an input buffer
+struct task_info {
+    void* my_object = nullptr;
+    //! Invalid unless a task went through an ordered stage.
+    Token my_token = 0;
+    //! False until my_token is set.
+    bool my_token_ready = false;
+    //! True if my_object is valid.
+    bool is_valid = false;
+    //! Set to initial state (no object, no token)
+    void reset() {
+        my_object = nullptr;
+        my_token = 0;
+        my_token_ready = false;
+        is_valid = false;
+    }
+};
+
+//! A buffer of input items for a filter.
+/** Each item is a task_info, inserted into a position in the buffer corresponding to a Token. */
+class input_buffer {
+    friend class base_filter;
+    friend class stage_task;
+    friend class pipeline;
+    friend void set_end_of_input(d1::base_filter& bf);
+
+    using size_type = Token;
+
+    //! Array of deferred tasks that cannot yet start executing.
+    task_info* array;
+
+    //! Size of array
+    /** Always 0 or a power of 2 */
+    size_type array_size;
+
+    //! Lowest token that can start executing.
+    /** All prior Token have already been seen. */
+    Token low_token;
+
+    //! Serializes updates.
+    spin_mutex array_mutex;
+
+    //! Resize "array".
+    /** Caller is responsible for acquiring a lock on "array_mutex". */
+    void grow( size_type minimum_size );
+
+    //! Initial size for "array"
+    /** Must be a power of 2 */
+    static const size_type initial_buffer_size = 4;
+
+    //! Used for out of order buffer, and for assigning my_token if is_ordered and my_token not already assigned
+    Token high_token;
+
+    //! True for ordered filter, false otherwise.
+    const bool is_ordered;
+
+    //! For parallel filters that accept nullptrs, thread-local flag for reaching end_of_input
+    using end_of_input_tls_t = basic_tls<input_buffer*>;
+    end_of_input_tls_t end_of_input_tls;
+    bool end_of_input_tls_allocated; // no way to test pthread creation of TLS
+
+public:
+    input_buffer(const input_buffer&) = delete;
+    input_buffer& operator=(const input_buffer&) = delete;
+
+    //! Construct empty buffer.
+    input_buffer( bool ordered) :
+        array(nullptr),
+        array_size(0),
+        low_token(0),
+        high_token(0),
+        is_ordered(ordered),
+        end_of_input_tls(),
+        end_of_input_tls_allocated(false) {
+        grow(initial_buffer_size);
+        __TBB_ASSERT( array, nullptr );
+    }
+
+    //! Destroy the buffer.
+    ~input_buffer() {
+        __TBB_ASSERT( array, nullptr );
+        cache_aligned_allocator<task_info>().deallocate(array,array_size);
+        poison_pointer( array );
+        if( end_of_input_tls_allocated ) {
+            destroy_my_tls();
+        }
+    }
+
+    //! Define order when the first filter is serial_in_order.
+    Token get_ordered_token(){
+        return high_token++;
+    }
+
+    //! Put a token into the buffer.
+    /** If task information was placed into buffer, returns true;
+        otherwise returns false, informing the caller to create and spawn a task.
+    */
+    bool try_put_token( task_info& info ) {
+        info.is_valid = true;
+        spin_mutex::scoped_lock lock( array_mutex );
+        Token token;
+        if( is_ordered ) {
+            if( !info.my_token_ready ) {
+                info.my_token = high_token++;
+                info.my_token_ready = true;
+            }
+            token = info.my_token;
+        } else
+            token = high_token++;
+        __TBB_ASSERT( (long)(token-low_token)>=0, nullptr );
+        if( token!=low_token ) {
+            // Trying to put token that is beyond low_token.
+            // Need to wait until low_token catches up before dispatching.
+            if( token-low_token>=array_size )
+                grow( token-low_token+1 );
+            ITT_NOTIFY( sync_releasing, this );
+            array[token&(array_size-1)] = info;
+            return true;
+        }
+        return false;
+    }
+
+    //! Note that processing of a token is finished.
+    /** Fires up processing of the next token, if processing was deferred. */
+    // Uses template to avoid explicit dependency on stage_task.
+    template<typename StageTask>
+    void try_to_spawn_task_for_next_token(StageTask& spawner, d1::execution_data& ed) {
+        task_info wakee;
+        {
+            spin_mutex::scoped_lock lock( array_mutex );
+            // Wake the next task
+            task_info& item = array[++low_token & (array_size-1)];
+            ITT_NOTIFY( sync_acquired, this );
+            wakee = item;
+            item.is_valid = false;
+        }
+        if( wakee.is_valid )
+            spawner.spawn_stage_task(wakee, ed);
+    }
+
+    // end_of_input signal for parallel_pipeline, parallel input filters with 0 tokens allowed.
+    void create_my_tls() {
+        int status = end_of_input_tls.create();
+        if(status)
+            handle_perror(status, "TLS not allocated for filter");
+        end_of_input_tls_allocated = true;
+    }
+    void destroy_my_tls() {
+        int status = end_of_input_tls.destroy();
+        if(status)
+            handle_perror(status, "Failed to destroy filter TLS");
+    }
+    bool my_tls_end_of_input() {
+        return end_of_input_tls.get() != nullptr;
+    }
+    void set_my_tls_end_of_input() {
+        end_of_input_tls.set(this);
+    }
+};
+
+void input_buffer::grow( size_type minimum_size ) {
+    size_type old_size = array_size;
+    size_type new_size = old_size ? 2*old_size : initial_buffer_size;
+    while( new_size<minimum_size )
+        new_size*=2;
+    task_info* new_array = cache_aligned_allocator<task_info>().allocate(new_size);
+    task_info* old_array = array;
+    for( size_type i=0; i<new_size; ++i )
+        new_array[i].is_valid = false;
+    Token t=low_token;
+    for( size_type i=0; i<old_size; ++i, ++t )
+        new_array[t&(new_size-1)] = old_array[t&(old_size-1)];
+    array = new_array;
+    array_size = new_size;
+    if( old_array )
+        cache_aligned_allocator<task_info>().deallocate(old_array,old_size);
+}
+
+class stage_task : public d1::task, public task_info {
+private:
+    friend class pipeline;
+    pipeline& my_pipeline;
+    d1::base_filter* my_filter;
+    d1::small_object_allocator m_allocator;
+    //! True if this task has not yet read the input.
+    bool my_at_start;
+
+    //! True if this can be executed again.
+    bool execute_filter(d1::execution_data& ed);
+
+    //! Spawn task if token is available.
+    void try_spawn_stage_task(d1::execution_data& ed) {
+        ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens );
+        if( (my_pipeline.input_tokens.fetch_sub(1, std::memory_order_release)) > 1 ) {
+            d1::small_object_allocator alloc{};
+            r1::spawn( *alloc.new_object<stage_task>(ed, my_pipeline, alloc ), my_pipeline.my_context );
+        }
+    }
+
+public:
+
+    //! Construct stage_task for first stage in a pipeline.
+    /** Such a stage has not read any input yet. */
+    stage_task(pipeline& pipeline, d1::small_object_allocator& alloc ) :
+        my_pipeline(pipeline),
+        my_filter(pipeline.first_filter),
+        m_allocator(alloc),
+        my_at_start(true)
+    {
+        task_info::reset();
+        my_pipeline.wait_ctx.reserve();
+    }
+    //! Construct stage_task for a subsequent stage in a pipeline.
+    stage_task(pipeline& pipeline, d1::base_filter* filter, const task_info& info, d1::small_object_allocator& alloc) :
+        task_info(info),
+        my_pipeline(pipeline),
+        my_filter(filter),
+        m_allocator(alloc),
+        my_at_start(false)
+    {
+        my_pipeline.wait_ctx.reserve();
+    }
+    //! Roughly equivalent to the constructor of input stage task
+    void reset() {
+        task_info::reset();
+        my_filter = my_pipeline.first_filter;
+        my_at_start = true;
+    }
+    void finalize(d1::execution_data& ed) {
+        m_allocator.delete_object(this, ed);
+    }
+    //! The virtual task execution method
+    task* execute(d1::execution_data& ed) override {
+        if(!execute_filter(ed)) {
+            finalize(ed);
+            return nullptr;
+        }
+        return this;
+    }
+    task* cancel(d1::execution_data& ed) override {
+        finalize(ed);
+        return nullptr;
+    }
+
+    ~stage_task() override {
+        if ( my_filter && my_object ) {
+            my_filter->finalize(my_object);
+            my_object = nullptr;
+        }
+        my_pipeline.wait_ctx.release();
+    }
+    //! Creates and spawns stage_task from task_info
+    void spawn_stage_task(const task_info& info, d1::execution_data& ed) {
+        d1::small_object_allocator alloc{};
+        stage_task* clone = alloc.new_object<stage_task>(ed, my_pipeline, my_filter, info, alloc);
+        r1::spawn(*clone, my_pipeline.my_context);
+    }
+};
+
+bool stage_task::execute_filter(d1::execution_data& ed) {
+    __TBB_ASSERT( !my_at_start || !my_object, "invalid state of task" );
+    if( my_at_start ) {
+        if( my_filter->is_serial() ) {
+            my_object = (*my_filter)(my_object);
+            if( my_object || ( my_filter->object_may_be_null() && !my_pipeline.end_of_input.load(std::memory_order_relaxed)) ) {
+                if( my_filter->is_ordered() ) {
+                    my_token = my_filter->my_input_buffer->get_ordered_token();
+                    my_token_ready = true;
+                }
+                if( !my_filter->next_filter_in_pipeline ) { // we're the only filter in the pipeline
+                    reset();
+                    return true;
+                } else {
+                    try_spawn_stage_task(ed);
+                }
+            } else {
+                my_pipeline.end_of_input.store(true, std::memory_order_relaxed);
+                return false;
+            }
+        } else /*not is_serial*/ {
+            if ( my_pipeline.end_of_input.load(std::memory_order_relaxed) ) {
+                return false;
+            }
+
+            try_spawn_stage_task(ed);
+
+            my_object = (*my_filter)(my_object);
+            if( !my_object && (!my_filter->object_may_be_null() || my_filter->my_input_buffer->my_tls_end_of_input()) ){
+                my_pipeline.end_of_input.store(true, std::memory_order_relaxed);
+                return false;
+            }
+        }
+        my_at_start = false;
+    } else {
+        my_object = (*my_filter)(my_object);
+        if( my_filter->is_serial() )
+            my_filter->my_input_buffer->try_to_spawn_task_for_next_token(*this, ed);
+    }
+    my_filter = my_filter->next_filter_in_pipeline;
+    if( my_filter ) {
+        // There is another filter to execute.
+        if( my_filter->is_serial() ) {
+            // The next filter must execute tokens when they are available (in order for serial_in_order)
+            if( my_filter->my_input_buffer->try_put_token(*this) ){
+                my_filter = nullptr; // To prevent deleting my_object twice if exception occurs
+                return false;
+            }
+        }
+    } else {
+        // Reached end of the pipe.
+        std::size_t ntokens_avail = my_pipeline.input_tokens.fetch_add(1, std::memory_order_acquire);
+
+        if( ntokens_avail>0 // Only recycle if there is one available token
+            || my_pipeline.end_of_input.load(std::memory_order_relaxed) ) {
+            return false; // No need to recycle for new input
+        }
+        ITT_NOTIFY( sync_acquired, &my_pipeline.input_tokens );
+        // Recycle as an input stage task.
+        reset();
+    }
+    return true;
+}
+
+pipeline::~pipeline() {
+    while( first_filter ) {
+        d1::base_filter* f = first_filter;
+        if( input_buffer* b = f->my_input_buffer ) {
+            b->~input_buffer();
+            deallocate_memory(b);
+        }
+        first_filter = f->next_filter_in_pipeline;
+        f->~base_filter();
+        deallocate_memory(f);
+    }
+}
+
+void pipeline::add_filter( d1::base_filter& new_filter ) {
+    __TBB_ASSERT( new_filter.next_filter_in_pipeline==d1::base_filter::not_in_pipeline(), "filter already part of pipeline?" );
+    new_filter.my_pipeline = this;
+    if ( first_filter == nullptr )
+        first_filter = &new_filter;
+    else
+        last_filter->next_filter_in_pipeline = &new_filter;
+    new_filter.next_filter_in_pipeline = nullptr;
+    last_filter = &new_filter;
+    if( new_filter.is_serial() ) {
+        new_filter.my_input_buffer = new (allocate_memory(sizeof(input_buffer))) input_buffer( new_filter.is_ordered() );
+    } else {
+        if( first_filter == &new_filter && new_filter.object_may_be_null() ) {
+            //TODO: buffer only needed to hold TLS; could improve
+            new_filter.my_input_buffer = new (allocate_memory(sizeof(input_buffer))) input_buffer( /*is_ordered*/false );
+            new_filter.my_input_buffer->create_my_tls();
+        }
+    }
+}
+
+void __TBB_EXPORTED_FUNC parallel_pipeline(d1::task_group_context& cxt, std::size_t max_token, const d1::filter_node& fn) {
+    pipeline pipe(cxt, max_token);
+
+    pipe.fill_pipeline(fn);
+
+    d1::small_object_allocator alloc{};
+    stage_task& st = *alloc.new_object<stage_task>(pipe, alloc);
+
+    // Start execution of tasks
+    r1::execute_and_wait(st, cxt, pipe.wait_ctx, cxt);
+}
+
+void __TBB_EXPORTED_FUNC set_end_of_input(d1::base_filter& bf) {
+    __TBB_ASSERT(bf.my_input_buffer, nullptr);
+    __TBB_ASSERT(bf.object_may_be_null(), nullptr);
+    if(bf.is_serial() ) {
+        bf.my_pipeline->end_of_input.store(true, std::memory_order_relaxed);
+    } else {
+        __TBB_ASSERT(bf.my_input_buffer->end_of_input_tls_allocated, nullptr);
+        bf.my_input_buffer->set_my_tls_end_of_input();
+    }
+}
+
+} // namespace r1
+} // namespace detail
+} // namespace tbb
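fill_pipeline() above consumes the filter_node tree that the public parallel_pipeline() entry point builds from filters composed with operator&. A representative caller, using only the documented oneTBB interface (the token count of 8 is an arbitrary choice for this example):

    #include <oneapi/tbb/parallel_pipeline.h>
    #include <cmath>
    #include <vector>

    double sum_of_roots(const std::vector<double>& v) {
        double total = 0.0;
        std::size_t i = 0;
        tbb::parallel_pipeline(
            /*max_number_of_live_tokens=*/8,
            // Serial input stage: fc.stop() is what ultimately sets the
            // pipeline's end_of_input flag seen in the implementation above.
            tbb::make_filter<void, double>(tbb::filter_mode::serial_in_order,
                [&](tbb::flow_control& fc) -> double {
                    if (i == v.size()) { fc.stop(); return 0.0; }
                    return v[i++];
                }) &
            // Parallel middle stage: may run on several tokens at once.
            tbb::make_filter<double, double>(tbb::filter_mode::parallel,
                [](double x) { return std::sqrt(x); }) &
            // Serial in-order output stage: tokens are re-sequenced by the
            // input_buffer before this body runs.
            tbb::make_filter<double, void>(tbb::filter_mode::serial_in_order,
                [&](double x) { total += x; }));
        return total;
    }

The live-token bound is exactly the input_tokens counter that try_spawn_stage_task() decrements before spawning another input task.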
diff --git a/src/tbb/src/tbb/permit_manager.h b/src/tbb/src/tbb/permit_manager.h
new file mode 100644
index 000000000..0a6a737c4
--- /dev/null
+++ b/src/tbb/src/tbb/permit_manager.h
@@ -0,0 +1,61 @@
+/*
+    Copyright (c) 2022-2023 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef _TBB_permit_manager_H
+#define _TBB_permit_manager_H
+
+#include "oneapi/tbb/info.h"
+#include "oneapi/tbb/detail/_utils.h"
+#include "thread_request_serializer.h"
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+class arena;
+class pm_client;
+
+class permit_manager : no_copy {
+public:
+    virtual ~permit_manager() {}
+    virtual pm_client* create_client(arena& a) = 0;
+    virtual void register_client(pm_client* client, d1::constraints& constraints) = 0;
+    virtual void unregister_and_destroy_client(pm_client& c) = 0;
+
+    virtual void set_active_num_workers(int soft_limit) = 0;
+    virtual void adjust_demand(pm_client&, int mandatory_delta, int workers_delta) = 0;
+
+    void set_thread_request_observer(thread_request_observer& tr_observer) {
+        __TBB_ASSERT(!my_thread_request_observer, "set_thread_request_observer was called already?");
+        my_thread_request_observer = &tr_observer;
+    }
+protected:
+    void notify_thread_request(int delta) {
+        __TBB_ASSERT(my_thread_request_observer, "set_thread_request_observer was not called?");
+        if (delta) {
+            my_thread_request_observer->update(delta);
+        }
+    }
+private:
+    thread_request_observer* my_thread_request_observer{nullptr};
+};
+
+
+} // namespace r1
+} // namespace detail
+} // namespace tbb
+
+#endif // _TBB_permit_manager_H
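permit_manager itself is internal, but its set_active_num_workers() soft limit has a public trigger: tbb::global_control is the documented knob for the size of the worker pool, and a concrete permit manager receives the resulting limit through this interface. A sketch using only the public API (the internal routing is not shown in this hunk and is assumed here):

    #include <oneapi/tbb/global_control.h>
    #include <oneapi/tbb/parallel_for.h>

    int main() {
        // While 'limit' is alive, at most 4 threads (including the caller)
        // participate in TBB parallel work.
        tbb::global_control limit(tbb::global_control::max_allowed_parallelism, 4);
        tbb::parallel_for(0, 100, [](int) { /* work shared by up to 4 threads */ });
    }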
diff --git a/src/tbb/src/tbb/pipeline.cpp b/src/tbb/src/tbb/pipeline.cpp
deleted file mode 100644
index 32e97f808..000000000
--- a/src/tbb/src/tbb/pipeline.cpp
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.   You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction.  Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#include "tbb/pipeline.h"
-#include "tbb/spin_mutex.h"
-#include "tbb/cache_aligned_allocator.h"
-#include "itt_notify.h"
-#include "semaphore.h"
-#include "tls.h" // for parallel filters that do not use NULL as end_of_input
-
-
-namespace tbb {
-
-namespace internal {
-
-//! This structure is used to store task information in a input buffer
-struct task_info {
-    void* my_object;
-    //! Invalid unless a task went through an ordered stage.
-    Token my_token;
-    //! False until my_token is set.
-    bool my_token_ready;
-    //! True if my_object is valid.
-    bool is_valid;
-    //! Set to initial state (no object, no token)
-    void reset() {
-        my_object = NULL;
-        my_token = 0;
-        my_token_ready = false;
-        is_valid = false;
-    }
-};
-//! A buffer of input items for a filter.
-/** Each item is a task_info, inserted into a position in the buffer corresponding to a Token. */
-class input_buffer : no_copy {
-    friend class tbb::internal::pipeline_root_task;
-    friend class tbb::filter;
-    friend class tbb::thread_bound_filter;
-    friend class tbb::internal::stage_task;
-    friend class tbb::pipeline;
-
-    typedef Token size_type;
-
-    //! Array of deferred tasks that cannot yet start executing.
-    task_info* array;
-
-    //! for thread-bound filter, semaphore for waiting, NULL otherwise.
-    semaphore* my_sem;
-
-    //! Size of array
-    /** Always 0 or a power of 2 */
-    size_type array_size;
-
-    //! Lowest token that can start executing.
-    /** All prior Token have already been seen. */
-    Token low_token;
-
-    //! Serializes updates.
-    spin_mutex array_mutex;
-
-    //! Resize "array".
-    /** Caller is responsible to acquiring a lock on "array_mutex". */
-    void grow( size_type minimum_size );
-
-    //! Initial size for "array"
-    /** Must be a power of 2 */
-    static const size_type initial_buffer_size = 4;
-
-    //! Used for out of order buffer, and for assigning my_token if is_ordered and my_token not already assigned
-    Token high_token;
-
-    //! True for ordered filter, false otherwise.
-    bool is_ordered;
-
-    //! True for thread-bound filter, false otherwise.
-    bool is_bound;
-
-    //! for parallel filters that accepts NULLs, thread-local flag for reaching end_of_input
-    typedef basic_tls<intptr_t> end_of_input_tls_t;
-    end_of_input_tls_t end_of_input_tls;
-    bool end_of_input_tls_allocated; // no way to test pthread creation of TLS
-
-    void create_sema(size_t initial_tokens) { __TBB_ASSERT(!my_sem,NULL); my_sem = new internal::semaphore(initial_tokens); }
-    void free_sema() { __TBB_ASSERT(my_sem,NULL); delete my_sem; }
-    void sema_P() { __TBB_ASSERT(my_sem,NULL); my_sem->P(); }
-    void sema_V() { __TBB_ASSERT(my_sem,NULL); my_sem->V(); }
-
-public:
-    //! Construct empty buffer.
-    input_buffer( bool is_ordered_, bool is_bound_ ) :
-        array(NULL), my_sem(NULL), array_size(0),
-        low_token(0), high_token(0),
-        is_ordered(is_ordered_), is_bound(is_bound_),
-        end_of_input_tls_allocated(false) {
-        grow(initial_buffer_size);
-        __TBB_ASSERT( array, NULL );
-        if(is_bound) create_sema(0);
-    }
-
-    //! Destroy the buffer.
-    ~input_buffer() {
-        __TBB_ASSERT( array, NULL );
-        cache_aligned_allocator<task_info>().deallocate(array,array_size);
-        poison_pointer( array );
-        if(my_sem) {
-            free_sema();
-        }
-        if(end_of_input_tls_allocated) {
-            destroy_my_tls();
-        }
-    }
-
-    //! Put a token into the buffer.
-    /** If task information was placed into buffer, returns true;
-        otherwise returns false, informing the caller to create and spawn a task.
-        If input buffer owned by thread-bound filter and the item at
-        low_token was not valid, issue a V()
-        If the input_buffer is owned by a successor to a thread-bound filter,
-        the force_put parameter should be true to ensure the token is inserted
-        in the buffer.
- */ - bool put_token( task_info& info_, bool force_put = false ) { - { - info_.is_valid = true; - spin_mutex::scoped_lock lock( array_mutex ); - Token token; - bool was_empty = !array[low_token&(array_size-1)].is_valid; - if( is_ordered ) { - if( !info_.my_token_ready ) { - info_.my_token = high_token++; - info_.my_token_ready = true; - } - token = info_.my_token; - } else - token = high_token++; - __TBB_ASSERT( (tokendiff_t)(token-low_token)>=0, NULL ); - if( token!=low_token || is_bound || force_put ) { - // Trying to put token that is beyond low_token. - // Need to wait until low_token catches up before dispatching. - if( token-low_token>=array_size ) - grow( token-low_token+1 ); - ITT_NOTIFY( sync_releasing, this ); - array[token&(array_size-1)] = info_; - if(was_empty && is_bound) { - sema_V(); - } - return true; - } - } - return false; - } - - //! Note that processing of a token is finished. - /** Fires up processing of the next token, if processing was deferred. */ - // Using template to avoid explicit dependency on stage_task - // this is only called for serial filters, and is the reason for the - // advance parameter in return_item (we're incrementing low_token here.) - // Non-TBF serial stages don't advance the token at the start because the presence - // of the current token in the buffer keeps another stage from being spawned. - template<typename StageTask> - void note_done( Token token, StageTask& spawner ) { - task_info wakee; - wakee.reset(); - { - spin_mutex::scoped_lock lock( array_mutex ); - if( !is_ordered || token==low_token ) { - // Wake the next task - task_info& item = array[++low_token & (array_size-1)]; - ITT_NOTIFY( sync_acquired, this ); - wakee = item; - item.is_valid = false; - } - } - if( wakee.is_valid ) - spawner.spawn_stage_task(wakee); - } - -#if __TBB_TASK_GROUP_CONTEXT - //! The method destroys all data in filters to prevent memory leaks - void clear( filter* my_filter ) { - long t=low_token; - for( size_type i=0; i<array_size; ++i, ++t ){ - task_info& temp = array[t&(array_size-1)]; - if (temp.is_valid ) { - my_filter->finalize(temp.my_object); - temp.is_valid = false; - } - } - } -#endif - - //! return an item, invalidate the queued item, but only advance if advance - // advance == true for parallel filters. If the filter is serial, leave the - // item in the buffer to keep another stage from being spawned. - bool return_item(task_info& info, bool advance) { - spin_mutex::scoped_lock lock( array_mutex ); - task_info& item = array[low_token&(array_size-1)]; - ITT_NOTIFY( sync_acquired, this ); - if( item.is_valid ) { - info = item; - item.is_valid = false; - if (advance) low_token++; - return true; - } - return false; - } - - //! true if the current low_token is valid. - bool has_item() { spin_mutex::scoped_lock lock(array_mutex); return array[low_token&(array_size -1)].is_valid; } - - // end_of_input signal for parallel_pipeline, parallel input filters with 0 tokens allowed. - void create_my_tls() { int status = end_of_input_tls.create(); if(status) handle_perror(status, "TLS not allocated for filter"); end_of_input_tls_allocated = true; } - void destroy_my_tls() { int status = end_of_input_tls.destroy(); if(status) handle_perror(status, "Failed to destroy filter TLS"); } - bool my_tls_end_of_input() { return end_of_input_tls.get() != 0; } - void set_my_tls_end_of_input() { end_of_input_tls.set(1); } -}; - -void input_buffer::grow( size_type minimum_size ) { - size_type old_size = array_size; - size_type new_size = old_size ? 
2*old_size : initial_buffer_size; - while( new_size<minimum_size ) - new_size*=2; - task_info* new_array = cache_aligned_allocator<task_info>().allocate(new_size); - task_info* old_array = array; - for( size_type i=0; i<new_size; ++i ) - new_array[i].is_valid = false; - long t=low_token; - for( size_type i=0; i<old_size; ++i, ++t ) - new_array[t&(new_size-1)] = old_array[t&(old_size-1)]; - array = new_array; - array_size = new_size; - if( old_array ) - cache_aligned_allocator<task_info>().deallocate(old_array,old_size); -} - -class stage_task: public task, public task_info { -private: - friend class tbb::pipeline; - pipeline& my_pipeline; - filter* my_filter; - //! True if this task has not yet read the input. - bool my_at_start; - -public: - //! Construct stage_task for first stage in a pipeline. - /** Such a stage has not read any input yet. */ - stage_task( pipeline& pipeline ) : - my_pipeline(pipeline), - my_filter(pipeline.filter_list), - my_at_start(true) - { - task_info::reset(); - } - //! Construct stage_task for a subsequent stage in a pipeline. - stage_task( pipeline& pipeline, filter* filter_, const task_info& info ) : - task_info(info), - my_pipeline(pipeline), - my_filter(filter_), - my_at_start(false) - {} - //! Roughly equivalent to the constructor of input stage task - void reset() { - task_info::reset(); - my_filter = my_pipeline.filter_list; - my_at_start = true; - } - //! The virtual task execution method - /*override*/ task* execute(); -#if __TBB_TASK_GROUP_CONTEXT - ~stage_task() - { - if (my_filter && my_object && (my_filter->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4)) { - __TBB_ASSERT(is_cancelled(), "Trying to finalize the task that wasn't cancelled"); - my_filter->finalize(my_object); - my_object = NULL; - } - } -#endif // __TBB_TASK_GROUP_CONTEXT - //! 
Creates and spawns stage_task from task_info - void spawn_stage_task(const task_info& info) - { - stage_task* clone = new (allocate_additional_child_of(*parent())) - stage_task( my_pipeline, my_filter, info ); - spawn(*clone); - } -}; - -task* stage_task::execute() { - __TBB_ASSERT( !my_at_start || !my_object, NULL ); - __TBB_ASSERT( !my_filter->is_bound(), NULL ); - if( my_at_start ) { - if( my_filter->is_serial() ) { - my_object = (*my_filter)(my_object); - if( my_object || ( my_filter->object_may_be_null() && !my_pipeline.end_of_input) ) - { - if( my_filter->is_ordered() ) { - my_token = my_pipeline.token_counter++; // ideally, with relaxed semantics - my_token_ready = true; - } else if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( my_pipeline.has_thread_bound_filters ) - my_pipeline.token_counter++; // ideally, with relaxed semantics - } - if( !my_filter->next_filter_in_pipeline ) { // we're only filter in pipeline - reset(); - goto process_another_stage; - } else { - ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens ); - if( --my_pipeline.input_tokens>0 ) - spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) ); - } - } else { - my_pipeline.end_of_input = true; - return NULL; - } - } else /*not is_serial*/ { - if( my_pipeline.end_of_input ) - return NULL; - if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( my_pipeline.has_thread_bound_filters ) - my_pipeline.token_counter++; - } - ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens ); - if( --my_pipeline.input_tokens>0 ) - spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) ); - my_object = (*my_filter)(my_object); - if( !my_object && (!my_filter->object_may_be_null() || my_filter->my_input_buffer->my_tls_end_of_input()) ) - { - my_pipeline.end_of_input = true; - if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( my_pipeline.has_thread_bound_filters ) - my_pipeline.token_counter--; // fix token_counter - } - return NULL; - } - } - my_at_start = false; - } else { - my_object = (*my_filter)(my_object); - if( my_filter->is_serial() ) - my_filter->my_input_buffer->note_done(my_token, *this); - } - my_filter = my_filter->next_filter_in_pipeline; - if( my_filter ) { - // There is another filter to execute. - if( my_filter->is_serial() ) { - // The next filter must execute tokens in order - if( my_filter->my_input_buffer->put_token(*this) ){ - // Can't proceed with the same item - if( my_filter->is_bound() ) { - // Find the next non-thread-bound filter - do { - my_filter = my_filter->next_filter_in_pipeline; - } while( my_filter && my_filter->is_bound() ); - // Check if there is an item ready to process - if( my_filter && my_filter->my_input_buffer->return_item(*this, !my_filter->is_serial())) - goto process_another_stage; - } - my_filter = NULL; // To prevent deleting my_object twice if exception occurs - return NULL; - } - } - } else { - // Reached end of the pipe. - size_t ntokens_avail = ++my_pipeline.input_tokens; - if(my_pipeline.filter_list->is_bound() ) { - if(ntokens_avail == 1) { - my_pipeline.filter_list->my_input_buffer->sema_V(); - } - return NULL; - } - if( ntokens_avail>1 // Only recycle if there is one available token - || my_pipeline.end_of_input ) { - return NULL; // No need to recycle for new input - } - ITT_NOTIFY( sync_acquired, &my_pipeline.input_tokens ); - // Recycle as an input stage task. 
- reset(); - } -process_another_stage: - /* A semi-hackish way to reexecute the same task object immediately without spawning. - recycle_as_continuation marks the task for future execution, - and then 'this' pointer is returned to bypass spawning. */ - recycle_as_continuation(); - return this; -} - -class pipeline_root_task: public task { - pipeline& my_pipeline; - bool do_segment_scanning; - - /*override*/ task* execute() { - if( !my_pipeline.end_of_input ) - if( !my_pipeline.filter_list->is_bound() ) - if( my_pipeline.input_tokens > 0 ) { - recycle_as_continuation(); - set_ref_count(1); - return new( allocate_child() ) stage_task( my_pipeline ); - } - if( do_segment_scanning ) { - filter* current_filter = my_pipeline.filter_list->next_segment; - /* first non-thread-bound filter that follows thread-bound one - and may have valid items to process */ - filter* first_suitable_filter = current_filter; - while( current_filter ) { - __TBB_ASSERT( !current_filter->is_bound(), "filter is thread-bound?" ); - __TBB_ASSERT( current_filter->prev_filter_in_pipeline->is_bound(), "previous filter is not thread-bound?" ); - if( !my_pipeline.end_of_input || current_filter->has_more_work()) - { - task_info info; - info.reset(); - if( current_filter->my_input_buffer->return_item(info, !current_filter->is_serial()) ) { - set_ref_count(1); - recycle_as_continuation(); - return new( allocate_child() ) stage_task( my_pipeline, current_filter, info); - } - current_filter = current_filter->next_segment; - if( !current_filter ) { - if( !my_pipeline.end_of_input ) { - recycle_as_continuation(); - return this; - } - current_filter = first_suitable_filter; - __TBB_Yield(); - } - } else { - /* The preceding pipeline segment is empty. - Fast-forward to the next post-TBF segment. */ - first_suitable_filter = first_suitable_filter->next_segment; - current_filter = first_suitable_filter; - } - } /* while( current_filter ) */ - return NULL; - } else { - if( !my_pipeline.end_of_input ) { - recycle_as_continuation(); - return this; - } - return NULL; - } - } -public: - pipeline_root_task( pipeline& pipeline ): my_pipeline(pipeline), do_segment_scanning(false) - { - __TBB_ASSERT( my_pipeline.filter_list, NULL ); - filter* first = my_pipeline.filter_list; - if( (first->my_filter_mode & first->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - // Scanning the pipeline for segments - filter* head_of_previous_segment = first; - for( filter* subfilter=first->next_filter_in_pipeline; - subfilter!=NULL; - subfilter=subfilter->next_filter_in_pipeline ) - { - if( subfilter->prev_filter_in_pipeline->is_bound() && !subfilter->is_bound() ) { - do_segment_scanning = true; - head_of_previous_segment->next_segment = subfilter; - head_of_previous_segment = subfilter; - } - } - } - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - // Suppress compiler warning about constant conditional expression - #pragma warning (disable: 4127) -#endif - -// The class destroys end_counter and clears all input buffers if pipeline was cancelled. 
-class pipeline_cleaner: internal::no_copy { - pipeline& my_pipeline; -public: - pipeline_cleaner(pipeline& _pipeline) : - my_pipeline(_pipeline) - {} - ~pipeline_cleaner(){ -#if __TBB_TASK_GROUP_CONTEXT - if (my_pipeline.end_counter->is_cancelled()) // Pipeline was cancelled - my_pipeline.clear_filters(); -#endif - my_pipeline.end_counter = NULL; - } -}; - -} // namespace internal - -void pipeline::inject_token( task& ) { - __TBB_ASSERT(false,"illegal call to inject_token"); -} - -#if __TBB_TASK_GROUP_CONTEXT -void pipeline::clear_filters() { - for( filter* f = filter_list; f; f = f->next_filter_in_pipeline ) { - if ((f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4)) - if( internal::input_buffer* b = f->my_input_buffer ) - b->clear(f); - } -} -#endif - -pipeline::pipeline() : - filter_list(NULL), - filter_end(NULL), - end_counter(NULL), - end_of_input(false), - has_thread_bound_filters(false) -{ - token_counter = 0; - input_tokens = 0; -} - -pipeline::~pipeline() { - clear(); -} - -void pipeline::clear() { - filter* next; - for( filter* f = filter_list; f; f=next ) { - if( internal::input_buffer* b = f->my_input_buffer ) { - delete b; - f->my_input_buffer = NULL; - } - next=f->next_filter_in_pipeline; - f->next_filter_in_pipeline = filter::not_in_pipeline(); - if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) { - f->prev_filter_in_pipeline = filter::not_in_pipeline(); - f->my_pipeline = NULL; - } - if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) ) - f->next_segment = NULL; - } - filter_list = filter_end = NULL; -} - -void pipeline::add_filter( filter& filter_ ) { -#if TBB_USE_ASSERT - if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) - __TBB_ASSERT( filter_.prev_filter_in_pipeline==filter::not_in_pipeline(), "filter already part of pipeline?" ); - __TBB_ASSERT( filter_.next_filter_in_pipeline==filter::not_in_pipeline(), "filter already part of pipeline?" 
); - __TBB_ASSERT( !end_counter, "invocation of add_filter on running pipeline" ); -#endif - if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) { - filter_.my_pipeline = this; - filter_.prev_filter_in_pipeline = filter_end; - if ( filter_list == NULL) - filter_list = &filter_; - else - filter_end->next_filter_in_pipeline = &filter_; - filter_.next_filter_in_pipeline = NULL; - filter_end = &filter_; - } - else - { - if( !filter_end ) - filter_end = reinterpret_cast<filter*>(&filter_list); - - *reinterpret_cast<filter**>(filter_end) = &filter_; - filter_end = reinterpret_cast<filter*>(&filter_.next_filter_in_pipeline); - *reinterpret_cast<filter**>(filter_end) = NULL; - } - if( (filter_.my_filter_mode & filter_.version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( filter_.is_serial() ) { - if( filter_.is_bound() ) - has_thread_bound_filters = true; - filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), filter_.is_bound() ); - } - else { - if(filter_.prev_filter_in_pipeline) { - if(filter_.prev_filter_in_pipeline->is_bound()) { - // successors to bound filters must have an input_buffer - filter_.my_input_buffer = new internal::input_buffer( /*is_ordered*/false, false ); - } - } - else { // input filter - if(filter_.object_may_be_null() ) { - //TODO: buffer only needed to hold TLS; could improve - filter_.my_input_buffer = new internal::input_buffer( /*is_ordered*/false, false ); - filter_.my_input_buffer->create_my_tls(); - } - } - } - } else { - if( filter_.is_serial() ) { - filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), false ); - } - } - -} - -void pipeline::remove_filter( filter& filter_ ) { - __TBB_ASSERT( filter_.prev_filter_in_pipeline!=filter::not_in_pipeline(), "filter not part of pipeline" ); - __TBB_ASSERT( filter_.next_filter_in_pipeline!=filter::not_in_pipeline(), "filter not part of pipeline" ); - __TBB_ASSERT( !end_counter, "invocation of remove_filter on running pipeline" ); - if (&filter_ == filter_list) - filter_list = filter_.next_filter_in_pipeline; - else { - __TBB_ASSERT( filter_.prev_filter_in_pipeline, "filter list broken?" ); - filter_.prev_filter_in_pipeline->next_filter_in_pipeline = filter_.next_filter_in_pipeline; - } - if (&filter_ == filter_end) - filter_end = filter_.prev_filter_in_pipeline; - else { - __TBB_ASSERT( filter_.next_filter_in_pipeline, "filter list broken?" ); - filter_.next_filter_in_pipeline->prev_filter_in_pipeline = filter_.prev_filter_in_pipeline; - } - if( internal::input_buffer* b = filter_.my_input_buffer ) { - delete b; - filter_.my_input_buffer = NULL; - } - filter_.next_filter_in_pipeline = filter_.prev_filter_in_pipeline = filter::not_in_pipeline(); - if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) ) - filter_.next_segment = NULL; - filter_.my_pipeline = NULL; -} - -void pipeline::run( size_t max_number_of_live_tokens -#if __TBB_TASK_GROUP_CONTEXT - , tbb::task_group_context& context -#endif - ) { - __TBB_ASSERT( max_number_of_live_tokens>0, "pipeline::run must have at least one token" ); - __TBB_ASSERT( !end_counter, "pipeline already running?" 
); - if( filter_list ) { - internal::pipeline_cleaner my_pipeline_cleaner(*this); - end_of_input = false; - input_tokens = internal::Token(max_number_of_live_tokens); - if(has_thread_bound_filters) { - // release input filter if thread-bound - if(filter_list->is_bound()) { - filter_list->my_input_buffer->sema_V(); - } - } -#if __TBB_TASK_GROUP_CONTEXT - end_counter = new( task::allocate_root(context) ) internal::pipeline_root_task( *this ); -#else - end_counter = new( task::allocate_root() ) internal::pipeline_root_task( *this ); -#endif - // Start execution of tasks - task::spawn_root_and_wait( *end_counter ); - - if(has_thread_bound_filters) { - for(filter* f = filter_list->next_filter_in_pipeline; f; f=f->next_filter_in_pipeline) { - if(f->is_bound()) { - f->my_input_buffer->sema_V(); // wake to end - } - } - } - } -} - -#if __TBB_TASK_GROUP_CONTEXT -void pipeline::run( size_t max_number_of_live_tokens ) { - if( filter_list ) { - // Construct task group context with the exception propagation mode expected - // by the pipeline caller. - uintptr_t ctx_traits = filter_list->my_filter_mode & filter::exact_exception_propagation ? - task_group_context::default_traits : - task_group_context::default_traits & ~task_group_context::exact_exception; - task_group_context context(task_group_context::bound, ctx_traits); - run(max_number_of_live_tokens, context); - } -} -#endif // __TBB_TASK_GROUP_CONTEXT - -bool filter::has_more_work() { - __TBB_ASSERT(my_pipeline, NULL); - __TBB_ASSERT(my_input_buffer, "has_more_work() called for filter with no input buffer"); - return (internal::tokendiff_t)(my_pipeline->token_counter - my_input_buffer->low_token) != 0; -} - -filter::~filter() { - if ( (my_filter_mode & version_mask) >= __TBB_PIPELINE_VERSION(3) ) { - if ( next_filter_in_pipeline != filter::not_in_pipeline() ) - my_pipeline->remove_filter(*this); - else - __TBB_ASSERT( prev_filter_in_pipeline == filter::not_in_pipeline(), "probably filter list is broken" ); - } else { - __TBB_ASSERT( next_filter_in_pipeline==filter::not_in_pipeline(), "cannot destroy filter that is part of pipeline" ); - } -} - -void -filter::set_end_of_input() { - __TBB_ASSERT(my_input_buffer, NULL); - __TBB_ASSERT(object_may_be_null(), NULL); - if(is_serial()) { - my_pipeline->end_of_input = true; - } - else { - __TBB_ASSERT(my_input_buffer->end_of_input_tls_allocated, NULL); - my_input_buffer->set_my_tls_end_of_input(); - } -} - -thread_bound_filter::result_type thread_bound_filter::process_item() { - return internal_process_item(true); -} - -thread_bound_filter::result_type thread_bound_filter::try_process_item() { - return internal_process_item(false); -} - -thread_bound_filter::result_type thread_bound_filter::internal_process_item(bool is_blocking) { - __TBB_ASSERT(my_pipeline != NULL,"It's not supposed that process_item is called for a filter that is not in a pipeline."); - internal::task_info info; - info.reset(); - - if( my_pipeline->end_of_input && !has_more_work() ) - return end_of_stream; - - if( !prev_filter_in_pipeline ) { - if( my_pipeline->end_of_input ) - return end_of_stream; - while( my_pipeline->input_tokens == 0 ) { - if( !is_blocking ) - return item_not_available; - my_input_buffer->sema_P(); - } - info.my_object = (*this)(info.my_object); - if( info.my_object ) { - __TBB_ASSERT(my_pipeline->input_tokens > 0, "Token failed in thread-bound filter"); - my_pipeline->input_tokens--; - if( is_ordered() ) { - info.my_token = my_pipeline->token_counter; - info.my_token_ready = true; - } - 
my_pipeline->token_counter++; // ideally, with relaxed semantics - } else { - my_pipeline->end_of_input = true; - return end_of_stream; - } - } else { /* this is not an input filter */ - while( !my_input_buffer->has_item() ) { - if( !is_blocking ) { - return item_not_available; - } - my_input_buffer->sema_P(); - if( my_pipeline->end_of_input && !has_more_work() ) { - return end_of_stream; - } - } - if( !my_input_buffer->return_item(info, /*advance*/true) ) { - __TBB_ASSERT(false,"return_item failed"); - } - info.my_object = (*this)(info.my_object); - } - if( next_filter_in_pipeline ) { - if ( !next_filter_in_pipeline->my_input_buffer->put_token(info,/*force_put=*/true) ) { - __TBB_ASSERT(false, "Couldn't put token after thread-bound buffer"); - } - } else { - size_t ntokens_avail = ++(my_pipeline->input_tokens); - if( my_pipeline->filter_list->is_bound() ) { - if( ntokens_avail == 1 ) { - my_pipeline->filter_list->my_input_buffer->sema_V(); - } - } - } - - return success; -} - -} // tbb - diff --git a/src/tbb/src/tbb/pm_client.h b/src/tbb/src/tbb/pm_client.h new file mode 100644 index 000000000..d08af8249 --- /dev/null +++ b/src/tbb/src/tbb/pm_client.h @@ -0,0 +1,76 @@ +/* + Copyright (c) 2022-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_pm_client_H +#define _TBB_pm_client_H + +#include "arena.h" + +namespace tbb { +namespace detail { +namespace r1 { + +class pm_client { +public: + pm_client(arena& a) : my_arena(a) {} + virtual ~pm_client() {} + + unsigned priority_level() { + return my_arena.priority_level(); + } + + void set_top_priority(bool b) { + my_arena.set_top_priority(b); + } + + int min_workers() const { + return my_min_workers; + } + + int max_workers() const { + return my_max_workers; + } + + int update_request(int mandatory_delta, int workers_delta) { + auto min_max_workers = my_arena.update_request(mandatory_delta, workers_delta); + int delta = min_max_workers.second - my_max_workers; + set_workers(min_max_workers.first, min_max_workers.second); + return delta; + } + + virtual void register_thread() = 0; + + virtual void unregister_thread() = 0; + + +protected: + void set_workers(int mn_w, int mx_w) { + __TBB_ASSERT(mn_w >= 0, nullptr); + __TBB_ASSERT(mx_w >= 0, nullptr); + my_min_workers = mn_w; + my_max_workers = mx_w; + } + + arena& my_arena; + int my_min_workers{0}; + int my_max_workers{0}; +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // _TBB_pm_client_H diff --git a/src/tbb/src/tbb/private_server.cpp b/src/tbb/src/tbb/private_server.cpp index ee8eea27a..6b04a85c9 100644 --- a/src/tbb/src/tbb/private_server.cpp +++ b/src/tbb/src/tbb/private_server.cpp @@ -1,42 +1,44 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/mutex.h" + #include "rml_tbb.h" -#include "../server/thread_monitor.h" -#include "tbb/atomic.h" -#include "tbb/cache_aligned_allocator.h" +#include "rml_thread_monitor.h" + #include "scheduler_common.h" #include "governor.h" -#include "tbb_misc.h" +#include "misc.h" + +#include <atomic> -using rml::internal::thread_monitor; namespace tbb { -namespace internal { +namespace detail { +namespace r1 { namespace rml { +using rml::internal::thread_monitor; typedef thread_monitor::handle_type thread_handle; class private_server; class private_worker: no_copy { +private: //! State in finite-state machine that controls the worker. /** State diagram: init --> starting --> normal @@ -54,16 +56,16 @@ class private_worker: no_copy { //! Associated thread has ended normal life sequence and promises to never touch *this again. st_quit }; - atomic<state_t> my_state; - + std::atomic<state_t> my_state; + //! Associated server - private_server& my_server; + private_server& my_server; //! Associated client - tbb_client& my_client; + tbb_client& my_client; //! index used for avoiding the 64K aliasing problem - const size_t my_index; + const std::size_t my_index; //! Monitor for sleeping when there is no work to do. /** The invariant that holds for sleeping workers is: @@ -78,8 +80,8 @@ class private_worker: no_copy { friend class private_server; - //! Actions executed by the associated thread - void run(); + //! Actions executed by the associated thread + void run() noexcept; //! 
Wake up associated thread (or launch a thread if there is none) void wake_or_launch(); @@ -89,66 +91,63 @@ class private_worker: no_copy { static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); - static void release_handle(thread_handle my_handle); + static void release_handle(thread_handle my_handle, bool join); protected: - private_worker( private_server& server, tbb_client& client, const size_t i ) : - my_server(server), - my_client(client), - my_index(i) - { - my_state = st_init; - } + private_worker( private_server& server, tbb_client& client, const std::size_t i ) : + my_state(st_init), my_server(server), my_client(client), my_index(i), + my_handle(), my_next() + {} }; -static const size_t cache_line_size = tbb::internal::NFS_MaxLineSize; - +static const std::size_t cache_line_size = tbb::detail::max_nfs_size; #if _MSC_VER && !defined(__INTEL_COMPILER) // Suppress overzealous compiler warnings about uninstantiable class - #pragma warning(push) - #pragma warning(disable:4510 4610) + // #pragma warning(push) + // #pragma warning(disable:4510 4610) #endif class padded_private_worker: public private_worker { char pad[cache_line_size - sizeof(private_worker)%cache_line_size]; public: - padded_private_worker( private_server& server, tbb_client& client, const size_t i ) + padded_private_worker( private_server& server, tbb_client& client, const std::size_t i ) : private_worker(server,client,i) { suppress_unused_warning(pad); } }; #if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) + // #pragma warning(pop) #endif class private_server: public tbb_server, no_copy { +private: tbb_client& my_client; //! Maximum number of threads to be created. /** Threads are created lazily, so maximum might not actually be reached. */ const tbb_client::size_type my_n_thread; //! Stack size for each thread. */ - const size_t my_stack_size; + const std::size_t my_stack_size; //! Number of jobs that could use their associated thread minus number of active threads. /** If negative, indicates oversubscription. - If positive, indicates that more threads should run. + If positive, indicates that more threads should run. Can be lowered asynchronously, but must be raised only while holding my_asleep_list_mutex, because raising it impacts the invariant for sleeping threads. */ - atomic<int> my_slack; + std::atomic<int> my_slack; //! Counter used to determine when to delete this. - atomic<int> my_ref_count; + std::atomic<int> my_ref_count; padded_private_worker* my_thread_array; //! List of workers that are asleep or committed to sleeping until notified by another thread. - tbb::atomic<private_worker*> my_asleep_list_root; + std::atomic<private_worker*> my_asleep_list_root; //! Protects my_asleep_list_root - typedef scheduler_mutex_type asleep_list_mutex_type; + typedef mutex asleep_list_mutex_type; asleep_list_mutex_type my_asleep_list_mutex; #if TBB_USE_ASSERT - atomic<int> my_net_slack_requests; + std::atomic<int> my_net_slack_requests; #endif /* TBB_USE_ASSERT */ //! Wake up to two sleeping workers, if there are any sleeping. @@ -156,7 +155,7 @@ class private_server: public tbb_server, no_copy { which in turn each wake up two threads, etc. */ void propagate_chain_reaction() { // First test of a double-check idiom. Second test is inside wake_some(0). - if( my_asleep_list_root ) + if( my_asleep_list_root.load(std::memory_order_relaxed) ) wake_some(0); } @@ -166,41 +165,41 @@ class private_server: public tbb_server, no_copy { //! 
Equivalent of adding additional_slack to my_slack and waking up to 2 threads if my_slack permits. void wake_some( int additional_slack ); - virtual ~private_server(); - + ~private_server() override; + void remove_server_ref() { if( --my_ref_count==0 ) { my_client.acknowledge_close_connection(); this->~private_server(); tbb::cache_aligned_allocator<private_server>().deallocate( this, 1 ); - } + } } friend class private_worker; public: private_server( tbb_client& client ); - /*override*/ version_type version() const { + version_type version() const override { return 0; - } + } - /*override*/ void request_close_connection( bool /*exiting*/ ) { - for( size_t i=0; i<my_n_thread; ++i ) + void request_close_connection( bool /*exiting*/ ) override { + for( std::size_t i=0; i<my_n_thread; ++i ) my_thread_array[i].start_shutdown(); remove_server_ref(); } - /*override*/ void yield() {__TBB_Yield();} + void yield() override { d0::yield(); } - /*override*/ void independent_thread_number_changed( int ) {__TBB_ASSERT(false,NULL);} + void independent_thread_number_changed( int ) override {__TBB_ASSERT(false, nullptr);} - /*override*/ unsigned default_concurrency() const { return governor::default_num_threads() - 1; } + unsigned default_concurrency() const override { return governor::default_num_threads() - 1; } - /*override*/ void adjust_job_count_estimate( int delta ); + void adjust_job_count_estimate( int delta ) override; -#if _WIN32||_WIN64 - /*override*/ void register_master ( ::rml::server::execution_resource_t& ) {} - /*override*/ void unregister_master ( ::rml::server::execution_resource_t ) {} +#if _WIN32 || _WIN64 + void register_external_thread ( ::rml::server::execution_resource_t& ) override {} + void unregister_external_thread ( ::rml::server::execution_resource_t ) override {} #endif /* _WIN32||_WIN64 */ }; @@ -209,8 +208,8 @@ class private_server: public tbb_server, no_copy { //------------------------------------------------------------------------ #if _MSC_VER && !defined(__INTEL_COMPILER) // Suppress overzealous compiler warnings about an initialized variable 'sink_for_alloca' not referenced - #pragma warning(push) - #pragma warning(disable:4189) + // #pragma warning(push) + // #pragma warning(disable:4189) #endif #if __MINGW32__ && __GNUC__==4 &&__GNUC_MINOR__>=2 && !__MINGW64__ // ensure that stack is properly aligned for TBB threads @@ -219,32 +218,33 @@ __attribute__((force_align_arg_pointer)) __RML_DECL_THREAD_ROUTINE private_worker::thread_routine( void* arg ) { private_worker* self = static_cast<private_worker*>(arg); AVOID_64K_ALIASING( self->my_index ); -#if _XBOX - int HWThreadIndex = __TBB_XBOX360_GetHardwareThreadIndex(i); - XSetThreadProcessor(GetCurrentThread(), HWThreadIndex); -#endif self->run(); + // return 0 instead of nullptr due to the difference in the type __RML_DECL_THREAD_ROUTINE on various OSs return 0; } #if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) + // #pragma warning(pop) #endif -void private_worker::release_handle(thread_handle handle) { - if (governor::needsWaitWorkers()) +void private_worker::release_handle(thread_handle handle, bool join) { + if (join) thread_monitor::join(handle); else thread_monitor::detach_thread(handle); } void private_worker::start_shutdown() { - state_t s; + __TBB_ASSERT(my_state.load(std::memory_order_relaxed) != st_quit, "The quit state is expected to be set only once"); + + // `acq` to acquire my_handle + // `rel` to release market state + state_t prev_state = my_state.exchange(st_quit, 
std::memory_order_acq_rel); - do { - s = my_state; - __TBB_ASSERT( s!=st_quit, NULL ); - } while( my_state.compare_and_swap( st_quit, s )!=s ); - if( s==st_normal || s==st_starting ) { + if (prev_state == st_init) { + // Perform action that otherwise would be performed by associated thread when it quits. + my_server.remove_server_ref(); + } else { + __TBB_ASSERT(prev_state == st_normal || prev_state == st_starting, nullptr); // May have invalidated invariant for sleeping, so wake up the thread. // Note that the notify() here occurs without maintaining invariants for my_slack. // It does not matter, because my_state==st_quit overrides checking of my_slack. @@ -252,37 +252,27 @@ void private_worker::start_shutdown() { // Do not need release handle in st_init state, // because in this case the thread wasn't started yet. // For st_starting release is done at launch site. - if (s==st_normal) - release_handle(my_handle); - } else if( s==st_init ) { - // Perform action that otherwise would be performed by associated thread when it quits. - my_server.remove_server_ref(); + if (prev_state == st_normal) + release_handle(my_handle, governor::does_client_join_workers(my_client)); } } -void private_worker::run() { +void private_worker::run() noexcept { my_server.propagate_chain_reaction(); // Transiting to st_normal here would require setting my_handle, - // which would create race with the launching thread and + // which would create race with the launching thread and // complications in handle management on Windows. ::rml::job& j = *my_client.create_one_job(); - while( my_state!=st_quit ) { - if( my_server.my_slack>=0 ) { + // memory_order_seq_cst to be strictly ordered after thread_monitor::wait on the next iteration + while( my_state.load(std::memory_order_seq_cst)!=st_quit ) { + if( my_server.my_slack.load(std::memory_order_acquire)>=0 ) { my_client.process(j); - } else { - thread_monitor::cookie c; - // Prepare to wait - my_thread_monitor.prepare_wait(c); - // Check/set the invariant for sleeping - if( my_state!=st_quit && my_server.try_insert_in_asleep_list(*this) ) { - my_thread_monitor.commit_wait(c); - my_server.propagate_chain_reaction(); - } else { - // Invariant broken - my_thread_monitor.cancel_wait(); - } + } else if( my_server.try_insert_in_asleep_list(*this) ) { + my_thread_monitor.wait(); + __TBB_ASSERT(my_state.load(std::memory_order_relaxed) == st_quit || !my_next, "Thread monitor missed a spurious wakeup?" ); + my_server.propagate_chain_reaction(); } } my_client.cleanup(j); @@ -292,61 +282,74 @@ void private_worker::run() { } inline void private_worker::wake_or_launch() { - if( my_state==st_init && my_state.compare_and_swap( st_starting, st_init )==st_init ) { - // after this point, remove_server_ref() must be done by created thread -#if USE_WINTHREAD - my_handle = thread_monitor::launch( thread_routine, this, my_server.my_stack_size, &this->my_index ); -#elif USE_PTHREAD - { - affinity_helper fpa; - fpa.protect_affinity_mask(); - my_handle = thread_monitor::launch( thread_routine, this, my_server.my_stack_size ); - // Implicit destruction of fpa resets original affinity mask. - } -#endif /* USE_PTHREAD */ - state_t s = my_state.compare_and_swap( st_normal, st_starting ); - if (st_starting != s) { - // Do shutdown during startup. my_handle can't be released - // by start_shutdown, because my_handle value might be not set yet - // at time of transition from st_starting to st_quit. 
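A recurring migration pattern in this rewrite: the legacy `tbb::atomic` `compare_and_swap` loop, which returns the observed value, becomes a single `std::atomic` operation with explicit ordering. A minimal before/after sketch of the `start_shutdown()` change above, assuming a `std::atomic<state_t>` member like `my_state`:

```cpp
#include <atomic>

enum state_t { st_init, st_starting, st_normal, st_quit };

// Legacy idiom (deleted code): retry until compare_and_swap observes the
// value we just read, i.e. until our st_quit store wins:
//
//   state_t s;
//   do {
//       s = my_state;
//   } while (my_state.compare_and_swap(st_quit, s) != s);
//
// New idiom: st_quit is stored unconditionally, so one exchange suffices,
// and it also hands back the previous state for the branch that follows.
state_t shut_down(std::atomic<state_t>& my_state) {
    return my_state.exchange(st_quit, std::memory_order_acq_rel);
}
```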
- __TBB_ASSERT( s==st_quit, NULL ); - release_handle(my_handle); + state_t state = my_state.load(std::memory_order_relaxed); + + switch (state) { + case st_starting: + __TBB_fallthrough; + case st_normal: + __TBB_ASSERT(!my_next, "Should not wake a thread while it's still in asleep list"); + my_thread_monitor.notify(); + break; + case st_init: + if (my_state.compare_exchange_strong(state, st_starting)) { + // after this point, remove_server_ref() must be done by created thread +#if __TBB_USE_WINAPI + // Win thread_monitor::launch is designed on the assumption that the workers thread id go from 1 to Hard limit set by TBB market::global_market + const std::size_t worker_idx = my_server.my_n_thread - this->my_index; + my_handle = thread_monitor::launch(thread_routine, this, my_server.my_stack_size, &worker_idx); +#elif __TBB_USE_POSIX + { + affinity_helper fpa; + fpa.protect_affinity_mask( /*restore_process_mask=*/true); + my_handle = thread_monitor::launch(thread_routine, this, my_server.my_stack_size); + // Implicit destruction of fpa resets original affinity mask. + } +#endif /* __TBB_USE_POSIX */ + state = st_starting; + if (!my_state.compare_exchange_strong(state, st_normal)) { + // Do shutdown during startup. my_handle can't be released + // by start_shutdown, because my_handle value might be not set yet + // at time of transition from st_starting to st_quit. + __TBB_ASSERT(state == st_quit, nullptr); + release_handle(my_handle, governor::does_client_join_workers(my_client)); + } } + break; + default: + __TBB_ASSERT(state == st_quit, nullptr); } - else - my_thread_monitor.notify(); } //------------------------------------------------------------------------ // Methods of private_server //------------------------------------------------------------------------ -private_server::private_server( tbb_client& client ) : - my_client(client), +private_server::private_server( tbb_client& client ) : + my_client(client), my_n_thread(client.max_job_count()), my_stack_size(client.min_stack_size()), - my_thread_array(NULL) -{ - my_ref_count = my_n_thread+1; - my_slack = 0; + my_slack(0), + my_ref_count(my_n_thread+1), + my_thread_array(nullptr), + my_asleep_list_root(nullptr) #if TBB_USE_ASSERT - my_net_slack_requests = 0; + , my_net_slack_requests(0) #endif /* TBB_USE_ASSERT */ - my_asleep_list_root = NULL; +{ my_thread_array = tbb::cache_aligned_allocator<padded_private_worker>().allocate( my_n_thread ); - memset( my_thread_array, 0, sizeof(private_worker)*my_n_thread ); - for( size_t i=0; i<my_n_thread; ++i ) { - private_worker* t = new( &my_thread_array[i] ) padded_private_worker( *this, client, i ); - t->my_next = my_asleep_list_root; - my_asleep_list_root = t; - } + for( std::size_t i=0; i<my_n_thread; ++i ) { + private_worker* t = new( &my_thread_array[i] ) padded_private_worker( *this, client, i ); + t->my_next = my_asleep_list_root.load(std::memory_order_relaxed); + my_asleep_list_root.store(t, std::memory_order_relaxed); + } } private_server::~private_server() { - __TBB_ASSERT( my_net_slack_requests==0, NULL ); - for( size_t i=my_n_thread; i--; ) + __TBB_ASSERT( my_net_slack_requests==0, nullptr); + for( std::size_t i=my_n_thread; i--; ) my_thread_array[i].~padded_private_worker(); tbb::cache_aligned_allocator<padded_private_worker>().deallocate( my_thread_array, my_n_thread ); - tbb::internal::poison_pointer( my_thread_array ); + tbb::detail::poison_pointer( my_thread_array ); } inline bool private_server::try_insert_in_asleep_list( private_worker& t ) { @@ -355,47 +358,59 @@ inline bool 
private_server::try_insert_in_asleep_list( private_worker& t ) { return false; // Contribute to slack under lock so that if another takes that unit of slack, // it sees us sleeping on the list and wakes us up. - int k = ++my_slack; - if( k<=0 ) { - t.my_next = my_asleep_list_root; - my_asleep_list_root = &t; - return true; - } else { - --my_slack; - return false; + auto expected = my_slack.load(std::memory_order_relaxed); + while (expected < 0) { + if (my_slack.compare_exchange_strong(expected, expected + 1)) { + t.my_next = my_asleep_list_root.load(std::memory_order_relaxed); + my_asleep_list_root.store(&t, std::memory_order_relaxed); + return true; + } } + + return false; } void private_server::wake_some( int additional_slack ) { - __TBB_ASSERT( additional_slack>=0, NULL ); + __TBB_ASSERT( additional_slack>=0, nullptr ); private_worker* wakee[2]; private_worker**w = wakee; - { + + if (additional_slack) { + // Contribute our unused slack to my_slack. + my_slack += additional_slack; + } + + int allotted_slack = 0; + while (allotted_slack < 2) { + // Chain reaction; Try to claim unit of slack + int old = my_slack.load(std::memory_order_relaxed); + do { + if (old <= 0) goto done; + } while (!my_slack.compare_exchange_strong(old, old - 1)); + ++allotted_slack; + } +done: + + if (allotted_slack) { asleep_list_mutex_type::scoped_lock lock(my_asleep_list_mutex); - while( my_asleep_list_root && w<wakee+2 ) { - if( additional_slack>0 ) { - if (additional_slack+my_slack<=0) // additional demand does not exceed surplus supply - break; - --additional_slack; - } else { - // Chain reaction; Try to claim unit of slack - int old; - do { - old = my_slack; - if( old<=0 ) goto done; - } while( my_slack.compare_and_swap(old-1,old)!=old ); - } + auto root = my_asleep_list_root.load(std::memory_order_relaxed); + while( root && w<wakee+2 && allotted_slack) { + --allotted_slack; // Pop sleeping worker to combine with claimed unit of slack - my_asleep_list_root = (*w++ = my_asleep_list_root)->my_next; + *w++ = root; + root = root->my_next; } - if( additional_slack ) { + my_asleep_list_root.store(root, std::memory_order_relaxed); + if(allotted_slack) { // Contribute our unused slack to my_slack. - my_slack += additional_slack; + my_slack += allotted_slack; } } -done: - while( w>wakee ) - (*--w)->wake_or_launch(); + while( w>wakee ) { + private_worker* ww = *--w; + ww->my_next = nullptr; + ww->wake_or_launch(); + } } void private_server::adjust_job_count_estimate( int delta ) { @@ -415,6 +430,7 @@ tbb_server* make_private_server( tbb_client& client ) { } } // namespace rml -} // namespace internal - +} // namespace r1 +} // namespace detail } // namespace tbb + diff --git a/src/tbb/src/tbb/profiling.cpp b/src/tbb/src/tbb/profiling.cpp new file mode 100644 index 000000000..3cf4da3ce --- /dev/null +++ b/src/tbb/src/tbb/profiling.cpp @@ -0,0 +1,265 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_template_helpers.h" + +#include "main.h" +#include "itt_notify.h" + +#include "oneapi/tbb/profiling.h" + +#include <string.h> + +namespace tbb { +namespace detail { +namespace r1 { + +#if __TBB_USE_ITT_NOTIFY +bool ITT_Present; +static std::atomic<bool> ITT_InitializationDone; + +static __itt_domain *tbb_domains[d1::ITT_NUM_DOMAINS] = {}; + +struct resource_string { + const char *str; + __itt_string_handle *itt_str_handle; +}; + +// +// populate resource strings +// +#define TBB_STRING_RESOURCE( index_name, str ) { str, nullptr }, +static resource_string strings_for_itt[] = { + #include "oneapi/tbb/detail/_string_resource.h" + { "num_resource_strings", nullptr } +}; +#undef TBB_STRING_RESOURCE + +static __itt_string_handle* ITT_get_string_handle(std::uintptr_t idx) { + __TBB_ASSERT(idx < NUM_STRINGS, "string handle out of valid range"); + return idx < NUM_STRINGS ? strings_for_itt[idx].itt_str_handle : nullptr; +} + +static void ITT_init_domains() { + tbb_domains[d1::ITT_DOMAIN_MAIN] = __itt_domain_create( _T("tbb") ); + tbb_domains[d1::ITT_DOMAIN_MAIN]->flags = 1; + tbb_domains[d1::ITT_DOMAIN_FLOW] = __itt_domain_create( _T("tbb.flow") ); + tbb_domains[d1::ITT_DOMAIN_FLOW]->flags = 1; + tbb_domains[d1::ITT_DOMAIN_ALGO] = __itt_domain_create( _T("tbb.algorithm") ); + tbb_domains[d1::ITT_DOMAIN_ALGO]->flags = 1; +} + +static void ITT_init_strings() { + for ( std::uintptr_t i = 0; i < NUM_STRINGS; ++i ) { +#if _WIN32||_WIN64 + strings_for_itt[i].itt_str_handle = __itt_string_handle_createA( strings_for_itt[i].str ); +#else + strings_for_itt[i].itt_str_handle = __itt_string_handle_create( strings_for_itt[i].str ); +#endif + } +} + +static void ITT_init() { + ITT_init_domains(); + ITT_init_strings(); +} + +/** Thread-unsafe lazy one-time initialization of tools interop. + Used by both dummy handlers and general TBB one-time initialization routine. **/ +void ITT_DoUnsafeOneTimeInitialization () { + // Double check ITT_InitializationDone is necessary because the first check + // in ITT_DoOneTimeInitialization is not guarded with the __TBB_InitOnce lock. + if ( !ITT_InitializationDone ) { + ITT_Present = (__TBB_load_ittnotify()!=0); + if (ITT_Present) ITT_init(); + ITT_InitializationDone = true; + } +} + +/** Thread-safe lazy one-time initialization of tools interop. + Used by dummy handlers only. 
**/ +extern "C" +void ITT_DoOneTimeInitialization() { + if ( !ITT_InitializationDone ) { + __TBB_InitOnce::lock(); + ITT_DoUnsafeOneTimeInitialization(); + __TBB_InitOnce::unlock(); + } +} + +void create_itt_sync(void* ptr, const tchar* objtype, const tchar* objname) { + ITT_SYNC_CREATE(ptr, objtype, objname); +} + +void call_itt_notify(int t, void *ptr) { + switch (t) { + case 0: ITT_NOTIFY(sync_prepare, ptr); break; + case 1: ITT_NOTIFY(sync_cancel, ptr); break; + case 2: ITT_NOTIFY(sync_acquired, ptr); break; + case 3: ITT_NOTIFY(sync_releasing, ptr); break; + case 4: ITT_NOTIFY(sync_destroy, ptr); break; + } +} + +void itt_set_sync_name(void* obj, const tchar* name) { + __itt_sync_rename(obj, name); +} + +const __itt_id itt_null_id = { 0, 0, 0 }; + +static inline __itt_domain* get_itt_domain(d1::itt_domain_enum idx) { + if (tbb_domains[idx] == nullptr) { + ITT_DoOneTimeInitialization(); + } + return tbb_domains[idx]; +} + +static inline void itt_id_make(__itt_id* id, void* addr, unsigned long long extra) { + *id = __itt_id_make(addr, extra); +} + +static inline void itt_id_create(const __itt_domain* domain, __itt_id id) { + __itt_id_create(domain, id); +} + +void itt_make_task_group(d1::itt_domain_enum domain, void* group, unsigned long long group_extra, + void* parent, unsigned long long parent_extra, string_resource_index name_index) { + if (__itt_domain* d = get_itt_domain(domain)) { + __itt_id group_id = itt_null_id; + __itt_id parent_id = itt_null_id; + itt_id_make(&group_id, group, group_extra); + itt_id_create(d, group_id); + if (parent) { + itt_id_make(&parent_id, parent, parent_extra); + } + __itt_string_handle* n = ITT_get_string_handle(name_index); + __itt_task_group(d, group_id, parent_id, n); + } +} + +void __TBB_EXPORTED_FUNC itt_metadata_str_add(d1::itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_resource_index key, const char *value ) { + if ( __itt_domain *d = get_itt_domain( domain ) ) { + __itt_id id = itt_null_id; + itt_id_make( &id, addr, addr_extra ); + __itt_string_handle *k = ITT_get_string_handle(key); + size_t value_length = strlen( value ); +#if _WIN32||_WIN64 + __itt_metadata_str_addA(d, id, k, value, value_length); +#else + __itt_metadata_str_add(d, id, k, value, value_length); +#endif + } +} + +void __TBB_EXPORTED_FUNC itt_metadata_ptr_add(d1::itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_resource_index key, void *value ) { + if ( __itt_domain *d = get_itt_domain( domain ) ) { + __itt_id id = itt_null_id; + itt_id_make( &id, addr, addr_extra ); + __itt_string_handle *k = ITT_get_string_handle(key); +#if __TBB_x86_32 + __itt_metadata_add(d, id, k, __itt_metadata_u32, 1, value); +#else + __itt_metadata_add(d, id, k, __itt_metadata_u64, 1, value); +#endif + } +} + +void __TBB_EXPORTED_FUNC itt_relation_add(d1::itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, + itt_relation relation, void *addr1, unsigned long long addr1_extra ) { + if ( __itt_domain *d = get_itt_domain( domain ) ) { + __itt_id id0 = itt_null_id; + __itt_id id1 = itt_null_id; + itt_id_make( &id0, addr0, addr0_extra ); + itt_id_make( &id1, addr1, addr1_extra ); + __itt_relation_add( d, id0, (__itt_relation)relation, id1 ); + } +} + +void __TBB_EXPORTED_FUNC itt_task_begin(d1::itt_domain_enum domain, void* task, unsigned long long task_extra, + void* parent, unsigned long long parent_extra, string_resource_index name_index) { + if (__itt_domain* d = get_itt_domain(domain)) { + __itt_id task_id = itt_null_id; + __itt_id 
parent_id = itt_null_id; + if (task) { + itt_id_make(&task_id, task, task_extra); + } + if (parent) { + itt_id_make(&parent_id, parent, parent_extra); + } + __itt_string_handle* n = ITT_get_string_handle(name_index); + __itt_task_begin(d, task_id, parent_id, n); + } +} + +void __TBB_EXPORTED_FUNC itt_task_end(d1::itt_domain_enum domain) { + if (__itt_domain* d = get_itt_domain(domain)) { + __itt_task_end(d); + } +} + +void __TBB_EXPORTED_FUNC itt_region_begin(d1::itt_domain_enum domain, void *region, unsigned long long region_extra, + void *parent, unsigned long long parent_extra, string_resource_index /* name_index */ ) { + if ( __itt_domain *d = get_itt_domain( domain ) ) { + __itt_id region_id = itt_null_id; + __itt_id parent_id = itt_null_id; + itt_id_make( ®ion_id, region, region_extra ); + if ( parent ) { + itt_id_make( &parent_id, parent, parent_extra ); + } + __itt_region_begin( d, region_id, parent_id, nullptr ); + } +} + +void __TBB_EXPORTED_FUNC itt_region_end(d1::itt_domain_enum domain, void *region, unsigned long long region_extra ) { + if ( __itt_domain *d = get_itt_domain( domain ) ) { + __itt_id region_id = itt_null_id; + itt_id_make( ®ion_id, region, region_extra ); + __itt_region_end( d, region_id ); + } +} + +#else +void create_itt_sync(void* /*ptr*/, const tchar* /*objtype*/, const tchar* /*objname*/) {} +void call_itt_notify(int /*t*/, void* /*ptr*/) {} +void itt_set_sync_name(void* /*obj*/, const tchar* /*name*/) {} +void itt_make_task_group(d1::itt_domain_enum /*domain*/, void* /*group*/, unsigned long long /*group_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_resource_index /*name_index*/) {} +void itt_metadata_str_add(d1::itt_domain_enum /*domain*/, void* /*addr*/, unsigned long long /*addr_extra*/, + string_resource_index /*key*/, const char* /*value*/ ) { } +void itt_metadata_ptr_add(d1::itt_domain_enum /*domain*/, void * /*addr*/, unsigned long long /*addr_extra*/, + string_resource_index /*key*/, void * /*value*/ ) {} +void itt_relation_add(d1::itt_domain_enum /*domain*/, void* /*addr0*/, unsigned long long /*addr0_extra*/, + itt_relation /*relation*/, void* /*addr1*/, unsigned long long /*addr1_extra*/ ) { } +void itt_task_begin(d1::itt_domain_enum /*domain*/, void* /*task*/, unsigned long long /*task_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_resource_index /*name_index*/ ) { } +void itt_task_end(d1::itt_domain_enum /*domain*/ ) { } +void itt_region_begin(d1::itt_domain_enum /*domain*/, void* /*region*/, unsigned long long /*region_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_resource_index /*name_index*/ ) { } +void itt_region_end(d1::itt_domain_enum /*domain*/, void* /*region*/, unsigned long long /*region_extra*/ ) { } +#endif /* __TBB_USE_ITT_NOTIFY */ + +const tchar + *SyncType_Scheduler = _T("%Constant") + ; +const tchar + *SyncObj_ContextsList = _T("TBB Scheduler") + ; +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/queuing_mutex.cpp b/src/tbb/src/tbb/queuing_mutex.cpp deleted file mode 100644 index 1dd71e348..000000000 --- a/src/tbb/src/tbb/queuing_mutex.cpp +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/queuing_mutex.h" -#include "tbb/tbb_machine.h" -#include "tbb/tbb_stddef.h" -#include "tbb_misc.h" -#include "itt_notify.h" - -namespace tbb { - -using namespace internal; - -//! A method to acquire queuing_mutex lock -void queuing_mutex::scoped_lock::acquire( queuing_mutex& m ) -{ - __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - mutex = &m; - next = NULL; - going = 0; - - // The fetch_and_store must have release semantics, because we are - // "sending" the fields initialized above to other processors. - scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this); - if( pred ) { - ITT_NOTIFY(sync_prepare, mutex); -#if TBB_USE_ASSERT - __TBB_control_consistency_helper(); // on "m.q_tail" - __TBB_ASSERT( !pred->next, "the predecessor has another successor!"); -#endif - pred->next = this; - spin_wait_while_eq( going, 0ul ); - } - ITT_NOTIFY(sync_acquired, mutex); - - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - __TBB_load_with_acquire(going); -} - -//! A method to acquire queuing_mutex if it is free -bool queuing_mutex::scoped_lock::try_acquire( queuing_mutex& m ) -{ - __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - next = NULL; - going = 0; - - // The CAS must have release semantics, because we are - // "sending" the fields initialized above to other processors. - if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) ) - return false; - - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - // try_acquire should always have acquire semantic, even if failed. - __TBB_load_with_acquire(going); - mutex = &m; - ITT_NOTIFY(sync_acquired, mutex); - return true; -} - -//! A method to release queuing_mutex lock -void queuing_mutex::scoped_lock::release( ) -{ - __TBB_ASSERT(this->mutex!=NULL, "no lock acquired"); - - ITT_NOTIFY(sync_releasing, mutex); - if( !next ) { - if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - // this was the only item in the queue, and the queue is now empty. 
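The deleted `release()` above is the delicate step of an MCS-style queue lock: a successor may have already swapped itself into `q_tail` without yet linking itself into `next`, so a failed CAS on the tail means "wait for the link, then hand over". A condensed sketch of the same race handling in `std::atomic` terms; the `Node` and `QueueLock` names are hypothetical:

```cpp
#include <atomic>

struct Node {
    std::atomic<Node*> next{nullptr};
    std::atomic<int>   going{0};   // successor spins on this, as on 'going'
};

struct QueueLock {
    std::atomic<Node*> tail{nullptr};

    void release(Node* self) {
        if (!self->next.load(std::memory_order_acquire)) {
            // Still the tail? Then the queue empties and we are done.
            Node* expected = self;
            if (tail.compare_exchange_strong(expected, nullptr,
                                             std::memory_order_release))
                return;
            // A successor swapped itself into 'tail' but has not linked
            // itself yet: spin until it publishes 'next'.
            while (!self->next.load(std::memory_order_acquire)) { /* spin */ }
        }
        // Hand the lock to the successor.
        self->next.load(std::memory_order_relaxed)
            ->going.store(1, std::memory_order_release);
    }
};
```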
- goto done; - } - // Someone in the queue - spin_wait_while_eq( next, (scoped_lock*)0 ); - } - __TBB_ASSERT(next,NULL); - __TBB_store_with_release(next->going, 1); -done: - initialize(); -} - -void queuing_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::queuing_mutex"), _T("")); -} - -} // namespace tbb diff --git a/src/tbb/src/tbb/queuing_rw_mutex.cpp b/src/tbb/src/tbb/queuing_rw_mutex.cpp index 35130ea9b..b4590b5b7 100644 --- a/src/tbb/src/tbb/queuing_rw_mutex.cpp +++ b/src/tbb/src/tbb/queuing_rw_mutex.cpp @@ -1,495 +1,617 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ /** Before making any changes in the implementation, please emulate algorithmic changes with SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml. There could be some code looking as "can be restructured" but its structure does matter! */ -#include "tbb/queuing_rw_mutex.h" -#include "tbb/tbb_machine.h" -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_machine.h" +#include "oneapi/tbb/queuing_rw_mutex.h" +#include "oneapi/tbb/detail/_assert.h" +#include "oneapi/tbb/detail/_utils.h" #include "itt_notify.h" - namespace tbb { - -using namespace internal; - -//! Flag bits in a state_t that specify information about a locking request. 
-enum state_t_flags { - STATE_NONE = 0, - STATE_WRITER = 1<<0, - STATE_READER = 1<<1, - STATE_READER_UNBLOCKNEXT = 1<<2, - STATE_ACTIVEREADER = 1<<3, - STATE_UPGRADE_REQUESTED = 1<<4, - STATE_UPGRADE_WAITING = 1<<5, - STATE_UPGRADE_LOSER = 1<<6, - STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT, - STATE_COMBINED_READER = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER, - STATE_COMBINED_UPGRADING = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER -}; - -const unsigned char RELEASED = 0; -const unsigned char ACQUIRED = 1; - -inline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock() -{ - return as_atomic(my_internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED; -} - -inline void queuing_rw_mutex::scoped_lock::acquire_internal_lock() -{ - // Usually, we would use the test-test-and-set idiom here, with exponential backoff. - // But so far, experiments indicate there is no value in doing so here. - while( !try_acquire_internal_lock() ) { - __TBB_Pause(1); - } -} - -inline void queuing_rw_mutex::scoped_lock::release_internal_lock() -{ - __TBB_store_with_release(my_internal_lock,RELEASED); -} - -inline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock() -{ - spin_wait_until_eq(my_internal_lock, RELEASED); -} - -inline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) { - if( flag ) - wait_for_release_of_internal_lock(); - else - release_internal_lock(); -} +namespace detail { +namespace r1 { #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4311 4312) + // #pragma warning (push) + // #pragma warning (disable: 4311 4312) #endif //! A view of a T* with additional functionality for twiddling low-order bits. 
template<typename T> -class tricky_atomic_pointer: no_copy { +class tricky_atomic_pointer { public: - typedef typename atomic_selector<sizeof(T*)>::word word; + using word = uintptr_t; - template<memory_semantics M> - static T* fetch_and_add( T* volatile * location, word addend ) { - return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) ); + static T* fetch_add( std::atomic<word>& location, word addend, std::memory_order memory_order ) { + return reinterpret_cast<T*>(location.fetch_add(addend, memory_order)); } - template<memory_semantics M> - static T* fetch_and_store( T* volatile * location, T* value ) { - return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) ); + + static T* exchange( std::atomic<word>& location, T* value, std::memory_order memory_order ) { + return reinterpret_cast<T*>(location.exchange(reinterpret_cast<word>(value), memory_order)); + } + + static T* compare_exchange_strong( std::atomic<word>& obj, const T* expected, const T* desired, std::memory_order memory_order ) { + word expd = reinterpret_cast<word>(expected); + obj.compare_exchange_strong(expd, reinterpret_cast<word>(desired), memory_order); + return reinterpret_cast<T*>(expd); + } + + static void store( std::atomic<word>& location, const T* value, std::memory_order memory_order ) { + location.store(reinterpret_cast<word>(value), memory_order); + } + + static T* load( std::atomic<word>& location, std::memory_order memory_order ) { + return reinterpret_cast<T*>(location.load(memory_order)); } - template<memory_semantics M> - static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) { - return reinterpret_cast<T*>( - atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value), - reinterpret_cast<word>(comparand)) - ); + + static void spin_wait_while_eq(const std::atomic<word>& location, const T* value) { + tbb::detail::d0::spin_wait_while_eq(location, reinterpret_cast<word>(value) ); } T* & ref; tricky_atomic_pointer( T*& original ) : ref(original) {}; - tricky_atomic_pointer( T* volatile & original ) : ref(original) {}; - T* operator&( word operand2 ) const { + tricky_atomic_pointer(const tricky_atomic_pointer&) = delete; + tricky_atomic_pointer& operator=(const tricky_atomic_pointer&) = delete; + T* operator&( const word operand2 ) const { return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 ); } - T* operator|( word operand2 ) const { + T* operator|( const word operand2 ) const { return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 ); } }; -typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer; +using tricky_pointer = tricky_atomic_pointer<queuing_rw_mutex::scoped_lock>; #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) // Workaround for overzealous compiler warnings - #pragma warning (pop) + // #pragma warning (pop) #endif -//! Mask for low order bit of a pointer. -static const tricky_pointer::word FLAG = 0x1; +//! Flag bits in a state_t that specify information about a locking request. 
+enum state_t_flags : unsigned char { + STATE_NONE = 0, + STATE_WRITER = 1<<0, + STATE_READER = 1<<1, + STATE_READER_UNBLOCKNEXT = 1<<2, + STATE_ACTIVEREADER = 1<<3, + STATE_UPGRADE_REQUESTED = 1<<4, + STATE_UPGRADE_WAITING = 1<<5, + STATE_UPGRADE_LOSER = 1<<6, + STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT, + STATE_COMBINED_READER = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER, + STATE_COMBINED_UPGRADING = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER +}; -inline -uintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) { - return uintptr_t(ptr) & FLAG; -} +static const unsigned char RELEASED = 0; +static const unsigned char ACQUIRED = 1; -//------------------------------------------------------------------------ -// Methods of queuing_rw_mutex::scoped_lock -//------------------------------------------------------------------------ - -//! A method to acquire queuing_rw_mutex lock -void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write ) -{ - __TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - my_mutex = &m; - __TBB_store_relaxed(my_prev , (scoped_lock*)0); - __TBB_store_relaxed(my_next , (scoped_lock*)0); - __TBB_store_relaxed(my_going, 0); - my_state = state_t(write ? STATE_WRITER : STATE_READER); - my_internal_lock = RELEASED; - - queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this); - - if( write ) { // Acquiring for write - - if( pred ) { - ITT_NOTIFY(sync_prepare, my_mutex); - pred = tricky_pointer(pred) & ~FLAG; - __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), "use of corrupted pointer!" ); -#if TBB_USE_ASSERT - __TBB_control_consistency_helper(); // on "m.q_tail" - __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), "the predecessor has another successor!"); -#endif - __TBB_store_with_release(pred->my_next,this); - spin_wait_until_eq(my_going, 1); - } +struct queuing_rw_mutex_impl { + //! Try to acquire the internal lock + /** Returns true if lock was successfully acquired. */ + static bool try_acquire_internal_lock(d1::queuing_rw_mutex::scoped_lock& s) + { + auto expected = RELEASED; + return s.my_internal_lock.compare_exchange_strong(expected, ACQUIRED); + } - } else { // Acquiring for read -#if DO_ITT_NOTIFY - bool sync_prepare_done = false; -#endif - if( pred ) { - unsigned short pred_state; - __TBB_ASSERT( !__TBB_load_relaxed(my_prev), "the predecessor is already set" ); - if( uintptr_t(pred) & FLAG ) { - /* this is only possible if pred is an upgrading reader and it signals us to wait */ - pred_state = STATE_UPGRADE_WAITING; - pred = tricky_pointer(pred) & ~FLAG; - } else { - // Load pred->my_state now, because once pred->my_next becomes - // non-NULL, we must assume that *pred might be destroyed. - pred_state = pred->my_state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER); - } - __TBB_store_relaxed(my_prev, pred); - __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), "use of corrupted pointer!" ); -#if TBB_USE_ASSERT - __TBB_control_consistency_helper(); // on "m.q_tail" - __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), "the predecessor has another successor!"); -#endif - __TBB_store_with_release(pred->my_next,this); - if( pred_state != STATE_ACTIVEREADER ) { -#if DO_ITT_NOTIFY - sync_prepare_done = true; - ITT_NOTIFY(sync_prepare, my_mutex); -#endif - spin_wait_until_eq(my_going, 1); - } + //! 
Acquire the internal lock + static void acquire_internal_lock(d1::queuing_rw_mutex::scoped_lock& s) + { + // Usually, we would use the test-test-and-set idiom here, with exponential backoff. + // But so far, experiments indicate there is no value in doing so here. + while( !try_acquire_internal_lock(s) ) { + machine_pause(1); } + } - // The protected state must have been acquired here before it can be further released to any other reader(s): - unsigned short old_state = my_state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER); - if( old_state!=STATE_READER ) { -#if DO_ITT_NOTIFY - if( !sync_prepare_done ) - ITT_NOTIFY(sync_prepare, my_mutex); -#endif - // Failed to become active reader -> need to unblock the next waiting reader first - __TBB_ASSERT( my_state==STATE_READER_UNBLOCKNEXT, "unexpected state" ); - spin_wait_while_eq(my_next, (scoped_lock*)NULL); - /* my_state should be changed before unblocking the next otherwise it might finish - and another thread can get our old state and left blocked */ - my_state = STATE_ACTIVEREADER; - __TBB_store_with_release(my_next->my_going,1); - } + //! Release the internal lock + static void release_internal_lock(d1::queuing_rw_mutex::scoped_lock& s) + { + s.my_internal_lock.store(RELEASED, std::memory_order_release); } - ITT_NOTIFY(sync_acquired, my_mutex); + //! Wait for internal lock to be released + static void wait_for_release_of_internal_lock(d1::queuing_rw_mutex::scoped_lock& s) + { + spin_wait_until_eq(s.my_internal_lock, RELEASED); + } - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - __TBB_load_with_acquire(my_going); -} + //! A helper function + static void unblock_or_wait_on_internal_lock(d1::queuing_rw_mutex::scoped_lock& s, uintptr_t flag ) { + if( flag ) { + wait_for_release_of_internal_lock(s); + } + else { + release_internal_lock(s); + } + } -//! A method to acquire queuing_rw_mutex if it is free -bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write ) -{ - __TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex"); - - if( load<relaxed>(m.q_tail) ) - return false; // Someone already took the lock - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - __TBB_store_relaxed(my_prev, (scoped_lock*)0); - __TBB_store_relaxed(my_next, (scoped_lock*)0); - __TBB_store_relaxed(my_going, 0); // TODO: remove dead assignment? - my_state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER); - my_internal_lock = RELEASED; - - // The CAS must have release semantics, because we are - // "sending" the fields initialized above to other processors. - if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) ) - return false; // Someone already took the lock - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - // try_acquire should always have acquire semantic, even if failed. - __TBB_load_with_acquire(my_going); - my_mutex = &m; - ITT_NOTIFY(sync_acquired, my_mutex); - return true; -} + //! Mask for low order bit of a pointer. + static const tricky_pointer::word FLAG = 0x1; -//! 
A method to release queuing_rw_mutex lock -void queuing_rw_mutex::scoped_lock::release( ) -{ - __TBB_ASSERT(my_mutex!=NULL, "no lock acquired"); + static uintptr_t get_flag( d1::queuing_rw_mutex::scoped_lock* ptr ) { + return reinterpret_cast<uintptr_t>(ptr) & FLAG; + } - ITT_NOTIFY(sync_releasing, my_mutex); + //------------------------------------------------------------------------ + // Methods of queuing_rw_mutex::scoped_lock + //------------------------------------------------------------------------ + + //! A method to acquire queuing_rw_mutex lock + static void acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write) + { + __TBB_ASSERT( !s.my_mutex, "scoped_lock is already holding a mutex"); + + // Must set all fields before the exchange, because once the + // exchange executes, *this becomes accessible to other threads. + s.my_mutex = &m; + s.my_prev.store(0U, std::memory_order_relaxed); + s.my_next.store(0U, std::memory_order_relaxed); + s.my_going.store(0U, std::memory_order_relaxed); + s.my_state.store(d1::queuing_rw_mutex::scoped_lock::state_t(write ? STATE_WRITER : STATE_READER), std::memory_order_relaxed); + s.my_internal_lock.store(RELEASED, std::memory_order_relaxed); + + + // The CAS must have release semantics, because we are + // "sending" the fields initialized above to other actors. + // We need acquire semantics, because we are acquiring the predecessor (or mutex if no predecessor) + queuing_rw_mutex::scoped_lock* predecessor = m.q_tail.exchange(&s, std::memory_order_acq_rel); + + if( write ) { // Acquiring for write + + if( predecessor ) { + ITT_NOTIFY(sync_prepare, s.my_mutex); + predecessor = tricky_pointer(predecessor) & ~FLAG; + __TBB_ASSERT( !predecessor->my_next, "the predecessor has another successor!"); + tricky_pointer::store(predecessor->my_next, &s, std::memory_order_release); + // We are acquiring the mutex + spin_wait_until_eq(s.my_going, 1U, std::memory_order_acquire); + } - if( my_state == STATE_WRITER ) { // Acquired for write + } else { // Acquiring for read + #if __TBB_USE_ITT_NOTIFY + bool sync_prepare_done = false; + #endif + if( predecessor ) { + unsigned char pred_state{}; + __TBB_ASSERT( !s.my_prev.load(std::memory_order_relaxed), "the predecessor is already set" ); + if( tricky_pointer(predecessor) & FLAG ) { + /* this is only possible if predecessor is an upgrading reader and it signals us to wait */ + pred_state = STATE_UPGRADE_WAITING; + predecessor = tricky_pointer(predecessor) & ~FLAG; + } else { + // Load predecessor->my_state now, because once predecessor->my_next becomes + // non-null, we must assume that *predecessor might be destroyed. + pred_state = predecessor->my_state.load(std::memory_order_relaxed); + if (pred_state == STATE_READER) { + // Notify the previous reader to unblock us. + predecessor->my_state.compare_exchange_strong(pred_state, STATE_READER_UNBLOCKNEXT, std::memory_order_relaxed); + } + if (pred_state == STATE_ACTIVEREADER) { // either we initially read it or CAS failed + // Active reader means that the predecessor already acquired the mutex and cannot notify us. + // Therefore, we need to acquire the mutex ourselves by re-reading predecessor state. + (void)predecessor->my_state.load(std::memory_order_acquire); + } + } + tricky_pointer::store(s.my_prev, predecessor, std::memory_order_relaxed); + __TBB_ASSERT( !( tricky_pointer(predecessor) & FLAG ), "use of corrupted pointer!" 
); + __TBB_ASSERT( !predecessor->my_next.load(std::memory_order_relaxed), "the predecessor has another successor!"); + tricky_pointer::store(predecessor->my_next, &s, std::memory_order_release); + if( pred_state != STATE_ACTIVEREADER ) { + #if __TBB_USE_ITT_NOTIFY + sync_prepare_done = true; + ITT_NOTIFY(sync_prepare, s.my_mutex); + #endif + // We are acquiring the mutex + spin_wait_until_eq(s.my_going, 1U, std::memory_order_acquire); + } + } - // The logic below is the same as "writerUnlock", but elides - // "return" from the middle of the routine. - // In the statement below, acquire semantics of reading my_next is required - // so that following operations with fields of my_next are safe. - scoped_lock* n = __TBB_load_with_acquire(my_next); - if( !n ) { - if( this == my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - // this was the only item in the queue, and the queue is now empty. - goto done; + // The protected state must have been acquired here before it can be further released to any other reader(s): + unsigned char old_state = STATE_READER; + // When this reader is signaled by previous actor it acquires the mutex. + // We need to build happens-before relation with all other coming readers that will read our ACTIVEREADER + // without blocking on my_going. Therefore, we need to publish ACTIVEREADER with release semantics. + // On fail it is relaxed, because we will build happens-before on my_going. + s.my_state.compare_exchange_strong(old_state, STATE_ACTIVEREADER, std::memory_order_release, std::memory_order_relaxed); + if( old_state!=STATE_READER ) { +#if __TBB_USE_ITT_NOTIFY + if( !sync_prepare_done ) + ITT_NOTIFY(sync_prepare, s.my_mutex); +#endif + // Failed to become active reader -> need to unblock the next waiting reader first + __TBB_ASSERT( s.my_state.load(std::memory_order_relaxed)==STATE_READER_UNBLOCKNEXT, "unexpected state" ); + spin_wait_while_eq(s.my_next, 0U, std::memory_order_acquire); + /* my_state should be changed before unblocking the next otherwise it might finish + and another thread can get our old state and left blocked */ + s.my_state.store(STATE_ACTIVEREADER, std::memory_order_relaxed); + tricky_pointer::load(s.my_next, std::memory_order_relaxed)->my_going.store(1U, std::memory_order_release); } - spin_wait_while_eq( my_next, (scoped_lock*)NULL ); - n = __TBB_load_with_acquire(my_next); - } - __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early - if( n->my_state==STATE_UPGRADE_WAITING ) { - // the next waiting for upgrade means this writer was upgraded before. - acquire_internal_lock(); - queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL); - n->my_state = STATE_UPGRADE_LOSER; - __TBB_store_with_release(n->my_going,1); - unblock_or_wait_on_internal_lock(get_flag(tmp)); - } else { - __TBB_ASSERT( my_state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" ); - __TBB_ASSERT( !( uintptr_t(__TBB_load_relaxed(n->my_prev)) & FLAG ), "use of corrupted pointer!" 
); - __TBB_store_relaxed(n->my_prev, (scoped_lock*)0); - __TBB_store_with_release(n->my_going,1); + __TBB_ASSERT(s.my_state.load(std::memory_order_relaxed) == STATE_ACTIVEREADER, "unlocked reader is active reader"); } - } else { // Acquired for read + ITT_NOTIFY(sync_acquired, s.my_mutex); + } - queuing_rw_mutex::scoped_lock *tmp = NULL; -retry: - // Addition to the original paper: Mark my_prev as in use - queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG); + //! A method to acquire queuing_rw_mutex if it is free + static bool try_acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write) + { + __TBB_ASSERT( !s.my_mutex, "scoped_lock is already holding a mutex"); + + if( m.q_tail.load(std::memory_order_relaxed) ) + return false; // Someone already took the lock + + // Must set all fields before the exchange, because once the + // exchange executes, *this becomes accessible to other threads. + s.my_prev.store(0U, std::memory_order_relaxed); + s.my_next.store(0U, std::memory_order_relaxed); + s.my_going.store(0U, std::memory_order_relaxed); // TODO: remove dead assignment? + s.my_state.store(d1::queuing_rw_mutex::scoped_lock::state_t(write ? STATE_WRITER : STATE_ACTIVEREADER), std::memory_order_relaxed); + s.my_internal_lock.store(RELEASED, std::memory_order_relaxed); + + // The CAS must have release semantics, because we are + // "sending" the fields initialized above to other actors. + // We need acquire semantics, because we are acquiring the mutex + d1::queuing_rw_mutex::scoped_lock* expected = nullptr; + if (!m.q_tail.compare_exchange_strong(expected, &s, std::memory_order_acq_rel)) + return false; // Someone already took the lock + s.my_mutex = &m; + ITT_NOTIFY(sync_acquired, s.my_mutex); + return true; + } - if( pred ) { - if( !(pred->try_acquire_internal_lock()) ) - { - // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades. - // In the second case, it could or could not know my "in use" flag - need to check - tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred) | FLAG ); - if( !(uintptr_t(tmp) & FLAG) ) { - // Wait for the predecessor to change my_prev (e.g. during unlink) - spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG ); - // Now owner of pred is waiting for _us_ to release its lock - pred->release_internal_lock(); + //! A method to release queuing_rw_mutex lock + static void release(d1::queuing_rw_mutex::scoped_lock& s) { + __TBB_ASSERT(s.my_mutex!=nullptr, "no lock acquired"); + + ITT_NOTIFY(sync_releasing, s.my_mutex); + + if( s.my_state.load(std::memory_order_relaxed) == STATE_WRITER ) { // Acquired for write + + // The logic below is the same as "writerUnlock", but elides + // "return" from the middle of the routine. + // In the statement below, acquire semantics of reading my_next is required + // so that following operations with fields of my_next are safe. + d1::queuing_rw_mutex::scoped_lock* next = tricky_pointer::load(s.my_next, std::memory_order_acquire); + if( !next ) { + d1::queuing_rw_mutex::scoped_lock* expected = &s; + // Release mutex on success otherwise wait for successor publication + if( s.my_mutex->q_tail.compare_exchange_strong(expected, nullptr, + std::memory_order_release, std::memory_order_relaxed) ) + { + // this was the only item in the queue, and the queue is now empty. 
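The try_acquire() above is the classic single-CAS fast path: a cheap relaxed probe of q_tail, then one compare_exchange_strong with acquire-release ordering. A minimal standalone sketch of the idiom (TailLock and Node are illustrative names, not part of this patch):

    #include <atomic>

    struct Node;  // stands in for queuing_rw_mutex::scoped_lock

    struct TailLock {
        std::atomic<Node*> q_tail{nullptr};

        // Succeeds only when the queue is empty: one CAS, no spinning.
        bool try_acquire(Node* self) {
            // Cheap relaxed probe: skip the doomed CAS when the lock is busy,
            // avoiding needless read-for-ownership cache traffic.
            if (q_tail.load(std::memory_order_relaxed) != nullptr)
                return false;
            Node* expected = nullptr;
            // release half: publish self's initialized fields to other threads;
            // acquire half: synchronize with the previous owner's release.
            return q_tail.compare_exchange_strong(expected, self,
                                                  std::memory_order_acq_rel);
        }
    };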
+ goto done; } - } - // else the "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do - - tmp = NULL; - goto retry; + spin_wait_while_eq(s.my_next, 0U, std::memory_order_relaxed); + next = tricky_pointer::load(s.my_next, std::memory_order_acquire); + } + next->my_going.store(2U, std::memory_order_relaxed); // protect next queue node from being destroyed too early + // If the next is STATE_UPGRADE_WAITING, it is expected to acquire all other released readers via release + // sequence in next->my_state. In that case, we need to preserve release sequence in next->my_state + // contributed by other reader. So, there are two approaches not to break the release sequence: + // 1. Use read-modify-write (exchange) operation to store with release the UPGRADE_LOSER state; + // 2. Acquire the release sequence and store the sequence and UPGRADE_LOSER state. + // The second approach seems better on x86 because it does not involve interlocked operations. + // Therefore, we read next->my_state with acquire while it is not required for else branch to get the + // release sequence. + if( next->my_state.load(std::memory_order_acquire)==STATE_UPGRADE_WAITING ) { + // the next waiting for upgrade means this writer was upgraded before. + acquire_internal_lock(s); + // Responsibility transition, the one who reads uncorrupted my_prev will do release. + // Guarantee that above store of 2 into next->my_going happens-before resetting of next->my_prev + d1::queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::exchange(next->my_prev, nullptr, std::memory_order_release); + // Pass the release sequence that we acquired with the above load of next->my_state. + next->my_state.store(STATE_UPGRADE_LOSER, std::memory_order_release); + // We are releasing the mutex + next->my_going.store(1U, std::memory_order_release); + unblock_or_wait_on_internal_lock(s, get_flag(tmp)); + } else { + // next->state cannot be STATE_UPGRADE_REQUESTED + __TBB_ASSERT( next->my_state.load(std::memory_order_relaxed) & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" ); + __TBB_ASSERT( !( next->my_prev.load(std::memory_order_relaxed) & FLAG ), "use of corrupted pointer!" ); + // Guarantee that above store of 2 into next->my_going happens-before resetting of next->my_prev + tricky_pointer::store(next->my_prev, nullptr, std::memory_order_release); + // We are releasing the mutex + next->my_going.store(1U, std::memory_order_release); } - __TBB_ASSERT(pred && pred->my_internal_lock==ACQUIRED, "predecessor's lock is not acquired"); - __TBB_store_relaxed(my_prev, pred); - acquire_internal_lock(); - __TBB_store_with_release(pred->my_next,reinterpret_cast<scoped_lock *>(NULL)); + } else { // Acquired for read + // The basic idea is to build happens-before relation with left and right readers via prev and next. In addition, + // the first reader should acquire the left (prev) signal and propagate to right (next). To simplify, we always + // build happens-before relation between left and right (left happens before right). + queuing_rw_mutex::scoped_lock *tmp = nullptr; + retry: + // Addition to the original paper: Mark my_prev as in use + queuing_rw_mutex::scoped_lock *predecessor = tricky_pointer::fetch_add(s.my_prev, FLAG, std::memory_order_acquire); + + if( predecessor ) { + if( !(try_acquire_internal_lock(*predecessor)) ) + { + // Failed to acquire the lock on predecessor. The predecessor either unlinks or upgrades. 
+ // In the second case, it could or could not know my "in use" flag - need to check + // Responsibility transition, the one who reads uncorrupted my_prev will do release. + tmp = tricky_pointer::compare_exchange_strong(s.my_prev, tricky_pointer(predecessor) | FLAG, predecessor, std::memory_order_acquire); + if( !(tricky_pointer(tmp) & FLAG) ) { + __TBB_ASSERT(tricky_pointer::load(s.my_prev, std::memory_order_relaxed) != (tricky_pointer(predecessor) | FLAG), nullptr); + // Now owner of predecessor is waiting for _us_ to release its lock + release_internal_lock(*predecessor); + } + // else the "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do + + tmp = nullptr; + goto retry; + } + __TBB_ASSERT(predecessor && predecessor->my_internal_lock.load(std::memory_order_relaxed)==ACQUIRED, "predecessor's lock is not acquired"); + tricky_pointer::store(s.my_prev, predecessor, std::memory_order_relaxed); + acquire_internal_lock(s); - if( !__TBB_load_relaxed(my_next) && this != my_mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) { - spin_wait_while_eq( my_next, (void*)NULL ); - } - __TBB_ASSERT( !get_flag(__TBB_load_relaxed(my_next)), "use of corrupted pointer" ); - - // ensure acquire semantics of reading 'my_next' - if( scoped_lock *const l_next = __TBB_load_with_acquire(my_next) ) { // I->next != nil, TODO: rename to n after clearing up and adapting the n in the comment two lines below - // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0 - tmp = tricky_pointer::fetch_and_store<tbb::release>(&(l_next->my_prev), pred); - // I->prev->next = I->next; - __TBB_ASSERT(__TBB_load_relaxed(my_prev)==pred, NULL); - __TBB_store_with_release(pred->my_next, my_next); - } - // Safe to release in the order opposite to acquiring which makes the code simpler - pred->release_internal_lock(); - - } else { // No predecessor when we looked - acquire_internal_lock(); // "exclusiveLock(&I->EL)" - scoped_lock* n = __TBB_load_with_acquire(my_next); - if( !n ) { - if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - spin_wait_while_eq( my_next, (scoped_lock*)NULL ); - n = __TBB_load_relaxed(my_next); - } else { - goto unlock_self; + tricky_pointer::store(predecessor->my_next, nullptr, std::memory_order_release); + + d1::queuing_rw_mutex::scoped_lock* expected = &s; + if( !tricky_pointer::load(s.my_next, std::memory_order_acquire) && !s.my_mutex->q_tail.compare_exchange_strong(expected, predecessor, std::memory_order_release) ) { + spin_wait_while_eq( s.my_next, 0U, std::memory_order_acquire ); + } + __TBB_ASSERT( !(s.my_next.load(std::memory_order_relaxed) & FLAG), "use of corrupted pointer" ); + + // my_next is acquired either with load or spin_wait. 
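The FLAG arithmetic running through this function depends on scoped_lock nodes being aligned, so bit 0 of a real pointer is always zero; the queue links are stored as atomic integers and the low bit doubles as the "in use" / responsibility-transfer marker, set atomically by fetch_add. A standalone sketch of the tagging helpers (hypothetical names; tricky_pointer plays this role in the actual file):

    #include <atomic>
    #include <cstdint>

    constexpr std::uintptr_t kFlag = 0x1;        // low bit of an aligned pointer

    using tagged_link = std::atomic<std::uintptr_t>;

    template <typename T>
    T* pointer_of(std::uintptr_t bits) {         // strip the tag bit
        return reinterpret_cast<T*>(bits & ~kFlag);
    }

    inline bool flag_of(std::uintptr_t bits) {   // read the tag bit
        return (bits & kFlag) != 0;
    }

    // Atomically mark a link "in use" and return its previous contents.
    // Adding 1 to an even (untagged) value just sets bit 0, so a single
    // fetch_add suffices; the protocol must guarantee the bit was clear,
    // which is what the retry loop above re-establishes on each attempt.
    inline std::uintptr_t mark_in_use(tagged_link& link) {
        return link.fetch_add(kFlag, std::memory_order_acquire);
    }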
+ if(d1::queuing_rw_mutex::scoped_lock *const l_next = tricky_pointer::load(s.my_next, std::memory_order_relaxed) ) { // I->next != nil, TODO: rename to next after clearing up and adapting the n in the comment two lines below + // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0 + tmp = tricky_pointer::exchange(l_next->my_prev, predecessor, std::memory_order_release); + // I->prev->next = I->next; + __TBB_ASSERT(tricky_pointer::load(s.my_prev, std::memory_order_relaxed)==predecessor, nullptr); + predecessor->my_next.store(s.my_next.load(std::memory_order_relaxed), std::memory_order_release); } + // Safe to release in the order opposite to acquiring which makes the code simpler + release_internal_lock(*predecessor); + + } else { // No predecessor when we looked + acquire_internal_lock(s); // "exclusiveLock(&I->EL)" + d1::queuing_rw_mutex::scoped_lock* next = tricky_pointer::load(s.my_next, std::memory_order_acquire); + if( !next ) { + d1::queuing_rw_mutex::scoped_lock* expected = &s; + // Release mutex on success otherwise wait for successor publication + if( !s.my_mutex->q_tail.compare_exchange_strong(expected, nullptr, + std::memory_order_release, std::memory_order_relaxed) ) + { + spin_wait_while_eq( s.my_next, 0U, std::memory_order_relaxed ); + next = tricky_pointer::load(s.my_next, std::memory_order_acquire); + } else { + goto unlock_self; + } + } + next->my_going.store(2U, std::memory_order_relaxed); + // Responsibility transition, the one who reads uncorrupted my_prev will do release. + tmp = tricky_pointer::exchange(next->my_prev, nullptr, std::memory_order_release); + next->my_going.store(1U, std::memory_order_release); } - __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early - tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL); - __TBB_store_with_release(n->my_going,1); + unlock_self: + unblock_or_wait_on_internal_lock(s, get_flag(tmp)); } -unlock_self: - unblock_or_wait_on_internal_lock(get_flag(tmp)); - } -done: - spin_wait_while_eq( my_going, 2 ); - - initialize(); -} + done: + // Lifetime synchronization, no need to build happens-before relation + spin_wait_while_eq( s.my_going, 2U, std::memory_order_relaxed ); -bool queuing_rw_mutex::scoped_lock::downgrade_to_reader() -{ - __TBB_ASSERT( my_state==STATE_WRITER, "no sense to downgrade a reader" ); - - ITT_NOTIFY(sync_releasing, my_mutex); + s.initialize(); + } - if( ! __TBB_load_with_acquire(my_next) ) { - my_state = STATE_READER; - if( this==my_mutex->q_tail ) { - unsigned short old_state = my_state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER); - if( old_state==STATE_READER ) { - // Downgrade completed - return true; + static bool downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock& s) { + if ( s.my_state.load(std::memory_order_relaxed) == STATE_ACTIVEREADER ) return true; // Already a reader + + ITT_NOTIFY(sync_releasing, s.my_mutex); + d1::queuing_rw_mutex::scoped_lock* next = tricky_pointer::load(s.my_next, std::memory_order_acquire); + if( !next ) { + s.my_state.store(STATE_READER, std::memory_order_seq_cst); + // the following load of q_tail must not be reordered with setting STATE_READER above + if( &s == s.my_mutex->q_tail.load(std::memory_order_seq_cst) ) { + unsigned char old_state = STATE_READER; + // When this reader is signaled by previous actor it acquires the mutex. 
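The spin_wait_while_eq / spin_wait_until_eq helpers invoked throughout take the memory order to use for the load, which matters for the read that finally observes the change; TBB's real backoff also issues machine pause instructions before yielding. A rough sketch of the shape, not the actual TBB helper:

    #include <atomic>
    #include <thread>

    template <typename T>
    void spin_wait_while_eq(const std::atomic<T>& location, T busy,
                            std::memory_order order) {
        // With order == acquire, the load that sees the new value also
        // makes the signaling thread's earlier writes visible here.
        int spins = 0;
        while (location.load(order) == busy)
            if (++spins > 16) std::this_thread::yield();  // crude backoff
    }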
+ // We need to build happens-before relation with all other coming readers that will read our ACTIVEREADER + // without blocking on my_going. Therefore, we need to publish ACTIVEREADER with release semantics. + // On fail it is relaxed, because we will build happens-before on my_going. + s.my_state.compare_exchange_strong(old_state, STATE_ACTIVEREADER, std::memory_order_release, std::memory_order_relaxed); + if( old_state==STATE_READER ) + return true; // Downgrade completed } + /* wait for the next to register */ + spin_wait_while_eq(s.my_next, 0U, std::memory_order_relaxed); + next = tricky_pointer::load(s.my_next, std::memory_order_acquire); } - /* wait for the next to register */ - spin_wait_while_eq( my_next, (void*)NULL ); + + __TBB_ASSERT( next, "still no successor at this point!" ); + if( next->my_state.load(std::memory_order_relaxed) & STATE_COMBINED_WAITINGREADER ) + next->my_going.store(1U, std::memory_order_release); + // If the next is STATE_UPGRADE_WAITING, it is expected to acquire all other released readers via release + // sequence in next->my_state. In that case, we need to preserve release sequence in next->my_state + // contributed by other reader. So, there are two approaches not to break the release sequence: + // 1. Use read-modify-write (exchange) operation to store with release the UPGRADE_LOSER state; + // 2. Acquire the release sequence and store the sequence and UPGRADE_LOSER state. + // The second approach seems better on x86 because it does not involve interlocked operations. + // Therefore, we read next->my_state with acquire while it is not required for else branch to get the + // release sequence. + else if( next->my_state.load(std::memory_order_acquire)==STATE_UPGRADE_WAITING ) + // the next waiting for upgrade means this writer was upgraded before. + // To preserve the release sequence on next->my_state, read it with acquire + next->my_state.store(STATE_UPGRADE_LOSER, std::memory_order_release); + s.my_state.store(STATE_ACTIVEREADER, std::memory_order_release); + return true; } - scoped_lock *const n = __TBB_load_relaxed(my_next); - __TBB_ASSERT( n, "still no successor at this point!" ); - if( n->my_state & STATE_COMBINED_WAITINGREADER ) - __TBB_store_with_release(n->my_going,1); - else if( n->my_state==STATE_UPGRADE_WAITING ) - // the next waiting for upgrade means this writer was upgraded before. - n->my_state = STATE_UPGRADE_LOSER; - my_state = STATE_ACTIVEREADER; - return true; -} -bool queuing_rw_mutex::scoped_lock::upgrade_to_writer() -{ - __TBB_ASSERT( my_state==STATE_ACTIVEREADER, "only active reader can be upgraded" ); - - queuing_rw_mutex::scoped_lock * tmp; - queuing_rw_mutex::scoped_lock * me = this; - - ITT_NOTIFY(sync_releasing, my_mutex); - my_state = STATE_UPGRADE_REQUESTED; -requested: - __TBB_ASSERT( !(uintptr_t(__TBB_load_relaxed(my_next)) & FLAG), "use of corrupted pointer!" ); - acquire_internal_lock(); - if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) { - spin_wait_while_eq( my_next, (void*)NULL ); - queuing_rw_mutex::scoped_lock * n; - n = tricky_pointer::fetch_and_add<tbb::acquire>(&my_next, FLAG); - unsigned short n_state = n->my_state; - /* the next reader can be blocked by our state. 
the best thing to do is to unblock it */ - if( n_state & STATE_COMBINED_WAITINGREADER ) - __TBB_store_with_release(n->my_going,1); - tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), this); - unblock_or_wait_on_internal_lock(get_flag(tmp)); - if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) { - // save n|FLAG for simplicity of following comparisons - tmp = tricky_pointer(n)|FLAG; - for( atomic_backoff b; __TBB_load_relaxed(my_next)==tmp; b.pause() ) { - if( my_state & STATE_COMBINED_UPGRADING ) { - if( __TBB_load_with_acquire(my_next)==tmp ) - __TBB_store_relaxed(my_next, n); - goto waiting; + static bool upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock& s) { + if (s.my_state.load(std::memory_order_relaxed) == STATE_WRITER) { + // Already a writer + return true; + } + + __TBB_ASSERT(s.my_state.load(std::memory_order_relaxed) == STATE_ACTIVEREADER, "only active reader can be upgraded"); + + queuing_rw_mutex::scoped_lock* tmp{}; + queuing_rw_mutex::scoped_lock* me = &s; + + ITT_NOTIFY(sync_releasing, s.my_mutex); + // Publish ourselves into my_state so that other UPGRADE_WAITING actors can acquire our state. + s.my_state.store(STATE_UPGRADE_REQUESTED, std::memory_order_release); + requested: + __TBB_ASSERT( !(s.my_next.load(std::memory_order_relaxed) & FLAG), "use of corrupted pointer!" ); + acquire_internal_lock(s); + d1::queuing_rw_mutex::scoped_lock* expected = &s; + if( !s.my_mutex->q_tail.compare_exchange_strong(expected, tricky_pointer(me)|FLAG, std::memory_order_acq_rel) ) { + spin_wait_while_eq( s.my_next, 0U, std::memory_order_relaxed ); + queuing_rw_mutex::scoped_lock * next; + next = tricky_pointer::fetch_add(s.my_next, FLAG, std::memory_order_acquire); + // While we were READER the next READER might reach STATE_UPGRADE_WAITING state. + // Therefore, it did not build a happens-before relation with us and we need to acquire the + // next->my_state to build the happens-before relation ourselves + unsigned short n_state = next->my_state.load(std::memory_order_acquire); + /* the next reader can be blocked by our state. the best thing to do is to unblock it */ + if( n_state & STATE_COMBINED_WAITINGREADER ) + next->my_going.store(1U, std::memory_order_release); + // Responsibility transition, the one who reads uncorrupted my_prev will do release. 
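For orientation, these r1 routines back the public tbb::queuing_rw_mutex class, where the upgrade/downgrade protocol implemented here surfaces as two scoped_lock methods. A typical caller-side round trip, sketched against the public oneTBB API (not code from this patch):

    #include <oneapi/tbb/queuing_rw_mutex.h>

    tbb::queuing_rw_mutex mutex;
    int shared_state = 0;

    void read_then_maybe_write(bool need_write) {
        // false = acquire as reader; forwards to acquire(m, s, /*write=*/false)
        tbb::queuing_rw_mutex::scoped_lock lock(mutex, /*write=*/false);
        if (need_write) {
            // Returns false when the lock had to be released and reacquired
            // (the STATE_UPGRADE_LOSER path above), so any state captured
            // before the upgrade must be re-validated.
            if (!lock.upgrade_to_writer()) {
                // re-read shared_state here
            }
            ++shared_state;
            lock.downgrade_to_reader();
        }
    }   // destructor releases via release(s)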
+ tmp = tricky_pointer::exchange(next->my_prev, &s, std::memory_order_release); + unblock_or_wait_on_internal_lock(s, get_flag(tmp)); + if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) { + // save next|FLAG for simplicity of following comparisons + tmp = tricky_pointer(next)|FLAG; + for( atomic_backoff b; tricky_pointer::load(s.my_next, std::memory_order_relaxed)==tmp; b.pause() ) { + if( s.my_state.load(std::memory_order_acquire) & STATE_COMBINED_UPGRADING ) { + if( tricky_pointer::load(s.my_next, std::memory_order_acquire)==tmp ) + tricky_pointer::store(s.my_next, next, std::memory_order_relaxed); + goto waiting; + } } + __TBB_ASSERT(tricky_pointer::load(s.my_next, std::memory_order_relaxed) != (tricky_pointer(next)|FLAG), nullptr); + goto requested; + } else { + __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state"); + __TBB_ASSERT( (tricky_pointer(next)|FLAG) == tricky_pointer::load(s.my_next, std::memory_order_relaxed), nullptr); + tricky_pointer::store(s.my_next, next, std::memory_order_relaxed); } - __TBB_ASSERT(__TBB_load_relaxed(my_next) != (tricky_pointer(n)|FLAG), NULL); - goto requested; } else { - __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state"); - __TBB_ASSERT( (tricky_pointer(n)|FLAG) == __TBB_load_relaxed(my_next), NULL); - __TBB_store_relaxed(my_next, n); + /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */ + release_internal_lock(s); + } // if( this != my_mutex->q_tail... ) + { + unsigned char old_state = STATE_UPGRADE_REQUESTED; + // If we reach STATE_UPGRADE_WAITING state we do not build happens-before relation with READER on + // left. We delegate this responsibility to READER on left when it tries upgrading. Therefore, we are releasing + // on success. + // Otherwise, on fail, we already acquired the next->my_state. + s.my_state.compare_exchange_strong(old_state, STATE_UPGRADE_WAITING, std::memory_order_release, std::memory_order_relaxed); } - } else { - /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */ - release_internal_lock(); - } // if( this != my_mutex->q_tail... ) - my_state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED); - -waiting: - __TBB_ASSERT( !( intptr_t(__TBB_load_relaxed(my_next)) & FLAG ), "use of corrupted pointer!" ); - __TBB_ASSERT( my_state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" ); - __TBB_ASSERT( me==this, NULL ); - ITT_NOTIFY(sync_prepare, my_mutex); - /* if no one was blocked by the "corrupted" q_tail, turn it back */ - my_mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG ); - queuing_rw_mutex::scoped_lock * pred; - pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG); - if( pred ) { - bool success = pred->try_acquire_internal_lock(); - pred->my_state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED); - if( !success ) { - tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred)|FLAG ); - if( uintptr_t(tmp) & FLAG ) { - spin_wait_while_eq(my_prev, pred); - pred = __TBB_load_relaxed(my_prev); + waiting: + __TBB_ASSERT( !( s.my_next.load(std::memory_order_relaxed) & FLAG ), "use of corrupted pointer!" 
); + __TBB_ASSERT( s.my_state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" ); + __TBB_ASSERT( me==&s, nullptr ); + ITT_NOTIFY(sync_prepare, s.my_mutex); + /* if no one was blocked by the "corrupted" q_tail, turn it back */ + expected = tricky_pointer(me)|FLAG; + s.my_mutex->q_tail.compare_exchange_strong(expected, &s, std::memory_order_release); + queuing_rw_mutex::scoped_lock * predecessor; + // Mark my_prev as 'in use' to prevent predecessor from releasing + predecessor = tricky_pointer::fetch_add(s.my_prev, FLAG, std::memory_order_acquire); + if( predecessor ) { + bool success = try_acquire_internal_lock(*predecessor); + { + // While the predecessor pointer (my_prev) is in use (FLAG is set), we can safely update the node's state. + // Corrupted pointer transitions responsibility to release the predecessor's node on us. + unsigned char old_state = STATE_UPGRADE_REQUESTED; + // Try to build happens-before with the upgrading READER on left. If it fails, the predecessor state is not + // important for us because it will acquire our state. + predecessor->my_state.compare_exchange_strong(old_state, STATE_UPGRADE_WAITING, std::memory_order_release, + std::memory_order_relaxed); + } + if( !success ) { + // Responsibility transition, the one who reads uncorrupted my_prev will do release. + tmp = tricky_pointer::compare_exchange_strong(s.my_prev, tricky_pointer(predecessor)|FLAG, predecessor, std::memory_order_acquire); + if( tricky_pointer(tmp) & FLAG ) { + tricky_pointer::spin_wait_while_eq(s.my_prev, predecessor); + predecessor = tricky_pointer::load(s.my_prev, std::memory_order_relaxed); + } else { + // TODO: spin_wait condition seems never reachable + tricky_pointer::spin_wait_while_eq(s.my_prev, tricky_pointer(predecessor)|FLAG); + release_internal_lock(*predecessor); + } } else { - spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG ); - pred->release_internal_lock(); + tricky_pointer::store(s.my_prev, predecessor, std::memory_order_relaxed); + release_internal_lock(*predecessor); + tricky_pointer::spin_wait_while_eq(s.my_prev, predecessor); + predecessor = tricky_pointer::load(s.my_prev, std::memory_order_relaxed); } + if( predecessor ) + goto waiting; } else { - __TBB_store_relaxed(my_prev, pred); - pred->release_internal_lock(); - spin_wait_while_eq(my_prev, pred); - pred = __TBB_load_relaxed(my_prev); + tricky_pointer::store(s.my_prev, nullptr, std::memory_order_relaxed); } - if( pred ) - goto waiting; - } else { - // restore the corrupted my_prev field for possible further use (e.g. 
if downgrade back to reader) - __TBB_store_relaxed(my_prev, pred); + __TBB_ASSERT( !predecessor && !s.my_prev, nullptr ); + + // additional lifetime issue prevention checks + // wait for the successor to finish working with my fields + wait_for_release_of_internal_lock(s); + // now wait for the predecessor to finish working with my fields + spin_wait_while_eq( s.my_going, 2U ); + + bool result = ( s.my_state != STATE_UPGRADE_LOSER ); + s.my_state.store(STATE_WRITER, std::memory_order_relaxed); + s.my_going.store(1U, std::memory_order_relaxed); + + ITT_NOTIFY(sync_acquired, s.my_mutex); + return result; + } + + static bool is_writer(const d1::queuing_rw_mutex::scoped_lock& m) { + return m.my_state.load(std::memory_order_relaxed) == STATE_WRITER; } - __TBB_ASSERT( !pred && !__TBB_load_relaxed(my_prev), NULL ); - // additional lifetime issue prevention checks - // wait for the successor to finish working with my fields - wait_for_release_of_internal_lock(); - // now wait for the predecessor to finish working with my fields - spin_wait_while_eq( my_going, 2 ); + static void construct(d1::queuing_rw_mutex& m) { + suppress_unused_warning(m); + ITT_SYNC_CREATE(&m, _T("tbb::queuing_rw_mutex"), _T("")); + } +}; - // Acquire critical section indirectly from previous owner or directly from predecessor (TODO: not clear). - __TBB_control_consistency_helper(); // on either "my_mutex->q_tail" or "my_going" (TODO: not clear) +void __TBB_EXPORTED_FUNC acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write) { + queuing_rw_mutex_impl::acquire(m, s, write); +} - bool result = ( my_state != STATE_UPGRADE_LOSER ); - my_state = STATE_WRITER; - __TBB_store_relaxed(my_going, 1); +bool __TBB_EXPORTED_FUNC try_acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write) { + return queuing_rw_mutex_impl::try_acquire(m, s, write); +} + +void __TBB_EXPORTED_FUNC release(d1::queuing_rw_mutex::scoped_lock& s) { + queuing_rw_mutex_impl::release(s); +} + +bool __TBB_EXPORTED_FUNC upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock& s) { + return queuing_rw_mutex_impl::upgrade_to_writer(s); +} + +bool __TBB_EXPORTED_FUNC is_writer(const d1::queuing_rw_mutex::scoped_lock& s) { + return queuing_rw_mutex_impl::is_writer(s); +} - ITT_NOTIFY(sync_acquired, my_mutex); - return result; +bool __TBB_EXPORTED_FUNC downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock& s) { + return queuing_rw_mutex_impl::downgrade_to_reader(s); } -void queuing_rw_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T("")); +void __TBB_EXPORTED_FUNC construct(d1::queuing_rw_mutex& m) { + queuing_rw_mutex_impl::construct(m); } +} // namespace r1 +} // namespace detail } // namespace tbb diff --git a/src/tbb/src/tbb/reader_writer_lock.cpp b/src/tbb/src/tbb/reader_writer_lock.cpp deleted file mode 100644 index 595ac9e97..000000000 --- a/src/tbb/src/tbb/reader_writer_lock.cpp +++ /dev/null @@ -1,347 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/reader_writer_lock.h" -#include "tbb/tbb_machine.h" -#include "tbb/tbb_exception.h" -#include "itt_notify.h" - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4244) -#endif - -namespace tbb { -namespace interface5 { - -const uintptr_t WFLAG1 = 0x1; // writer interested or active -const uintptr_t WFLAG2 = 0x2; // writers interested, no entering readers -const uintptr_t RFLAG = 0x4; // reader interested but not active -const uintptr_t RC_INCR = 0x8; // to adjust reader count - - -// Perform an atomic bitwise-OR on the operand, and return its previous value. -inline uintptr_t fetch_and_or(atomic<uintptr_t>& operand, uintptr_t value) { - for (tbb::internal::atomic_backoff b;;b.pause()) { - uintptr_t old = operand; - uintptr_t result = operand.compare_and_swap(old|value, old); - if (result==old) return result; - } -} - -// Perform an atomic bitwise-AND on the operand, and return its previous value. -inline uintptr_t fetch_and_and(atomic<uintptr_t>& operand, uintptr_t value) { - for (tbb::internal::atomic_backoff b;;b.pause()) { - uintptr_t old = operand; - uintptr_t result = operand.compare_and_swap(old&value, old); - if (result==old) return result; - } -} - -//! Spin WHILE the value at the location is greater than or equal to a given value -/** T and U should be comparable types. */ -template<typename T, typename U> -void spin_wait_while_geq( const volatile T& location, U value ) { - tbb::internal::atomic_backoff backoff; - while( location>=value ) backoff.pause(); -} - -//! Spin UNTIL (location & value) is true. -/** T and U should be comparable types. */ -template<typename T, typename U> -void spin_wait_until_and( const volatile T& location, U value ) { - tbb::internal::atomic_backoff backoff; - while( !(location & value) ) backoff.pause(); -} - - -void reader_writer_lock::internal_construct() { - reader_head = NULL; - writer_head = NULL; - writer_tail = NULL; - rdr_count_and_flags = 0; - my_current_writer = tbb_thread::id(); -#if TBB_USE_THREADING_TOOLS - ITT_SYNC_CREATE(this, _T("tbb::reader_writer_lock"), _T("")); -#endif /* TBB_USE_THREADING_TOOLS */ -} - -void reader_writer_lock::internal_destroy() { - __TBB_ASSERT(rdr_count_and_flags==0, "reader_writer_lock destroyed with pending readers/writers."); - __TBB_ASSERT(reader_head==NULL, "reader_writer_lock destroyed with pending readers."); - __TBB_ASSERT(writer_tail==NULL, "reader_writer_lock destroyed with pending writers."); - __TBB_ASSERT(writer_head==NULL, "reader_writer_lock destroyed with pending/active writers."); -} - -// Acquires the reader_writer_lock for write. 
If the lock is currently held in write -// mode by another context, the writer will block by spinning on a local variable. -// Throws exception improper_lock if the context tries to acquire a -// reader_writer_lock that it already has write ownership of. -void reader_writer_lock::lock() { - if (is_current_writer()) { // recursive lock attempt - // we don't support recursive writer locks; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { - scoped_lock *a_writer_lock = new scoped_lock(); - (void) start_write(a_writer_lock); - } -} - -// Tries to acquire the reader_writer_lock for write. This function does not block. -// Return Value: True or false, depending on whether the lock is acquired or not. -// If the lock is already held by this acquiring context, try_lock() returns false. -bool reader_writer_lock::try_lock() { - if (is_current_writer()) { // recursive lock attempt - return false; - } - else { - scoped_lock *a_writer_lock = new scoped_lock(); - a_writer_lock->status = waiting_nonblocking; - return start_write(a_writer_lock); - } -} - -bool reader_writer_lock::start_write(scoped_lock *I) { - tbb_thread::id id = this_tbb_thread::get_id(); - scoped_lock *pred = NULL; - if (I->status == waiting_nonblocking) { - if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) { - delete I; - return false; - } - } - else { - ITT_NOTIFY(sync_prepare, this); - pred = writer_tail.fetch_and_store(I); - } - if (pred) - pred->next = I; - else { - set_next_writer(I); - if (I->status == waiting_nonblocking) { - if (I->next) { // potentially more writers - set_next_writer(I->next); - } - else { // no more writers - writer_head.fetch_and_store(NULL); - if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added - spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added - __TBB_ASSERT(I->next, "There should be a node following the last writer."); - set_next_writer(I->next); - } - } - delete I; - return false; - } - } - spin_wait_while_eq(I->status, waiting); - ITT_NOTIFY(sync_acquired, this); - my_current_writer = id; - return true; -} - -void reader_writer_lock::set_next_writer(scoped_lock *W) { - writer_head = W; - if (W->status == waiting_nonblocking) { - if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) { - W->status = active; - } - } - else { - if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present - spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2 - } - else { // no reader in timing window - __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2); - } - spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish - W->status = active; - } -} - -// Acquires the reader_writer_lock for read. If the lock is currently held by a writer, -// this reader will block and wait until the writers are done. -// Throws exception improper_lock when the context tries to acquire a reader_writer_lock -// that it already has write ownership of. -void reader_writer_lock::lock_read() { - if (is_current_writer()) { // recursive lock attempt - // we don't support writer->reader downgrade; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { - scoped_lock_read a_reader_lock; - start_read(&a_reader_lock); - } -} - -// Tries to acquire the reader_writer_lock for read. This function does not block. -// Return Value: True or false, depending on whether the lock is acquired or not. 
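The rest of this file goes away because oneTBB 2021 dropped tbb::reader_writer_lock entirely, along with hand-rolled helpers like the fetch_and_or CAS loop above (std::atomic's fetch_or has covered that since C++11). Code that used the class can usually migrate to C++17's std::shared_mutex; a sketch, assuming no dependence on the removed class's writer-preference queueing or recursion-detection exceptions:

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex rw;

    void reader() {
        std::shared_lock<std::shared_mutex> guard(rw);   // was lock_read()
        // ... read shared data ...
    }

    void writer() {
        std::unique_lock<std::shared_mutex> guard(rw);   // was lock()
        // ... modify shared data ...
    }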
-bool reader_writer_lock::try_lock_read() { - if (is_current_writer()) { // recursive lock attempt - return false; - } - else { - if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present - rdr_count_and_flags -= RC_INCR; - return false; - } - else { // no writers - ITT_NOTIFY(sync_acquired, this); - return true; - } - } -} - -void reader_writer_lock::start_read(scoped_lock_read *I) { - ITT_NOTIFY(sync_prepare, this); - I->next = reader_head.fetch_and_store(I); - if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags - // unblock and/or update statuses of non-blocking readers - if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers - unblock_readers(); - } - } - __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking."); - spin_wait_while_eq(I->status, waiting); // block - if (I->next) { - __TBB_ASSERT(I->next->status == waiting, NULL); - rdr_count_and_flags += RC_INCR; - I->next->status = active; // wake successor - } - ITT_NOTIFY(sync_acquired, this); -} - -void reader_writer_lock::unblock_readers() { - // clear rdr interest flag, increment rdr count - __TBB_ASSERT(rdr_count_and_flags&RFLAG, NULL); - rdr_count_and_flags += RC_INCR-RFLAG; - __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, NULL); - // indicate clear of window - if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) { - __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2); - } - // unblock waiting readers - scoped_lock_read *head = reader_head.fetch_and_store(NULL); - __TBB_ASSERT(head, NULL); - __TBB_ASSERT(head->status == waiting, NULL); - head->status = active; -} - -// Releases the reader_writer_lock -void reader_writer_lock::unlock() { - if( my_current_writer!=tbb_thread::id() ) { - // A writer owns the lock - __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock."); - __TBB_ASSERT(writer_head, NULL); - __TBB_ASSERT(writer_head->status==active, NULL); - scoped_lock *a_writer_lock = writer_head; - end_write(a_writer_lock); - __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference."); - delete a_writer_lock; - } else { - end_read(); - } -} - -void reader_writer_lock::end_write(scoped_lock *I) { - __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock."); - my_current_writer = tbb_thread::id(); - ITT_NOTIFY(sync_releasing, this); - if (I->next) { // potentially more writers - writer_head = I->next; - writer_head->status = active; - } - else { // No more writers; clear writer flag, test reader interest flag - __TBB_ASSERT(writer_head, NULL); - if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) { - unblock_readers(); - } - writer_head.fetch_and_store(NULL); - if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added - spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added - __TBB_ASSERT(I->next, "There should be a node following the last writer."); - set_next_writer(I->next); - } - } -} - -void reader_writer_lock::end_read() { - ITT_NOTIFY(sync_releasing, this); - __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock."); - rdr_count_and_flags -= RC_INCR; -} - -inline bool reader_writer_lock::is_current_writer() { - return my_current_writer==this_tbb_thread::get_id(); -} - -// Construct with a blocking attempt to 
acquire a write lock on the passed reader_writer_lock -void reader_writer_lock::scoped_lock::internal_construct (reader_writer_lock& lock) { - mutex = &lock; - next = NULL; - status = waiting; - if (mutex->is_current_writer()) { // recursive lock attempt - // we don't support recursive writer locks; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { // this thread holds no locks - (void) mutex->start_write(this); - } -} - -inline reader_writer_lock::scoped_lock::scoped_lock() : mutex(NULL), next(NULL) { - status = waiting; -} - -// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock -void reader_writer_lock::scoped_lock_read::internal_construct (reader_writer_lock& lock) { - mutex = &lock; - next = NULL; - status = waiting; - if (mutex->is_current_writer()) { // recursive lock attempt - // we don't support writer->reader downgrade; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { // this thread holds no locks - mutex->start_read(this); - } -} - -inline reader_writer_lock::scoped_lock_read::scoped_lock_read() : mutex(NULL), next(NULL) { - status = waiting; -} - -void reader_writer_lock::scoped_lock::internal_destroy() { - if (mutex) { - __TBB_ASSERT(mutex->is_current_writer(), "~scoped_lock() destroyed by thread different than thread that holds lock."); - mutex->end_write(this); - } - status = invalid; -} - -void reader_writer_lock::scoped_lock_read::internal_destroy() { - if (mutex) - mutex->end_read(); - status = invalid; -} - -} // namespace interface5 -} // namespace tbb diff --git a/src/tbb/src/tbb/recursive_mutex.cpp b/src/tbb/src/tbb/recursive_mutex.cpp deleted file mode 100644 index 71b2e9634..000000000 --- a/src/tbb/src/tbb/recursive_mutex.cpp +++ /dev/null @@ -1,136 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/recursive_mutex.h" -#include "itt_notify.h" - -namespace tbb { - -void recursive_mutex::scoped_lock::internal_acquire( recursive_mutex& m ) { -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - // since we cannot look into the internal of the CriticalSection object - // we won't know how many times the lock has been acquired, and thus - // we won't know when we may safely set the state back to INITIALIZED - // if we change the state to HELD as in mutex.cpp. thus, we won't change - // the state for recursive_mutex - EnterCriticalSection( &m.impl ); - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_lock(&m.impl); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex::scoped_lock: pthread_mutex_lock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = &m; -} - -void recursive_mutex::scoped_lock::internal_release() { - __TBB_ASSERT( my_mutex, "recursive_mutex::scoped_lock: not holding a mutex" ); -#if _WIN32||_WIN64 - switch( my_mutex->state ) { - case INITIALIZED: - LeaveCriticalSection( &my_mutex->impl ); - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_unlock(&my_mutex->impl); - __TBB_ASSERT_EX(!error_code, "recursive_mutex::scoped_lock: pthread_mutex_unlock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = NULL; -} - -bool recursive_mutex::scoped_lock::internal_try_acquire( recursive_mutex& m ) { -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); - break; - } -#endif /* _WIN32||_WIN64 */ - bool result; -#if _WIN32||_WIN64 - result = TryEnterCriticalSection(&m.impl)!=0; -#else - result = pthread_mutex_trylock(&m.impl)==0; -#endif /* _WIN32||_WIN64 */ - if( result ) - my_mutex = &m; - return result; -} - -void recursive_mutex::internal_construct() { -#if _WIN32||_WIN64 - InitializeCriticalSectionEx(&impl, 4000, 0); - state = INITIALIZED; -#else - pthread_mutexattr_t mtx_attr; - int error_code = pthread_mutexattr_init( &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); - - pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); - error_code = pthread_mutex_init( &impl, &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); - pthread_mutexattr_destroy( &mtx_attr ); -#endif /* _WIN32||_WIN64*/ - ITT_SYNC_CREATE(&impl, _T("tbb::recursive_mutex"), _T("")); -} - -void recursive_mutex::internal_destroy() { -#if _WIN32||_WIN64 - switch( state ) { - case INITIALIZED: - DeleteCriticalSection(&impl); - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex: already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex: illegal state for destruction"); - break; - } - state = DESTROYED; -#else - int error_code = pthread_mutex_destroy(&impl); - __TBB_ASSERT_EX(!error_code,"recursive_mutex: pthread_mutex_destroy failed"); -#endif /* _WIN32||_WIN64 */ -} - -} // 
namespace tbb diff --git a/src/tbb/src/rml/include/rml_base.h b/src/tbb/src/tbb/rml_base.h similarity index 68% rename from src/tbb/src/rml/include/rml_base.h rename to src/tbb/src/tbb/rml_base.h index 30bb21f48..9e1705837 100644 --- a/src/tbb/src/rml/include/rml_base.h +++ b/src/tbb/src/tbb/rml_base.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ // Header guard and namespace names follow rml conventions. @@ -37,25 +33,20 @@ namespace rml { -//! Base class for denying assignment and copy constructor. -class no_copy { - void operator=( no_copy& ); - no_copy( no_copy& ); -public: - no_copy() {} -}; - class server; class versioned_object { public: //! A version number typedef unsigned version_type; - + + virtual ~versioned_object() {} + //! Get version of this object /** The version number is incremented when a incompatible change is introduced. The version number is invariant for the lifetime of the object. */ virtual version_type version() const RML_PURE(version_type) + }; //! Represents a client's job for an execution context. @@ -63,11 +54,6 @@ class versioned_object { Not derived from versioned_object because version is same as for client. */ class job { friend class server; - - //! Word for use by server - /** Typically the server uses it to speed up internal lookup. - Clients must not modify the word. */ - void* scratch_ptr; }; //! Information that client provides to server when asking for a server. @@ -80,7 +66,7 @@ class client: public versioned_object { //! Index of a job in a job pool typedef unsigned size_type; - //! Maximum number of threads that client can exploit profitably if nothing else is running on the machine. + //! 
Maximum number of threads that client can exploit profitably if nothing else is running on the machine. /** The returned value should remain invariant for the lifetime of the connection. [idempotent] */ virtual size_type max_job_count() const RML_PURE(size_type) @@ -95,23 +81,18 @@ class client: public versioned_object { after cleanup(job) has been called for each job. */ virtual void acknowledge_close_connection() RML_PURE(void) - enum policy_type {turnaround,throughput}; - - //! Inform server of desired policy. [idempotent] - virtual policy_type policy() const RML_PURE(policy_type) - - //! Inform client that server is done with *this. + //! Inform client that server is done with *this. /** Client should destroy the job. Not necessarily called by execution context represented by *this. Never called while any other thread is working on the job. */ virtual void cleanup( job& ) RML_PURE(void) - // In general, we should not add new virtual methods, because that would - // break derived classes. Think about reserving some vtable slots. + // In general, we should not add new virtual methods, because that would + // break derived classes. Think about reserving some vtable slots. }; // Information that server provides to client. -// Virtual functions are routines provided by the server for the client to call. +// Virtual functions are routines provided by the server for the client to call. class server: public versioned_object { public: //! Typedef for convenience of derived classes. @@ -123,32 +104,29 @@ class server: public versioned_object { //! Request that connection to server be closed. /** Causes each job associated with the client to have its cleanup method called, - possibly by a thread different than the thread that created the job. - This method can return before all cleanup methods return. - Actions that have to wait after all cleanup methods return should be part of - client::acknowledge_close_connection. + possibly by a thread different than the thread that created the job. + This method can return before all cleanup methods return. + Actions that have to wait after all cleanup methods return should be part of + client::acknowledge_close_connection. Pass true as exiting if request_close_connection() is called because exit() is called. In that case, it is the client's responsibility to make sure all threads are terminated. In all other cases, pass false. */ virtual void request_close_connection( bool exiting = false ) = 0; - //! Called by client thread when it reaches a point where it cannot make progress until other threads do. + //! Called by client thread when it reaches a point where it cannot make progress until other threads do. virtual void yield() = 0; //! Called by client to indicate a change in the number of non-RML threads that are running. - /** This is a performance hint to the RML to adjust how many threads it should let run + /** This is a performance hint to the RML to adjust how many threads it should let run concurrently. The delta is the change in the number of non-RML threads that are running. - For example, a value of 1 means the client has started running another thread, and a value + For example, a value of 1 means the client has started running another thread, and a value of -1 indicates that the client has blocked or terminated one of its threads. */ virtual void independent_thread_number_changed( int delta ) = 0; //! Default level of concurrency for which RML strives when there are no non-RML threads running. 
- /** Normally, the value is the hardware concurrency minus one. + /** Normally, the value is the hardware concurrency minus one. The "minus one" accounts for the thread created by main(). */ virtual unsigned default_concurrency() const = 0; - -protected: - static void*& scratch_ptr( job& j ) {return j.scratch_ptr;} }; class factory { @@ -161,9 +139,6 @@ class factory { st_incompatible }; - //! Scratch pointer for use by RML. - void* scratch_ptr; - protected: //! Pointer to routine that waits for server to indicate when client can close itself. status_type (*my_wait_to_close_routine)( factory& ); @@ -174,7 +149,7 @@ class factory { HMODULE library_handle; #else void* library_handle; -#endif /* _WIN32||_WIN64 */ +#endif /* _WIN32||_WIN64 */ //! Special marker to keep dll from being unloaded prematurely static const std::size_t c_dont_unload = 1; diff --git a/src/tbb/src/tbb/rml_tbb.cpp b/src/tbb/src/tbb/rml_tbb.cpp new file mode 100644 index 000000000..d1cd285c1 --- /dev/null +++ b/src/tbb/src/tbb/rml_tbb.cpp @@ -0,0 +1,112 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_assert.h" + +#include "rml_tbb.h" +#include "dynamic_link.h" + +namespace tbb { +namespace detail { +namespace r1 { +namespace rml { + +#define MAKE_SERVER(x) DLD(__TBB_make_rml_server,x) +#define GET_INFO(x) DLD(__TBB_call_with_my_server_info,x) +#define SERVER tbb_server +#define CLIENT tbb_client +#define FACTORY tbb_factory + +#if __TBB_WEAK_SYMBOLS_PRESENT + #pragma weak __TBB_make_rml_server + #pragma weak __TBB_call_with_my_server_info + extern "C" { + ::rml::factory::status_type __TBB_make_rml_server( rml::tbb_factory& f, rml::tbb_server*& server, rml::tbb_client& client ); + void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ); + } +#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ + +#if TBB_USE_DEBUG +#define DEBUG_SUFFIX "_debug" +#else +#define DEBUG_SUFFIX +#endif /* TBB_USE_DEBUG */ + +// RML_SERVER_NAME is the name of the RML server library. +#if _WIN32 || _WIN64 +#define RML_SERVER_NAME "irml" DEBUG_SUFFIX ".dll" +#elif __APPLE__ +#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".1.dylib" +#elif __FreeBSD__ || __NetBSD__ || __OpenBSD__ || __sun || _AIX +#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so" +#elif __unix__ +#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so.1" +#else +#error Unknown OS +#endif + +const ::rml::versioned_object::version_type CLIENT_VERSION = 2; + +#if __TBB_WEAK_SYMBOLS_PRESENT + #pragma weak __RML_open_factory + #pragma weak __RML_close_factory + extern "C" { + ::rml::factory::status_type __RML_open_factory ( ::rml::factory&, ::rml::versioned_object::version_type&, ::rml::versioned_object::version_type ); + void __RML_close_factory( ::rml::factory& f ); + } +#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ + +::rml::factory::status_type FACTORY::open() { + // Failure of following assertion indicates that factory is already open, or not zero-inited. 
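The dynamic_link() call in FACTORY::open() below resolves the whole table of RML entry points out of the server library at run time. On POSIX systems the core of what it does reduces to dlopen/dlsym, roughly as sketched here (illustrative only: link_entry and dynamic_link_sketch are made-up names, and the real implementation adds weak-symbol shortcuts, full-path resolution, and the Windows LoadLibrary path):

    #include <dlfcn.h>
    #include <cstddef>

    struct link_entry { const char* name; void** target; };

    // Load 'library' and resolve every symbol in 'table' as a unit;
    // on any failure, leave nothing half-linked.
    bool dynamic_link_sketch(const char* library, link_entry* table,
                             std::size_t n, void** out_handle) {
        void* handle = dlopen(library, RTLD_NOW);
        if (!handle) return false;
        for (std::size_t i = 0; i < n; ++i) {
            void* sym = dlsym(handle, table[i].name);
            if (!sym) { dlclose(handle); return false; }
            *table[i].target = sym;
        }
        *out_handle = handle;
        return true;
    }

Resolving all-or-nothing is the point: st_not_found below means the client falls back cleanly rather than running with a partially linked server.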
+ __TBB_ASSERT_EX( !library_handle, nullptr); + status_type (*open_factory_routine)( factory&, version_type&, version_type ); + dynamic_link_descriptor server_link_table[4] = { + DLD(__RML_open_factory,open_factory_routine), + MAKE_SERVER(my_make_server_routine), + DLD(__RML_close_factory,my_wait_to_close_routine), + GET_INFO(my_call_with_server_info_routine), + }; + status_type result; + if ( dynamic_link( RML_SERVER_NAME, server_link_table, 4, &library_handle ) ) { + version_type server_version; + result = (*open_factory_routine)( *this, server_version, CLIENT_VERSION ); + // server_version can be checked here for incompatibility if necessary. + } else { + library_handle = nullptr; + result = st_not_found; + } + return result; +} + +void FACTORY::close() { + if ( library_handle ) + (*my_wait_to_close_routine)(*this); + if ( (size_t)library_handle>FACTORY::c_dont_unload ) { + dynamic_unlink(library_handle); + library_handle = nullptr; + } +} + +::rml::factory::status_type FACTORY::make_server( SERVER*& s, CLIENT& c) { + // Failure of following assertion means that factory was not successfully opened. + __TBB_ASSERT_EX( my_make_server_routine, nullptr); + return (*my_make_server_routine)(*this,s,c); +} + +} // namespace rml +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/rml/include/rml_tbb.h b/src/tbb/src/tbb/rml_tbb.h similarity index 53% rename from src/tbb/src/rml/include/rml_tbb.h rename to src/tbb/src/tbb/rml_tbb.h index 952b1877d..61176f8d7 100644 --- a/src/tbb/src/rml/include/rml_tbb.h +++ b/src/tbb/src/tbb/rml_tbb.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ // Header guard and namespace names follow TBB conventions. @@ -23,31 +19,30 @@ #ifndef __TBB_rml_tbb_H #define __TBB_rml_tbb_H -#include "tbb/tbb_config.h" +#include "oneapi/tbb/version.h" #include "rml_base.h" namespace tbb { -namespace internal { +namespace detail { +namespace r1 { namespace rml { -class tbb_client; - //------------------------------------------------------------------------ // Classes instantiated by the server //------------------------------------------------------------------------ -//! Represents a set of tbb worker threads provided by the server. +//! Represents a set of oneTBB worker threads provided by the server. class tbb_server: public ::rml::server { public: //! Inform server of adjustments in the number of workers that the client can profitably use. virtual void adjust_job_count_estimate( int delta ) = 0; -#if _WIN32||_WIN64 - //! Inform server of a tbb master thread. - virtual void register_master( execution_resource_t& v ) = 0; +#if _WIN32 || _WIN64 + //! Inform server of a oneTBB external thread. + virtual void register_external_thread( execution_resource_t& v ) = 0; - //! Inform server that the tbb master thread is done with its work. - virtual void unregister_master( execution_resource_t v ) = 0; + //! Inform server that the oneTBB external thread is done with its work. + virtual void unregister_external_thread( execution_resource_t v ) = 0; #endif /* _WIN32||_WIN64 */ }; @@ -89,13 +84,11 @@ class tbb_factory: public ::rml::factory { //! Close factory void close(); - - //! Call the callback with the server build info - void call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const; }; } // namespace rml -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /*__TBB_rml_tbb_H */ diff --git a/src/tbb/src/tbb/rml_thread_monitor.h b/src/tbb/src/tbb/rml_thread_monitor.h new file mode 100644 index 000000000..57e9c30b0 --- /dev/null +++ b/src/tbb/src/tbb/rml_thread_monitor.h @@ -0,0 +1,247 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// All platform-specific threading support is encapsulated here. */ + +#ifndef __RML_thread_monitor_H +#define __RML_thread_monitor_H + +#if __TBB_USE_WINAPI +#include <windows.h> +#include <process.h> +#include <malloc.h> //_alloca +#include "misc.h" // support for processor groups +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) +#include <thread> +#endif +#elif __TBB_USE_POSIX +#include <pthread.h> +#include <cstring> +#include <cstdlib> +#include <time.h> +#else +#error Unsupported platform +#endif +#include <cstdio> + +#include "oneapi/tbb/detail/_template_helpers.h" + +#include "itt_notify.h" +#include "semaphore.h" + +// All platform-specific threading support is in this header. + +#if (_WIN32||_WIN64)&&!__TBB_ipf +// Deal with 64K aliasing. 
The formula for "offset" is a Fibonacci hash function,
+// which has the desirable feature of spreading out the offsets fairly evenly
+// without knowing the total number of offsets, and is furthermore unlikely to
+// accidentally cancel out other 64K aliasing schemes that Microsoft might implement later.
+// See Knuth Vol 3. "Theorem S" for details on Fibonacci hashing.
+// The second statement really does need "volatile"; otherwise the compiler might remove the _alloca.
+#define AVOID_64K_ALIASING(idx)                           \
+    std::size_t offset = (idx+1) * 40503U % (1U<<16);     \
+    void* volatile sink_for_alloca = _alloca(offset);     \
+    __TBB_ASSERT_EX(sink_for_alloca, "_alloca failed");
+#else
+// Linux thread allocators avoid 64K aliasing.
+#define AVOID_64K_ALIASING(idx) tbb::detail::suppress_unused_warning(idx)
+#endif /* _WIN32||_WIN64 */
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+// Forward declaration: throws std::runtime_error with what() returning error_code description prefixed with aux_info
+void handle_perror(int error_code, const char* aux_info);
+
+namespace rml {
+namespace internal {
+
+#if __TBB_USE_ITT_NOTIFY
+static const ::tbb::detail::r1::tchar *SyncType_RML = _T("%Constant");
+static const ::tbb::detail::r1::tchar *SyncObj_ThreadMonitor = _T("RML Thr Monitor");
+#endif /* __TBB_USE_ITT_NOTIFY */
+
+//! Monitor with limited two-phase commit form of wait.
+/** At most one thread should wait on an instance at a time. */
+class thread_monitor {
+public:
+    thread_monitor() {
+        ITT_SYNC_CREATE(&my_sema, SyncType_RML, SyncObj_ThreadMonitor);
+    }
+    ~thread_monitor() {}
+
+    //! Notify waiting thread
+    /** Can be called by any thread. */
+    void notify();
+
+    //! Wait for notification
+    void wait();
+
+#if __TBB_USE_WINAPI
+    typedef HANDLE handle_type;
+
+    #define __RML_DECL_THREAD_ROUTINE unsigned WINAPI
+    typedef unsigned (WINAPI *thread_routine_type)(void*);
+
+    //! Launch a thread
+    static handle_type launch( thread_routine_type thread_routine, void* arg, std::size_t stack_size, const size_t* worker_index = nullptr );
+
+#elif __TBB_USE_POSIX
+    typedef pthread_t handle_type;
+
+    #define __RML_DECL_THREAD_ROUTINE void*
+    typedef void*(*thread_routine_type)(void*);
+
+    //! Launch a thread
+    static handle_type launch( thread_routine_type thread_routine, void* arg, std::size_t stack_size );
+#endif /* __TBB_USE_POSIX */
+
+    //! Join thread
+    static void join(handle_type handle);
+
+    //!
Detach thread
+    static void detach_thread(handle_type handle);
+private:
+    // Protects against double notification of the binary semaphore
+    std::atomic<bool> my_notified{ false };
+    binary_semaphore my_sema;
+#if __TBB_USE_POSIX
+    static void check( int error_code, const char* routine );
+#endif
+};
+
+#if __TBB_USE_WINAPI
+
+#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
+#define STACK_SIZE_PARAM_IS_A_RESERVATION 0x00010000
+#endif
+
+// The _beginthreadex API is not available in Windows 8 Store* applications, so use std::thread instead
+#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00)
+inline thread_monitor::handle_type thread_monitor::launch( thread_routine_type thread_function, void* arg, std::size_t, const std::size_t*) {
+// TODO: check that an exception thrown from std::thread is not swallowed silently
+    std::thread* thread_tmp=new std::thread(thread_function, arg);
+    return thread_tmp->native_handle();
+}
+#else
+inline thread_monitor::handle_type thread_monitor::launch( thread_routine_type thread_routine, void* arg, std::size_t stack_size, const std::size_t* worker_index ) {
+    unsigned thread_id;
+    int number_of_processor_groups = ( worker_index ) ? NumberOfProcessorGroups() : 0;
+    unsigned create_flags = ( number_of_processor_groups > 1 ) ? CREATE_SUSPENDED : 0;
+    HANDLE h = (HANDLE)_beginthreadex( nullptr, unsigned(stack_size), thread_routine, arg, STACK_SIZE_PARAM_IS_A_RESERVATION | create_flags, &thread_id );
+    if( !h ) {
+        handle_perror(0, "thread_monitor::launch: _beginthreadex failed\n");
+    }
+    if ( number_of_processor_groups > 1 ) {
+        MoveThreadIntoProcessorGroup( h, FindProcessorGroupIndex( static_cast<int>(*worker_index) ) );
+        ResumeThread( h );
+    }
+    return h;
+}
+#endif //__TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00)
+
+void thread_monitor::join(handle_type handle) {
+#if TBB_USE_ASSERT
+    DWORD res =
+#endif
+        WaitForSingleObjectEx(handle, INFINITE, FALSE);
+    __TBB_ASSERT( res==WAIT_OBJECT_0, nullptr);
+#if TBB_USE_ASSERT
+    BOOL val =
+#endif
+        CloseHandle(handle);
+    __TBB_ASSERT( val, nullptr);
+}
+
+void thread_monitor::detach_thread(handle_type handle) {
+#if TBB_USE_ASSERT
+    BOOL val =
+#endif
+        CloseHandle(handle);
+    __TBB_ASSERT( val, nullptr);
+}
+
+#endif /* __TBB_USE_WINAPI */
+
+#if __TBB_USE_POSIX
+inline void thread_monitor::check( int error_code, const char* routine ) {
+    if( error_code ) {
+        handle_perror(error_code, routine);
+    }
+}
+
+inline thread_monitor::handle_type thread_monitor::launch( void* (*thread_routine)(void*), void* arg, std::size_t stack_size ) {
+    // FIXME - consider more graceful recovery than just exiting if a thread cannot be launched.
+    // Note that there are some tricky situations to deal with, such as the thread already
+    // having been grabbed as part of an OpenMP team.
+    pthread_attr_t s;
+    check(pthread_attr_init( &s ), "pthread_attr_init has failed");
+    if( stack_size>0 )
+        check(pthread_attr_setstacksize( &s, stack_size ), "pthread_attr_setstacksize has failed" );
+
+    // pthread_create(2) can spuriously fail with EAGAIN. We retry
+    // max_num_tries times with progressively longer wait times.
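The loop that follows implements this retry. As a self-contained illustration of the same pattern, decoupled from thread_monitor (the helper name launch_with_retry is invented here, not part of TBB):

```cpp
// Sketch: retry pthread_create() on spurious EAGAIN, sleeping i milliseconds
// before attempt i. Mirrors the retry loop in the diff; illustrative only.
#include <pthread.h>
#include <cerrno>
#include <ctime>

int launch_with_retry(pthread_t* handle, void* (*fn)(void*), void* arg) {
    const int max_num_tries = 20;
    int error = EAGAIN;
    for (int i = 0; i < max_num_tries && error == EAGAIN; ++i) {
        if (i != 0) {
            struct timespec ts = {0, i * 1000 * 1000};  // wait i milliseconds
            nanosleep(&ts, nullptr);
        }
        error = pthread_create(handle, nullptr, fn, arg);
    }
    return error;  // 0 on success, otherwise the last error code
}
```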
+ pthread_t handle; + const int max_num_tries = 20; + int error = EAGAIN; + + for (int i = 0; i < max_num_tries && error == EAGAIN; i++) { + if (i != 0) { + // Wait i milliseconds + struct timespec ts = {0, i * 1000 * 1000}; + nanosleep(&ts, NULL); + } + error = pthread_create(&handle, &s, thread_routine, arg); + } + + if (error) + handle_perror(error, "pthread_create has failed"); + + check( pthread_attr_destroy( &s ), "pthread_attr_destroy has failed" ); + return handle; +} + +void thread_monitor::join(handle_type handle) { + check(pthread_join(handle, nullptr), "pthread_join has failed"); +} + +void thread_monitor::detach_thread(handle_type handle) { + check(pthread_detach(handle), "pthread_detach has failed"); +} +#endif /* __TBB_USE_POSIX */ + +inline void thread_monitor::notify() { + // Check that the semaphore is not notified twice + if (!my_notified.exchange(true, std::memory_order_release)) { + my_sema.V(); + } +} + +inline void thread_monitor::wait() { + my_sema.P(); + // memory_order_seq_cst is required here to be ordered with + // further load checking shutdown state + my_notified.store(false, std::memory_order_seq_cst); +} + +} // namespace internal +} // namespace rml +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif /* __RML_thread_monitor_H */ diff --git a/src/tbb/src/tbb/rtm_mutex.cpp b/src/tbb/src/tbb/rtm_mutex.cpp new file mode 100644 index 000000000..f386735b9 --- /dev/null +++ b/src/tbb/src/tbb/rtm_mutex.cpp @@ -0,0 +1,121 @@ +/* + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_assert.h" +#include "oneapi/tbb/detail/_rtm_mutex.h" +#include "itt_notify.h" +#include "governor.h" +#include "misc.h" + +#include <atomic> + +namespace tbb { +namespace detail { +namespace r1 { + + +struct rtm_mutex_impl { + // maximum number of times to retry + // TODO: experiment on retry values. + static constexpr int retry_threshold = 10; + using transaction_result_type = decltype(begin_transaction()); + + //! Release speculative mutex + static void release(d1::rtm_mutex::scoped_lock& s) { + switch(s.m_transaction_state) { + case d1::rtm_mutex::rtm_state::rtm_transacting: + __TBB_ASSERT(is_in_transaction(), "m_transaction_state && not speculating"); + end_transaction(); + s.m_mutex = nullptr; + break; + case d1::rtm_mutex::rtm_state::rtm_real: + s.m_mutex->unlock(); + s.m_mutex = nullptr; + break; + case d1::rtm_mutex::rtm_state::rtm_none: + __TBB_ASSERT(false, "mutex is not locked, but in release"); + break; + default: + __TBB_ASSERT(false, "invalid m_transaction_state"); + } + s.m_transaction_state = d1::rtm_mutex::rtm_state::rtm_none; + } + + //! Acquire lock on the given mutex. 
+    static void acquire(d1::rtm_mutex& m, d1::rtm_mutex::scoped_lock& s, bool only_speculate) {
+        __TBB_ASSERT(s.m_transaction_state == d1::rtm_mutex::rtm_state::rtm_none, "scoped_lock already in transaction");
+        if(governor::speculation_enabled()) {
+            int num_retries = 0;
+            transaction_result_type abort_code = 0;
+            do {
+                if(m.m_flag.load(std::memory_order_acquire)) {
+                    if(only_speculate) return;
+                    spin_wait_while_eq(m.m_flag, true);
+                }
+                // _xbegin returns -1 on success or the abort code, so capture it
+                if((abort_code = begin_transaction()) == transaction_result_type(speculation_successful_begin))
+                {
+                    // started speculation
+                    if(m.m_flag.load(std::memory_order_relaxed)) {
+                        abort_transaction();
+                    }
+                    s.m_transaction_state = d1::rtm_mutex::rtm_state::rtm_transacting;
+                    // Do not wrap the following assignment in a function,
+                    // because it can abort the transaction in debug builds; release() needs the mutex.
+                    s.m_mutex = &m;
+                    return; // successfully started speculation
+                }
+                ++num_retries;
+            } while((abort_code & speculation_retry) != 0 && (num_retries < retry_threshold));
+        }
+
+        if(only_speculate) return;
+        s.m_mutex = &m;
+        s.m_mutex->lock();
+        s.m_transaction_state = d1::rtm_mutex::rtm_state::rtm_real;
+    }
+
+    //! Try to acquire lock on the given mutex.
+    static bool try_acquire(d1::rtm_mutex& m, d1::rtm_mutex::scoped_lock& s) {
+        acquire(m, s, /*only_speculate=*/true);
+        if (s.m_transaction_state == d1::rtm_mutex::rtm_state::rtm_transacting) {
+            return true;
+        }
+        __TBB_ASSERT(s.m_transaction_state == d1::rtm_mutex::rtm_state::rtm_none, nullptr);
+        // transacting acquire failed. try_lock the real mutex
+        if (m.try_lock()) {
+            s.m_mutex = &m;
+            s.m_transaction_state = d1::rtm_mutex::rtm_state::rtm_real;
+            return true;
+        }
+        return false;
+    }
+};
+
+void __TBB_EXPORTED_FUNC acquire(d1::rtm_mutex& m, d1::rtm_mutex::scoped_lock& s, bool only_speculate) {
+    rtm_mutex_impl::acquire(m, s, only_speculate);
+}
+bool __TBB_EXPORTED_FUNC try_acquire(d1::rtm_mutex& m, d1::rtm_mutex::scoped_lock& s) {
+    return rtm_mutex_impl::try_acquire(m, s);
+}
+void __TBB_EXPORTED_FUNC release(d1::rtm_mutex::scoped_lock& s) {
+    rtm_mutex_impl::release(s);
+}
+
+} // namespace r1
+} // namespace detail
+} // namespace tbb
+
diff --git a/src/tbb/src/tbb/rtm_rw_mutex.cpp b/src/tbb/src/tbb/rtm_rw_mutex.cpp
new file mode 100644
index 000000000..fa87d0e39
--- /dev/null
+++ b/src/tbb/src/tbb/rtm_rw_mutex.cpp
@@ -0,0 +1,271 @@
+/*
+    Copyright (c) 2005-2022 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "oneapi/tbb/detail/_assert.h"
+#include "oneapi/tbb/detail/_rtm_rw_mutex.h"
+#include "itt_notify.h"
+#include "governor.h"
+#include "misc.h"
+
+#include <atomic>
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+struct rtm_rw_mutex_impl {
+    // maximum number of times to retry
+    // TODO: experiment on retry values.
+    static constexpr int retry_threshold_read = 10;
+    static constexpr int retry_threshold_write = 10;
+    using transaction_result_type = decltype(begin_transaction());
+
+    //!
Release speculative mutex
+    static void release(d1::rtm_rw_mutex::scoped_lock& s) {
+        switch(s.m_transaction_state) {
+        case d1::rtm_rw_mutex::rtm_type::rtm_transacting_writer:
+        case d1::rtm_rw_mutex::rtm_type::rtm_transacting_reader:
+            __TBB_ASSERT(is_in_transaction(), "m_transaction_state && not speculating");
+            end_transaction();
+            s.m_mutex = nullptr;
+            break;
+        case d1::rtm_rw_mutex::rtm_type::rtm_real_reader:
+            __TBB_ASSERT(!s.m_mutex->write_flag.load(std::memory_order_relaxed), "write_flag set but read lock acquired");
+            s.m_mutex->unlock_shared();
+            s.m_mutex = nullptr;
+            break;
+        case d1::rtm_rw_mutex::rtm_type::rtm_real_writer:
+            __TBB_ASSERT(s.m_mutex->write_flag.load(std::memory_order_relaxed), "write_flag unset but write lock acquired");
+            s.m_mutex->write_flag.store(false, std::memory_order_relaxed);
+            s.m_mutex->unlock();
+            s.m_mutex = nullptr;
+            break;
+        case d1::rtm_rw_mutex::rtm_type::rtm_not_in_mutex:
+            __TBB_ASSERT(false, "rtm_not_in_mutex, but in release");
+            break;
+        default:
+            __TBB_ASSERT(false, "invalid m_transaction_state");
+        }
+        s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_not_in_mutex;
+    }
+
+    //! Acquire write lock on the given mutex.
+    static void acquire_writer(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s, bool only_speculate) {
+        __TBB_ASSERT(s.m_transaction_state == d1::rtm_rw_mutex::rtm_type::rtm_not_in_mutex, "scoped_lock already in transaction");
+        if(governor::speculation_enabled()) {
+            int num_retries = 0;
+            transaction_result_type abort_code = 0;
+            do {
+                if(m.m_state.load(std::memory_order_acquire)) {
+                    if(only_speculate) return;
+                    spin_wait_until_eq(m.m_state, d1::rtm_rw_mutex::state_type(0));
+                }
+                // _xbegin returns -1 on success or the abort code, so capture it
+                if((abort_code = begin_transaction()) == transaction_result_type(speculation_successful_begin))
+                {
+                    // started speculation
+                    if(m.m_state.load(std::memory_order_relaxed)) { // add spin_rw_mutex to read-set.
+                        // reader or writer grabbed the lock, so abort.
+                        abort_transaction();
+                    }
+                    s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_transacting_writer;
+                    // Do not wrap the following assignment in a function,
+                    // because it can abort the transaction in debug builds; release() needs the mutex.
+                    s.m_mutex = &m;
+                    return; // successfully started speculation
+                }
+                ++num_retries;
+            } while((abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_write));
+        }
+
+        if(only_speculate) return;
+        s.m_mutex = &m;    // should apply a real try_lock...
+        s.m_mutex->lock(); // kill transactional writers
+        __TBB_ASSERT(!m.write_flag.load(std::memory_order_relaxed), "After acquire for write, write_flag already true");
+        m.write_flag.store(true, std::memory_order_relaxed); // kill transactional readers
+        s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_real_writer;
+    }
+
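Both speculative paths above rest on Intel RTM, reached through TBB's begin_transaction()/end_transaction() wrappers. For orientation, the same elision idea can be sketched directly with the RTM intrinsics; this is a minimal illustration, not TBB's implementation, and the names elide_lock, elide_unlock, and lock_held are invented here (requires an RTM-capable CPU and, with GCC/Clang, -mrtm):

```cpp
#include <immintrin.h> // _xbegin / _xend / _xabort
#include <atomic>
#include <mutex>

std::mutex fallback_lock;           // the real lock, taken only when speculation fails
std::atomic<bool> lock_held{false}; // lock word observed inside transactions

void elide_lock() {
    if (_xbegin() == _XBEGIN_STARTED) {
        // Reading lock_held adds it to the transaction's read-set, so any
        // thread that later takes the real lock aborts this transaction.
        if (!lock_held.load(std::memory_order_relaxed))
            return;     // speculating; no real lock taken
        _xabort(0xff);  // lock is busy: abandon the speculation
    }
    // Abort path (no retry loop here, unlike the code above): take the real lock.
    fallback_lock.lock();
    lock_held.store(true, std::memory_order_relaxed);
}

void elide_unlock() {
    // Inside a live transaction lock_held is necessarily false: any writer to
    // it would have aborted us. So observing 'true' means we hold the real lock.
    if (!lock_held.load(std::memory_order_relaxed)) {
        _xend(); // commit the speculative critical section
    } else {
        lock_held.store(false, std::memory_order_relaxed);
        fallback_lock.unlock();
    }
}
```

The essential design point, shared with the code above, is that the lock word is read inside the transaction: taking the real lock then aborts every current speculator.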
+    //! Acquire read lock on the given mutex.
+    // only_speculate : true if we are doing a try_acquire. If true and we fail to speculate, don't
+    // really acquire the lock; return and do a try_acquire on the contained spin_rw_mutex. If
+    // the lock is already held by a writer, just return.
+    static void acquire_reader(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s, bool only_speculate) {
+        __TBB_ASSERT(s.m_transaction_state == d1::rtm_rw_mutex::rtm_type::rtm_not_in_mutex, "scoped_lock already in transaction");
+        if(governor::speculation_enabled()) {
+            int num_retries = 0;
+            transaction_result_type abort_code = 0;
+            do {
+                // if in try_acquire, and lock is held as writer, don't attempt to speculate.
+                if(m.write_flag.load(std::memory_order_acquire)) {
+                    if(only_speculate) return;
+                    spin_wait_while_eq(m.write_flag, true);
+                }
+                // _xbegin returns -1 on success or the abort code, so capture it
+                if((abort_code = begin_transaction()) == transaction_result_type(speculation_successful_begin))
+                {
+                    // started speculation
+                    if(m.write_flag.load(std::memory_order_relaxed)) { // add write_flag to read-set.
+                        abort_transaction(); // writer grabbed the lock, so abort.
+                    }
+                    s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_transacting_reader;
+                    // Do not wrap the following assignment in a function,
+                    // because it can abort the transaction in debug builds; release() needs the mutex.
+                    s.m_mutex = &m;
+                    return; // successfully started speculation
+                }
+                // fallback path
+                // retry only if there is any hope of getting into a transaction soon
+                // Retry in the following cases (from Section 8.3.5 of
+                // Intel(R) Architecture Instruction Set Extensions Programming Reference):
+                // 1. abort caused by XABORT instruction (bit 0 of EAX register is set)
+                // 2. the transaction may succeed on a retry (bit 1 of EAX register is set)
+                // 3. if another logical processor conflicted with a memory address
+                //    that was part of the transaction that aborted (bit 2 of EAX register is set)
+                // That is, retry if (abort_code & 0x7) is non-zero
+                ++num_retries;
+            } while((abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_read));
+        }
+
+        if(only_speculate) return;
+        s.m_mutex = &m;
+        s.m_mutex->lock_shared();
+        s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_real_reader;
+    }
+
+    //! Upgrade reader to become a writer.
+    /** Returns whether the upgrade happened without releasing and re-acquiring the lock */
+    static bool upgrade(d1::rtm_rw_mutex::scoped_lock& s) {
+        switch(s.m_transaction_state) {
+        case d1::rtm_rw_mutex::rtm_type::rtm_real_reader: {
+            s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_real_writer;
+            bool no_release = s.m_mutex->upgrade();
+            __TBB_ASSERT(!s.m_mutex->write_flag.load(std::memory_order_relaxed), "After upgrade, write_flag already true");
+            s.m_mutex->write_flag.store(true, std::memory_order_relaxed);
+            return no_release;
+        }
+        case d1::rtm_rw_mutex::rtm_type::rtm_transacting_reader: {
+            d1::rtm_rw_mutex& m = *s.m_mutex;
+            if(m.m_state.load(std::memory_order_acquire)) { // add spin_rw_mutex to read-set.
+                // Real reader or writer holds the lock; so commit the read and re-acquire for write.
+                release(s);
+                acquire_writer(m, s, false);
+                return false;
+            } else {
+                s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_transacting_writer;
+                return true;
+            }
+        }
+        default:
+            __TBB_ASSERT(false, "Invalid state for upgrade");
+            return false;
+        }
+    }
+
+    //! Downgrade writer to a reader.
+ static bool downgrade(d1::rtm_rw_mutex::scoped_lock& s) { + switch (s.m_transaction_state) { + case d1::rtm_rw_mutex::rtm_type::rtm_real_writer: + s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_real_reader; + __TBB_ASSERT(s.m_mutex->write_flag.load(std::memory_order_relaxed), "Before downgrade write_flag not true"); + s.m_mutex->write_flag.store(false, std::memory_order_relaxed); + s.m_mutex->downgrade(); + return true; + case d1::rtm_rw_mutex::rtm_type::rtm_transacting_writer: + s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_transacting_reader; + return true; + default: + __TBB_ASSERT(false, "Invalid state for downgrade"); + return false; + } + } + + //! Try to acquire write lock on the given mutex. + // There may be reader(s) which acquired the spin_rw_mutex, as well as possibly + // transactional reader(s). If this is the case, the acquire will fail, and assigning + // write_flag will kill the transactors. So we only assign write_flag if we have successfully + // acquired the lock. + static bool try_acquire_writer(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s) { + acquire_writer(m, s, /*only_speculate=*/true); + if (s.m_transaction_state == d1::rtm_rw_mutex::rtm_type::rtm_transacting_writer) { + return true; + } + __TBB_ASSERT(s.m_transaction_state == d1::rtm_rw_mutex::rtm_type::rtm_not_in_mutex, nullptr); + // transacting write acquire failed. try_lock the real mutex + if (m.try_lock()) { + s.m_mutex = &m; + // only shoot down readers if we're not transacting ourselves + __TBB_ASSERT(!m.write_flag.load(std::memory_order_relaxed), "After try_acquire_writer, write_flag already true"); + m.write_flag.store(true, std::memory_order_relaxed); + s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_real_writer; + return true; + } + return false; + } + + //! Try to acquire read lock on the given mutex. + static bool try_acquire_reader(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s) { + // speculatively acquire the lock. If this fails, do try_lock_shared on the spin_rw_mutex. + acquire_reader(m, s, /*only_speculate=*/true); + if (s.m_transaction_state == d1::rtm_rw_mutex::rtm_type::rtm_transacting_reader) { + return true; + } + __TBB_ASSERT(s.m_transaction_state == d1::rtm_rw_mutex::rtm_type::rtm_not_in_mutex, nullptr); + // transacting read acquire failed. try_lock_shared the real mutex + if (m.try_lock_shared()) { + s.m_mutex = &m; + s.m_transaction_state = d1::rtm_rw_mutex::rtm_type::rtm_real_reader; + return true; + } + return false; + } +}; + +void __TBB_EXPORTED_FUNC acquire_writer(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s, bool only_speculate) { + rtm_rw_mutex_impl::acquire_writer(m, s, only_speculate); +} +//! Internal acquire read lock. +// only_speculate == true if we're doing a try_lock, else false. +void __TBB_EXPORTED_FUNC acquire_reader(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s, bool only_speculate) { + rtm_rw_mutex_impl::acquire_reader(m, s, only_speculate); +} +//! Internal upgrade reader to become a writer. +bool __TBB_EXPORTED_FUNC upgrade(d1::rtm_rw_mutex::scoped_lock& s) { + return rtm_rw_mutex_impl::upgrade(s); +} +//! Internal downgrade writer to become a reader. +bool __TBB_EXPORTED_FUNC downgrade(d1::rtm_rw_mutex::scoped_lock& s) { + return rtm_rw_mutex_impl::downgrade(s); +} +//! Internal try_acquire write lock. +bool __TBB_EXPORTED_FUNC try_acquire_writer(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s) { + return rtm_rw_mutex_impl::try_acquire_writer(m, s); +} +//! Internal try_acquire read lock. 
+bool __TBB_EXPORTED_FUNC try_acquire_reader(d1::rtm_rw_mutex& m, d1::rtm_rw_mutex::scoped_lock& s) { + return rtm_rw_mutex_impl::try_acquire_reader(m, s); +} +//! Internal release lock. +void __TBB_EXPORTED_FUNC release(d1::rtm_rw_mutex::scoped_lock& s) { + rtm_rw_mutex_impl::release(s); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb + + diff --git a/src/tbb/src/tbb/scheduler.cpp b/src/tbb/src/tbb/scheduler.cpp deleted file mode 100644 index b46d5eb78..000000000 --- a/src/tbb/src/tbb/scheduler.cpp +++ /dev/null @@ -1,1227 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "custom_scheduler.h" -#include "scheduler_utility.h" -#include "governor.h" -#include "market.h" -#include "arena.h" -#include "mailbox.h" -#include "observer_proxy.h" -#include "tbb/tbb_machine.h" -#include "tbb/atomic.h" - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// Library initialization -//------------------------------------------------------------------------ - -/** Defined in tbb_main.cpp **/ -extern generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index ); - -inline generic_scheduler* allocate_scheduler ( arena* a, size_t index ) { - return AllocateSchedulerPtr(a, index); -} - -#if __TBB_TASK_GROUP_CONTEXT -context_state_propagation_mutex_type the_context_state_propagation_mutex; - -uintptr_t the_context_state_propagation_epoch = 0; - -//! Context to be associated with dummy tasks of worker threads schedulers. -/** It is never used for its direct purpose, and is introduced solely for the sake - of avoiding one extra conditional branch in the end of wait_for_all method. **/ -static task_group_context the_dummy_context(task_group_context::isolated); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -void Scheduler_OneTimeInitialization ( bool itt_present ) { - AllocateSchedulerPtr = itt_present ? &custom_scheduler<DefaultSchedulerTraits>::allocate_scheduler : - &custom_scheduler<IntelSchedulerTraits>::allocate_scheduler; -#if __TBB_TASK_GROUP_CONTEXT - // There must be no tasks belonging to this fake task group. 
Mark invalid for the assert - __TBB_ASSERT(!(task_group_context::low_unused_state_bit & (task_group_context::low_unused_state_bit-1)), NULL); - the_dummy_context.my_state = task_group_context::low_unused_state_bit; -#if __TBB_TASK_PRIORITY - // It should never prevent tasks from being passed to execution. - the_dummy_context.my_priority = num_priority_levels - 1; -#endif /* __TBB_TASK_PRIORITY */ -#endif /* __TBB_TASK_GROUP_CONTEXT */ -} - -//------------------------------------------------------------------------ -// scheduler interface -//------------------------------------------------------------------------ - -// A pure virtual destructor should still have a body -// so the one for tbb::internal::scheduler::~scheduler() is provided here -scheduler::~scheduler( ) {} - -//------------------------------------------------------------------------ -// generic_scheduler -//------------------------------------------------------------------------ - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warning about using 'this' in base initializer list. - #pragma warning(push) - #pragma warning(disable:4355) -#endif - -generic_scheduler::generic_scheduler( arena* a, size_t index ) - : my_stealing_threshold(0) - , my_market(NULL) - , my_random( this ) - , my_free_list(NULL) -#if __TBB_HOARD_NONLOCAL_TASKS - , my_nonlocal_free_list(NULL) -#endif - , my_dummy_task(NULL) - , my_ref_count(1) - , my_auto_initialized(false) -#if __TBB_COUNT_TASK_NODES - , my_task_node_count(0) -#endif /* __TBB_COUNT_TASK_NODES */ - , my_small_task_count(1) // Extra 1 is a guard reference - , my_return_list(NULL) -#if __TBB_TASK_GROUP_CONTEXT - , my_local_ctx_list_update(make_atomic(uintptr_t(0))) -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_TASK_PRIORITY - , my_offloaded_tasks(NULL) - , my_offloaded_task_list_tail_link(NULL) - , my_local_reload_epoch(0) - , my_pool_reshuffling_pending(false) -#endif /* __TBB_TASK_PRIORITY */ -#if __TBB_TASK_GROUP_CONTEXT - , my_nonlocal_ctx_list_update(make_atomic(uintptr_t(0))) -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT - , my_cilk_state(cs_none) -#endif /* __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT */ -{ - my_arena_index = index; - my_arena_slot = 0; - my_arena = a; - my_innermost_running_task = NULL; - my_dispatching_task = NULL; - my_affinity_id = 0; -#if __TBB_SCHEDULER_OBSERVER - my_last_global_observer = NULL; - my_last_local_observer = NULL; -#endif /* __TBB_SCHEDULER_OBSERVER */ -#if __TBB_TASK_PRIORITY - my_ref_top_priority = NULL; - my_ref_reload_epoch = NULL; -#endif /* __TBB_TASK_PRIORITY */ - - my_dummy_task = &allocate_task( sizeof(task), __TBB_CONTEXT_ARG(NULL, NULL) ); -#if __TBB_TASK_GROUP_CONTEXT - my_context_list_head.my_prev = &my_context_list_head; - my_context_list_head.my_next = &my_context_list_head; - ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - my_dummy_task->prefix().ref_count = 2; - ITT_SYNC_CREATE(&my_dummy_task->prefix().ref_count, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt); - ITT_SYNC_CREATE(&my_return_list, SyncType_Scheduler, SyncObj_TaskReturnList); - assert_task_pool_valid(); -#if __TBB_SURVIVE_THREAD_SWITCH - my_cilk_unwatch_thunk.routine = NULL; -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif // warning 4355 is back - -#if TBB_USE_ASSERT > 1 -void generic_scheduler::assert_task_pool_valid() const { - 
acquire_task_pool(); - task** tp = my_arena_slot->task_pool_ptr; - __TBB_ASSERT( my_arena_slot->my_task_pool_size >= min_task_pool_size, NULL ); - const size_t H = __TBB_load_relaxed(my_arena_slot->head); // mirror - const size_t T = __TBB_load_relaxed(my_arena_slot->tail); // mirror - __TBB_ASSERT( H <= T, NULL ); - for ( size_t i = 0; i < H; ++i ) - __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" ); - for ( size_t i = H; i < T; ++i ) { - __TBB_ASSERT( (uintptr_t)tp[i] + 1 > 1u, "nil or invalid task pointer in the deque" ); - __TBB_ASSERT( tp[i]->prefix().state == task::ready || - tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" ); - } - for ( size_t i = T; i < my_arena_slot->my_task_pool_size; ++i ) - __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" ); - release_task_pool(); -} -#endif /* TBB_USE_ASSERT > 1 */ - -void generic_scheduler::init_stack_info () { - // Stacks are growing top-down. Highest address is called "stack base", - // and the lowest is "stack limit". - __TBB_ASSERT( !my_stealing_threshold, "Stealing threshold has already been calculated" ); - size_t stack_size = my_market->worker_stack_size(); -#if USE_WINTHREAD -#if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64 - NT_TIB *pteb = (NT_TIB*)__TBB_machine_get_current_teb(); -#else - NT_TIB *pteb = (NT_TIB*)NtCurrentTeb(); -#endif - __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" ); - __TBB_ASSERT( stack_size >0, "stack_size not initialized?" ); - // When a thread is created with the attribute STACK_SIZE_PARAM_IS_A_RESERVATION, stack limit - // in the TIB points to the committed part of the stack only. This renders the expression - // "(uintptr_t)pteb->StackBase / 2 + (uintptr_t)pteb->StackLimit / 2" virtually useless. - // Thus for worker threads we use the explicit stack size we used while creating them. - // And for master threads we rely on the following fact and assumption: - // - the default stack size of a master thread on Windows is 1M; - // - if it was explicitly set by the application it is at least as large as the size of a worker stack. - if ( is_worker() || stack_size < MByte ) - my_stealing_threshold = (uintptr_t)pteb->StackBase - stack_size / 2; - else - my_stealing_threshold = (uintptr_t)pteb->StackBase - MByte / 2; -#else /* USE_PTHREAD */ - // There is no portable way to get stack base address in Posix, so we use - // non-portable method (on all modern Linux) or the simplified approach - // based on the common sense assumptions. The most important assumption - // is that the main thread's stack size is not less than that of other threads. - // See also comment 3 at the end of this file - void *stack_base = &stack_size; -#if __linux__ && !__bg__ -#if __TBB_ipf - void *rsb_base = __TBB_get_bsp(); -#endif - size_t np_stack_size = 0; - void *stack_limit = NULL; - pthread_attr_t np_attr_stack; - if( 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) { - if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) { -#if __TBB_ipf - pthread_attr_t attr_stack; - if ( 0 == pthread_attr_init(&attr_stack) ) { - if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) { - if ( np_stack_size < stack_size ) { - // We are in a secondary thread. Use reliable data. 
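The Linux branch of this removed init_stack_info() leans on the glibc-specific pthread_getattr_np() to locate the current stack. A minimal sketch of that query, computing the same kind of half-stack threshold (the function name is invented; needs _GNU_SOURCE on glibc):

```cpp
#include <pthread.h> // pthread_getattr_np is glibc-specific (_GNU_SOURCE)
#include <cstddef>
#include <cstdint>

// Returns an address halfway down the current thread's stack (stacks grow
// down), or 0 on failure -- the same quantity as my_stealing_threshold above.
uintptr_t half_stack_threshold() {
    uintptr_t threshold = 0;
    pthread_attr_t attr;
    if (pthread_getattr_np(pthread_self(), &attr) == 0) {
        void* stack_limit = nullptr; // lowest valid address of the stack
        std::size_t stack_size = 0;
        if (pthread_attr_getstack(&attr, &stack_limit, &stack_size) == 0) {
            uintptr_t stack_base = (uintptr_t)stack_limit + stack_size;
            threshold = stack_base - stack_size / 2;
        }
        pthread_attr_destroy(&attr);
    }
    return threshold;
}
```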
- // IA-64 architecture stack is split into RSE backup and memory parts - rsb_base = stack_limit; - stack_size = np_stack_size/2; - // Limit of the memory part of the stack - stack_limit = (char*)stack_limit + stack_size; - } - // We are either in the main thread or this thread stack - // is bigger that that of the main one. As we cannot discern - // these cases we fall back to the default (heuristic) values. - } - pthread_attr_destroy(&attr_stack); - } - // IA-64 architecture stack is split into RSE backup and memory parts - my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2); -#endif /* __TBB_ipf */ - // Size of the stack free part - stack_size = size_t((char*)stack_base - (char*)stack_limit); - } - pthread_attr_destroy(&np_attr_stack); - } -#endif /* __linux__ */ - __TBB_ASSERT( stack_size>0, "stack size must be positive" ); - my_stealing_threshold = (uintptr_t)((char*)stack_base - stack_size/2); -#endif /* USE_PTHREAD */ -} - -#if __TBB_TASK_GROUP_CONTEXT -/** The function uses synchronization scheme similar to the one in the destructor - of task_group_context augmented with interlocked state change of each context - object. The purpose of this algo is to prevent threads doing nonlocal context - destruction from accessing destroyed owner-scheduler instance still pointed to - by the context object. **/ -void generic_scheduler::cleanup_local_context_list () { - // Detach contexts remaining in the local list - bool wait_for_concurrent_destroyers_to_leave = false; - uintptr_t local_count_snapshot = my_context_state_propagation_epoch; - my_local_ctx_list_update.store<relaxed>(1); - { - // This is just a definition. Actual lock is acquired only in case of conflict. - spin_mutex::scoped_lock lock; - // Full fence prevents reordering of store to my_local_ctx_list_update with - // load from my_nonlocal_ctx_list_update. - atomic_fence(); - // Check for the conflict with concurrent destroyer or cancellation propagator - if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch ) - lock.acquire(my_context_list_mutex); - // No acquire fence is necessary for loading my_context_list_head.my_next, - // as the list can be updated by this thread only. - context_list_node_t *node = my_context_list_head.my_next; - while ( node != &my_context_list_head ) { - task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node); - __TBB_ASSERT( __TBB_load_relaxed(ctx.my_kind) != task_group_context::binding_required, "Only a context bound to a root task can be detached" ); - node = node->my_next; - __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Walked into a destroyed context while detaching contexts from the local list" ); - // Synchronizes with ~task_group_context(). 
TODO: evaluate and perhaps relax - if ( internal::as_atomic(ctx.my_kind).fetch_and_store(task_group_context::detached) == task_group_context::dying ) - wait_for_concurrent_destroyers_to_leave = true; - } - } - my_local_ctx_list_update.store<release>(0); - // Wait until other threads referencing this scheduler object finish with it - if ( wait_for_concurrent_destroyers_to_leave ) - spin_wait_until_eq( my_nonlocal_ctx_list_update, 0u ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -void generic_scheduler::free_scheduler() { - __TBB_ASSERT( !my_arena_slot, NULL ); -#if __TBB_TASK_GROUP_CONTEXT - cleanup_local_context_list(); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - free_task<small_local_task>( *my_dummy_task ); - -#if __TBB_HOARD_NONLOCAL_TASKS - while( task* t = my_nonlocal_free_list ) { - task_prefix& p = t->prefix(); - my_nonlocal_free_list = p.next; - __TBB_ASSERT( p.origin && p.origin!=this, NULL ); - free_nonlocal_small_task(*t); - } -#endif - // k accounts for a guard reference and each task that we deallocate. - intptr_t k = 1; - for(;;) { - while( task* t = my_free_list ) { - my_free_list = t->prefix().next; - deallocate_task(*t); - ++k; - } - if( my_return_list==plugged_return_list() ) - break; - my_free_list = (task*)__TBB_FetchAndStoreW( &my_return_list, (intptr_t)plugged_return_list() ); - } -#if __TBB_COUNT_TASK_NODES - my_market->update_task_node_count( my_task_node_count ); -#endif /* __TBB_COUNT_TASK_NODES */ - // Update my_small_task_count last. Doing so sooner might cause another thread to free *this. - __TBB_ASSERT( my_small_task_count>=k, "my_small_task_count corrupted" ); - governor::sign_off(this); - if( __TBB_FetchAndAddW( &my_small_task_count, -k )==k ) - NFS_Free( this ); -} - -task& generic_scheduler::allocate_task( size_t number_of_bytes, - __TBB_CONTEXT_ARG(task* parent, task_group_context* context) ) { - GATHER_STATISTIC(++my_counters.active_tasks); - task *t; - if( number_of_bytes<=quick_task_size ) { -#if __TBB_HOARD_NONLOCAL_TASKS - if( (t = my_nonlocal_free_list) ) { - GATHER_STATISTIC(--my_counters.free_list_length); - __TBB_ASSERT( t->state()==task::freed, "free list of tasks is corrupted" ); - my_nonlocal_free_list = t->prefix().next; - } else -#endif - if( (t = my_free_list) ) { - GATHER_STATISTIC(--my_counters.free_list_length); - __TBB_ASSERT( t->state()==task::freed, "free list of tasks is corrupted" ); - my_free_list = t->prefix().next; - } else if( my_return_list ) { - // No fence required for read of my_return_list above, because __TBB_FetchAndStoreW has a fence. 
- t = (task*)__TBB_FetchAndStoreW( &my_return_list, 0 ); // with acquire - __TBB_ASSERT( t, "another thread emptied the my_return_list" ); - __TBB_ASSERT( t->prefix().origin==this, "task returned to wrong my_return_list" ); - ITT_NOTIFY( sync_acquired, &my_return_list ); - my_free_list = t->prefix().next; - } else { - t = (task*)((char*)NFS_Allocate( 1, task_prefix_reservation_size+quick_task_size, NULL ) + task_prefix_reservation_size ); -#if __TBB_COUNT_TASK_NODES - ++my_task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - t->prefix().origin = this; - t->prefix().next = 0; - ++my_small_task_count; - } -#if __TBB_PREFETCHING - task *t_next = t->prefix().next; - if( !t_next ) { // the task was last in the list -#if __TBB_HOARD_NONLOCAL_TASKS - if( my_free_list ) - t_next = my_free_list; - else -#endif - if( my_return_list ) // enable prefetching, gives speedup - t_next = my_free_list = (task*)__TBB_FetchAndStoreW( &my_return_list, 0 ); - } - if( t_next ) { // gives speedup for both cache lines - __TBB_cl_prefetch(t_next); - __TBB_cl_prefetch(&t_next->prefix()); - } -#endif /* __TBB_PREFETCHING */ - } else { - GATHER_STATISTIC(++my_counters.big_tasks); - t = (task*)((char*)NFS_Allocate( 1, task_prefix_reservation_size+number_of_bytes, NULL ) + task_prefix_reservation_size ); -#if __TBB_COUNT_TASK_NODES - ++my_task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - t->prefix().origin = NULL; - } - task_prefix& p = t->prefix(); -#if __TBB_TASK_GROUP_CONTEXT - p.context = context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - // Obsolete. But still in use, so has to be assigned correct value here. - p.owner = this; - p.ref_count = 0; - // Obsolete. Assign some not outrageously out-of-place value for a while. - p.depth = 0; - p.parent = parent; - // In TBB 2.1 and later, the constructor for task sets extra_state to indicate the version of the tbb/task.h header. - // In TBB 2.0 and earlier, the constructor leaves extra_state as zero. - p.extra_state = 0; - p.affinity = 0; - p.state = task::allocated; - return *t; -} - -void generic_scheduler::free_nonlocal_small_task( task& t ) { - __TBB_ASSERT( t.state()==task::freed, NULL ); - generic_scheduler& s = *static_cast<generic_scheduler*>(t.prefix().origin); - __TBB_ASSERT( &s!=this, NULL ); - for(;;) { - task* old = s.my_return_list; - if( old==plugged_return_list() ) - break; - // Atomically insert t at head of s.my_return_list - t.prefix().next = old; - ITT_NOTIFY( sync_releasing, &s.my_return_list ); - if( as_atomic(s.my_return_list).compare_and_swap(&t, old )==old ) { -#if __TBB_PREFETCHING - __TBB_cl_evict(&t.prefix()); - __TBB_cl_evict(&t); -#endif - return; - } - } - deallocate_task(t); - if( __TBB_FetchAndDecrementWrelease( &s.my_small_task_count )==1 ) { - // We freed the last task allocated by scheduler s, so it's our responsibility - // to free the scheduler. 
- NFS_Free( &s ); - } -} - -size_t generic_scheduler::prepare_task_pool ( size_t num_tasks ) { - size_t T = __TBB_load_relaxed(my_arena_slot->tail); // mirror - if ( T + num_tasks <= my_arena_slot->my_task_pool_size ) - return T; - acquire_task_pool(); - size_t H = __TBB_load_relaxed(my_arena_slot->head); // mirror - T -= H; - size_t new_size = T + num_tasks; - __TBB_ASSERT(!my_arena_slot->my_task_pool_size || my_arena_slot->my_task_pool_size >= min_task_pool_size, NULL); - if( !my_arena_slot->my_task_pool_size ) { - __TBB_ASSERT( !in_arena() && !my_arena_slot->task_pool_ptr, NULL ); - if( new_size < min_task_pool_size ) new_size = min_task_pool_size; - my_arena_slot->allocate_task_pool( new_size ); - } - // If the free space at the beginning of the task pool is too short, we - // are likely facing a pathological single-producer-multiple-consumers - // scenario, and thus it's better to expand the task pool - else if ( new_size <= my_arena_slot->my_task_pool_size - min_task_pool_size/4 ) { - // Relocate the busy part to the beginning of the deque - memmove( my_arena_slot->task_pool_ptr, my_arena_slot->task_pool_ptr + H, T * sizeof(task*) ); - my_arena_slot->fill_with_canary_pattern( T, my_arena_slot->tail ); - commit_relocated_tasks(T); - } - else { - // Grow task pool. As this operation is rare, and its cost is asymptotically - // amortizable, we can tolerate new task pool allocation done under the lock. - if ( new_size < 2 * my_arena_slot->my_task_pool_size ) - new_size = 2 * my_arena_slot->my_task_pool_size; - task** old_pool = my_arena_slot->task_pool_ptr; - my_arena_slot->allocate_task_pool( new_size ); // updates my_task_pool_size - __TBB_ASSERT( T <= my_arena_slot->my_task_pool_size, "new task pool is too short" ); - memcpy( my_arena_slot->task_pool_ptr, old_pool + H, T * sizeof(task*) ); - commit_relocated_tasks(T); - __TBB_ASSERT( old_pool, "attempt to free NULL TaskPool" ); - NFS_Free( old_pool ); - } - assert_task_pool_valid(); - return T; -} - -/** ATTENTION: - This method is mostly the same as generic_scheduler::lock_task_pool(), with - a little different logic of slot state checks (slot is either locked or points - to our task pool). - Thus if either of them is changed, consider changing the counterpart as well. **/ -inline void generic_scheduler::acquire_task_pool() const { - if ( !in_arena() ) - return; // we are not in arena - nothing to lock - bool sync_prepare_done = false; - for( atomic_backoff b;;b.pause() ) { -#if TBB_USE_ASSERT - __TBB_ASSERT( my_arena_slot == my_arena->my_slots + my_arena_index, "invalid arena slot index" ); - // Local copy of the arena slot task pool pointer is necessary for the next - // assertion to work correctly to exclude asynchronous state transition effect. - task** tp = my_arena_slot->task_pool; - __TBB_ASSERT( tp == LockedTaskPool || tp == my_arena_slot->task_pool_ptr, "slot ownership corrupt?" ); -#endif - if( my_arena_slot->task_pool != LockedTaskPool && - as_atomic(my_arena_slot->task_pool).compare_and_swap(LockedTaskPool, my_arena_slot->task_pool_ptr ) == my_arena_slot->task_pool_ptr ) - { - // We acquired our own slot - ITT_NOTIFY(sync_acquired, my_arena_slot); - break; - } - else if( !sync_prepare_done ) { - // Start waiting - ITT_NOTIFY(sync_prepare, my_arena_slot); - sync_prepare_done = true; - } - // Someone else acquired a lock, so pause and do exponential backoff. 
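This CAS-then-back-off scheme recurs throughout the removed scheduler: attempt an atomic exchange, and on contention pause for exponentially growing intervals before retrying. A generic sketch, with std::atomic standing in for TBB's atomic_backoff machinery (names invented):

```cpp
#include <atomic>
#include <thread>

// Spin-lock acquisition with capped exponential backoff, analogous in spirit
// to the atomic_backoff loops in acquire_task_pool()/lock_task_pool().
void lock_with_backoff(std::atomic<bool>& locked) {
    int pause_count = 1;
    for (;;) {
        bool expected = false;
        if (locked.compare_exchange_strong(expected, true, std::memory_order_acquire))
            return;                    // acquired the lock
        for (int i = 0; i < pause_count; ++i)
            std::this_thread::yield(); // stand-in for a hardware PAUSE
        if (pause_count < 16)
            pause_count *= 2;          // back off exponentially, capped
    }
}

void unlock(std::atomic<bool>& locked) {
    locked.store(false, std::memory_order_release);
}
```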
- } - __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "not really acquired task pool" ); -} // generic_scheduler::acquire_task_pool - -inline void generic_scheduler::release_task_pool() const { - if ( !in_arena() ) - return; // we are not in arena - nothing to unlock - __TBB_ASSERT( my_arena_slot, "we are not in arena" ); - __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "arena slot is not locked" ); - ITT_NOTIFY(sync_releasing, my_arena_slot); - __TBB_store_with_release( my_arena_slot->task_pool, my_arena_slot->task_pool_ptr ); -} - -/** ATTENTION: - This method is mostly the same as generic_scheduler::acquire_task_pool(), - with a little different logic of slot state checks (slot can be empty, locked - or point to any task pool other than ours, and asynchronous transitions between - all these states are possible). - Thus if any of them is changed, consider changing the counterpart as well **/ -inline task** generic_scheduler::lock_task_pool( arena_slot* victim_arena_slot ) const { - task** victim_task_pool; - bool sync_prepare_done = false; - for( atomic_backoff backoff;; /*backoff pause embedded in the loop*/) { - victim_task_pool = victim_arena_slot->task_pool; - // NOTE: Do not use comparison of head and tail indices to check for - // the presence of work in the victim's task pool, as they may give - // incorrect indication because of task pool relocations and resizes. - if ( victim_task_pool == EmptyTaskPool ) { - // The victim thread emptied its task pool - nothing to lock - if( sync_prepare_done ) - ITT_NOTIFY(sync_cancel, victim_arena_slot); - break; - } - if( victim_task_pool != LockedTaskPool && - as_atomic(victim_arena_slot->task_pool).compare_and_swap(LockedTaskPool, victim_task_pool ) == victim_task_pool ) - { - // We've locked victim's task pool - ITT_NOTIFY(sync_acquired, victim_arena_slot); - break; - } - else if( !sync_prepare_done ) { - // Start waiting - ITT_NOTIFY(sync_prepare, victim_arena_slot); - sync_prepare_done = true; - } - GATHER_STATISTIC( ++my_counters.thieves_conflicts ); - // Someone else acquired a lock, so pause and do exponential backoff. -#if __TBB_STEALING_ABORT_ON_CONTENTION - if(!backoff.bounded_pause()) { - // the 16 was acquired empirically and a theory behind it supposes - // that number of threads becomes much bigger than number of - // tasks which can be spawned by one thread causing excessive contention. - // TODO: However even small arenas can benefit from the abort on contention - // if preemption of a thief is a problem - if(my_arena->my_limit >= 16) - return EmptyTaskPool; - __TBB_Yield(); - } -#else - backoff.pause(); -#endif - } - __TBB_ASSERT( victim_task_pool == EmptyTaskPool || - (victim_arena_slot->task_pool == LockedTaskPool && victim_task_pool != LockedTaskPool), - "not really locked victim's task pool?" 
); - return victim_task_pool; -} // generic_scheduler::lock_task_pool - -inline void generic_scheduler::unlock_task_pool( arena_slot* victim_arena_slot, - task** victim_task_pool ) const { - __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" ); - __TBB_ASSERT( victim_arena_slot->task_pool == LockedTaskPool, "victim arena slot is not locked" ); - ITT_NOTIFY(sync_releasing, victim_arena_slot); - __TBB_store_with_release( victim_arena_slot->task_pool, victim_task_pool ); -} - - -inline task* generic_scheduler::prepare_for_spawning( task* t ) { - __TBB_ASSERT( t->state()==task::allocated, "attempt to spawn task that is not in 'allocated' state" ); - t->prefix().state = task::ready; -#if TBB_USE_ASSERT - if( task* parent = t->parent() ) { - internal::reference_count ref_count = parent->prefix().ref_count; - __TBB_ASSERT( ref_count>=0, "attempt to spawn task whose parent has a ref_count<0" ); - __TBB_ASSERT( ref_count!=0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" ); - parent->prefix().extra_state |= es_ref_count_active; - } -#endif /* TBB_USE_ASSERT */ - affinity_id dst_thread = t->prefix().affinity; - __TBB_ASSERT( dst_thread == 0 || is_version_3_task(*t), - "backwards compatibility to TBB 2.0 tasks is broken" ); - if( dst_thread != 0 && dst_thread != my_affinity_id ) { - task_proxy& proxy = (task_proxy&)allocate_task( sizeof(task_proxy), - __TBB_CONTEXT_ARG(NULL, NULL) ); - // Mark as a proxy - proxy.prefix().extra_state = es_task_proxy; - proxy.outbox = &my_arena->mailbox(dst_thread); - // Mark proxy as present in both locations (sender's task pool and destination mailbox) - proxy.task_and_tag = intptr_t(t) | task_proxy::location_mask; -#if __TBB_TASK_PRIORITY - proxy.prefix().context = t->prefix().context; -#endif /* __TBB_TASK_PRIORITY */ - ITT_NOTIFY( sync_releasing, proxy.outbox ); - // Mail the proxy - after this point t may be destroyed by another thread at any moment. - proxy.outbox->push(proxy); - return &proxy; - } - return t; -} - -/** Conceptually, this method should be a member of class scheduler. - But doing so would force us to publish class scheduler in the headers. */ -void generic_scheduler::local_spawn( task& first, task*& next ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - if ( &first.prefix().next == &next ) { - // Single task is being spawned - size_t T = prepare_task_pool( 1 ); - my_arena_slot->task_pool_ptr[T] = prepare_for_spawning( &first ); - commit_spawned_tasks( T + 1 ); - } - else { - // Task list is being spawned - task *arr[min_task_pool_size]; - fast_reverse_vector<task*> tasks(arr, min_task_pool_size); - task *t_next = NULL; - for( task* t = &first; ; t = t_next ) { - // If t is affinitized to another thread, it may already be executed - // and destroyed by the time prepare_for_spawning returns. - // So milk it while it is alive. 
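The "milk it while it is alive" remark names a general intrusive-list hazard: once a node has been handed off (here, mailed to another thread as a proxy), it may be destroyed at any moment, so its link field must be read before the hand-off, never after. Generically:

```cpp
// Sketch: when visit(node) may transfer ownership to a thread that frees it,
// capture the successor first. Types and names here are illustrative only.
struct Node { Node* next; };

void drain(Node* head, void (*visit)(Node*)) {
    for (Node* n = head; n != nullptr; ) {
        Node* next = n->next; // read while 'n' is still guaranteed alive
        visit(n);             // after this call 'n' may already be freed
        n = next;
    }
}
```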
- bool end = &t->prefix().next == &next; - t_next = t->prefix().next; - tasks.push_back( prepare_for_spawning(t) ); - if( end ) - break; - } - size_t num_tasks = tasks.size(); - size_t T = prepare_task_pool( num_tasks ); - tasks.copy_memory( my_arena_slot->task_pool_ptr + T ); - commit_spawned_tasks( T + num_tasks ); - } - if ( !in_arena() ) - enter_arena(); - my_arena->advertise_new_work</*Spawned=*/true>(); - assert_task_pool_valid(); -} - -void generic_scheduler::local_spawn_root_and_wait( task& first, task*& next ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - __TBB_ASSERT( &first, NULL ); - auto_empty_task dummy( __TBB_CONTEXT_ARG(this, first.prefix().context) ); - internal::reference_count n = 0; - for( task* t=&first; ; t=t->prefix().next ) { - ++n; - __TBB_ASSERT( !t->prefix().parent, "not a root task, or already running" ); - t->prefix().parent = &dummy; - if( &t->prefix().next==&next ) break; -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( t->prefix().context == t->prefix().next->prefix().context, - "all the root tasks in list must share the same context"); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - dummy.prefix().ref_count = n+1; - if( n>1 ) - local_spawn( *first.prefix().next, next ); - local_wait_for_all( dummy, &first ); -} - -void tbb::internal::generic_scheduler::spawn( task& first, task*& next ) { - governor::local_scheduler()->local_spawn( first, next ); -} - -void tbb::internal::generic_scheduler::spawn_root_and_wait( task& first, task*& next ) { - governor::local_scheduler()->local_spawn_root_and_wait( first, next ); -} - -void tbb::internal::generic_scheduler::enqueue( task& t, void* prio ) { - generic_scheduler *s = governor::local_scheduler(); - // these redirections are due to bw-compatibility, consider reworking some day - __TBB_ASSERT( s->my_arena, "thread is not in any arena" ); - s->my_arena->enqueue_task(t, (intptr_t)prio, s->my_random ); -} - -#if __TBB_TASK_PRIORITY -class auto_indicator : no_copy { - volatile bool& my_indicator; -public: - auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true ;} - ~auto_indicator () { my_indicator = false; } -}; - -task* generic_scheduler::winnow_task_pool () { - GATHER_STATISTIC( ++my_counters.prio_winnowings ); - __TBB_ASSERT( in_arena(), NULL ); - __TBB_ASSERT( my_offloaded_tasks, "At least one task is expected to be already offloaded" ); - // To eliminate possible sinking of the store to the indicator below the subsequent - // store to my_arena_slot->tail, the stores should have either been separated - // by full fence or both use release fences. And resetting indicator should have - // been done with release fence. But since this is just an optimization, and - // the corresponding checking sequence in arena::is_out_of_work() is not atomic - // anyway, fences aren't used, so that not to penalize warmer path. - auto_indicator indicator(my_pool_reshuffling_pending); - // The purpose of the synchronization algorithm here is for the owner thread - // to avoid locking task pool most of the time. 
- size_t T0 = __TBB_load_relaxed(my_arena_slot->tail); - __TBB_store_relaxed( my_arena_slot->tail, __TBB_load_relaxed(my_arena_slot->head) - 1 ); - atomic_fence(); - size_t H = __TBB_load_relaxed(my_arena_slot->head); - size_t T = __TBB_load_relaxed(my_arena_slot->tail); - __TBB_ASSERT( (intptr_t)T <= (intptr_t)T0, NULL); - __TBB_ASSERT( (intptr_t)H >= (intptr_t)T || (H == T0 && T == T0), NULL ); - bool acquired = false; - if ( H == T ) { - // Either no contention with thieves during arbitration protocol execution or ... - if ( H >= T0 ) { - // ... the task pool got empty - reset_deque_and_leave_arena( /*locked=*/false ); - return NULL; - } - } - else { - // Contention with thieves detected. Now without taking lock it is impossible - // to define the current head value because of its jitter caused by continuing - // stealing attempts (the pool is not locked so far). - acquired = true; - acquire_task_pool(); - H = __TBB_load_relaxed(my_arena_slot->head); - if ( H >= T0 ) { - reset_deque_and_leave_arena( /*locked=*/true ); - return NULL; - } - } - size_t src, - dst = T0; - // Find the first task to offload. - for ( src = H; src < T0; ++src ) { - task &t = *my_arena_slot->task_pool_ptr[src]; - intptr_t p = priority(t); - if ( p < *my_ref_top_priority ) { - // Position of the first offloaded task will be the starting point - // for relocation of subsequent tasks that survive winnowing. - dst = src; - offload_task( t, p ); - break; - } - } - for ( ++src; src < T0; ++src ) { - task &t = *my_arena_slot->task_pool_ptr[src]; - intptr_t p = priority(t); - if ( p < *my_ref_top_priority ) - offload_task( t, p ); - else - my_arena_slot->task_pool_ptr[dst++] = &t; - } - __TBB_ASSERT( T0 >= dst, NULL ); - task *t = H < dst ? my_arena_slot->task_pool_ptr[--dst] : NULL; - if ( H == dst ) { - // No tasks remain the primary pool - reset_deque_and_leave_arena( acquired ); - } - else if ( acquired ) { - __TBB_ASSERT( !is_poisoned(my_arena_slot->task_pool_ptr[H]), NULL ); - __TBB_store_relaxed( my_arena_slot->tail, dst ); - release_task_pool(); - } - else { - __TBB_ASSERT( !is_poisoned(my_arena_slot->task_pool_ptr[H]), NULL ); - // Release fence is necessary to make sure possibly relocated task pointers - // become visible to potential thieves - __TBB_store_with_release( my_arena_slot->tail, dst ); - } - my_arena_slot->fill_with_canary_pattern( dst, T0 ); - assert_task_pool_valid(); - return t; -} - -task* generic_scheduler::reload_tasks ( task*& offloaded_tasks, task**& offloaded_task_list_link, intptr_t top_priority ) { - GATHER_STATISTIC( ++my_counters.prio_reloads ); - __TBB_ASSERT( !in_arena(), NULL ); - task *arr[min_task_pool_size]; - fast_reverse_vector<task*> tasks(arr, min_task_pool_size); - task **link = &offloaded_tasks; - task *t; - while ( (t = *link) ) { - task** next_ptr = &t->prefix().next_offloaded; - if ( priority(*t) >= top_priority ) { - tasks.push_back( t ); - // Note that owner is an alias of next_offloaded. 
Thus the following - // assignment overwrites *next_ptr - task* next = *next_ptr; - t->prefix().owner = this; - __TBB_ASSERT( t->prefix().state == task::ready || t->prefix().extra_state == es_task_proxy, NULL ); - *link = next; - } - else { - link = next_ptr; - } - } - if ( link == &offloaded_tasks ) { - offloaded_tasks = NULL; -#if TBB_USE_ASSERT - offloaded_task_list_link = NULL; -#endif /* TBB_USE_ASSERT */ - } - else { - __TBB_ASSERT( link, NULL ); - // Mark end of list - *link = NULL; - offloaded_task_list_link = link; - } - __TBB_ASSERT( link, NULL ); - size_t num_tasks = tasks.size(); - if ( num_tasks ) { - GATHER_STATISTIC( ++my_counters.prio_tasks_reloaded ); - size_t T = prepare_task_pool( num_tasks ); - tasks.copy_memory( my_arena_slot->task_pool_ptr + T ); - if ( --num_tasks ) { - commit_spawned_tasks( T += num_tasks ); - enter_arena(); - my_arena->advertise_new_work</*Spawned=*/true>(); - } - __TBB_ASSERT( T == __TBB_load_relaxed(my_arena_slot->tail), NULL ); - __TBB_ASSERT( T < my_arena_slot->my_task_pool_size, NULL ); - t = my_arena_slot->task_pool_ptr[T]; - poison_pointer(my_arena_slot->task_pool_ptr[T]); - assert_task_pool_valid(); - } - return t; -} - -task* generic_scheduler::reload_tasks () { - uintptr_t reload_epoch = *my_ref_reload_epoch; - __TBB_ASSERT( my_offloaded_tasks, NULL ); - __TBB_ASSERT( my_local_reload_epoch <= reload_epoch - || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2, - "Reload epoch counter overflow?" ); - if ( my_local_reload_epoch == reload_epoch ) - return NULL; - __TBB_ASSERT( my_offloaded_tasks, NULL ); - intptr_t top_priority = effective_reference_priority(); - __TBB_ASSERT( (uintptr_t)top_priority < (uintptr_t)num_priority_levels, NULL ); - task *t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link, top_priority ); - if ( my_offloaded_tasks && (my_arena->my_bottom_priority >= top_priority || !my_arena->my_num_workers_requested) ) { - // Safeguard against deliberately relaxed synchronization while checking - // for the presence of work in arena (so that not to impact hot paths). - // Arena may be reset to empty state when offloaded low priority tasks - // are still present. This results in both bottom and top priority bounds - // becoming 'normal', which makes offloaded low priority tasks unreachable. - // Update arena's bottom priority to accommodate them. 
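// [Aside] A hedged, self-contained sketch (not from the TBB sources) of the
// pointer-to-pointer unlink idiom that reload_tasks() uses above to filter
// the offloaded list without special-casing the head element. 'item' and
// 'remove_if_matching' are invented names.
struct item { int priority; item* next; };

template <typename Pred>
void remove_if_matching(item*& head, Pred pred) {
    item** link = &head;               // the pointer through which *link is reached
    while (item* it = *link) {
        if (pred(*it))
            *link = it->next;          // unlink: overwrite the incoming pointer
        else
            link = &it->next;          // keep: advance to this node's link field
    }
}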
- - // First indicate the presence of lower-priority tasks - my_market->update_arena_priority( *my_arena, priority(*my_offloaded_tasks) ); - // Then mark arena as full to unlock arena priority level adjustment - // by arena::is_out_of_work(), and ensure worker's presence - my_arena->advertise_new_work</*Spawned=*/false>(); - } - my_local_reload_epoch = reload_epoch; - return t; -} -#endif /* __TBB_TASK_PRIORITY */ - -inline task* generic_scheduler::get_task() { - __TBB_ASSERT( in_arena(), NULL ); - task* result = NULL; - size_t T = __TBB_load_relaxed(my_arena_slot->tail); // mirror -retry: - __TBB_store_relaxed(my_arena_slot->tail, --T); - atomic_fence(); - if ( (intptr_t)__TBB_load_relaxed(my_arena_slot->head) > (intptr_t)T ) { - acquire_task_pool(); - size_t H = __TBB_load_relaxed(my_arena_slot->head); // mirror - if ( (intptr_t)H <= (intptr_t)T ) { - // The thief backed off - grab the task - result = my_arena_slot->task_pool_ptr[T]; - __TBB_ASSERT( !is_poisoned(result), NULL ); - poison_pointer( my_arena_slot->task_pool_ptr[T] ); - } - else { - __TBB_ASSERT ( H == __TBB_load_relaxed(my_arena_slot->head) - && T == __TBB_load_relaxed(my_arena_slot->tail) - && H == T + 1, "victim/thief arbitration algorithm failure" ); - } - if ( (intptr_t)H < (intptr_t)T ) - release_task_pool(); - else - reset_deque_and_leave_arena( /*locked=*/true ); - } - else { - __TBB_control_consistency_helper(); // on my_arena_slot->head - result = my_arena_slot->task_pool_ptr[T]; - __TBB_ASSERT( !is_poisoned(result), NULL ); - poison_pointer( my_arena_slot->task_pool_ptr[T] ); - } - if( result && is_proxy(*result) ) { - task_proxy &tp = *(task_proxy*)result; - result = tp.extract_task<task_proxy::pool_bit>(); - if( !result ) { - // Proxy was empty, so it's our responsibility to free it - free_task<small_task>(tp); - if ( in_arena() ) - goto retry; - __TBB_ASSERT( is_quiescent_local_task_pool_reset(), NULL ); - return NULL; - } - GATHER_STATISTIC( ++my_counters.proxies_executed ); - // Following assertion should be true because TBB 2.0 tasks never specify affinity, and hence are not proxied. - __TBB_ASSERT( is_version_3_task(*result), "backwards compatibility with TBB 2.0 broken" ); - // Task affinity has changed. - my_innermost_running_task = result; - result->note_affinity(my_affinity_id); - } - __TBB_ASSERT( result || is_quiescent_local_task_pool_reset(), NULL ); - return result; -} // generic_scheduler::get_task - -task* generic_scheduler::steal_task( arena_slot& victim_slot ) { - task** victim_pool = lock_task_pool( &victim_slot ); - if ( !victim_pool ) - return NULL; - task* result = NULL; - size_t H = __TBB_load_relaxed(victim_slot.head); // mirror - const size_t H0 = H; - int skip_and_bump = 0; // +1 for skipped task and +1 for bumped head&tail -retry: - __TBB_store_relaxed( victim_slot.head, ++H ); - atomic_fence(); - if ( (intptr_t)H > (intptr_t)__TBB_load_relaxed(victim_slot.tail) ) { - // Stealing attempt failed, deque contents has not been changed by us - GATHER_STATISTIC( ++my_counters.thief_backoffs ); - __TBB_store_relaxed( victim_slot.head, /*dead: H = */ H0 ); - skip_and_bump++; // trigger that we bumped head and tail - __TBB_ASSERT ( !result, NULL ); - } - else { - __TBB_control_consistency_helper(); // on victim_slot.tail - result = victim_pool[H-1]; - __TBB_ASSERT( !is_poisoned(result), NULL ); - if( is_proxy(*result) ) { - task_proxy& tp = *static_cast<task_proxy*>(result); - // If mailed task is likely to be grabbed by its destination thread, skip it. 
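// [Aside] A hedged sketch (not from the TBB sources) of the victim/thief
// arbitration protocol that get_task() and steal_task() implement around
// this point: each side first advertises its intent (owner decrements tail,
// thief increments head), then fences, then checks for conflict; ties on
// the last element are resolved under the pool lock. Names are invented,
// and pool reset, proxies, and poisoning are omitted.
#include <atomic>
#include <mutex>
#include <vector>

struct Task;

class mini_deque {
    std::vector<Task*> my_pool = std::vector<Task*>(1024);
    std::atomic<long> my_head{0}, my_tail{0};
    std::mutex my_lock;    // stands in for acquire_task_pool()/lock_task_pool()
public:
    Task* owner_pop() {    // lock-free unless racing for the last element
        long t = my_tail.load(std::memory_order_relaxed) - 1;
        my_tail.store(t, std::memory_order_seq_cst);             // advertise
        if (my_head.load(std::memory_order_seq_cst) > t) {       // possible conflict
            std::lock_guard<std::mutex> guard(my_lock);          // arbitrate
            if (my_head.load(std::memory_order_relaxed) > t) {
                my_tail.store(t + 1, std::memory_order_relaxed); // thief won
                return nullptr;
            }
        }
        return my_pool[t];                                       // owner won
    }
    Task* thief_steal() {  // victim pool stays locked for the whole steal
        std::lock_guard<std::mutex> guard(my_lock);
        long h = my_head.load(std::memory_order_relaxed);
        my_head.store(h + 1, std::memory_order_seq_cst);         // advertise
        if (my_tail.load(std::memory_order_seq_cst) <= h) {      // owner won
            my_head.store(h, std::memory_order_relaxed);         // back off
            return nullptr;
        }
        return my_pool[h];
    }
};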
- if ( task_proxy::is_shared(tp.task_and_tag) && tp.outbox->recipient_is_idle() ) - { - GATHER_STATISTIC( ++my_counters.proxies_bypassed ); - result = NULL; - __TBB_ASSERT( skip_and_bump < 2, NULL ); - skip_and_bump = 1; // note we skipped a task - goto retry; - } - } - __TBB_ASSERT( result, NULL ); - // emit "task was consumed" signal - ITT_NOTIFY(sync_acquired, (void*)((uintptr_t)&victim_slot+sizeof(uintptr_t))); - const size_t H1 = H0 + 1; - if ( H1 < H ) { - // Some proxies in the task pool have been bypassed. Need to close - // the hole left by the stolen task. The following variant: - // victim_pool[H-1] = victim_pool[H0]; - // is of constant time, but creates a potential for degrading stealing - // mechanism efficiency and growing owner's stack size too much because - // of moving earlier split off (and thus larger) chunks closer to owner's - // end of the deque (tail). - // So we use linear time variant that is likely to be amortized to be - // near-constant time, though, and preserves stealing efficiency premises. - // These changes in the deque must be released to the owner. - memmove( victim_pool + H1, victim_pool + H0, (H - H1) * sizeof(task*) ); - __TBB_store_with_release( victim_slot.head, /*dead: H = */ H1 ); - if ( (intptr_t)H >= (intptr_t)__TBB_load_relaxed(victim_slot.tail) ) - skip_and_bump++; // trigger that we bumped head and tail - } - poison_pointer( victim_pool[H0] ); - } - - unlock_task_pool( &victim_slot, victim_pool ); - __TBB_ASSERT( skip_and_bump <= 2, NULL ); -#if __TBB_PREFETCHING - __TBB_cl_evict(&victim_slot.head); - __TBB_cl_evict(&victim_slot.tail); -#endif - if( --skip_and_bump > 0 ) { // if both: task skipped and head&tail bumped - // Synchronize with snapshot as we bumped head and tail which can falsely trigger EMPTY state - atomic_fence(); - my_arena->advertise_new_work</*Spawned=*/true>(); - } - return result; -} - -task* generic_scheduler::get_mailbox_task() { - __TBB_ASSERT( my_affinity_id>0, "not in arena" ); - while ( task_proxy* const tp = my_inbox.pop() ) { - if ( task* result = tp->extract_task<task_proxy::mailbox_bit>() ) { - ITT_NOTIFY( sync_acquired, my_inbox.outbox() ); - result->prefix().extra_state |= es_task_is_stolen; - return result; - } - // We have exclusive access to the proxy, and can destroy it. - free_task<no_cache_small_task>(*tp); - } - return NULL; -} - -// TODO: Rename to publish_task_pool -void generic_scheduler::enter_arena() { - __TBB_ASSERT ( my_arena, "no arena: initialization not completed?" ); - __TBB_ASSERT ( my_arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" ); - __TBB_ASSERT ( my_arena_slot == &my_arena->my_slots[my_arena_index], NULL); - __TBB_ASSERT ( my_arena_slot->task_pool == EmptyTaskPool, "someone else grabbed my arena slot?" ); - __TBB_ASSERT ( __TBB_load_relaxed(my_arena_slot->head) < __TBB_load_relaxed(my_arena_slot->tail), - "entering arena without tasks to share" ); - // Release signal on behalf of previously spawned tasks (when this thread was not in arena yet) - ITT_NOTIFY(sync_releasing, my_arena_slot); - __TBB_store_with_release( my_arena_slot->task_pool, my_arena_slot->task_pool_ptr ); -} - -void generic_scheduler::leave_arena() { - __TBB_ASSERT( in_arena(), "Not in arena" ); - // Do not reset my_arena_index. 
It will be used to (attempt to) re-acquire the slot next time - __TBB_ASSERT( &my_arena->my_slots[my_arena_index] == my_arena_slot, "arena slot and slot index mismatch" ); - __TBB_ASSERT ( my_arena_slot->task_pool == LockedTaskPool, "Task pool must be locked when leaving arena" ); - __TBB_ASSERT ( is_quiescent_local_task_pool_empty(), "Cannot leave arena when the task pool is not empty" ); - ITT_NOTIFY(sync_releasing, &my_arena->my_slots[my_arena_index]); - // No release fence is necessary here as this assignment precludes external - // accesses to the local task pool when becomes visible. Thus it is harmless - // if it gets hoisted above preceding local bookkeeping manipulations. - __TBB_store_relaxed( my_arena_slot->task_pool, EmptyTaskPool ); -} - -generic_scheduler* generic_scheduler::create_worker( market& m, size_t index ) { - generic_scheduler* s = allocate_scheduler( NULL, index ); // index is not a real slot in arena -#if __TBB_TASK_GROUP_CONTEXT - s->my_dummy_task->prefix().context = &the_dummy_context; - // Sync up the local cancellation state with the global one. No need for fence here. - s->my_context_state_propagation_epoch = the_context_state_propagation_epoch; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - s->my_market = &m; - s->init_stack_info(); -#if __TBB_TASK_PRIORITY - s->my_ref_top_priority = &s->my_market->my_global_top_priority; - s->my_ref_reload_epoch = &s->my_market->my_global_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ - return s; -} - -// TODO: make it a member method -generic_scheduler* generic_scheduler::create_master( arena& a ) { - generic_scheduler* s = allocate_scheduler( &a, 0 /*Master thread always occupies the first slot*/ ); - task& t = *s->my_dummy_task; - s->my_innermost_running_task = &t; - s->my_dispatching_task = &t; - t.prefix().ref_count = 1; - governor::sign_on(s); - __TBB_ASSERT( &task::self()==&t, "governor::sign_on failed?" ); -#if __TBB_TASK_GROUP_CONTEXT - // Context to be used by root tasks by default (if the user has not specified one). - // Allocation is done by NFS allocator because we cannot reuse memory allocated - // for task objects since the free list is empty at the moment. - t.prefix().context = a.my_default_ctx; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - s->my_market = a.my_market; - __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" ); - s->attach_mailbox(1); - s->my_arena_slot = a.my_slots + 0; - s->my_arena_slot->my_scheduler = s; -#if _WIN32||_WIN64 - __TBB_ASSERT( s->my_market, NULL ); - s->my_market->register_master( s->master_exec_resource ); -#endif /* _WIN32||_WIN64 */ - s->init_stack_info(); -#if __TBB_TASK_GROUP_CONTEXT - // Sync up the local cancellation state with the global one. No need for fence here. - s->my_context_state_propagation_epoch = the_context_state_propagation_epoch; -#endif -#if __TBB_TASK_PRIORITY - // In the current implementation master threads continue processing even when - // there are other masters with higher priority. Only TBB worker threads are - // redistributed between arenas based on the latters' priority. Thus master - // threads use arena's top priority as a reference point (in contrast to workers - // that use my_market->my_global_top_priority). - s->my_ref_top_priority = &s->my_arena->my_top_priority; - s->my_ref_reload_epoch = &s->my_arena->my_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ -#if __TBB_SCHEDULER_OBSERVER - // Process any existing observers. 
- __TBB_ASSERT( a.my_observers.empty(), "Just created arena cannot have any observers associated with it" ); - the_global_observer_list.notify_entry_observers( s->my_last_global_observer, /*worker=*/false ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - return s; -} - -void generic_scheduler::cleanup_worker( void* arg, bool worker ) { - generic_scheduler& s = *(generic_scheduler*)arg; - __TBB_ASSERT( !s.my_arena_slot, "cleaning up attached worker" ); -#if __TBB_SCHEDULER_OBSERVER - if ( worker ) // can be called by master for worker, do not notify master twice - the_global_observer_list.notify_exit_observers( s.my_last_global_observer, /*worker=*/true ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - s.free_scheduler(); -} - -void generic_scheduler::cleanup_master() { - generic_scheduler& s = *this; // for similarity with cleanup_worker - __TBB_ASSERT( s.my_arena_slot, NULL); -#if __TBB_SCHEDULER_OBSERVER - s.my_arena->my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/false ); - the_global_observer_list.notify_exit_observers( s.my_last_global_observer, /*worker=*/false ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - if( in_arena() ) { - acquire_task_pool(); - if ( my_arena_slot->task_pool == EmptyTaskPool || - __TBB_load_relaxed(my_arena_slot->head) >= __TBB_load_relaxed(my_arena_slot->tail) ) - { - // Local task pool is empty - leave_arena(); - } - else { - // Master's local task pool may e.g. contain proxies of affinitized tasks. - release_task_pool(); - __TBB_ASSERT ( governor::is_set(this), "TLS slot is cleared before the task pool cleanup" ); - s.local_wait_for_all( *s.my_dummy_task, NULL ); - __TBB_ASSERT( !in_arena(), NULL ); - __TBB_ASSERT ( governor::is_set(this), "Other thread reused our TLS key during the task pool cleanup" ); - } - } - __TBB_ASSERT( s.my_market, NULL ); - market *my_market = s.my_market; -#if _WIN32||_WIN64 - s.my_market->unregister_master( s.master_exec_resource ); -#endif /* _WIN32||_WIN64 */ - arena* a = s.my_arena; - __TBB_ASSERT(a->my_slots+0 == my_arena_slot, NULL); -#if __TBB_STATISTICS - *my_arena_slot->my_counters += s.my_counters; -#endif /* __TBB_STATISTICS */ -#if __TBB_TASK_PRIORITY - __TBB_ASSERT( my_arena_slot->my_scheduler, NULL ); - // Master's scheduler may be locked by a worker taking arena snapshot or by - // a thread propagating task group state change across the context tree. - while ( as_atomic(my_arena_slot->my_scheduler).compare_and_swap(NULL, this) != this ) - __TBB_Yield(); - __TBB_ASSERT( !my_arena_slot->my_scheduler, NULL ); -#else /* !__TBB_TASK_PRIORITY */ - __TBB_store_with_release(my_arena_slot->my_scheduler, (generic_scheduler*)NULL); -#endif /* __TBB_TASK_PRIORITY */ - my_arena_slot = NULL; // detached from slot - s.free_scheduler(); - // Resetting arena to EMPTY state (as earlier TBB versions did) should not be - // done here (or anywhere else in the master thread to that matter) because - // after introducing arena-per-master logic and fire-and-forget tasks doing - // so can result either in arena's premature destruction (at least without - // additional costly checks in workers) or in unnecessary arena state changes - // (and ensuing workers migration). -#if __TBB_STATISTICS_EARLY_DUMP - GATHER_STATISTIC( a->dump_arena_statistics() ); -#endif - if (governor::needsWaitWorkers()) - my_market->prepare_wait_workers(); - a->on_thread_leaving</*is_master*/true>(); - if (governor::needsWaitWorkers()) - my_market->wait_workers(); -} - -} // namespace internal -} // namespace tbb - -/* - Comments: - -1. 
The premise of the cancellation support implementation is that cancellations are - not part of the hot path of the program execution. Therefore all changes in its - implementation in order to reduce the overhead of the cancellation control flow - should be done only in ways that do not increase overhead of the normal execution. - - In general contexts are used by all threads and their descendants are created in - different threads as well. In order to minimize impact of the cross-thread tree - maintenance (first of all because of the synchronization), the tree of contexts - is split into pieces, each of which is handled by the only thread. Such pieces - are represented as lists of contexts, members of which are contexts that were - bound to their parents in the given thread. - - The context tree maintenance and cancellation propagation algorithms is designed - in such a manner that cross-thread access to a context list will take place only - when cancellation signal is sent (by user or when an exception happens), and - synchronization is necessary only then. Thus the normal execution flow (without - exceptions and cancellation) remains free from any synchronization done on - behalf of exception handling and cancellation support. - -2. Consider parallel cancellations at the different levels of the context tree: - - Ctx1 <- Cancelled by Thread1 |- Thread2 started processing - | | - Ctx2 |- Thread1 started processing - | T1 |- Thread2 finishes and syncs up local counters - Ctx3 <- Cancelled by Thread2 | - | |- Ctx5 is bound to Ctx2 - Ctx4 | - T2 |- Thread1 reaches Ctx2 - - Thread-propagator of each cancellation increments global counter. However the thread - propagating the cancellation from the outermost context (Thread1) may be the last - to finish. Which means that the local counters may be synchronized earlier (by Thread2, - at Time1) than it propagated cancellation into Ctx2 (at time Time2). If a new context - (Ctx5) is created and bound to Ctx2 between Time1 and Time2, checking its parent only - (Ctx2) may result in cancellation request being lost. - - This issue is solved by doing the whole propagation under the lock. - - If we need more concurrency while processing parallel cancellations, we could try - the following modification of the propagation algorithm: - - advance global counter and remember it - for each thread: - scan thread's list of contexts - for each thread: - sync up its local counter only if the global counter has not been changed - - However this version of the algorithm requires more analysis and verification. - -3. There is no portable way to get stack base address in Posix, however the modern - Linux versions provide pthread_attr_np API that can be used to obtain thread's - stack size and base address. Unfortunately even this function does not provide - enough information for the main thread on IA-64 architecture (RSE spill area - and memory stack are allocated as two separate discontinuous chunks of memory), - and there is no portable way to discern the main and the secondary threads. - Thus for OS X* and IA-64 Linux architecture we use the TBB worker stack size for - all threads and use the current stack top as the stack base. This simplified - approach is based on the following assumptions: - 1) If the default stack size is insufficient for the user app needs, the - required amount will be explicitly specified by the user at the point of the - TBB scheduler initialization (as an argument to tbb::task_scheduler_init - constructor). 
- 2) When a master thread initializes the scheduler, it has enough space on its - stack. Here "enough" means "at least as much as worker threads have". - 3) If the user app strives to conserve the memory by cutting stack size, it - should do this for TBB workers too (as in the #1). -*/ diff --git a/src/tbb/src/tbb/scheduler.h b/src/tbb/src/tbb/scheduler.h deleted file mode 100644 index 7172d91c3..000000000 --- a/src/tbb/src/tbb/scheduler.h +++ /dev/null @@ -1,709 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _TBB_scheduler_H -#define _TBB_scheduler_H - -#include "scheduler_common.h" -#include "tbb/spin_mutex.h" -#include "mailbox.h" -#include "tbb_misc.h" // for FastRandom -#include "itt_notify.h" -#include "../rml/include/rml_tbb.h" - -#if __TBB_SURVIVE_THREAD_SWITCH -#include "cilk-tbb-interop.h" -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -namespace tbb { -namespace internal { - -template<typename SchedulerTraits> class custom_scheduler; -struct nested_arena_context; - -//------------------------------------------------------------------------ -// generic_scheduler -//------------------------------------------------------------------------ - -#if __TBB_TASK_GROUP_CONTEXT -struct scheduler_list_node_t { - scheduler_list_node_t *my_prev, - *my_next; -}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#define EmptyTaskPool ((task**)0) -#define LockedTaskPool ((task**)~(intptr_t)0) - -#define LockedMaster ((generic_scheduler*)~(intptr_t)0) - -struct scheduler_state { - //! Index of the arena slot the scheduler occupies now, or occupied last time. - size_t my_arena_index; // TODO: make it unsigned and pair with my_affinity_id to fit into cache line - - //! Pointer to the slot in the arena we own at the moment. - arena_slot* my_arena_slot; - - //! The arena that I own (if master) or am servicing at the moment (if worker) - arena* my_arena; - - //! Innermost task whose task::execute() is running. - task* my_innermost_running_task; - - //! Task, in the context of which the current TBB dispatch loop is running. - /** Outside of or in the outermost dispatch loop (not in a nested call to - wait_for_all) it is my_dummy_task for master threads, and NULL for workers. 
**/ - task* my_dispatching_task; - - mail_inbox my_inbox; - - //! The mailbox id assigned to this scheduler. - /** The id is assigned upon first entry into the arena. - TODO: how are id's being garbage collected? - TODO: master thread may enter arena and leave and then reenter. - We want to give it the same affinity_id upon reentry, if practical. - */ - affinity_id my_affinity_id; - -#if __TBB_SCHEDULER_OBSERVER - //! Last observer in the global observers list processed by this scheduler - observer_proxy* my_last_global_observer; - - //! Last observer in the local observers list processed by this scheduler - observer_proxy* my_last_local_observer; -#endif /* __TBB_SCHEDULER_OBSERVER */ -#if __TBB_TASK_PRIORITY - //! Latest known highest priority of tasks in the market or arena. - /** Master threads currently tracks only tasks in their arenas, while workers - take into account global top priority (among all arenas in the market). **/ - volatile intptr_t *my_ref_top_priority; - - //! Pointer to market's (for workers) or current arena's (for the master) reload epoch counter. - volatile uintptr_t *my_ref_reload_epoch; -#endif /* __TBB_TASK_PRIORITY */ -}; - -//! Work stealing task scheduler. -/** None of the fields here are ever read or written by threads other than - the thread that creates the instance. - - Class generic_scheduler is an abstract base class that contains most of the scheduler, - except for tweaks specific to processors and tools (e.g. VTune). - The derived template class custom_scheduler<SchedulerTraits> fills in the tweaks. */ -class generic_scheduler: public scheduler, public ::rml::job, public scheduler_state { -public: // almost every class in TBB uses generic_scheduler - - //! If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd. - static const size_t quick_task_size = 256-task_prefix_reservation_size; - - static bool is_version_3_task( task& t ) { - return (t.prefix().extra_state & 0x0F)>=0x1; - } - - //! Position in the call stack specifying its maximal filling when stealing is still allowed - uintptr_t my_stealing_threshold; -#if __TBB_ipf - //! Position in the RSE backup area specifying its maximal filling when stealing is still allowed - uintptr_t my_rsb_stealing_threshold; -#endif - - static const size_t null_arena_index = ~size_t(0); - - // TODO: Rename into is_task_pool_published() - inline bool in_arena () const; - - inline bool is_local_task_pool_quiescent () const; - - inline bool is_quiescent_local_task_pool_empty () const; - - inline bool is_quiescent_local_task_pool_reset () const; - - //! The market I am in - market* my_market; - - //! Random number generator used for picking a random victim from which to steal. - FastRandom my_random; - - //! Free list of small tasks that can be reused. - task* my_free_list; - -#if __TBB_HOARD_NONLOCAL_TASKS - //! Free list of small non-local tasks that should be returned or can be reused. - task* my_nonlocal_free_list; -#endif - //! Fake root task created by slave threads. - /** The task is used as the "parent" argument to method wait_for_all. */ - task* my_dummy_task; - - //! Reference count for scheduler - /** Number of task_scheduler_init objects that point to this scheduler */ - long my_ref_count; - - inline void attach_mailbox( affinity_id id ); - - /* A couple of bools can be located here because space is otherwise just padding after my_affinity_id. */ - - //! True if *this was created by automatic TBB initialization - bool my_auto_initialized; - -#if __TBB_COUNT_TASK_NODES - //! 
Net number of big task objects that have been allocated but not yet freed. - intptr_t my_task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - - //! Sets up the data necessary for the stealing limiting heuristics - void init_stack_info (); - - //! Returns true if stealing is allowed - bool can_steal () { - int anchor; - // TODO IDEA: Add performance warning? -#if __TBB_ipf - return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold; -#else - return my_stealing_threshold < (uintptr_t)&anchor; -#endif - } - - //! Actions common to enter_arena and try_enter_arena - void do_enter_arena(); - - //! Used by workers to enter the arena - /** Does not lock the task pool in case if arena slot has been successfully grabbed. **/ - void enter_arena(); - - //! Leave the arena - /** Leaving arena automatically releases the task pool if it is locked. **/ - void leave_arena(); - - //! Resets head and tail indices to 0, and leaves arena - /** Argument specifies whether the task pool is currently locked by the owner - (via acquire_task_pool).**/ - inline void reset_deque_and_leave_arena ( bool locked ); - - //! Locks victim's task pool, and returns pointer to it. The pointer can be NULL. - /** Garbles victim_arena_slot->task_pool for the duration of the lock. **/ - task** lock_task_pool( arena_slot* victim_arena_slot ) const; - - //! Unlocks victim's task pool - /** Restores victim_arena_slot->task_pool munged by lock_task_pool. **/ - void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const; - - //! Locks the local task pool - /** Garbles my_arena_slot->task_pool for the duration of the lock. Requires - correctly set my_arena_slot->task_pool_ptr. **/ - void acquire_task_pool() const; - - //! Unlocks the local task pool - /** Restores my_arena_slot->task_pool munged by acquire_task_pool. Requires - correctly set my_arena_slot->task_pool_ptr. **/ - void release_task_pool() const; - - //! Checks if t is affinitized to another thread, and if so, bundles it as proxy. - /** Returns either t or proxy containing t. **/ - task* prepare_for_spawning( task* t ); - - //! Makes newly spawned tasks visible to thieves - inline void commit_spawned_tasks( size_t new_tail ); - - //! Makes relocated tasks visible to thieves and releases the local task pool. - /** Obviously, the task pool must be locked when calling this method. **/ - inline void commit_relocated_tasks( size_t new_tail ); - - //! Get a task from the local pool. - /** Called only by the pool owner. - Returns the pointer to the task or NULL if the pool is empty. - In the latter case compacts the pool. **/ - task* get_task(); - - //! Attempt to get a task from the mailbox. - /** Gets a task only if it has not been executed by its sender or a thief - that has stolen it from the sender's task pool. Otherwise returns NULL. - - This method is intended to be used only by the thread extracting the proxy - from its mailbox. (In contrast to local task pool, mailbox can be read only - by its owner). **/ - task* get_mailbox_task(); - - //! True if t is a task_proxy - static bool is_proxy( const task& t ) { - return t.prefix().extra_state==es_task_proxy; - } - - //! Steal task from another scheduler's ready pool. - task* steal_task( arena_slot& victim_arena_slot ); - - /** Initial size of the task deque sufficient to serve without reallocation - 4 nested parallel_for calls with iteration space of 65535 grains each. **/ - static const size_t min_task_pool_size = 64; - - //! 
Makes sure that the task pool can accommodate at least n more elements - /** If necessary relocates existing task pointers or grows the ready task deque. - Returns (possible updated) tail index (not accounting for n). **/ - size_t prepare_task_pool( size_t n ); - - //! Initialize a scheduler for a master thread. - static generic_scheduler* create_master( arena& a ); - - //! Perform necessary cleanup when a master thread stops using TBB. - void cleanup_master(); - - //! Initialize a scheduler for a worker thread. - static generic_scheduler* create_worker( market& m, size_t index ); - - //! Perform necessary cleanup when a worker thread finishes. - static void cleanup_worker( void* arg, bool worker ); - -protected: - template<typename SchedulerTraits> friend class custom_scheduler; - generic_scheduler( arena*, size_t index ); - -public: -#if TBB_USE_ASSERT > 1 - //! Check that internal data structures are in consistent state. - /** Raises __TBB_ASSERT failure if inconsistency is found. */ - void assert_task_pool_valid () const; -#else - void assert_task_pool_valid() const {} -#endif /* TBB_USE_ASSERT <= 1 */ - -#if __TBB_TASK_ARENA - void nested_arena_entry(arena*, nested_arena_context &, bool); - void nested_arena_exit(nested_arena_context &); - void wait_until_empty(); -#endif - - /*override*/ - void spawn( task& first, task*& next ); - - /*override*/ - void spawn_root_and_wait( task& first, task*& next ); - - /*override*/ - void enqueue( task&, void* reserved ); - - void local_spawn( task& first, task*& next ); - void local_spawn_root_and_wait( task& first, task*& next ); - virtual void local_wait_for_all( task& parent, task* child ) = 0; - - //! Destroy and deallocate this scheduler object - void free_scheduler(); - - //! Allocate task object, either from the heap or a free list. - /** Returns uninitialized task object with initialized prefix. */ - task& allocate_task( size_t number_of_bytes, - __TBB_CONTEXT_ARG(task* parent, task_group_context* context) ); - - //! Put task on free list. - /** Does not call destructor. */ - template<free_task_hint h> - void free_task( task& t ); - - //! Return task object to the memory allocator. - inline void deallocate_task( task& t ); - - //! True if running on a worker thread, false otherwise. - inline bool is_worker(); - - //! True if the scheduler is on the outermost dispatch level in a master thread. - /** Returns true when this scheduler instance is associated with an application - thread, and is not executing any TBB task. This includes being in a TBB - dispatch loop (one of wait_for_all methods) invoked directly from that thread. **/ - inline bool master_outermost_level () const; - - //! True if the scheduler is on the outermost dispatch level in a worker thread. - inline bool worker_outermost_level () const; - -#if __TBB_TASK_GROUP_CONTEXT - //! Returns task group context used by this scheduler instance. - /** This context is associated with root tasks created by a master thread - without explicitly specified context object outside of any running task. - - Note that the default context of a worker thread is never accessed by - user code (directly or indirectly). **/ - inline task_group_context* default_context (); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! Returns number of worker threads in the arena this thread belongs to. - unsigned number_of_workers_in_my_arena(); - -#if __TBB_COUNT_TASK_NODES - intptr_t get_task_node_count( bool count_arena_workers = false ); -#endif /* __TBB_COUNT_TASK_NODES */ - - //! 
Special value used to mark my_return_list as not taking any more entries. - static task* plugged_return_list() {return (task*)(intptr_t)(-1);} - - //! Number of small tasks that have been allocated by this scheduler. - intptr_t my_small_task_count; - - //! List of small tasks that have been returned to this scheduler by other schedulers. - task* my_return_list; - - //! Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption). - /** Returns obtained task or NULL if all attempts fail. */ - virtual task* receive_or_steal_task( __TBB_atomic reference_count& completion_ref_count, - bool return_if_no_work ) = 0; - - //! Free a small task t that that was allocated by a different scheduler - void free_nonlocal_small_task( task& t ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Padding isolating thread-local members from members that can be written to by other threads. - char _padding1[NFS_MaxLineSize - sizeof(context_list_node_t)]; - - //! Head of the thread specific list of task group contexts. - context_list_node_t my_context_list_head; - - //! Mutex protecting access to the list of task group contexts. - // TODO: check whether it can be deadly preempted and replace by spinning/sleeping mutex - spin_mutex my_context_list_mutex; - - //! Last state propagation epoch known to this thread - /** Together with the_context_state_propagation_epoch constitute synchronization protocol - that keeps hot path of task group context construction destruction mostly - lock-free. - When local epoch equals the global one, the state of task group contexts - registered with this thread is consistent with that of the task group trees - they belong to. **/ - uintptr_t my_context_state_propagation_epoch; - - //! Flag indicating that a context is being destructed by its owner thread - /** Together with my_nonlocal_ctx_list_update constitute synchronization protocol - that keeps hot path of context destruction (by the owner thread) mostly - lock-free. **/ - tbb::atomic<uintptr_t> my_local_ctx_list_update; - -#if __TBB_TASK_PRIORITY - //! Returns reference priority used to decide whether a task should be offloaded. - inline intptr_t effective_reference_priority () const; - - // TODO: move into slots and fix is_out_of_work - //! Task pool for offloading tasks with priorities lower than the current top priority. - task* my_offloaded_tasks; - - //! Points to the last offloaded task in the my_offloaded_tasks list. - task** my_offloaded_task_list_tail_link; - - //! Indicator of how recently the offload area was checked for the presence of top priority tasks. - uintptr_t my_local_reload_epoch; - - //! Indicates that the pool is likely non-empty even if appears so from outside - volatile bool my_pool_reshuffling_pending; - - //! Searches offload area for top priority tasks and reloads found ones into primary task pool. - /** Returns one of the found tasks or NULL. **/ - task* reload_tasks (); - - task* reload_tasks ( task*& offloaded_tasks, task**& offloaded_task_list_link, intptr_t top_priority ); - - //! Moves tasks with priority below the top one from primary task pool into offload area. - /** Returns the next execution candidate task or NULL. **/ - task* winnow_task_pool (); - - //! Unconditionally moves the task into offload area. - inline void offload_task ( task& t, intptr_t task_priority ); -#endif /* __TBB_TASK_PRIORITY */ - - //! Detaches abandoned contexts - /** These contexts must be destroyed by other threads. **/ - void cleanup_local_context_list (); - - //! 
Finds all contexts registered by this scheduler affected by the state change - //! and propagates the new state to them. - template <typename T> - void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ); - - // check consistency - static void assert_context_valid(const task_group_context *tgc) { - suppress_unused_warning(tgc); -#if TBB_USE_ASSERT - __TBB_ASSERT(tgc, NULL); - uintptr_t ctx = tgc->my_version_and_traits; - __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed"); - static const char *msg = "task_group_context is invalid"; - __TBB_ASSERT(!(ctx&~(3|(7<<task_group_context::traits_offset))), msg); // the value fits known values of versions and traits - __TBB_ASSERT(tgc->my_kind < task_group_context::dying, msg); - __TBB_ASSERT(tgc->my_cancellation_requested == 0 || tgc->my_cancellation_requested == 1, msg); - __TBB_ASSERT(tgc->my_state < task_group_context::low_unused_state_bit, msg); - if(tgc->my_kind != task_group_context::isolated) { - __TBB_ASSERT(tgc->my_owner, msg); - __TBB_ASSERT(tgc->my_node.my_next && tgc->my_node.my_prev, msg); - } -#if __TBB_TASK_PRIORITY - assert_priority_valid(tgc->my_priority); -#endif - if(tgc->my_parent) -#if TBB_USE_ASSERT > 1 - assert_context_valid(tgc->my_parent); -#else - __TBB_ASSERT(is_alive(tgc->my_parent->my_version_and_traits), msg); -#endif -#endif - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if _WIN32||_WIN64 -private: - //! Handle returned by RML when registering a master with RML - ::rml::server::execution_resource_t master_exec_resource; -public: -#endif /* _WIN32||_WIN64 */ - -#if __TBB_TASK_GROUP_CONTEXT - //! Flag indicating that a context is being destructed by non-owner thread. - /** See also my_local_ctx_list_update. **/ - tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if __TBB_SURVIVE_THREAD_SWITCH - __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk; -#if TBB_USE_ASSERT - //! State values used to check interface contract with cilkrts. - /** Names of cs_running...cs_freed derived from state machine diagram in cilk-tbb-interop.h */ - enum cilk_state_t { - cs_none=0xF000, // Start at nonzero value so that we can detect use of zeroed memory. - cs_running, - cs_limbo, - cs_freed - }; - cilk_state_t my_cilk_state; -#endif /* TBB_USE_ASSERT */ -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -#if __TBB_STATISTICS - //! Set of counters to track internal statistics on per thread basis - /** Placed at the end of the class definition to minimize the disturbance of - the core logic memory operations. 
**/ - mutable statistics_counters my_counters; -#endif /* __TBB_STATISTICS */ - -}; // class generic_scheduler - - -} // namespace internal -} // namespace tbb - -#include "arena.h" -#include "governor.h" - -namespace tbb { -namespace internal { - -inline bool generic_scheduler::in_arena () const { - __TBB_ASSERT(my_arena_slot, 0); - return my_arena_slot->task_pool != EmptyTaskPool; -} - -inline bool generic_scheduler::is_local_task_pool_quiescent () const { - __TBB_ASSERT(my_arena_slot, 0); - task** tp = my_arena_slot->task_pool; - return tp == EmptyTaskPool || tp == LockedTaskPool; -} - -inline bool generic_scheduler::is_quiescent_local_task_pool_empty () const { - __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" ); - return __TBB_load_relaxed(my_arena_slot->head) == __TBB_load_relaxed(my_arena_slot->tail); -} - -inline bool generic_scheduler::is_quiescent_local_task_pool_reset () const { - __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" ); - return __TBB_load_relaxed(my_arena_slot->head) == 0 && __TBB_load_relaxed(my_arena_slot->tail) == 0; -} - -inline bool generic_scheduler::master_outermost_level () const { - return my_dispatching_task == my_dummy_task; -} - -inline bool generic_scheduler::worker_outermost_level () const { - return !my_dispatching_task; -} - -#if __TBB_TASK_GROUP_CONTEXT -inline task_group_context* generic_scheduler::default_context () { - return my_dummy_task->prefix().context; -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -inline void generic_scheduler::attach_mailbox( affinity_id id ) { - __TBB_ASSERT(id>0,NULL); - my_inbox.attach( my_arena->mailbox(id) ); - my_affinity_id = id; -} - -inline bool generic_scheduler::is_worker() { - return my_arena_index != 0; //TODO: rework for multiple master -} - -inline unsigned generic_scheduler::number_of_workers_in_my_arena() { - return my_arena->my_max_num_workers; -} - -//! Return task object to the memory allocator. -inline void generic_scheduler::deallocate_task( task& t ) { -#if TBB_USE_ASSERT - task_prefix& p = t.prefix(); - p.state = 0xFF; - p.extra_state = 0xFF; - poison_pointer(p.next); -#endif /* TBB_USE_ASSERT */ - NFS_Free((char*)&t-task_prefix_reservation_size); -#if __TBB_COUNT_TASK_NODES - --my_task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ -} - -#if __TBB_COUNT_TASK_NODES -inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) { - return my_task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0); -} -#endif /* __TBB_COUNT_TASK_NODES */ - -inline void generic_scheduler::reset_deque_and_leave_arena ( bool locked ) { - if ( !locked ) - acquire_task_pool(); - __TBB_store_relaxed( my_arena_slot->tail, 0 ); - __TBB_store_relaxed( my_arena_slot->head, 0 ); - leave_arena(); -} - -//TODO: move to arena_slot -inline void generic_scheduler::commit_spawned_tasks( size_t new_tail ) { - __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" ); - // emit "task was released" signal - ITT_NOTIFY(sync_releasing, (void*)((uintptr_t)my_arena_slot+sizeof(uintptr_t))); - // Release fence is necessary to make sure that previously stored task pointers - // are visible to thieves. 
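// [Aside] A hedged illustration (not from the TBB sources) of why
// commit_spawned_tasks() publishes the new tail with release semantics, as
// the comment above explains: plain stores of task pointers into the pool
// must become visible before any thief that observes the new tail with
// acquire semantics reads them. Names are invented.
#include <atomic>
#include <cstddef>

struct Task;
extern Task* pool[];
extern std::atomic<std::size_t> tail;

void publish(Task* t, std::size_t slot) {
    pool[slot] = t;                                    // plain store
    tail.store(slot + 1, std::memory_order_release);   // orders the store above it
}

Task* observe(std::size_t slot) {
    if (tail.load(std::memory_order_acquire) > slot)   // pairs with the release
        return pool[slot];                             // pointer guaranteed visible
    return nullptr;
}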
- __TBB_store_with_release( my_arena_slot->tail, new_tail ); -} - -void generic_scheduler::commit_relocated_tasks ( size_t new_tail ) { - __TBB_ASSERT( is_local_task_pool_quiescent(), - "Task pool must be locked when calling commit_relocated_tasks()" ); - __TBB_store_relaxed( my_arena_slot->head, 0 ); - // Tail is updated last to minimize probability of a thread making arena - // snapshot being misguided into thinking that this task pool is empty. - __TBB_store_relaxed( my_arena_slot->tail, new_tail ); - release_task_pool(); -} - -template<free_task_hint hint> -void generic_scheduler::free_task( task& t ) { -#if __TBB_HOARD_NONLOCAL_TASKS - static const int h = hint&(~local_task); -#else - static const free_task_hint h = hint; -#endif - GATHER_STATISTIC(--my_counters.active_tasks); - task_prefix& p = t.prefix(); - // Verify that optimization hints are correct. - __TBB_ASSERT( h!=small_local_task || p.origin==this, NULL ); - __TBB_ASSERT( !(h&small_task) || p.origin, NULL ); - __TBB_ASSERT( !(h&local_task) || (!p.origin || uintptr_t(p.origin) > uintptr_t(4096)), "local_task means allocated"); - poison_value(p.depth); - poison_value(p.ref_count); - poison_pointer(p.owner); - __TBB_ASSERT( 1L<<t.state() & (1L<<task::executing|1L<<task::allocated), NULL ); - p.state = task::freed; - if( h==small_local_task || p.origin==this ) { - GATHER_STATISTIC(++my_counters.free_list_length); - p.next = my_free_list; - my_free_list = &t; - } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) { - // a special value reserved for future use, do nothing since - // origin is not pointing to a scheduler instance - } else if( !(h&local_task) && p.origin ) { - GATHER_STATISTIC(++my_counters.free_list_length); -#if __TBB_HOARD_NONLOCAL_TASKS - if( !(h&no_cache) ) { - p.next = my_nonlocal_free_list; - my_nonlocal_free_list = &t; - } else -#endif - free_nonlocal_small_task(t); - } else { - GATHER_STATISTIC(--my_counters.big_tasks); - deallocate_task(t); - } -} - -#if __TBB_TASK_PRIORITY -inline intptr_t generic_scheduler::effective_reference_priority () const { - // Workers on the outermost dispatch level (i.e. with empty stack) use market's - // priority as a reference point (to speedup discovering process level priority - // changes). But when there are enough workers to service (even if only partially) - // a lower priority arena, they should use arena's priority as a reference, lest - // be trapped in a futile spinning (because market's priority would prohibit - // executing ANY tasks in this arena). - return !worker_outermost_level() || - my_arena->my_num_workers_allotted < my_arena->num_workers_active() - ? 
*my_ref_top_priority : my_arena->my_top_priority; -} - -inline void generic_scheduler::offload_task ( task& t, intptr_t /*priority*/ ) { - GATHER_STATISTIC( ++my_counters.prio_tasks_offloaded ); - __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL ); -#if TBB_USE_ASSERT - t.prefix().state = task::ready; -#endif /* TBB_USE_ASSERT */ - t.prefix().next_offloaded = my_offloaded_tasks; - my_offloaded_tasks = &t; -} -#endif /* __TBB_TASK_PRIORITY */ - -#if __TBB_FP_CONTEXT -class cpu_ctl_env_helper { - cpu_ctl_env guard_cpu_ctl_env; - cpu_ctl_env curr_cpu_ctl_env; -public: - cpu_ctl_env_helper() { - guard_cpu_ctl_env.get_env(); - curr_cpu_ctl_env = guard_cpu_ctl_env; - } - ~cpu_ctl_env_helper() { - if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) - guard_cpu_ctl_env.set_env(); - } - void set_env( const task_group_context *ctx ) { - generic_scheduler::assert_context_valid(ctx); - const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>(&ctx->my_cpu_ctl_env); - if ( ctl != curr_cpu_ctl_env ) { - curr_cpu_ctl_env = ctl; - curr_cpu_ctl_env.set_env(); - } - } - void restore_default() { - if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) { - guard_cpu_ctl_env.set_env(); - curr_cpu_ctl_env = guard_cpu_ctl_env; - } - } -}; -#else -struct cpu_ctl_env_helper { - void set_env( __TBB_CONTEXT_ARG1(task_group_context *) ) {} - void restore_default() {} -}; -#endif /* __TBB_FP_CONTEXT */ - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_scheduler_H */ diff --git a/src/tbb/src/tbb/scheduler_common.h b/src/tbb/src/tbb/scheduler_common.h index bd4e03584..d689bf1d6 100644 --- a/src/tbb/src/tbb/scheduler_common.h +++ b/src/tbb/src/tbb/scheduler_common.h @@ -1,131 +1,299 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _TBB_scheduler_common_H -#define _TBB_scheduler_common_H + Copyright (c) 2005-2024 Intel Corporation -#include "tbb/tbb_machine.h" -#include "tbb/cache_aligned_allocator.h" + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at -#include <string.h> // for memset, memcpy, memmove + http://www.apache.org/licenses/LICENSE-2.0 -#include "tbb_statistics.h" - -#if TBB_USE_ASSERT > 1 -#include <stdio.h> -#endif /* TBB_USE_ASSERT > 1 */ - -/* Temporarily change "private" to "public" while including "tbb/task.h". - This hack allows us to avoid publishing internal types and methods - in the public header files just for sake of friend declarations. */ -#ifndef private - #define private public - #define undef_private -#endif + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -#include "tbb/task.h" -#include "tbb/tbb_exception.h" +#ifndef _TBB_scheduler_common_H +#define _TBB_scheduler_common_H -#ifdef undef_private - #undef private -#endif +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/detail/_template_helpers.h" +#include "oneapi/tbb/detail/_task.h" +#include "oneapi/tbb/detail/_machine.h" +#include "oneapi/tbb/task_group.h" +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/tbb_allocator.h" +#include "itt_notify.h" +#include "co_context.h" +#include "misc.h" +#include "governor.h" #ifndef __TBB_SCHEDULER_MUTEX_TYPE #define __TBB_SCHEDULER_MUTEX_TYPE tbb::spin_mutex #endif // TODO: add conditional inclusion based on specified type -#include "tbb/spin_mutex.h" - -// This macro is an attempt to get rid of ugly ifdefs in the shared parts of the code. -// It drops the second argument depending on whether the controlling macro is defined. -// The first argument is just a convenience allowing to keep comma before the macro usage. -#if __TBB_TASK_GROUP_CONTEXT - #define __TBB_CONTEXT_ARG1(context) context - #define __TBB_CONTEXT_ARG(arg1, context) arg1, context -#else /* !__TBB_TASK_GROUP_CONTEXT */ - #define __TBB_CONTEXT_ARG1(context) - #define __TBB_CONTEXT_ARG(arg1, context) arg1 -#endif /* !__TBB_TASK_GROUP_CONTEXT */ - -#if DO_TBB_TRACE -#include <cstdio> -#define TBB_TRACE(x) ((void)std::printf x) -#else -#define TBB_TRACE(x) ((void)(0)) -#endif /* DO_TBB_TRACE */ +#include "oneapi/tbb/spin_mutex.h" +#include "oneapi/tbb/mutex.h" -#if !__TBB_CPU_CTL_ENV_PRESENT -#include <fenv.h> +#if TBB_USE_ASSERT +#include <atomic> #endif +#include <cstdint> +#include <exception> +#include <memory> // unique_ptr +#include <unordered_map> + +//! Mutex type for global locks in the scheduler +using scheduler_mutex_type = __TBB_SCHEDULER_MUTEX_TYPE; + #if _MSC_VER && !defined(__INTEL_COMPILER) // Workaround for overzealous compiler warnings // These particular warnings are so ubiquitous that no attempt is made to narrow // the scope of the warnings. 
- #pragma warning (disable: 4100 4127 4312 4244 4267 4706) + // #pragma warning (disable: 4100 4127 4312 4244 4267 4706) #endif namespace tbb { -namespace interface7 { -namespace internal { -class task_arena_base; -class delegated_task; -class wait_task; -}} -namespace internal { -using namespace interface7::internal; +namespace detail { +namespace r1 { class arena; -template<typename SchedulerTraits> class custom_scheduler; -class generic_scheduler; -class governor; +class mail_inbox; class mail_outbox; class market; class observer_proxy; -class task_scheduler_observer_v3; -#if __TBB_TASK_PRIORITY -static const intptr_t num_priority_levels = 3; -static const intptr_t normalized_normal_priority = (num_priority_levels - 1) / 2; +enum task_stream_accessor_type { front_accessor = 0, back_nonnull_accessor }; +template<task_stream_accessor_type> class task_stream; -inline intptr_t normalize_priority ( priority_t p ) { - return intptr_t(p - priority_low) / priority_stride_v4; +using isolation_type = std::intptr_t; +constexpr isolation_type no_isolation = 0; + +struct cache_aligned_deleter { + template <typename T> + void operator() (T* ptr) const { + ptr->~T(); + cache_aligned_deallocate(ptr); + } +}; + +template <typename T> +using cache_aligned_unique_ptr = std::unique_ptr<T, cache_aligned_deleter>; + +template <typename T, typename ...Args> +cache_aligned_unique_ptr<T> make_cache_aligned_unique(Args&& ...args) { + return cache_aligned_unique_ptr<T>(new (cache_aligned_allocate(sizeof(T))) T(std::forward<Args>(args)...)); } -static const priority_t priority_from_normalized_rep[num_priority_levels] = { - priority_low, priority_normal, priority_high +//------------------------------------------------------------------------ +// Extended execute data +//------------------------------------------------------------------------ + +//! Execute data used on a task dispatcher side, reflects a current execution state +struct execution_data_ext : d1::execution_data { + task_dispatcher* task_disp{}; + isolation_type isolation{}; + d1::wait_context* wait_ctx{}; +}; + +//------------------------------------------------------------------------ +// Task accessor +//------------------------------------------------------------------------ + +//! Interpretation of reserved task fields inside a task dispatcher +struct task_accessor { + static constexpr std::uint64_t proxy_task_trait = 1; + static constexpr std::uint64_t resume_task_trait = 2; + static d1::task_group_context*& context(d1::task& t) { + task_group_context** tgc = reinterpret_cast<task_group_context**>(&t.m_reserved[0]); + return *tgc; + } + static isolation_type& isolation(d1::task& t) { + isolation_type* tag = reinterpret_cast<isolation_type*>(&t.m_reserved[2]); + return *tag; + } + static void set_proxy_trait(d1::task& t) { + // TODO: refactor proxy tasks not to work on uninitialized memory. + //__TBB_ASSERT((t.m_version_and_traits & proxy_task_trait) == 0, nullptr); + t.m_version_and_traits |= proxy_task_trait; + } + static bool is_proxy_task(d1::task& t) { + return (t.m_version_and_traits & proxy_task_trait) != 0; + } + static void set_resume_trait(d1::task& t) { + __TBB_ASSERT((t.m_version_and_traits & resume_task_trait) == 0, nullptr); + t.m_version_and_traits |= resume_task_trait; + } + static bool is_resume_task(d1::task& t) { + return (t.m_version_and_traits & resume_task_trait) != 0; + } +}; + +//------------------------------------------------------------------------ +//! 
Extended variant of the standard offsetof macro +/** The standard offsetof macro is not sufficient for TBB as it can be used for + POD-types only. The constant 0x1000 (not nullptr) is necessary to appease GCC. **/ +#define __TBB_offsetof(class_name, member_name) \ + ((ptrdiff_t)&(reinterpret_cast<class_name*>(0x1000)->member_name) - 0x1000) + +//! Returns address of the object containing a member with the given name and address +#define __TBB_get_object_ref(class_name, member_name, member_addr) \ + (*reinterpret_cast<class_name*>((char*)member_addr - __TBB_offsetof(class_name, member_name))) + +//! Helper class for tracking floating point context and task group context switches +/** Assuming presence of an itt collector, in addition to keeping track of floating + point context, this class emits itt events to indicate begin and end of task group + context execution **/ +template <bool report_tasks> +class context_guard_helper { + const d1::task_group_context* curr_ctx; + d1::cpu_ctl_env guard_cpu_ctl_env; + d1::cpu_ctl_env curr_cpu_ctl_env; +public: + context_guard_helper() : curr_ctx(nullptr) { + guard_cpu_ctl_env.get_env(); + curr_cpu_ctl_env = guard_cpu_ctl_env; + } + ~context_guard_helper() { + if (curr_cpu_ctl_env != guard_cpu_ctl_env) + guard_cpu_ctl_env.set_env(); + if (report_tasks && curr_ctx) + ITT_TASK_END; + } + // The function is called from bypass dispatch loop on the hot path. + // Consider performance issues when refactoring. + void set_ctx(const d1::task_group_context* ctx) { + if (!ctx) + return; + const d1::cpu_ctl_env* ctl = reinterpret_cast<const d1::cpu_ctl_env*>(&ctx->my_cpu_ctl_env); + // Compare the FPU settings directly because the context can be reused between parallel algorithms. + if (*ctl != curr_cpu_ctl_env) { + curr_cpu_ctl_env = *ctl; + curr_cpu_ctl_env.set_env(); + } + if (report_tasks && ctx != curr_ctx) { + // if task group context was active, report end of current execution frame. + if (curr_ctx) + ITT_TASK_END; + // reporting begin of new task group context execution frame. + // using address of task group context object to group tasks (parent). + // id of task execution frame is nullptr and reserved for future use. + ITT_TASK_BEGIN(ctx, ctx->my_name, nullptr); + curr_ctx = ctx; + } + } +#if _WIN64 + void restore_default() { + if (curr_cpu_ctl_env != guard_cpu_ctl_env) { + guard_cpu_ctl_env.set_env(); + curr_cpu_ctl_env = guard_cpu_ctl_env; + } + } +#endif // _WIN64 }; -inline void assert_priority_valid ( intptr_t p ) { - __TBB_ASSERT_EX( p >= 0 && p < num_priority_levels, NULL ); +#if (_WIN32 || _WIN64 || __unix__ || __APPLE__) && (__TBB_x86_32 || __TBB_x86_64) +#if _MSC_VER +#pragma intrinsic(__rdtsc) +#endif +inline std::uint64_t machine_time_stamp() { +#if __INTEL_COMPILER + return _rdtsc(); +#elif _MSC_VER + return __rdtsc(); +#else + std::uint32_t hi, lo; + __asm__ __volatile__("rdtsc" : "=d"(hi), "=a"(lo)); + return (std::uint64_t(hi) << 32) | lo; +#endif } -inline intptr_t& priority ( task& t ) { - return t.prefix().context->my_priority; +inline void prolonged_pause_impl() { + // Assumption based on practice: 1000-2000 ticks seems to be a suitable invariant for the + // majority of platforms. Currently, skip platforms that define __TBB_STEALING_PAUSE + // because these platforms require very careful tuning. 
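// [Aside] A hedged, portable analogue (not from the TBB sources) of the
// rdtsc-bounded spin that prolonged_pause_impl() builds from
// machine_time_stamp() around this point, using steady_clock in place of
// raw rdtsc. _mm_pause() assumes x86, like the rdtsc path; the
// one-microsecond budget is an invented placeholder, not TBB's tuned
// tick constant.
#include <chrono>
#include <immintrin.h>   // _mm_pause (x86 only)

inline void prolonged_pause_sketch() {
    using clock = std::chrono::steady_clock;
    const auto deadline = clock::now() + std::chrono::microseconds(1);
    do {
        _mm_pause();     // hardware hint: cheapen the spin for the sibling thread
    } while (clock::now() < deadline);
}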
+    std::uint64_t prev = machine_time_stamp();
+    const std::uint64_t finish = prev + 1000;
+    atomic_backoff backoff;
+    do {
+        backoff.bounded_pause();
+        std::uint64_t curr = machine_time_stamp();
+        if (curr <= prev)
+            // Possibly the current logical thread was moved to another hardware thread, or the counter overflowed.
+            break;
+        prev = curr;
+    } while (prev < finish);
+}
+#else
+inline void prolonged_pause_impl() {
+#ifdef __TBB_ipf
+    static const long PauseTime = 1500;
+#else
+    static const long PauseTime = 80;
+#endif
+    // TODO IDEA: Update PauseTime adaptively?
+    machine_pause(PauseTime);
}
-#endif /* __TBB_TASK_PRIORITY */
+#endif

-//! Mutex type for global locks in the scheduler
-typedef __TBB_SCHEDULER_MUTEX_TYPE scheduler_mutex_type;
+inline void prolonged_pause() {
+#if __TBB_WAITPKG_INTRINSICS_PRESENT
+    if (governor::wait_package_enabled()) {
+        std::uint64_t time_stamp = machine_time_stamp();
+        // The _tpause function directs the processor to enter an implementation-dependent optimized state
+        // until the Time Stamp Counter reaches or exceeds the value specified in the second parameter.
+        // The constant "1000" is the number of ticks to wait for.
+        // TODO: Modify this parameter based on empirical study of benchmarks.
+        // First parameter 0 selects between a lower power (cleared) or faster wakeup (set) optimized state.
+        _tpause(0, time_stamp + 1000);
+    }
+    else
+#endif
+    prolonged_pause_impl();
}

-#if __TBB_TASK_GROUP_CONTEXT
+// TODO: investigate working in terms of CPU cycles instead (e.g. via rdtsc),
+// because the same number of pauses + yields takes a different number of
+// CPU cycles on different configurations
+class stealing_loop_backoff {
+    const int my_pause_threshold;
+    const int my_yield_threshold;
+    int my_pause_count;
+    int my_yield_count;
+public:
+    // my_yield_threshold = 100 is an experimental value. Ideally, once we start calling __TBB_Yield(),
+    // the time spent spinning before calling out_of_work() should be approximately
+    // the time it takes for a thread to be woken up. Doing so would guarantee that we do
+    // no worse than 2x the optimal spin time. Or perhaps a time-slice quantum is the right amount.
+    stealing_loop_backoff(int num_workers, int yields_multiplier)
+        : my_pause_threshold{ 2 * (num_workers + 1) }
+        , my_yield_threshold{100 * yields_multiplier}
+        , my_pause_count{}
+        , my_yield_count{}
+    {}
+    bool pause() {
+        prolonged_pause();
+        if (my_pause_count++ >= my_pause_threshold) {
+            my_pause_count = my_pause_threshold;
+            d0::yield();
+            if (my_yield_count++ >= my_yield_threshold) {
+                my_yield_count = my_yield_threshold;
+                return true;
+            }
+        }
+        return false;
+    }
+    void reset_wait() {
+        my_pause_count = my_yield_count = 0;
+    }
+};
+
+//------------------------------------------------------------------------
+// Exception support
+//------------------------------------------------------------------------
//! Task group state change propagation global epoch
/** Together with generic_scheduler::my_context_state_propagation_epoch forms
    cross-thread signaling mechanism that allows to avoid locking at the hot path
@@ -136,262 +304,308 @@ typedef __TBB_SCHEDULER_MUTEX_TYPE scheduler_mutex_type;
    and thus registration/deregistration routines take slower branch that may block
    (at most one thread of the pool can be blocked at any moment). Otherwise the
    control path is lock-free and fast. **/
-extern uintptr_t the_context_state_propagation_epoch;
+extern std::atomic<std::uintptr_t> the_context_state_propagation_epoch;

//! Mutex guarding state change propagation across task groups forest.
/** Also protects modification of related data structures. **/
typedef scheduler_mutex_type context_state_propagation_mutex_type;
extern context_state_propagation_mutex_type the_context_state_propagation_mutex;
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-//! Alignment for a task object
-const size_t task_alignment = 32;
-
-//! Number of bytes reserved for a task prefix
-/** If not exactly sizeof(task_prefix), the extra bytes *precede* the task_prefix. */
-const size_t task_prefix_reservation_size = ((sizeof(internal::task_prefix)-1)/task_alignment+1)*task_alignment;
-
-//! Definitions for bits in task_prefix::extra_state
-enum task_extra_state {
-    //! Tag for v1 tasks (i.e. tasks in TBB 1.0 and 2.0)
-    es_version_1_task = 0,
-    //! Tag for v3 tasks (i.e. tasks in TBB 2.1-2.2)
-    es_version_3_task = 1,
-    //! Tag for enqueued tasks
-    es_task_enqueued = 0x10,
-    //! Tag for v3 task_proxy.
-    es_task_proxy = 0x20,
-    //! Set if ref_count might be changed by another thread. Used for debugging.
-    es_ref_count_active = 0x40,
-    //! Set if the task has been stolen
-    es_task_is_stolen = 0x80
-};
-inline void reset_extra_state ( task *t ) {
-    t->prefix().extra_state &= ~(es_task_is_stolen | es_task_enqueued);
-}
+class tbb_exception_ptr {
+    std::exception_ptr my_ptr;
+public:
+    static tbb_exception_ptr* allocate() noexcept;

-//! Optimization hint to free_task that enables it omit unnecessary tests and code.
-enum free_task_hint {
-    //! No hint
-    no_hint=0,
-    //! Task is known to have been allocated by this scheduler
-    local_task=1,
-    //! Task is known to be a small task.
-    /** Task should be returned to the free list of *some* scheduler, possibly not this scheduler. */
-    small_task=2,
-    //! Bitwise-OR of local_task and small_task.
-    /** Task should be returned to free list of this scheduler. */
-    small_local_task=3,
-    //! Disable caching for a small task.
-    no_cache = 4,
-    //! Task is known to be a small task and must not be cached.
-    no_cache_small_task = no_cache | small_task
-};
+    //! Destroys this object
+    /** Note that objects of this type can be created only by the allocate() method. **/
+    void destroy() noexcept;
+
+    //! Throws the contained exception.
+    void throw_self();
+
+private:
+    tbb_exception_ptr(const std::exception_ptr& src) : my_ptr(src) {}
+}; // class tbb_exception_ptr

//------------------------------------------------------------------------
// Debugging support
//------------------------------------------------------------------------

#if TBB_USE_ASSERT
+static const std::uintptr_t venom = tbb::detail::select_size_t_constant<0xDEADBEEFU, 0xDDEEAADDDEADBEEFULL>::value;

-static const uintptr_t venom = tbb::internal::select_size_t_constant<0xDEADBEEFU,0xDDEEAADDDEADBEEFULL>::value;
+inline void poison_value(std::uintptr_t& val) { val = venom; }

-template <typename T>
-void poison_value ( T& val ) { val = * punned_cast<T*>(&venom); }
+inline void poison_value(std::atomic<std::uintptr_t>& val) { val.store(venom, std::memory_order_relaxed); }

/** Expected to be used in assertions only, thus no empty form is defined. **/
-inline bool is_alive( uintptr_t v ) { return v != venom; }
+inline bool is_alive(std::uintptr_t v) { return v != venom; }

/** Logically, this method should be a member of class task.
    But we do not want to publish it, so it is here instead.
*/
-inline void assert_task_valid( const task& task ) {
-    __TBB_ASSERT( &task!=NULL, NULL );
-    __TBB_ASSERT( !is_poisoned(&task), NULL );
-    __TBB_ASSERT( (uintptr_t)&task % task_alignment == 0, "misaligned task" );
-#if __TBB_RECYCLE_TO_ENQUEUE
-    __TBB_ASSERT( (unsigned)task.state()<=(unsigned)task::to_enqueue, "corrupt task (invalid state)" );
-#else
-    __TBB_ASSERT( (unsigned)task.state()<=(unsigned)task::recycle, "corrupt task (invalid state)" );
-#endif
+inline void assert_task_valid(const d1::task* t) {
+    assert_pointer_valid(t);
}
-
#else /* !TBB_USE_ASSERT */

/** In contrast to debug version poison_value() is a macro here because the variable
    used as its argument may be undefined in release builds. **/
#define poison_value(g) ((void)0)

-inline void assert_task_valid( const task& ) {}
+inline void assert_task_valid(const d1::task*) {}

#endif /* !TBB_USE_ASSERT */

-//------------------------------------------------------------------------
-// Helpers
-//------------------------------------------------------------------------
-
-#if __TBB_TASK_GROUP_CONTEXT
-inline bool ConcurrentWaitsEnabled ( task& t ) {
-    return (t.prefix().context->my_version_and_traits & task_group_context::concurrent_wait) != 0;
-}
+struct suspend_point_type {
+#if __TBB_RESUMABLE_TASKS
+    //! The arena related to this task_dispatcher
+    arena* m_arena{ nullptr };
+    //! The random number generator for the resume task
+    FastRandom m_random;
+    //! The flag is raised when the original owner should return to this task dispatcher.
+    std::atomic<bool> m_is_owner_recalled{ false };
+    //! Indicates whether the resume task should be placed into the critical task stream.
+    bool m_is_critical{ false };
+    //! Associated coroutine
+    co_context m_co_context;
+    //! Suspend point before resume
+    suspend_point_type* m_prev_suspend_point{nullptr};
+
+    // Possible state transitions:
+    // A -> S -> N -> A
+    // A -> N -> S -> N -> A
+    enum class stack_state {
+        active, // some thread is working with this stack
+        suspended, // no thread is working with this stack
+        notified // some thread tried to resume this stack
+    };
+
+    //! The flag required to protect suspend finish and resume call
+    std::atomic<stack_state> m_stack_state{stack_state::active};
+
+    void resume(suspend_point_type* sp) {
+        __TBB_ASSERT(m_stack_state.load(std::memory_order_relaxed) != stack_state::suspended, "The stack is expected to be active");
+
+        sp->m_prev_suspend_point = this;
+
+        // Do not access sp after resume
+        m_co_context.resume(sp->m_co_context);
+        __TBB_ASSERT(m_stack_state.load(std::memory_order_relaxed) != stack_state::active, nullptr);
+
+        finilize_resume();
+    }

-inline bool CancellationInfoPresent ( task& t ) {
-    return t.prefix().context->my_cancellation_requested != 0;
-}
+    void finilize_resume() {
+        m_stack_state.store(stack_state::active, std::memory_order_relaxed);
+        // Set the suspended state for the stack that we left. If the state is already notified, it means that
+        // someone already tried to resume our previous stack but failed. So, we need to resume it.
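+        // The single exchange() below publishes the 'suspended' state and observes a
+        // racing 'notified' transition in one atomic step, so a concurrent resume
+        // request cannot be lost between the two stack-state updates.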
+ // m_prev_suspend_point might be nullptr when destroying co_context based on threads + if (m_prev_suspend_point && m_prev_suspend_point->m_stack_state.exchange(stack_state::suspended) == stack_state::notified) { + r1::resume(m_prev_suspend_point); + } + m_prev_suspend_point = nullptr; + } -#if TBB_USE_CAPTURED_EXCEPTION - inline tbb_exception* TbbCurrentException( task_group_context*, tbb_exception* src) { return src->move(); } - inline tbb_exception* TbbCurrentException( task_group_context*, captured_exception* src) { return src; } -#else - // Using macro instead of an inline function here allows to avoid evaluation of the - // TbbCapturedException expression when exact propagation is enabled for the context. - #define TbbCurrentException(context, TbbCapturedException) \ - context->my_version_and_traits & task_group_context::exact_exception \ - ? tbb_exception_ptr::allocate() \ - : tbb_exception_ptr::allocate( *(TbbCapturedException) ); -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -#define TbbRegisterCurrentException(context, TbbCapturedException) \ - if ( context->cancel_group_execution() ) { \ - /* We are the first to signal cancellation, so store the exception that caused it. */ \ - context->my_exception = TbbCurrentException( context, TbbCapturedException ); \ + bool try_notify_resume() { + // Check that stack is already suspended. Return false if not yet. + return m_stack_state.exchange(stack_state::notified) == stack_state::suspended; } -#define TbbCatchAll(context) \ - catch ( tbb_exception& exc ) { \ - TbbRegisterCurrentException( context, &exc ); \ - } catch ( std::exception& exc ) { \ - TbbRegisterCurrentException( context, captured_exception::allocate(typeid(exc).name(), exc.what()) ); \ - } catch ( ... ) { \ - TbbRegisterCurrentException( context, captured_exception::allocate("...", "Unidentified exception") );\ + void recall_owner() { + __TBB_ASSERT(m_stack_state.load(std::memory_order_relaxed) == stack_state::suspended, nullptr); + m_stack_state.store(stack_state::notified, std::memory_order_relaxed); + m_is_owner_recalled.store(true, std::memory_order_release); } -#else /* !__TBB_TASK_GROUP_CONTEXT */ + struct resume_task final : public d1::task { + task_dispatcher& m_target; + explicit resume_task(task_dispatcher& target) : m_target(target) { + task_accessor::set_resume_trait(*this); + } + d1::task* execute(d1::execution_data& ed) override; + d1::task* cancel(d1::execution_data&) override { + __TBB_ASSERT(false, "The resume task cannot be canceled"); + return nullptr; + } + } m_resume_task; -inline bool ConcurrentWaitsEnabled ( task& t ) { return false; } + suspend_point_type(arena* a, std::size_t stack_size, task_dispatcher& target); +#endif /*__TBB_RESUMABLE_TASKS */ +}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ +#if _MSC_VER && !defined(__INTEL_COMPILER) +// structure was padded due to alignment specifier +// #pragma warning( push ) +// #pragma warning( disable: 4324 ) +#endif -//------------------------------------------------------------------------ -// arena_slot -//------------------------------------------------------------------------ -struct arena_slot_line1 { - //TODO: make this tbb:atomic<>. - //! Scheduler of the thread attached to the slot - /** Marks the slot as busy, and is used to iterate through the schedulers belonging to this arena **/ - generic_scheduler* my_scheduler; - - // Synchronization of access to Task pool - /** Also is used to specify if the slot is empty or locked: - 0 - empty - -1 - locked **/ - task* *__TBB_atomic task_pool; - - //! 
Index of the first ready task in the deque.
-    /** Modified by thieves, and by the owner during compaction/reallocation **/
-    __TBB_atomic size_t head;
-};
+class alignas (max_nfs_size) task_dispatcher {
+public:
+    // TODO: reconsider low level design to better organize dependencies and files.
+    friend class thread_data;
+    friend class arena_slot;
+    friend class nested_arena_context;
+    friend class delegated_task;
+    friend struct base_waiter;
+
+    //! The list of possible post resume actions.
+    enum class post_resume_action {
+        invalid,
+        register_waiter,
+        cleanup,
+        notify,
+        none
+    };
+
+    //! The data of the current thread attached to this task_dispatcher
+    thread_data* m_thread_data{ nullptr };
+
+    //! The current execution data
+    execution_data_ext m_execute_data_ext;
+
+    //! Properties
+    struct properties {
+        bool outermost{ true };
+        bool fifo_tasks_allowed{ true };
+        bool critical_task_allowed{ true };
+    } m_properties;
+
+    //! Position in the call stack when stealing is still allowed.
+    std::uintptr_t m_stealing_threshold{};
+
+    //! Suspend point (null if this task dispatcher has never been suspended)
+    suspend_point_type* m_suspend_point{ nullptr };
+
+    //! Used to improve scalability of d1::wait_context by using a per-thread reference counter
+    std::unordered_map<d1::wait_tree_vertex_interface*, d1::reference_vertex*,
+        std::hash<d1::wait_tree_vertex_interface*>, std::equal_to<d1::wait_tree_vertex_interface*>,
+        tbb_allocator<std::pair<d1::wait_tree_vertex_interface* const, d1::reference_vertex*>>
+    >
+    m_reference_vertex_map;
+
+    //! Attempt to get a task from the mailbox.
+    /** Gets a task only if it has not been executed by its sender or a thief
+        that has stolen it from the sender's task pool. Otherwise returns nullptr.
+        This method is intended to be used only by the thread extracting the proxy
+        from its mailbox. (In contrast to the local task pool, the mailbox can be
+        read only by its owner.) **/
+    d1::task* get_mailbox_task(mail_inbox& my_inbox, execution_data_ext& ed, isolation_type isolation);
+
+    d1::task* get_critical_task(d1::task*, execution_data_ext&, isolation_type, bool);
+
+    template <bool ITTPossible, typename Waiter>
+    d1::task* receive_or_steal_task(thread_data& tls, execution_data_ext& ed, Waiter& waiter,
+                                    isolation_type isolation, bool outermost, bool criticality_absence);
+
+    template <bool ITTPossible, typename Waiter>
+    d1::task* local_wait_for_all(d1::task * t, Waiter& waiter);
+
+    task_dispatcher(const task_dispatcher&) = delete;
+
+    bool can_steal();
+public:
+    task_dispatcher(arena* a);

-struct arena_slot_line2 {
-    //! Hint provided for operations with the container of starvation-resistant tasks.
-    /** Modified by the owner thread (during these operations). **/
-    unsigned hint_for_pop;
+    ~task_dispatcher() {
+        if (m_suspend_point) {
+            m_suspend_point->~suspend_point_type();
+            cache_aligned_deallocate(m_suspend_point);
+        }

-    //! Index of the element following the last ready task in the deque.
-    /** Modified by the owner thread. **/
-    __TBB_atomic size_t tail;
+        for (auto& elem : m_reference_vertex_map) {
+            d1::reference_vertex*& node = elem.second;
+            node->~reference_vertex();
+            cache_aligned_deallocate(node);
+            poison_pointer(node);
+        }

-    //! Capacity of the primary task pool (number of elements - pointers to task).
- size_t my_task_pool_size; + poison_pointer(m_thread_data); + poison_pointer(m_suspend_point); + } - // Task pool of the scheduler that owns this slot - task* *__TBB_atomic task_pool_ptr; + template <typename Waiter> + d1::task* local_wait_for_all(d1::task* t, Waiter& waiter); -#if __TBB_STATISTICS - //! Set of counters to accumulate internal statistics related to this arena - statistics_counters *my_counters; -#endif /* __TBB_STATISTICS */ -}; + bool allow_fifo_task(bool new_state) { + bool old_state = m_properties.fifo_tasks_allowed; + m_properties.fifo_tasks_allowed = new_state; + return old_state; + } -struct arena_slot : padded<arena_slot_line1>, padded<arena_slot_line2> { -#if TBB_USE_ASSERT - void fill_with_canary_pattern ( size_t first, size_t last ) { - for ( size_t i = first; i < last; ++i ) - poison_pointer(task_pool_ptr[i]); + isolation_type set_isolation(isolation_type isolation) { + isolation_type prev = m_execute_data_ext.isolation; + m_execute_data_ext.isolation = isolation; + return prev; } -#else - void fill_with_canary_pattern ( size_t, size_t ) {} -#endif /* TBB_USE_ASSERT */ - - void allocate_task_pool( size_t n ) { - size_t byte_size = ((n * sizeof(task*) + NFS_MaxLineSize - 1) / NFS_MaxLineSize) * NFS_MaxLineSize; - my_task_pool_size = byte_size / sizeof(task*); - task_pool_ptr = (task**)NFS_Allocate( 1, byte_size, NULL ); - // No need to clear the fresh deque since valid items are designated by the head and tail members. - // But fill it with a canary pattern in the high vigilance debug mode. - fill_with_canary_pattern( 0, my_task_pool_size ); + + thread_data& get_thread_data() { + __TBB_ASSERT(m_thread_data, nullptr); + return *m_thread_data; } - //! Deallocate task pool that was allocated by means of allocate_task_pool. - void free_task_pool( ) { -#if !__TBB_TASK_ARENA - __TBB_ASSERT( !task_pool /*TODO: == EmptyTaskPool*/, NULL); -#else - //TODO: understand the assertion and modify -#endif - if( task_pool_ptr ) { - __TBB_ASSERT( my_task_pool_size, NULL); - NFS_Free( task_pool_ptr ); - task_pool_ptr = NULL; - my_task_pool_size = 0; - } + static void execute_and_wait(d1::task* t, d1::wait_context& wait_ctx, d1::task_group_context& w_ctx); + + void set_stealing_threshold(std::uintptr_t stealing_threshold) { + bool assert_condition = (stealing_threshold == 0 && m_stealing_threshold != 0) || + (stealing_threshold != 0 && m_stealing_threshold == 0); + __TBB_ASSERT_EX( assert_condition, nullptr ); + m_stealing_threshold = stealing_threshold; } + + d1::task* get_inbox_or_critical_task(execution_data_ext&, mail_inbox&, isolation_type, bool); + d1::task* get_stream_or_critical_task(execution_data_ext&, arena&, task_stream<front_accessor>&, + unsigned& /*hint_for_stream*/, isolation_type, + bool /*critical_allowed*/); + d1::task* steal_or_get_critical(execution_data_ext&, arena&, unsigned /*arena_index*/, FastRandom&, + isolation_type, bool /*critical_allowed*/); + +#if __TBB_RESUMABLE_TASKS + /* [[noreturn]] */ void co_local_wait_for_all() noexcept; + void suspend(suspend_callback_type suspend_callback, void* user_callback); + void internal_suspend(); + void do_post_resume_action(); + + bool resume(task_dispatcher& target); + suspend_point_type* get_suspend_point(); + void init_suspend_point(arena* a, std::size_t stack_size); + friend void internal_resume(suspend_point_type*); + void recall_point(); +#endif /* __TBB_RESUMABLE_TASKS */ }; -#if !__TBB_CPU_CTL_ENV_PRESENT -class cpu_ctl_env { - fenv_t *my_fenv_ptr; -public: - cpu_ctl_env() : my_fenv_ptr(NULL) {} - 
~cpu_ctl_env() { - if ( my_fenv_ptr ) - tbb::internal::NFS_Free( (void*)my_fenv_ptr ); - } - // It is possible not to copy memory but just to copy pointers but the following issues should be addressed: - // 1. The arena lifetime and the context lifetime are independent; - // 2. The user is allowed to recapture different FPU settings to context so 'current FPU settings' inside - // dispatch loop may become invalid. - // But do we really want to improve the fenv implementation? It seems to be better to replace the fenv implementation - // with a platform specific implementation. - cpu_ctl_env( const cpu_ctl_env &src ) : my_fenv_ptr(NULL) { - *this = src; - } - cpu_ctl_env& operator=( const cpu_ctl_env &src ) { - __TBB_ASSERT( src.my_fenv_ptr, NULL ); - if ( !my_fenv_ptr ) - my_fenv_ptr = (fenv_t*)tbb::internal::NFS_Allocate(1, sizeof(fenv_t), NULL); - *my_fenv_ptr = *src.my_fenv_ptr; - return *this; - } - bool operator!=( const cpu_ctl_env &ctl ) const { - __TBB_ASSERT( my_fenv_ptr, "cpu_ctl_env is not initialized." ); - __TBB_ASSERT( ctl.my_fenv_ptr, "cpu_ctl_env is not initialized." ); - return memcmp( (void*)my_fenv_ptr, (void*)ctl.my_fenv_ptr, sizeof(fenv_t) ); - } - void get_env () { - if ( !my_fenv_ptr ) - my_fenv_ptr = (fenv_t*)tbb::internal::NFS_Allocate(1, sizeof(fenv_t), NULL); - fegetenv( my_fenv_ptr ); - } - const cpu_ctl_env& set_env () const { - __TBB_ASSERT( my_fenv_ptr, "cpu_ctl_env is not initialized." ); - fesetenv( my_fenv_ptr ); - return *this; - } +#if _MSC_VER && !defined(__INTEL_COMPILER) +// #pragma warning( pop ) +#endif + +inline std::uintptr_t calculate_stealing_threshold(std::uintptr_t base, std::size_t stack_size) { + __TBB_ASSERT(stack_size != 0, "Stack size cannot be zero"); + __TBB_ASSERT(base > stack_size / 2, "Stack anchor calculation overflow"); + return base - stack_size / 2; +} + +struct task_group_context_impl { + static void destroy(d1::task_group_context&); + static void initialize(d1::task_group_context&); + static void register_with(d1::task_group_context&, thread_data*); + static void bind_to_impl(d1::task_group_context&, thread_data*); + static void bind_to(d1::task_group_context&, thread_data*); + static void propagate_task_group_state(d1::task_group_context&, std::atomic<uint32_t> d1::task_group_context::*, d1::task_group_context&, uint32_t); + static bool cancel_group_execution(d1::task_group_context&); + static bool is_group_execution_cancelled(const d1::task_group_context&); + static void reset(d1::task_group_context&); + static void capture_fp_settings(d1::task_group_context&); + static void copy_fp_settings(d1::task_group_context& ctx, const d1::task_group_context& src); }; -#endif /* !__TBB_CPU_CTL_ENV_PRESENT */ -} // namespace internal + +//! Forward declaration for scheduler entities +bool gcc_rethrow_exception_broken(); +void fix_broken_rethrow(); +//! Forward declaration: throws std::runtime_error with what() returning error_code description prefixed with aux_info +void handle_perror(int error_code, const char* aux_info); + +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_scheduler_common_H */ diff --git a/src/tbb/src/tbb/scheduler_utility.h b/src/tbb/src/tbb/scheduler_utility.h deleted file mode 100644 index 5e3227543..000000000 --- a/src/tbb/src/tbb/scheduler_utility.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _TBB_scheduler_utility_H -#define _TBB_scheduler_utility_H - -#include "scheduler.h" - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// auto_empty_task -//------------------------------------------------------------------------ - -//! Smart holder for the empty task class with automatic destruction -class auto_empty_task { - task* my_task; - generic_scheduler* my_scheduler; -public: - auto_empty_task ( __TBB_CONTEXT_ARG(generic_scheduler *s, task_group_context* context) ) - : my_task( new(&s->allocate_task(sizeof(empty_task), __TBB_CONTEXT_ARG(NULL, context))) empty_task ) - , my_scheduler(s) - {} - // empty_task has trivial destructor, so there's no need to call it. - ~auto_empty_task () { my_scheduler->free_task<small_local_task>(*my_task); } - - operator task& () { return *my_task; } - task* operator & () { return my_task; } - task_prefix& prefix () { return my_task->prefix(); } -}; // class auto_empty_task - -//------------------------------------------------------------------------ -// fast_reverse_vector -//------------------------------------------------------------------------ - -//! Vector that grows without reallocations, and stores items in the reverse order. -/** Requires to initialize its first segment with a preallocated memory chunk - (usually it is static array or an array allocated on the stack). - The second template parameter specifies maximal number of segments. Each next - segment is twice as large as the previous one. 
**/ -template<typename T, size_t max_segments = 16> -class fast_reverse_vector -{ -public: - fast_reverse_vector ( T* initial_segment, size_t segment_size ) - : m_cur_segment(initial_segment) - , m_cur_segment_size(segment_size) - , m_pos(segment_size) - , m_num_segments(0) - , m_size(0) - { - __TBB_ASSERT ( initial_segment && segment_size, "Nonempty initial segment must be supplied"); - } - - ~fast_reverse_vector () - { - for ( size_t i = 1; i < m_num_segments; ++i ) - NFS_Free( m_segments[i] ); - } - - size_t size () const { return m_size + m_cur_segment_size - m_pos; } - - void push_back ( const T& val ) - { - if ( !m_pos ) { - if ( !m_num_segments ) m_segments[m_num_segments++] = m_cur_segment; - m_size += m_cur_segment_size; - m_cur_segment_size *= 2; - m_pos = m_cur_segment_size; - m_segments[m_num_segments++] = m_cur_segment = (T*)NFS_Allocate( m_cur_segment_size, sizeof(T), NULL ); - __TBB_ASSERT ( m_num_segments < max_segments, "Maximal capacity exceeded" ); - } - m_cur_segment[--m_pos] = val; - } - - //! Copies the contents of the vector into the dst array. - /** Can only be used when T is a POD type, as copying does not invoke copy constructors. **/ - void copy_memory ( T* dst ) const - { - size_t sz = m_cur_segment_size - m_pos; - memcpy( dst, m_cur_segment + m_pos, sz * sizeof(T) ); - dst += sz; - sz = m_cur_segment_size / 2; - for ( long i = (long)m_num_segments - 2; i >= 0; --i ) { - memcpy( dst, m_segments[i], sz * sizeof(T) ); - dst += sz; - sz /= 2; - } - } - -protected: - //! The current (not completely filled) segment - T *m_cur_segment; - - //! Capacity of m_cur_segment - size_t m_cur_segment_size; - - //! Insertion position in m_cur_segment - size_t m_pos; - - //! Array of segments (has fixed size specified by the second template parameter) - T *m_segments[max_segments]; - - //! Number of segments (the size of m_segments) - size_t m_num_segments; - - //! Number of items in the segments in m_segments - size_t m_size; - -}; // class fast_reverse_vector - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_scheduler_utility_H */ diff --git a/src/tbb/src/tbb/semaphore.cpp b/src/tbb/src/tbb/semaphore.cpp index 8eb807ea9..211091d1f 100644 --- a/src/tbb/src/tbb/semaphore.cpp +++ b/src/tbb/src/tbb/semaphore.cpp @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #include "semaphore.h" @@ -25,17 +21,18 @@ #endif namespace tbb { -namespace internal { +namespace detail { +namespace r1 { // TODO: For new win UI port, we can use SRWLock API without dynamic_link etc. #if __TBB_USE_SRWLOCK -static atomic<do_once_state> concmon_module_inited; +static std::atomic<do_once_state> concmon_module_inited; void WINAPI init_binsem_using_event( SRWLOCK* h_ ) { srwl_or_handle* shptr = (srwl_or_handle*) h_; - shptr->h = CreateEventEx( NULL, NULL, 0, EVENT_ALL_ACCESS|SEMAPHORE_ALL_ACCESS ); + shptr->h = CreateEventEx( nullptr, nullptr, 0, EVENT_ALL_ACCESS|SEMAPHORE_ALL_ACCESS ); } void WINAPI acquire_binsem_using_event( SRWLOCK* h_ ) @@ -63,18 +60,18 @@ static const dynamic_link_descriptor SRWLLinkTable[] = { inline void init_concmon_module() { - __TBB_ASSERT( (uintptr_t)__TBB_init_binsem==(uintptr_t)&init_binsem_using_event, NULL ); + __TBB_ASSERT( (uintptr_t)__TBB_init_binsem==(uintptr_t)&init_binsem_using_event, nullptr); if( dynamic_link( "Kernel32.dll", SRWLLinkTable, sizeof(SRWLLinkTable)/sizeof(dynamic_link_descriptor) ) ) { - __TBB_ASSERT( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event, NULL ); - __TBB_ASSERT( (uintptr_t)__TBB_acquire_binsem!=(uintptr_t)&acquire_binsem_using_event, NULL ); - __TBB_ASSERT( (uintptr_t)__TBB_release_binsem!=(uintptr_t)&release_binsem_using_event, NULL ); + __TBB_ASSERT( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event, nullptr); + __TBB_ASSERT( (uintptr_t)__TBB_acquire_binsem!=(uintptr_t)&acquire_binsem_using_event, nullptr); + __TBB_ASSERT( (uintptr_t)__TBB_release_binsem!=(uintptr_t)&release_binsem_using_event, nullptr); } } binary_semaphore::binary_semaphore() { atomic_do_once( &init_concmon_module, concmon_module_inited ); - __TBB_init_binsem( &my_sem.lock ); + __TBB_init_binsem( &my_sem.lock ); if( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event ) P(); } @@ -90,5 +87,6 @@ void binary_semaphore::V() { __TBB_release_binsem( &my_sem.lock ); } #endif /* __TBB_USE_SRWLOCK */ -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb diff --git a/src/tbb/src/tbb/semaphore.h b/src/tbb/src/tbb/semaphore.h index 54b505f1a..9d27f3ac9 100644 --- a/src/tbb/src/tbb/semaphore.h +++ b/src/tbb/src/tbb/semaphore.h @@ -1,48 +1,127 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -#ifndef __TBB_tbb_semaphore_H -#define __TBB_tbb_semaphore_H +#ifndef __TBB_semaphore_H +#define __TBB_semaphore_H -#include "tbb/tbb_stddef.h" +#include "oneapi/tbb/detail/_utils.h" #if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" - +#include <windows.h> #elif __APPLE__ -#include <mach/semaphore.h> -#include <mach/task.h> -#include <mach/mach_init.h> -#include <mach/error.h> - +#include <dispatch/dispatch.h> #else #include <semaphore.h> #ifdef TBB_USE_DEBUG -#include <errno.h> +#include <cerrno> #endif #endif /*_WIN32||_WIN64*/ +#include <atomic> + +#if __unix__ +#if defined(__has_include) +#define __TBB_has_include __has_include +#else +#define __TBB_has_include(x) 0 +#endif + +/* Futex definitions */ +#include <unistd.h> +#if defined(__linux__) || __TBB_has_include(<sys/syscall.h>) +#include <sys/syscall.h> +#endif + +#if defined(SYS_futex) + +/* This section is included for Linux and some other systems that may support futexes.*/ + +#define __TBB_USE_FUTEX 1 + +/* +If available, use typical headers where futex API is defined. While Linux and OpenBSD +are known to provide such headers, other systems might have them as well. +*/ +#if defined(__linux__) || __TBB_has_include(<linux/futex.h>) +#include <linux/futex.h> +#elif defined(__OpenBSD__) || __TBB_has_include(<sys/futex.h>) +#include <sys/futex.h> +#endif + +#include <climits> +#include <cerrno> + +/* +Some systems might not define the macros or use different names. In such case we expect +the actual parameter values to match Linux: 0 for wait, 1 for wake. 
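+The _PRIVATE variants, where available, additionally tell the kernel that the futex
+word is not shared between processes, which lets it skip cross-process bookkeeping;
+the plain FUTEX_WAIT/FUTEX_WAKE values are a functionally equivalent fallback.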
+*/ +#if defined(FUTEX_WAIT_PRIVATE) +#define __TBB_FUTEX_WAIT FUTEX_WAIT_PRIVATE +#elif defined(FUTEX_WAIT) +#define __TBB_FUTEX_WAIT FUTEX_WAIT +#else +#define __TBB_FUTEX_WAIT 0 +#endif + +#if defined(FUTEX_WAKE_PRIVATE) +#define __TBB_FUTEX_WAKE FUTEX_WAKE_PRIVATE +#elif defined(FUTEX_WAKE) +#define __TBB_FUTEX_WAKE FUTEX_WAKE +#else +#define __TBB_FUTEX_WAKE 1 +#endif + +#endif // SYS_futex +#endif // __unix__ + namespace tbb { -namespace internal { +namespace detail { +namespace r1 { + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Futex implementation +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#if __TBB_USE_FUTEX + +static inline int futex_wait( void *futex, int comparand ) { + int r = ::syscall(SYS_futex, futex, __TBB_FUTEX_WAIT, comparand, nullptr, nullptr, 0); +#if TBB_USE_ASSERT + int e = errno; + __TBB_ASSERT(r == 0 || r == EWOULDBLOCK || (r == -1 && (e == EAGAIN || e == EINTR)), "futex_wait failed."); +#endif /* TBB_USE_ASSERT */ + return r; +} + +static inline int futex_wakeup_one( void *futex ) { + int r = ::syscall(SYS_futex, futex, __TBB_FUTEX_WAKE, 1, nullptr, nullptr, 0); + __TBB_ASSERT(r == 0 || r == 1, "futex_wakeup_one: more than one thread woken up?"); + return r; +} +// Additional possible methods that are not required right now +// static inline int futex_wakeup_all( void *futex ) { +// int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,nullptr,nullptr,0 ); +// __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" ); +// return r; +// } +#endif // __TBB_USE_FUTEX + +//////////////////////////////////////////////////////////////////////////////////////////////////// #if _WIN32||_WIN64 typedef LONG sem_count_t; //! Edsger Dijkstra's counting semaphore @@ -55,12 +134,12 @@ class semaphore : no_copy { ~semaphore() {CloseHandle( sem );} //! wait/acquire void P() {WaitForSingleObjectEx( sem, INFINITE, FALSE );} - //! post/release - void V() {ReleaseSemaphore( sem, 1, NULL );} + //! post/release + void V() {ReleaseSemaphore( sem, 1, nullptr);} private: HANDLE sem; void init_semaphore(size_t start_cnt_) { - sem = CreateSemaphoreEx( NULL, LONG(start_cnt_), max_semaphore_cnt, NULL, 0, SEMAPHORE_ALL_ACCESS ); + sem = CreateSemaphoreEx( nullptr, LONG(start_cnt_), max_semaphore_cnt, nullptr, 0, SEMAPHORE_ALL_ACCESS ); } }; #elif __APPLE__ @@ -68,28 +147,18 @@ class semaphore : no_copy { class semaphore : no_copy { public: //! ctor - semaphore(int start_cnt_ = 0) : sem(start_cnt_) { init_semaphore(start_cnt_); } + semaphore(int start_cnt_ = 0) { my_sem = dispatch_semaphore_create(start_cnt_); } //! dtor - ~semaphore() { - kern_return_t ret = semaphore_destroy( mach_task_self(), sem ); - __TBB_ASSERT_EX( ret==err_none, NULL ); - } + ~semaphore() { dispatch_release(my_sem); } //! wait/acquire - void P() { - int ret; - do { - ret = semaphore_wait( sem ); - } while( ret==KERN_ABORTED ); - __TBB_ASSERT( ret==KERN_SUCCESS, "semaphore_wait() failed" ); + void P() { + std::intptr_t ret = dispatch_semaphore_wait(my_sem, DISPATCH_TIME_FOREVER); + __TBB_ASSERT_EX(ret == 0, "dispatch_semaphore_wait() failed"); } - //! post/release - void V() { semaphore_signal( sem ); } + //! 
post/release + void V() { dispatch_semaphore_signal(my_sem); } private: - semaphore_t sem; - void init_semaphore(int start_cnt_) { - kern_return_t ret = semaphore_create( mach_task_self(), &sem, SYNC_POLICY_FIFO, start_cnt_ ); - __TBB_ASSERT_EX( ret==err_none, "failed to create a semaphore" ); - } + dispatch_semaphore_t my_sem; }; #else /* Linux/Unix */ typedef uint32_t sem_count_t; @@ -102,20 +171,20 @@ class semaphore : no_copy { //! dtor ~semaphore() { int ret = sem_destroy( &sem ); - __TBB_ASSERT_EX( !ret, NULL ); + __TBB_ASSERT_EX( !ret, nullptr); } //! wait/acquire void P() { while( sem_wait( &sem )!=0 ) - __TBB_ASSERT( errno==EINTR, NULL ); + __TBB_ASSERT( errno==EINTR, nullptr); } - //! post/release + //! post/release void V() { sem_post( &sem ); } private: sem_t sem; void init_semaphore(int start_cnt_) { int ret = sem_init( &sem, /*shared among threads*/ 0, start_cnt_ ); - __TBB_ASSERT_EX( !ret, NULL ); + __TBB_ASSERT_EX( !ret, nullptr); } }; #endif /* _WIN32||_WIN64 */ @@ -128,12 +197,12 @@ class semaphore : no_copy { class binary_semaphore : no_copy { public: //! ctor - binary_semaphore() { my_sem = CreateEventEx( NULL, NULL, 0, EVENT_ALL_ACCESS ); } + binary_semaphore() { my_sem = CreateEventEx( nullptr, nullptr, 0, EVENT_ALL_ACCESS ); } //! dtor ~binary_semaphore() { CloseHandle( my_sem ); } //! wait/acquire void P() { WaitForSingleObjectEx( my_sem, INFINITE, FALSE ); } - //! post/release + //! post/release void V() { SetEvent( my_sem ); } private: HANDLE my_sem; @@ -154,7 +223,7 @@ class binary_semaphore : no_copy { ~binary_semaphore(); //! wait/acquire void P(); - //! post/release + //! post/release void V(); private: srwl_or_handle my_sem; @@ -162,35 +231,13 @@ class binary_semaphore : no_copy { #endif /* !__TBB_USE_SRWLOCK */ #elif __APPLE__ //! binary_semaphore for concurrent monitor -class binary_semaphore : no_copy { -public: - //! ctor - binary_semaphore() : my_sem(0) { - kern_return_t ret = semaphore_create( mach_task_self(), &my_sem, SYNC_POLICY_FIFO, 0 ); - __TBB_ASSERT_EX( ret==err_none, "failed to create a semaphore" ); - } - //! dtor - ~binary_semaphore() { - kern_return_t ret = semaphore_destroy( mach_task_self(), my_sem ); - __TBB_ASSERT_EX( ret==err_none, NULL ); - } - //! wait/acquire - void P() { - int ret; - do { - ret = semaphore_wait( my_sem ); - } while( ret==KERN_ABORTED ); - __TBB_ASSERT( ret==KERN_SUCCESS, "semaphore_wait() failed" ); - } - //! post/release - void V() { semaphore_signal( my_sem ); } -private: - semaphore_t my_sem; -}; +using binary_semaphore = semaphore; #else /* Linux/Unix */ #if __TBB_USE_FUTEX class binary_semaphore : no_copy { +// The implementation is equivalent to the "Mutex, Take 3" one +// in the paper "Futexes Are Tricky" by Ulrich Drepper public: //! ctor binary_semaphore() { my_sem = 1; } @@ -198,27 +245,24 @@ class binary_semaphore : no_copy { ~binary_semaphore() {} //! wait/acquire void P() { - int s; - if( (s = my_sem.compare_and_swap( 1, 0 ))!=0 ) { + int s = 0; + if( !my_sem.compare_exchange_strong( s, 1 ) ) { if( s!=2 ) - s = my_sem.fetch_and_store( 2 ); - while( s!=0 ) { + s = my_sem.exchange( 2 ); + while( s!=0 ) { // This loop deals with spurious wakeup futex_wait( &my_sem, 2 ); - s = my_sem.fetch_and_store( 2 ); + s = my_sem.exchange( 2 ); } } } - //! post/release - void V() { - __TBB_ASSERT( my_sem>=1, "multiple V()'s in a row?" ); - if( my_sem--!=1 ) { - //if old value was 2 - my_sem = 0; + //! post/release + void V() { + __TBB_ASSERT( my_sem.load(std::memory_order_relaxed)>=1, "multiple V()'s in a row?" 
);
+        if( my_sem.exchange( 0 )==2 )
            futex_wakeup_one( &my_sem );
-        }
    }
private:
-    atomic<int> my_sem;
+    std::atomic<int> my_sem; // 0 - open; 1 - closed, no waits; 2 - closed, possible waits
};
#else
typedef uint32_t sem_count_t;
@@ -228,19 +272,19 @@ class binary_semaphore : no_copy {
    //! ctor
    binary_semaphore() {
        int ret = sem_init( &my_sem, /*shared among threads*/ 0, 0 );
-        __TBB_ASSERT_EX( !ret, NULL );
+        __TBB_ASSERT_EX( !ret, nullptr);
    }
    //! dtor
    ~binary_semaphore() {
        int ret = sem_destroy( &my_sem );
-        __TBB_ASSERT_EX( !ret, NULL );
+        __TBB_ASSERT_EX( !ret, nullptr);
    }
    //! wait/acquire
    void P() {
        while( sem_wait( &my_sem )!=0 )
-            __TBB_ASSERT( errno==EINTR, NULL );
+            __TBB_ASSERT( errno==EINTR, nullptr);
    }
-    //! post/release
+    //! post/release
    void V() { sem_post( &my_sem ); }
private:
    sem_t my_sem;
@@ -248,7 +292,8 @@ class binary_semaphore : no_copy {
#endif /* __TBB_USE_FUTEX */
#endif /* _WIN32||_WIN64 */

-} // namespace internal
+} // namespace r1
+} // namespace detail
} // namespace tbb

-#endif /* __TBB_tbb_semaphore_H */
+#endif /* __TBB_semaphore_H */
diff --git a/src/tbb/src/tbb/small_object_pool.cpp b/src/tbb/src/tbb/small_object_pool.cpp
new file mode 100644
index 000000000..28d11d011
--- /dev/null
+++ b/src/tbb/src/tbb/small_object_pool.cpp
@@ -0,0 +1,154 @@
+/*
+    Copyright (c) 2020-2021 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "oneapi/tbb/cache_aligned_allocator.h"
+#include "oneapi/tbb/detail/_small_object_pool.h"
+#include "oneapi/tbb/detail/_task.h"
+#include "governor.h"
+#include "thread_data.h"
+#include "task_dispatcher.h"
+
+#include <cstddef>
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+small_object_pool_impl::small_object* const small_object_pool_impl::dead_public_list =
+    reinterpret_cast<small_object_pool_impl::small_object*>(1);
+
+void* __TBB_EXPORTED_FUNC allocate(d1::small_object_pool*& allocator, std::size_t number_of_bytes, const d1::execution_data& ed) {
+    auto& tls = static_cast<const execution_data_ext&>(ed).task_disp->get_thread_data();
+    auto pool = tls.my_small_object_pool;
+    return pool->allocate_impl(allocator, number_of_bytes);
+}
+
+void* __TBB_EXPORTED_FUNC allocate(d1::small_object_pool*& allocator, std::size_t number_of_bytes) {
+    // TODO: optimize if the allocator contains a valid pool.
+    auto tls = governor::get_thread_data();
+    auto pool = tls->my_small_object_pool;
+    return pool->allocate_impl(allocator, number_of_bytes);
+}
+
+void* small_object_pool_impl::allocate_impl(d1::small_object_pool*& allocator, std::size_t number_of_bytes)
+{
+    small_object* obj{nullptr};
+
+    if (number_of_bytes <= small_object_size) {
+        if (m_private_list) {
+            obj = m_private_list;
+            m_private_list = m_private_list->next;
+        } else if (m_public_list.load(std::memory_order_relaxed)) {
+            // No fence required for the read of m_public_list above, because std::atomic::exchange() has a fence.
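+            // The pool keeps two free lists: m_private_list, touched only by the owning
+            // thread, and m_public_list, where other threads return freed objects. One
+            // exchange() drains the entire public list: the first node satisfies this
+            // allocation and the remainder becomes the new private list.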
+            obj = m_public_list.exchange(nullptr);
+            __TBB_ASSERT( obj, "another thread emptied the m_public_list" );
+            m_private_list = obj->next;
+        } else {
+            obj = new (cache_aligned_allocate(small_object_size)) small_object{nullptr};
+            ++m_private_counter;
+        }
+    } else {
+        obj = new (cache_aligned_allocate(number_of_bytes)) small_object{nullptr};
+    }
+    allocator = this;
+
+    // Return uninitialized memory for further construction on user side.
+    obj->~small_object();
+    return obj;
+}
+
+void __TBB_EXPORTED_FUNC deallocate(d1::small_object_pool& allocator, void* ptr, std::size_t number_of_bytes) {
+    auto pool = static_cast<small_object_pool_impl*>(&allocator);
+    auto tls = governor::get_thread_data();
+    pool->deallocate_impl(ptr, number_of_bytes, *tls);
+}
+
+void __TBB_EXPORTED_FUNC deallocate(d1::small_object_pool& allocator, void* ptr, std::size_t number_of_bytes, const d1::execution_data& ed) {
+    auto& tls = static_cast<const execution_data_ext&>(ed).task_disp->get_thread_data();
+    auto pool = static_cast<small_object_pool_impl*>(&allocator);
+    pool->deallocate_impl(ptr, number_of_bytes, tls);
+}
+
+void small_object_pool_impl::deallocate_impl(void* ptr, std::size_t number_of_bytes, thread_data& td) {
+    __TBB_ASSERT(ptr != nullptr, "pointer to deallocate should not be null");
+    __TBB_ASSERT(number_of_bytes >= sizeof(small_object), "number of bytes should be at least sizeof(small_object)");
+
+    if (number_of_bytes <= small_object_size) {
+        auto obj = new (ptr) small_object{nullptr};
+        if (td.my_small_object_pool == this) {
+            obj->next = m_private_list;
+            m_private_list = obj;
+        } else {
+            auto old_public_list = m_public_list.load(std::memory_order_relaxed);
+
+            for (;;) {
+                if (old_public_list == dead_public_list) {
+                    obj->~small_object();
+                    cache_aligned_deallocate(obj);
+                    if (++m_public_counter == 0)
+                    {
+                        this->~small_object_pool_impl();
+                        cache_aligned_deallocate(this);
+                    }
+                    break;
+                }
+                obj->next = old_public_list;
+                if (m_public_list.compare_exchange_strong(old_public_list, obj)) {
+                    break;
+                }
+            }
+        }
+    } else {
+        cache_aligned_deallocate(ptr);
+    }
+}
+
+std::int64_t small_object_pool_impl::cleanup_list(small_object* list)
+{
+    std::int64_t removed_count{};
+
+    while (list) {
+        small_object* current = list;
+        list = list->next;
+        current->~small_object();
+        cache_aligned_deallocate(current);
+        ++removed_count;
+    }
+    return removed_count;
+}
+
+void small_object_pool_impl::destroy()
+{
+    // clean up private list and subtract the removed count from private counter
+    m_private_counter -= cleanup_list(m_private_list);
+    // Grab public list and place dead mark
+    small_object* public_list = m_public_list.exchange(dead_public_list);
+    // clean up public list and subtract from private (intentionally) counter
+    m_private_counter -= cleanup_list(public_list);
+    __TBB_ASSERT(m_private_counter >= 0, "Private counter may not be less than 0");
+    // Equivalent to m_public_counter.fetch_sub(m_private_counter) - m_private_counter,
+    // but done with operator-= so that m_private_counter is not accessed after the subtraction.
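+    // Whichever thread drives the public counter to zero (here, or a racing
+    // deallocate_impl() that encounters the dead_public_list mark) frees the pool itself.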
+ auto new_value = m_public_counter -= m_private_counter; + // check if this method is responsible to clean up the resources + if (new_value == 0) { + this->~small_object_pool_impl(); + cache_aligned_deallocate(this); + } +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/small_object_pool_impl.h b/src/tbb/src/tbb/small_object_pool_impl.h new file mode 100644 index 000000000..a6b664bea --- /dev/null +++ b/src/tbb/src/tbb/small_object_pool_impl.h @@ -0,0 +1,59 @@ +/* + Copyright (c) 2020-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_small_object_pool_impl_H +#define __TBB_small_object_pool_impl_H + +#include "oneapi/tbb/detail/_small_object_pool.h" +#include "oneapi/tbb/detail/_utils.h" + +#include <cstddef> +#include <cstdint> +#include <atomic> + + +namespace tbb { +namespace detail { +namespace r1 { + +class thread_data; + +class small_object_pool_impl : public d1::small_object_pool +{ + static constexpr std::size_t small_object_size = 256; + struct small_object { + small_object* next; + }; + static small_object* const dead_public_list; +public: + void* allocate_impl(small_object_pool*& allocator, std::size_t number_of_bytes); + void deallocate_impl(void* ptr, std::size_t number_of_bytes, thread_data& td); + void destroy(); +private: + static std::int64_t cleanup_list(small_object* list); + ~small_object_pool_impl() = default; +private: + alignas(max_nfs_size) small_object* m_private_list; + std::int64_t m_private_counter{}; + alignas(max_nfs_size) std::atomic<small_object*> m_public_list; + std::atomic<std::int64_t> m_public_counter{}; +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif /* __TBB_small_object_pool_impl_H */ diff --git a/src/tbb/src/tbb/spin_mutex.cpp b/src/tbb/src/tbb/spin_mutex.cpp deleted file mode 100644 index 6f516268c..000000000 --- a/src/tbb/src/tbb/spin_mutex.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_machine.h" -#include "tbb/spin_mutex.h" -#include "itt_notify.h" -#include "tbb_misc.h" - -namespace tbb { - -void spin_mutex::scoped_lock::internal_acquire( spin_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" ); - ITT_NOTIFY(sync_prepare, &m); - __TBB_LockByte(m.flag); - my_mutex = &m; - ITT_NOTIFY(sync_acquired, &m); -} - -void spin_mutex::scoped_lock::internal_release() { - __TBB_ASSERT( my_mutex, "release on spin_mutex::scoped_lock that is not holding a lock" ); - - ITT_NOTIFY(sync_releasing, my_mutex); - __TBB_UnlockByte(my_mutex->flag); - my_mutex = NULL; -} - -bool spin_mutex::scoped_lock::internal_try_acquire( spin_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" ); - bool result = bool( __TBB_TryLockByte(m.flag) ); - if( result ) { - my_mutex = &m; - ITT_NOTIFY(sync_acquired, &m); - } - return result; -} - -void spin_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::spin_mutex"), _T("")); -} - -} // namespace tbb diff --git a/src/tbb/src/tbb/spin_rw_mutex.cpp b/src/tbb/src/tbb/spin_rw_mutex.cpp deleted file mode 100644 index ffc4a9222..000000000 --- a/src/tbb/src/tbb/spin_rw_mutex.cpp +++ /dev/null @@ -1,159 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/spin_rw_mutex.h" -#include "tbb/tbb_machine.h" -#include "tbb/atomic.h" -#include "itt_notify.h" - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4244) -#endif - -namespace tbb { - -template<typename T> // a template can work with private spin_rw_mutex::state_t -static inline T CAS(volatile T &addr, T newv, T oldv) { - // ICC (9.1 and 10.1 tried) unable to do implicit conversion - // from "volatile T*" to "volatile void*", so explicit cast added. - return tbb::internal::as_atomic(addr).compare_and_swap( newv, oldv ); -} - -//! Acquire write lock on the given mutex. -bool spin_rw_mutex_v3::internal_acquire_writer() -{ - ITT_NOTIFY(sync_prepare, this); - for( internal::atomic_backoff backoff;;backoff.pause() ){ - state_t s = const_cast<volatile state_t&>(state); // ensure reloading - if( !(s & BUSY) ) { // no readers, no writers - if( CAS(state, WRITER, s)==s ) - break; // successfully stored writer flag - backoff.reset(); // we could be very close to complete op. - } else if( !(s & WRITER_PENDING) ) { // no pending writers - __TBB_AtomicOR(&state, WRITER_PENDING); - } - } - ITT_NOTIFY(sync_acquired, this); - return false; -} - -//! Release writer lock on the given mutex -void spin_rw_mutex_v3::internal_release_writer() -{ - ITT_NOTIFY(sync_releasing, this); - __TBB_AtomicAND( &state, READERS ); -} - -//! Acquire read lock on given mutex. -void spin_rw_mutex_v3::internal_acquire_reader() -{ - ITT_NOTIFY(sync_prepare, this); - for( internal::atomic_backoff b;;b.pause() ){ - state_t s = const_cast<volatile state_t&>(state); // ensure reloading - if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests - state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER ); - if( !( t&WRITER )) - break; // successfully stored increased number of readers - // writer got there first, undo the increment - __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER ); - } - } - - ITT_NOTIFY(sync_acquired, this); - __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" ); -} - -//! Upgrade reader to become a writer. -/** Returns whether the upgrade happened without releasing and re-acquiring the lock */ -bool spin_rw_mutex_v3::internal_upgrade() -{ - state_t s = state; - __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " ); - // check and set writer-pending flag - // required conditions: either no pending writers, or we are the only reader - // (with multiple readers and pending writer, another upgrade could have been requested) - while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) { - state_t old_s = s; - if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) { - ITT_NOTIFY(sync_prepare, this); - internal::atomic_backoff backoff; - while( (state & READERS) != ONE_READER ) backoff.pause(); - __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),"invalid state when upgrading to writer"); - // both new readers and writers are blocked at this time - __TBB_FetchAndAddW( &state, - (intptr_t)(ONE_READER+WRITER_PENDING)); - ITT_NOTIFY(sync_acquired, this); - return true; // successfully upgraded - } - } - // slow reacquire - internal_release_reader(); - return internal_acquire_writer(); // always returns false -} - -//! 
Downgrade writer to a reader -void spin_rw_mutex_v3::internal_downgrade() { - ITT_NOTIFY(sync_releasing, this); - __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER)); - __TBB_ASSERT( state & READERS, "invalid state after downgrade: no readers" ); -} - -//! Release read lock on the given mutex -void spin_rw_mutex_v3::internal_release_reader() -{ - __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" ); - ITT_NOTIFY(sync_releasing, this); // release reader - __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER); -} - -//! Try to acquire write lock on the given mutex -bool spin_rw_mutex_v3::internal_try_acquire_writer() -{ - // for a writer: only possible to acquire if no active readers or writers - state_t s = state; - if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101 - if( CAS(state, WRITER, s)==s ) { - ITT_NOTIFY(sync_acquired, this); - return true; // successfully stored writer flag - } - return false; -} - -//! Try to acquire read lock on the given mutex -bool spin_rw_mutex_v3::internal_try_acquire_reader() -{ - // for a reader: acquire if no active or waiting writers - state_t s = state; - if( !(s & (WRITER|WRITER_PENDING)) ) { // no writers - state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER ); - if( !( t&WRITER )) { // got the lock - ITT_NOTIFY(sync_acquired, this); - return true; // successfully stored increased number of readers - } - // writer got there first, undo the increment - __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER ); - } - return false; -} - -void spin_rw_mutex_v3::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::spin_rw_mutex"), _T("")); -} -} // namespace tbb diff --git a/src/tbb/src/tbb/task.cpp b/src/tbb/src/tbb/task.cpp index 26b3ba1cc..84b4278f0 100644 --- a/src/tbb/src/tbb/task.cpp +++ b/src/tbb/src/tbb/task.cpp @@ -1,276 +1,257 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ // Do not include task.h directly. Use scheduler_common.h instead #include "scheduler_common.h" #include "governor.h" -#include "scheduler.h" +#include "arena.h" +#include "thread_data.h" +#include "task_dispatcher.h" +#include "waiters.h" #include "itt_notify.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/partitioner.h" +#include "oneapi/tbb/detail/_task.h" +#include "oneapi/tbb/partitioner.h" +#include "oneapi/tbb/task.h" -#include <new> +#include <cstring> namespace tbb { - -using namespace std; - -namespace internal { +namespace detail { +namespace r1 { //------------------------------------------------------------------------ -// Methods of allocate_root_proxy +// resumable tasks //------------------------------------------------------------------------ -task& allocate_root_proxy::allocate( size_t size ) { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" ); -#if __TBB_TASK_GROUP_CONTEXT - task_prefix& p = v->my_innermost_running_task->prefix(); - - ITT_STACK_CREATE(p.context->itt_caller); -#endif - // New root task becomes part of the currently running task's cancellation context - return v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, p.context) ); -} +#if __TBB_RESUMABLE_TASKS -void allocate_root_proxy::free( task& task ) { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" ); -#if __TBB_TASK_GROUP_CONTEXT - // No need to do anything here as long as there is no context -> task connection -#endif /* __TBB_TASK_GROUP_CONTEXT */ - v->free_task<local_task>( task ); +void suspend(suspend_callback_type suspend_callback, void* user_callback) { + thread_data& td = *governor::get_thread_data(); + td.my_task_dispatcher->suspend(suspend_callback, user_callback); + // Do not access td after suspend. } -#if __TBB_TASK_GROUP_CONTEXT -//------------------------------------------------------------------------ -// Methods of allocate_root_with_context_proxy -//------------------------------------------------------------------------ -task& allocate_root_with_context_proxy::allocate( size_t size ) const { - internal::generic_scheduler* s = governor::local_scheduler(); - __TBB_ASSERT( s, "Scheduler auto-initialization failed?" ); - task& t = s->allocate_task( size, NULL, &my_context ); - // Supported usage model prohibits concurrent initial binding. Thus we do not - // need interlocked operations or fences to manipulate with my_context.my_kind - if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::binding_required ) { - // If we are in the outermost task dispatch loop of a master thread, then - // there is nothing to bind this context to, and we skip the binding part - // treating the context as isolated. 
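The r1::suspend() entry point introduced above backs oneTBB's resumable-tasks API (tbb::task::suspend / tbb::task::resume, available where __TBB_RESUMABLE_TASKS is enabled). A hedged usage sketch; async_send() is a hypothetical application-side asynchronous API, not part of TBB:

#include <oneapi/tbb/task.h>

void async_send(void (*on_done)(void*), void* tag);  // hypothetical async API

void wait_for_reply() {
    tbb::task::suspend([](tbb::task::suspend_point sp) {
        // The worker thread does not block: this coroutine is parked and the
        // thread returns to the dispatch loop to execute other tasks.
        async_send([](void* tag) {
            tbb::task::resume(static_cast<tbb::task::suspend_point>(tag));
        }, sp);
    });
    // Control resumes here once tbb::task::resume(sp) has been called.
}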
- if ( s->my_innermost_running_task == s->my_dummy_task ) - __TBB_store_relaxed(my_context.my_kind, task_group_context::isolated); - else - my_context.bind_to( s ); +void resume(suspend_point_type* sp) { + assert_pointers_valid(sp, sp->m_arena); + task_dispatcher& task_disp = sp->m_resume_task.m_target; + + if (sp->try_notify_resume()) { + // TODO: remove this work-around + // Prolong the arena's lifetime while all coroutines are alive + // (otherwise the arena can be destroyed while some tasks are suspended). + arena& a = *sp->m_arena; + a.my_references += arena::ref_worker; + + if (task_disp.m_properties.critical_task_allowed) { + // The target is not in the process of executing critical task, so the resume task is not critical. + a.my_resume_task_stream.push(&sp->m_resume_task, random_lane_selector(sp->m_random)); + } else { + #if __TBB_PREVIEW_CRITICAL_TASKS + // The target is in the process of executing critical task, so the resume task is critical. + a.my_critical_task_stream.push(&sp->m_resume_task, random_lane_selector(sp->m_random)); + #endif + } + // Do not access target after that point. + a.advertise_new_work<arena::wakeup>(); + // Release our reference to my_arena. + a.on_thread_leaving(arena::ref_worker); } -#if __TBB_FP_CONTEXT - if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::isolated && - !(my_context.my_version_and_traits & task_group_context::fp_settings) ) - my_context.copy_fp_settings( *s->my_arena->my_default_ctx ); -#endif - ITT_STACK_CREATE(my_context.itt_caller); - return t; -} -void allocate_root_with_context_proxy::free( task& task ) const { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" ); - // No need to do anything here as long as unbinding is performed by context destructor only. - v->free_task<local_task>( task ); } -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//------------------------------------------------------------------------ -// Methods of allocate_continuation_proxy -//------------------------------------------------------------------------ -task& allocate_continuation_proxy::allocate( size_t size ) const { - task& t = *((task*)this); - assert_task_valid(t); - generic_scheduler* s = governor::local_scheduler(); - task* parent = t.parent(); - t.prefix().parent = NULL; - return s->allocate_task( size, __TBB_CONTEXT_ARG(parent, t.prefix().context) ); +suspend_point_type* current_suspend_point() { + thread_data& td = *governor::get_thread_data(); + return td.my_task_dispatcher->get_suspend_point(); } -void allocate_continuation_proxy::free( task& mytask ) const { - // Restore the parent as it was before the corresponding allocate was called. - ((task*)this)->prefix().parent = mytask.parent(); - governor::local_scheduler()->free_task<local_task>(mytask); +task_dispatcher& create_coroutine(thread_data& td) { + // We may have some task dispatchers cached + task_dispatcher* task_disp = td.my_arena->my_co_cache.pop(); + if (!task_disp) { + void* ptr = cache_aligned_allocate(sizeof(task_dispatcher)); + task_disp = new(ptr) task_dispatcher(td.my_arena); + task_disp->init_suspend_point(td.my_arena, td.my_arena->my_threading_control->worker_stack_size()); + } + // Prolong the arena's lifetime while all coroutines are alive + // (otherwise the arena can be destroyed while some tasks are suspended). + // TODO: consider behavior if there are more than 4K external references. 
+ td.my_arena->my_references += arena::ref_external; + return *task_disp; } -//------------------------------------------------------------------------ -// Methods of allocate_child_proxy -//------------------------------------------------------------------------ -task& allocate_child_proxy::allocate( size_t size ) const { - task& t = *((task*)this); - assert_task_valid(t); - generic_scheduler* s = governor::local_scheduler(); - return s->allocate_task( size, __TBB_CONTEXT_ARG(&t, t.prefix().context) ); -} +void task_dispatcher::internal_suspend() { + __TBB_ASSERT(m_thread_data != nullptr, nullptr); -void allocate_child_proxy::free( task& mytask ) const { - governor::local_scheduler()->free_task<local_task>(mytask); -} + arena_slot* slot = m_thread_data->my_arena_slot; + __TBB_ASSERT(slot != nullptr, nullptr); -//------------------------------------------------------------------------ -// Methods of allocate_additional_child_of_proxy -//------------------------------------------------------------------------ -task& allocate_additional_child_of_proxy::allocate( size_t size ) const { - parent.increment_ref_count(); - generic_scheduler* s = governor::local_scheduler(); - return s->allocate_task( size, __TBB_CONTEXT_ARG(&parent, parent.prefix().context) ); -} + task_dispatcher& default_task_disp = slot->default_task_dispatcher(); + // TODO: simplify the next line, e.g. is_task_dispatcher_recalled( task_dispatcher& ) + bool is_recalled = default_task_disp.get_suspend_point()->m_is_owner_recalled.load(std::memory_order_acquire); + task_dispatcher& target = is_recalled ? default_task_disp : create_coroutine(*m_thread_data); + + resume(target); -void allocate_additional_child_of_proxy::free( task& task ) const { - // Undo the increment. We do not check the result of the fetch-and-decrement. - // We could consider be spawning the task if the fetch-and-decrement returns 1. - // But we do not know that was the programmer's intention. - // Furthermore, if it was the programmer's intention, the program has a fundamental - // race condition (that we warn about in Reference manual), because the - // reference count might have become zero before the corresponding call to - // allocate_additional_child_of_proxy::allocate. 
- parent.internal_decrement_ref_count(); - governor::local_scheduler()->free_task<local_task>(task); + if (m_properties.outermost) { + recall_point(); + } } -//------------------------------------------------------------------------ -// Support for auto_partitioner -//------------------------------------------------------------------------ -size_t get_initial_auto_partitioner_divisor() { - const size_t X_FACTOR = 4; - return X_FACTOR * (1+governor::local_scheduler()->number_of_workers_in_my_arena()); +void task_dispatcher::suspend(suspend_callback_type suspend_callback, void* user_callback) { + __TBB_ASSERT(suspend_callback != nullptr, nullptr); + __TBB_ASSERT(user_callback != nullptr, nullptr); + suspend_callback(user_callback, get_suspend_point()); + + __TBB_ASSERT(m_thread_data != nullptr, nullptr); + __TBB_ASSERT(m_thread_data->my_post_resume_action == post_resume_action::none, nullptr); + __TBB_ASSERT(m_thread_data->my_post_resume_arg == nullptr, nullptr); + internal_suspend(); } -//------------------------------------------------------------------------ -// Methods of affinity_partitioner_base_v3 -//------------------------------------------------------------------------ -void affinity_partitioner_base_v3::resize( unsigned factor ) { - // Check factor to avoid asking for number of workers while there might be no arena. - size_t new_size = factor ? factor*(1+governor::local_scheduler()->number_of_workers_in_my_arena()) : 0; - if( new_size!=my_size ) { - if( my_array ) { - NFS_Free( my_array ); - // Following two assignments must be done here for sake of exception safety. - my_array = NULL; - my_size = 0; - } - if( new_size ) { - my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL )); - memset( my_array, 0, sizeof(affinity_id)*new_size ); - my_size = new_size; +bool task_dispatcher::resume(task_dispatcher& target) { + // Do not create non-trivial objects on the stack of this function. They might never be destroyed + { + thread_data* td = m_thread_data; + __TBB_ASSERT(&target != this, "We cannot resume to ourself"); + __TBB_ASSERT(td != nullptr, "This task dispatcher must be attached to a thread data"); + __TBB_ASSERT(td->my_task_dispatcher == this, "Thread data must be attached to this task dispatcher"); + + // Change the task dispatcher + td->detach_task_dispatcher(); + td->attach_task_dispatcher(target); + } + __TBB_ASSERT(m_suspend_point != nullptr, "Suspend point must be created"); + __TBB_ASSERT(target.m_suspend_point != nullptr, "Suspend point must be created"); + // Swap to the target coroutine. 
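The resume call below swaps the current execution stack for the target coroutine's stack. A rough, self-contained model of that hand-off, sketched here with POSIX ucontext purely for illustration (oneTBB's co_context uses its own platform-specific primitives, for example Windows fibers):

#include <ucontext.h>
#include <cstdio>

static ucontext_t caller_ctx, coro_ctx;
static char coro_stack[64 * 1024];

static void coro_body() {
    std::puts("coroutine: started, suspending back to caller");
    swapcontext(&coro_ctx, &caller_ctx);   // save self, switch to caller
    std::puts("coroutine: resumed, returning");
}   // on return, control goes to uc_link (caller_ctx)

int main() {
    getcontext(&coro_ctx);
    coro_ctx.uc_stack.ss_sp = coro_stack;
    coro_ctx.uc_stack.ss_size = sizeof(coro_stack);
    coro_ctx.uc_link = &caller_ctx;        // where control goes when coro_body returns
    makecontext(&coro_ctx, coro_body, 0);

    swapcontext(&caller_ctx, &coro_ctx);   // "resume": park caller, run coroutine
    std::puts("caller: coroutine is parked, resuming it");
    swapcontext(&caller_ctx, &coro_ctx);   // resume the parked coroutine
    std::puts("caller: done");
    return 0;
}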
+ + m_suspend_point->resume(target.m_suspend_point); + // Pay attention that m_thread_data can be changed after resume + if (m_thread_data) { + thread_data* td = m_thread_data; + __TBB_ASSERT(td != nullptr, "This task dispatcher must be attached to a thread data"); + __TBB_ASSERT(td->my_task_dispatcher == this, "Thread data must be attached to this task dispatcher"); + do_post_resume_action(); + + // Remove the recall flag if the thread is in its original task dispatcher + arena_slot* slot = td->my_arena_slot; + __TBB_ASSERT(slot != nullptr, nullptr); + if (this == slot->my_default_task_dispatcher) { + __TBB_ASSERT(m_suspend_point != nullptr, nullptr); + m_suspend_point->m_is_owner_recalled.store(false, std::memory_order_relaxed); } + return true; } + return false; } -} // namespace internal - -using namespace tbb::internal; +void task_dispatcher::do_post_resume_action() { + thread_data* td = m_thread_data; + switch (td->my_post_resume_action) { + case post_resume_action::register_waiter: + { + __TBB_ASSERT(td->my_post_resume_arg, "The post resume action must have an argument"); + static_cast<thread_control_monitor::resume_context*>(td->my_post_resume_arg)->notify(); + break; + } + case post_resume_action::cleanup: + { + __TBB_ASSERT(td->my_post_resume_arg, "The post resume action must have an argument"); + task_dispatcher* to_cleanup = static_cast<task_dispatcher*>(td->my_post_resume_arg); + // Release coroutine's reference to my_arena + td->my_arena->on_thread_leaving(arena::ref_external); + // Cache the coroutine for possible later re-usage + td->my_arena->my_co_cache.push(to_cleanup); + break; + } + case post_resume_action::notify: + { + __TBB_ASSERT(td->my_post_resume_arg, "The post resume action must have an argument"); + suspend_point_type* sp = static_cast<suspend_point_type*>(td->my_post_resume_arg); + sp->recall_owner(); + // Do not access sp because it can be destroyed after recall + + auto is_our_suspend_point = [sp] (market_context ctx) { + return std::uintptr_t(sp) == ctx.my_uniq_addr; + }; + td->my_arena->get_waiting_threads_monitor().notify(is_our_suspend_point); + break; + } + default: + __TBB_ASSERT(td->my_post_resume_action == post_resume_action::none, "Unknown post resume action"); + __TBB_ASSERT(td->my_post_resume_arg == nullptr, "The post resume argument should not be set"); + } + td->clear_post_resume_action(); +} -//------------------------------------------------------------------------ -// task -//------------------------------------------------------------------------ +#else -void task::internal_set_ref_count( int count ) { - __TBB_ASSERT( count>=0, "count must not be negative" ); - task_prefix &p = prefix(); - __TBB_ASSERT(p.ref_count==1 && p.state==allocated && self().parent()==this - || !(p.extra_state & es_ref_count_active), "ref_count race detected"); - ITT_NOTIFY(sync_releasing, &p.ref_count); - p.ref_count = count; +void suspend(suspend_callback_type, void*) { + __TBB_ASSERT_RELEASE(false, "Resumable tasks are unsupported on this platform"); } -internal::reference_count task::internal_decrement_ref_count() { - ITT_NOTIFY( sync_releasing, &prefix().ref_count ); - internal::reference_count k = __TBB_FetchAndDecrementWrelease( &prefix().ref_count ); - __TBB_ASSERT( k>=1, "task's reference count underflowed" ); - if( k==1 ) - ITT_NOTIFY( sync_acquired, &prefix().ref_count ); - return k-1; +void resume(suspend_point_type*) { + __TBB_ASSERT_RELEASE(false, "Resumable tasks are unsupported on this platform"); } -task& task::self() { - generic_scheduler *v = 
governor::local_scheduler(); - v->assert_task_pool_valid(); - __TBB_ASSERT( v->my_innermost_running_task, NULL ); - return *v->my_innermost_running_task; +suspend_point_type* current_suspend_point() { + __TBB_ASSERT_RELEASE(false, "Resumable tasks are unsupported on this platform"); + return nullptr; } -bool task::is_owned_by_current_thread() const { - return true; -} +#endif /* __TBB_RESUMABLE_TASKS */ -void interface5::internal::task_base::destroy( task& victim ) { - // 1 may be a guard reference for wait_for_all, which was not reset because - // of concurrent_wait mode or because prepared root task was not actually used - // for spawning tasks (as in structured_task_group). - __TBB_ASSERT( (intptr_t)victim.prefix().ref_count <= 1, "Task being destroyed must not have children" ); - __TBB_ASSERT( victim.state()==task::allocated, "illegal state for victim task" ); - task* parent = victim.parent(); - victim.~task(); - if( parent ) { - __TBB_ASSERT( parent->state()!=task::freed && parent->state()!=task::ready, - "attempt to destroy child of running or corrupted parent?" ); - // 'reexecute' and 'executing' are also signs of a race condition, since most tasks - // set their ref_count upon entry but "es_ref_count_active" should detect this - parent->internal_decrement_ref_count(); - // Even if the last reference to *parent is removed, it should not be spawned (documented behavior). - } - governor::local_scheduler()->free_task<no_cache>( victim ); -} +void notify_waiters(std::uintptr_t wait_ctx_addr) { + auto is_related_wait_ctx = [&] (market_context context) { + return wait_ctx_addr == context.my_uniq_addr; + }; -void task::spawn_and_wait_for_all( task_list& list ) { - generic_scheduler* s = governor::local_scheduler(); - task* t = list.first; - if( t ) { - if( &t->prefix().next!=list.next_ptr ) - s->local_spawn( *t->prefix().next, *list.next_ptr ); - list.clear(); - } - s->local_wait_for_all( *this, t ); + governor::get_thread_data()->my_arena->get_waiting_threads_monitor().notify(is_related_wait_ctx); } -/** Defined out of line so that compiler does not replicate task's vtable. - It's pointless to define it inline anyway, because all call sites to it are virtual calls - that the compiler is unlikely to optimize. 
*/ -void task::note_affinity( affinity_id ) { -} +d1::wait_tree_vertex_interface* get_thread_reference_vertex(d1::wait_tree_vertex_interface* top_wait_context) { + __TBB_ASSERT(top_wait_context, nullptr); + auto& dispatcher = *governor::get_thread_data()->my_task_dispatcher; + + d1::reference_vertex* ref_counter{nullptr}; + auto& reference_map = dispatcher.m_reference_vertex_map; + auto pos = reference_map.find(top_wait_context); + if (pos != reference_map.end()) { + ref_counter = pos->second; + } else { + constexpr std::size_t max_reference_vertex_map_size = 1000; + if (reference_map.size() > max_reference_vertex_map_size) { + // TODO: Research the possibility of using better approach for a clean-up + for (auto it = reference_map.begin(); it != reference_map.end();) { + if (it->second->get_num_child() == 0) { + it->second->~reference_vertex(); + cache_aligned_deallocate(it->second); + it = reference_map.erase(it); + } else { + ++it; + } + } + } -#if __TBB_TASK_GROUP_CONTEXT -void task::change_group ( task_group_context& ctx ) { - prefix().context = &ctx; - internal::generic_scheduler* s = governor::local_scheduler(); - if ( __TBB_load_relaxed(ctx.my_kind) == task_group_context::binding_required ) { - // If we are in the outermost task dispatch loop of a master thread, then - // there is nothing to bind this context to, and we skip the binding part - // treating the context as isolated. - if ( s->my_innermost_running_task == s->my_dummy_task ) - __TBB_store_relaxed(ctx.my_kind, task_group_context::isolated); - else - ctx.bind_to( s ); + reference_map[top_wait_context] = ref_counter = + new (cache_aligned_allocate(sizeof(d1::reference_vertex))) d1::reference_vertex(top_wait_context, 0); } -#if __TBB_FP_CONTEXT - if ( __TBB_load_relaxed(ctx.my_kind) == task_group_context::isolated && - !(ctx.my_version_and_traits & task_group_context::fp_settings) ) - ctx.copy_fp_settings( *s->my_arena->my_default_ctx ); -#endif - ITT_STACK_CREATE(ctx.itt_caller); + + return ref_counter; } -#endif /* __TBB_TASK_GROUP_CONTEXT */ +} // namespace r1 +} // namespace detail } // namespace tbb - diff --git a/src/tbb/src/tbb/task_dispatcher.cpp b/src/tbb/src/tbb/task_dispatcher.cpp new file mode 100644 index 000000000..5ea7d3f53 --- /dev/null +++ b/src/tbb/src/tbb/task_dispatcher.cpp @@ -0,0 +1,244 @@ +/* + Copyright (c) 2020-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "task_dispatcher.h" +#include "waiters.h" + +namespace tbb { +namespace detail { +namespace r1 { + +static inline void spawn_and_notify(d1::task& t, arena_slot* slot, arena* a) { + slot->spawn(t); + a->advertise_new_work<arena::work_spawned>(); + // TODO: TBB_REVAMP_TODO slot->assert_task_pool_valid(); +} + +void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& ctx) { + thread_data* tls = governor::get_thread_data(); + task_group_context_impl::bind_to(ctx, tls); + arena* a = tls->my_arena; + arena_slot* slot = tls->my_arena_slot; + // Capture current context + task_accessor::context(t) = &ctx; + // Mark isolation + task_accessor::isolation(t) = tls->my_task_dispatcher->m_execute_data_ext.isolation; + spawn_and_notify(t, slot, a); +} + +void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& ctx, d1::slot_id id) { + thread_data* tls = governor::get_thread_data(); + task_group_context_impl::bind_to(ctx, tls); + arena* a = tls->my_arena; + arena_slot* slot = tls->my_arena_slot; + execution_data_ext& ed = tls->my_task_dispatcher->m_execute_data_ext; + + // Capture context + task_accessor::context(t) = &ctx; + // Mark isolation + task_accessor::isolation(t) = ed.isolation; + + if ( id != d1::no_slot && id != tls->my_arena_index && id < a->my_num_slots) { + // Allocate proxy task + d1::small_object_allocator alloc{}; + auto proxy = alloc.new_object<task_proxy>(static_cast<d1::execution_data&>(ed)); + // Mark as a proxy + task_accessor::set_proxy_trait(*proxy); + // Mark isolation for the proxy task + task_accessor::isolation(*proxy) = ed.isolation; + // Deallocation hint (tls) from the task allocator + proxy->allocator = alloc; + proxy->slot = id; + proxy->outbox = &a->mailbox(id); + // Mark proxy as present in both locations (sender's task pool and destination mailbox) + proxy->task_and_tag = intptr_t(&t) | task_proxy::location_mask; + // Mail the proxy - after this point t may be destroyed by another thread at any moment. + proxy->outbox->push(proxy); + // Spawn proxy to the local task pool + spawn_and_notify(*proxy, slot, a); + } else { + spawn_and_notify(t, slot, a); + } +} + +void __TBB_EXPORTED_FUNC submit(d1::task& t, d1::task_group_context& ctx, arena* a, std::uintptr_t as_critical) { + suppress_unused_warning(as_critical); + assert_pointer_valid(a); + thread_data& tls = *governor::get_thread_data(); + + // TODO revamp: for each use case investigate neccesity to make this call + task_group_context_impl::bind_to(ctx, &tls); + task_accessor::context(t) = &ctx; + // TODO revamp: consider respecting task isolation if this call is being made by external thread + task_accessor::isolation(t) = tls.my_task_dispatcher->m_execute_data_ext.isolation; + + // TODO: consider code refactoring when lane selection mechanism is unified. + + if ( tls.is_attached_to(a) ) { + arena_slot* slot = tls.my_arena_slot; +#if __TBB_PREVIEW_CRITICAL_TASKS + if( as_critical ) { + a->my_critical_task_stream.push( &t, subsequent_lane_selector(slot->critical_hint()) ); + } else +#endif + { + slot->spawn(t); + } + } else { + random_lane_selector lane_selector{tls.my_random}; +#if !__TBB_PREVIEW_CRITICAL_TASKS + suppress_unused_warning(as_critical); +#else + if ( as_critical ) { + a->my_critical_task_stream.push( &t, lane_selector ); + } else +#endif + { + // Avoid joining the arena the thread is not currently in. + a->my_fifo_task_stream.push( &t, lane_selector ); + } + } + // It is assumed that some thread will explicitly wait in the arena the task is submitted + // into. 
Therefore, no need to utilize mandatory concurrency here. + a->advertise_new_work<arena::work_spawned>(); +} + +void __TBB_EXPORTED_FUNC execute_and_wait(d1::task& t, d1::task_group_context& t_ctx, d1::wait_context& wait_ctx, d1::task_group_context& w_ctx) { + task_accessor::context(t) = &t_ctx; + task_dispatcher::execute_and_wait(&t, wait_ctx, w_ctx); +} + +void __TBB_EXPORTED_FUNC wait(d1::wait_context& wait_ctx, d1::task_group_context& w_ctx) { + // Enter the task dispatch loop without a task + task_dispatcher::execute_and_wait(nullptr, wait_ctx, w_ctx); +} + +d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::execution_data* ed) { + if (ed) { + const execution_data_ext* ed_ext = static_cast<const execution_data_ext*>(ed); + assert_pointers_valid(ed_ext->task_disp, ed_ext->task_disp->m_thread_data); + return ed_ext->task_disp->m_thread_data->my_arena_index; + } else { + thread_data* td = governor::get_thread_data_if_initialized(); + return td ? td->my_arena_index : d1::slot_id(-1); + } +} + +d1::task_group_context* __TBB_EXPORTED_FUNC current_context() { + thread_data* td = governor::get_thread_data(); + assert_pointers_valid(td, td->my_task_dispatcher); + + task_dispatcher* task_disp = td->my_task_dispatcher; + if (task_disp->m_properties.outermost) { + // No one task is executed, so no execute_data. + return nullptr; + } else { + return td->my_task_dispatcher->m_execute_data_ext.context; + } +} + +void task_dispatcher::execute_and_wait(d1::task* t, d1::wait_context& wait_ctx, d1::task_group_context& w_ctx) { + // Get an associated task dispatcher + thread_data* tls = governor::get_thread_data(); + __TBB_ASSERT(tls->my_task_dispatcher != nullptr, nullptr); + task_dispatcher& local_td = *tls->my_task_dispatcher; + + // TODO: factor out the binding to execute_and_wait_impl + if (t) { + task_group_context_impl::bind_to(*task_accessor::context(*t), tls); + // Propagate the isolation to the task executed without spawn. + task_accessor::isolation(*t) = tls->my_task_dispatcher->m_execute_data_ext.isolation; + } + + // Waiting on special object tied to a waiting thread. 
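These exported entry points are what the public headers land in: tbb::task_group::run() reaches r1::spawn() above, and task_group::wait() enters the dispatch loop through r1::wait()/task_dispatcher::execute_and_wait(). A minimal sketch of that round trip (the mapping is approximate, inferred from the entry points in this file):

#include <oneapi/tbb/task_group.h>
#include <cstdio>

int main() {
    tbb::task_group tg;
    tg.run([] { std::puts("executed by some worker"); }); // reaches r1::spawn()
    tg.wait(); // reaches r1::wait(): the calling thread joins the dispatch loop
    return 0;
}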
+ external_waiter waiter{ *tls->my_arena, wait_ctx }; + t = local_td.local_wait_for_all(t, waiter); + __TBB_ASSERT_EX(t == nullptr, "External waiter must not leave dispatch loop with a task"); + + // The external thread couldn't exit the dispatch loop in an idle state + if (local_td.m_thread_data->my_inbox.is_idle_state(true)) { + local_td.m_thread_data->my_inbox.set_is_idle(false); + } + + auto exception = w_ctx.my_exception.load(std::memory_order_acquire); + if (exception) { + __TBB_ASSERT(w_ctx.is_group_execution_cancelled(), "The task group context with an exception should be canceled."); + exception->throw_self(); + } +} + +#if __TBB_RESUMABLE_TASKS + +#if _WIN32 +/* [[noreturn]] */ void __stdcall co_local_wait_for_all(void* addr) noexcept +#else +/* [[noreturn]] */ void co_local_wait_for_all(unsigned hi, unsigned lo) noexcept +#endif +{ +#if !_WIN32 + std::uintptr_t addr = lo; + __TBB_ASSERT(sizeof(addr) == 8 || hi == 0, nullptr); + addr += std::uintptr_t(std::uint64_t(hi) << 32); +#endif + task_dispatcher& task_disp = *reinterpret_cast<task_dispatcher*>(addr); + assert_pointers_valid(task_disp.m_thread_data, task_disp.m_thread_data->my_arena); + task_disp.set_stealing_threshold(task_disp.m_thread_data->my_arena->calculate_stealing_threshold()); + __TBB_ASSERT(task_disp.can_steal(), nullptr); + task_disp.co_local_wait_for_all(); + // This code is unreachable +} + +/* [[noreturn]] */ void task_dispatcher::co_local_wait_for_all() noexcept { + // Do not create non-trivial objects on the stack of this function. They will never be destroyed. + assert_pointer_valid(m_thread_data); + + m_suspend_point->finilize_resume(); + // Basically calls the user callback passed to the tbb::task::suspend function + do_post_resume_action(); + + // Endless loop here because coroutine could be reused + d1::task* resume_task{}; + do { + arena* a = m_thread_data->my_arena; + coroutine_waiter waiter(*a); + resume_task = local_wait_for_all(nullptr, waiter); + assert_task_valid(resume_task); + __TBB_ASSERT(this == m_thread_data->my_task_dispatcher, nullptr); + + m_thread_data->set_post_resume_action(post_resume_action::cleanup, this); + + } while (resume(static_cast<suspend_point_type::resume_task*>(resume_task)->m_target)); + // This code might be unreachable +} + +d1::suspend_point task_dispatcher::get_suspend_point() { + if (m_suspend_point == nullptr) { + assert_pointer_valid(m_thread_data); + // 0 means that we attach this task dispatcher to the current stack + init_suspend_point(m_thread_data->my_arena, 0); + } + assert_pointer_valid(m_suspend_point); + return m_suspend_point; +} +void task_dispatcher::init_suspend_point(arena* a, std::size_t stack_size) { + __TBB_ASSERT(m_suspend_point == nullptr, nullptr); + m_suspend_point = new(cache_aligned_allocate(sizeof(suspend_point_type))) + suspend_point_type(a, stack_size, *this); +} +#endif /* __TBB_RESUMABLE_TASKS */ +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/task_dispatcher.h b/src/tbb/src/tbb/task_dispatcher.h new file mode 100644 index 000000000..c818934e5 --- /dev/null +++ b/src/tbb/src/tbb/task_dispatcher.h @@ -0,0 +1,479 @@ +/* + Copyright (c) 2020-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_task_dispatcher_H +#define _TBB_task_dispatcher_H + +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/detail/_task.h" +#include "oneapi/tbb/global_control.h" + +#include "scheduler_common.h" +#include "waiters.h" +#include "arena_slot.h" +#include "arena.h" +#include "thread_data.h" +#include "mailbox.h" +#include "itt_notify.h" +#include "concurrent_monitor.h" +#include "threading_control.h" + +#include <atomic> + +#if !__TBB_CPU_CTL_ENV_PRESENT +#include <fenv.h> // +#endif + +namespace tbb { +namespace detail { +namespace r1 { + +inline d1::task* get_self_recall_task(arena_slot& slot) { + suppress_unused_warning(slot); + d1::task* t = nullptr; +#if __TBB_RESUMABLE_TASKS + suspend_point_type* sp = slot.default_task_dispatcher().m_suspend_point; + if (sp && sp->m_is_owner_recalled.load(std::memory_order_acquire)) { + t = &sp->m_resume_task; + __TBB_ASSERT(sp->m_resume_task.m_target.m_thread_data == nullptr, nullptr); + } +#endif /* __TBB_RESUMABLE_TASKS */ + return t; +} + +// Defined in exception.cpp +/*[[noreturn]]*/void do_throw_noexcept(void (*throw_exception)()) noexcept; + +//------------------------------------------------------------------------ +// Suspend point +//------------------------------------------------------------------------ +#if __TBB_RESUMABLE_TASKS + +inline d1::task* suspend_point_type::resume_task::execute(d1::execution_data& ed) { + execution_data_ext& ed_ext = static_cast<execution_data_ext&>(ed); + + if (ed_ext.wait_ctx) { + thread_control_monitor::resume_context monitor_node{{std::uintptr_t(ed_ext.wait_ctx), nullptr}, ed_ext, m_target}; + // The wait_ctx is present only in external_waiter. In that case we leave the current stack + // in the abandoned state to resume when waiting completes. + thread_data* td = ed_ext.task_disp->m_thread_data; + td->set_post_resume_action(task_dispatcher::post_resume_action::register_waiter, &monitor_node); + + thread_control_monitor& wait_list = td->my_arena->get_waiting_threads_monitor(); + + if (wait_list.wait([&] { return !ed_ext.wait_ctx->continue_execution(); }, monitor_node)) { + return nullptr; + } + + td->clear_post_resume_action(); + r1::resume(ed_ext.task_disp->get_suspend_point()); + } else { + // If wait_ctx is null, it can be only a worker thread on outermost level because + // coroutine_waiter interrupts bypass loop before the resume_task execution. 
+ ed_ext.task_disp->m_thread_data->set_post_resume_action(task_dispatcher::post_resume_action::notify, + ed_ext.task_disp->get_suspend_point()); + } + // Do not access this task because it might be destroyed + ed_ext.task_disp->resume(m_target); + return nullptr; +} + +inline suspend_point_type::suspend_point_type(arena* a, size_t stack_size, task_dispatcher& task_disp) + : m_arena(a) + , m_random(this) + , m_co_context(stack_size, &task_disp) + , m_resume_task(task_disp) +{ + assert_pointer_valid(m_arena); + assert_pointer_valid(m_arena->my_default_ctx); + task_accessor::context(m_resume_task) = m_arena->my_default_ctx; + task_accessor::isolation(m_resume_task) = no_isolation; + // Initialize the itt_caller for the context of the resume task. + // It will be bound to the stack of the first suspend call. + task_group_context_impl::bind_to(*task_accessor::context(m_resume_task), task_disp.m_thread_data); +} + +#endif /* __TBB_RESUMABLE_TASKS */ + +//------------------------------------------------------------------------ +// Task Dispatcher +//------------------------------------------------------------------------ +inline task_dispatcher::task_dispatcher(arena* a) { + m_execute_data_ext.context = a->my_default_ctx; + m_execute_data_ext.task_disp = this; +} + +inline bool task_dispatcher::can_steal() { + __TBB_ASSERT(m_stealing_threshold != 0, nullptr); + stack_anchor_type anchor{}; + return reinterpret_cast<std::uintptr_t>(&anchor) > m_stealing_threshold; +} + +inline d1::task* task_dispatcher::get_inbox_or_critical_task( + execution_data_ext& ed, mail_inbox& inbox, isolation_type isolation, bool critical_allowed) +{ + if (inbox.empty()) + return nullptr; + d1::task* result = get_critical_task(nullptr, ed, isolation, critical_allowed); + if (result) + return result; + // Check if there are tasks mailed to this thread via task-to-thread affinity mechanism. + result = get_mailbox_task(inbox, ed, isolation); + // There is a race with a thread adding a new task (possibly with suitable isolation) + // to our mailbox, so the below conditions might result in a false positive. + // Then set_is_idle(false) allows that task to be stolen; it's OK. + if (isolation != no_isolation && !result && !inbox.empty() && inbox.is_idle_state(true)) { + // We have proxy tasks in our mailbox but the isolation blocks their execution. + // So publish the proxy tasks in mailbox to be available for stealing from owner's task pool. 
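The mailbox and isolation handling here is the scheduler side of tbb::this_task_arena::isolate(). A short example of the user-visible guarantee it implements, assuming the documented isolate() API: a thread waiting inside an isolated region cannot pick up unrelated outer work, which is the classic fix for deadlocks from nested parallelism under a lock.

#include <oneapi/tbb/parallel_for.h>
#include <oneapi/tbb/task_arena.h>
#include <mutex>

std::mutex m;

void outer(int n) {
    tbb::parallel_for(0, n, [&](int) {
        std::lock_guard<std::mutex> lock(m);
        // While waiting for the inner loop, this thread may only execute
        // tasks spawned inside the isolated region; it cannot steal an
        // unrelated outer iteration that would try to lock m again.
        tbb::this_task_arena::isolate([&] {
            tbb::parallel_for(0, 8, [](int) { /* inner work */ });
        });
    });
}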
+ inbox.set_is_idle( false ); + } + return result; +} + +inline d1::task* task_dispatcher::get_stream_or_critical_task( + execution_data_ext& ed, arena& a, task_stream<front_accessor>& stream, unsigned& hint, + isolation_type isolation, bool critical_allowed) +{ + if (stream.empty()) + return nullptr; + d1::task* result = get_critical_task(nullptr, ed, isolation, critical_allowed); + if (result) + return result; + return a.get_stream_task(stream, hint); +} + +inline d1::task* task_dispatcher::steal_or_get_critical( + execution_data_ext& ed, arena& a, unsigned arena_index, FastRandom& random, + isolation_type isolation, bool critical_allowed) +{ + if (d1::task* t = a.steal_task(arena_index, random, ed, isolation)) { + ed.context = task_accessor::context(*t); + ed.isolation = task_accessor::isolation(*t); + return get_critical_task(t, ed, isolation, critical_allowed); + } + return nullptr; +} + +template <bool ITTPossible, typename Waiter> +d1::task* task_dispatcher::receive_or_steal_task( + thread_data& tls, execution_data_ext& ed, Waiter& waiter, isolation_type isolation, + bool fifo_allowed, bool critical_allowed) +{ + __TBB_ASSERT(governor::is_thread_data_set(&tls), nullptr); + // Task to return + d1::task* t = nullptr; + // Get tls data (again) + arena& a = *tls.my_arena; + arena_slot& slot = *tls.my_arena_slot; + unsigned arena_index = tls.my_arena_index; + mail_inbox& inbox = tls.my_inbox; + task_stream<front_accessor>& resume_stream = a.my_resume_task_stream; + unsigned& resume_hint = slot.hint_for_resume_stream; + task_stream<front_accessor>& fifo_stream = a.my_fifo_task_stream; + unsigned& fifo_hint = slot.hint_for_fifo_stream; + + waiter.reset_wait(); + // Thread is in idle state now + inbox.set_is_idle(true); + + bool stealing_is_allowed = can_steal(); + + // Stealing loop mailbox/enqueue/other_slots + for (;;) { + __TBB_ASSERT(t == nullptr, nullptr); + // Check if the resource manager requires our arena to relinquish some threads + // For the external thread restore idle state to true after dispatch loop + if (!waiter.continue_execution(slot, t)) { + __TBB_ASSERT(t == nullptr, nullptr); + break; + } + // Start searching + if (t != nullptr) { + // continue_execution returned a task + } + else if ((t = get_inbox_or_critical_task(ed, inbox, isolation, critical_allowed))) { + // Successfully got the task from mailbox or critical task + } + else if ((t = get_stream_or_critical_task(ed, a, resume_stream, resume_hint, isolation, critical_allowed))) { + // Successfully got the resume or critical task + } + else if (fifo_allowed && isolation == no_isolation + && (t = get_stream_or_critical_task(ed, a, fifo_stream, fifo_hint, isolation, critical_allowed))) { + // Checked if there are tasks in starvation-resistant stream. Only allowed at the outermost dispatch level without isolation. + } + else if (stealing_is_allowed + && (t = steal_or_get_critical(ed, a, arena_index, tls.my_random, isolation, critical_allowed))) { + // Stole a task from a random arena slot + } + else { + t = get_critical_task(t, ed, isolation, critical_allowed); + } + + if (t != nullptr) { + ed.context = task_accessor::context(*t); + ed.isolation = task_accessor::isolation(*t); + a.my_observers.notify_entry_observers(tls.my_last_observer, tls.my_is_worker); + break; // Stealing success, end of stealing attempt + } + // Nothing to do, pause a little. 
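The pause below typically follows a spin-then-yield policy. A simplified, hedged sketch of that pattern (the real Waiter classes additionally cooperate with the arena's sleep/wake machinery):

#include <thread>

// Spin with exponentially growing pause counts, then start yielding.
class spin_then_yield_backoff {
    int count_ = 1;
    static constexpr int spin_limit_ = 16;
public:
    void pause() {
        if (count_ <= spin_limit_) {
            for (volatile int i = 0; i < count_; ++i) {
                // a real implementation would issue a CPU pause hint here
            }
            count_ *= 2;                   // back off exponentially while spinning
        } else {
            std::this_thread::yield();     // give up the time slice instead
        }
    }
    void reset() { count_ = 1; }
};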
+ waiter.pause(slot); + } // end of nonlocal task retrieval loop + + __TBB_ASSERT(is_alive(a.my_guard), nullptr); + if (inbox.is_idle_state(true)) { + inbox.set_is_idle(false); + } + return t; +} + +template <bool ITTPossible, typename Waiter> +d1::task* task_dispatcher::local_wait_for_all(d1::task* t, Waiter& waiter ) { + assert_pointer_valid(m_thread_data); + __TBB_ASSERT(m_thread_data->my_task_dispatcher == this, nullptr); + + // Guard an outer/default execution state + struct dispatch_loop_guard { + task_dispatcher& task_disp; + execution_data_ext old_execute_data_ext; + properties old_properties; + bool is_initially_registered; + + ~dispatch_loop_guard() { + task_disp.m_execute_data_ext = old_execute_data_ext; + task_disp.m_properties = old_properties; + + if (!is_initially_registered) { + task_disp.m_thread_data->my_arena->my_tc_client.get_pm_client()->unregister_thread(); + task_disp.m_thread_data->my_is_registered = false; + } + + __TBB_ASSERT(task_disp.m_thread_data && governor::is_thread_data_set(task_disp.m_thread_data), nullptr); + __TBB_ASSERT(task_disp.m_thread_data->my_task_dispatcher == &task_disp, nullptr); + } + } dl_guard{ *this, m_execute_data_ext, m_properties, m_thread_data->my_is_registered }; + + // The context guard to track fp setting and itt tasks. + context_guard_helper</*report_tasks=*/ITTPossible> context_guard; + + // Current isolation context + const isolation_type isolation = dl_guard.old_execute_data_ext.isolation; + + // Critical work inflection point. Once turned false current execution context has taken + // critical task on the previous stack frame and cannot take more until that critical path is + // finished. + bool critical_allowed = dl_guard.old_properties.critical_task_allowed; + + // Extended execution data that is used for dispatching. + // Base version is passed to the task::execute method. + execution_data_ext& ed = m_execute_data_ext; + ed.context = t ? task_accessor::context(*t) : nullptr; + ed.original_slot = m_thread_data->my_arena_index; + ed.affinity_slot = d1::no_slot; + ed.task_disp = this; + ed.wait_ctx = waiter.wait_ctx(); + + m_properties.outermost = false; + m_properties.fifo_tasks_allowed = false; + + if (!dl_guard.is_initially_registered) { + m_thread_data->my_arena->my_tc_client.get_pm_client()->register_thread(); + m_thread_data->my_is_registered = true; + } + + t = get_critical_task(t, ed, isolation, critical_allowed); + if (t && m_thread_data->my_inbox.is_idle_state(true)) { + // The thread has a work to do. Therefore, marking its inbox as not idle so that + // affinitized tasks can be stolen from it. + m_thread_data->my_inbox.set_is_idle(false); + } + + // Infinite exception loop + for (;;) { + try { + // Main execution loop + do { + // We assume that bypass tasks are from the same task group. + context_guard.set_ctx(ed.context); + // Inner level evaluates tasks coming from nesting loops and those returned + // by just executed tasks (bypassing spawn or enqueue calls). 
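The inner loop that follows is the task-bypass convention: execute() may return the next task to run, and the dispatcher runs it immediately without a spawn/steal round trip. A stripped-down model of that idea (stand-in types, not oneTBB's actual d1::task interface):

// Stand-in task type; the real interface is d1::task::execute(execution_data&).
struct task {
    virtual ~task() = default;
    virtual task* execute() = 0;     // may return the next task to run
};

// The dispatcher's inner loop: keep running whatever execute() hands back.
void run_bypass_chain(task* t) {
    while (t != nullptr)
        t = t->execute();            // no spawn/steal round trip in between
}

struct step : task {
    task* continuation;
    explicit step(task* next) : continuation(next) {}
    task* execute() override {
        // ... do this step's work ...
        return continuation;         // bypass: dispatcher runs it immediately
    }
};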
+ while (t != nullptr) { + assert_task_valid(t); + assert_pointer_valid</*alignment = */alignof(void*)>(ed.context); + __TBB_ASSERT(ed.context->my_state == d1::task_group_context::state::bound || + ed.context->my_state == d1::task_group_context::state::isolated, nullptr); + __TBB_ASSERT(m_thread_data->my_inbox.is_idle_state(false), nullptr); + __TBB_ASSERT(task_accessor::is_resume_task(*t) || isolation == no_isolation || isolation == ed.isolation, nullptr); + // Check premature leave + if (Waiter::postpone_execution(*t)) { + __TBB_ASSERT(task_accessor::is_resume_task(*t) && dl_guard.old_properties.outermost, + "Currently, the bypass loop can be interrupted only for resume task on outermost level"); + return t; + } + // Copy itt_caller to a stack because the context might be destroyed after t->execute. + void* itt_caller = ed.context->my_itt_caller; + suppress_unused_warning(itt_caller); + + ITT_CALLEE_ENTER(ITTPossible, t, itt_caller); + + if (ed.context->is_group_execution_cancelled()) { + t = t->cancel(ed); + } else { + t = t->execute(ed); + } + + ITT_CALLEE_LEAVE(ITTPossible, itt_caller); + + // The task affinity in execution data is set for affinitized tasks. + // So drop it after the task execution. + ed.affinity_slot = d1::no_slot; + // Reset task owner id for bypassed task + ed.original_slot = m_thread_data->my_arena_index; + t = get_critical_task(t, ed, isolation, critical_allowed); + } + __TBB_ASSERT(m_thread_data && governor::is_thread_data_set(m_thread_data), nullptr); + __TBB_ASSERT(m_thread_data->my_task_dispatcher == this, nullptr); + // When refactoring, pay attention that m_thread_data can be changed after t->execute() + __TBB_ASSERT(m_thread_data->my_arena_slot != nullptr, nullptr); + arena_slot& slot = *m_thread_data->my_arena_slot; + if (!waiter.continue_execution(slot, t)) { + break; + } + // Retrieve the task from local task pool + if (t || (slot.is_task_pool_published() && (t = slot.get_task(ed, isolation)))) { + __TBB_ASSERT(ed.original_slot == m_thread_data->my_arena_index, nullptr); + ed.context = task_accessor::context(*t); + ed.isolation = task_accessor::isolation(*t); + continue; + } + // Retrieve the task from global sources + t = receive_or_steal_task<ITTPossible>( + *m_thread_data, ed, waiter, isolation, dl_guard.old_properties.fifo_tasks_allowed, + critical_allowed + ); + } while (t != nullptr); // main dispatch loop + break; // Exit exception loop; + } catch (...) { + if (global_control::active_value(global_control::terminate_on_exception) == 1) { + do_throw_noexcept([] { throw; }); + } + if (ed.context->cancel_group_execution()) { + /* We are the first to signal cancellation, so store the exception that caused it. 
*/ + ed.context->my_exception.store(tbb_exception_ptr::allocate(), std::memory_order_release); + } + } + } // Infinite exception loop + __TBB_ASSERT(t == nullptr, nullptr); + + +#if __TBB_RESUMABLE_TASKS + if (dl_guard.old_properties.outermost) { + recall_point(); + } +#endif /* __TBB_RESUMABLE_TASKS */ + + return nullptr; +} + +#if __TBB_RESUMABLE_TASKS +inline void task_dispatcher::recall_point() { + if (this != &m_thread_data->my_arena_slot->default_task_dispatcher()) { + __TBB_ASSERT(m_suspend_point != nullptr, nullptr); + __TBB_ASSERT(m_suspend_point->m_is_owner_recalled.load(std::memory_order_relaxed) == false, nullptr); + + m_thread_data->set_post_resume_action(post_resume_action::notify, get_suspend_point()); + internal_suspend(); + + if (m_thread_data->my_inbox.is_idle_state(true)) { + m_thread_data->my_inbox.set_is_idle(false); + } + } +} +#endif /* __TBB_RESUMABLE_TASKS */ + +#if __TBB_PREVIEW_CRITICAL_TASKS +inline d1::task* task_dispatcher::get_critical_task(d1::task* t, execution_data_ext& ed, isolation_type isolation, bool critical_allowed) { + __TBB_ASSERT( critical_allowed || !m_properties.critical_task_allowed, nullptr ); + + if (!critical_allowed) { + // The stack is already in the process of critical path execution. Cannot take another + // critical work until finish with the current one. + __TBB_ASSERT(!m_properties.critical_task_allowed, nullptr); + return t; + } + + assert_pointers_valid(m_thread_data, m_thread_data->my_arena, m_thread_data->my_arena_slot); + thread_data& td = *m_thread_data; + arena& a = *td.my_arena; + arena_slot& slot = *td.my_arena_slot; + + d1::task* crit_t = a.get_critical_task(slot.hint_for_critical_stream, isolation); + if (crit_t != nullptr) { + assert_task_valid(crit_t); + if (t != nullptr) { + assert_pointer_valid</*alignment = */alignof(void*)>(ed.context); + r1::spawn(*t, *ed.context); + } + ed.context = task_accessor::context(*crit_t); + ed.isolation = task_accessor::isolation(*crit_t); + + // We cannot execute more than one critical task on the same stack. + // In other words, we prevent nested critical tasks. + m_properties.critical_task_allowed = false; + + // TODO: add a test that the observer is called when critical task is taken. + a.my_observers.notify_entry_observers(td.my_last_observer, td.my_is_worker); + t = crit_t; + } else { + // Was unable to find critical work in the queue. Allow inspecting the queue in nested + // invocations. Handles the case when critical task has been just completed. + m_properties.critical_task_allowed = true; + } + return t; +} +#else +inline d1::task* task_dispatcher::get_critical_task(d1::task* t, execution_data_ext&, isolation_type, bool /*critical_allowed*/) { + return t; +} +#endif + +inline d1::task* task_dispatcher::get_mailbox_task(mail_inbox& my_inbox, execution_data_ext& ed, isolation_type isolation) { + while (task_proxy* const tp = my_inbox.pop(isolation)) { + if (d1::task* result = tp->extract_task<task_proxy::mailbox_bit>()) { + ed.original_slot = (unsigned short)(-2); + ed.affinity_slot = ed.task_disp->m_thread_data->my_arena_index; + return result; + } + // We have exclusive access to the proxy, and can destroy it. 
+ tp->allocator.delete_object(tp, ed); + } + return nullptr; +} + +template <typename Waiter> +d1::task* task_dispatcher::local_wait_for_all(d1::task* t, Waiter& waiter) { + if (governor::is_itt_present()) { + return local_wait_for_all</*ITTPossible = */ true>(t, waiter); + } else { + return local_wait_for_all</*ITTPossible = */ false>(t, waiter); + } +} + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // _TBB_task_dispatcher_H + diff --git a/src/tbb/src/tbb/task_group_context.cpp b/src/tbb/src/tbb/task_group_context.cpp index 64efc0dc5..c20b2790f 100644 --- a/src/tbb/src/tbb/task_group_context.cpp +++ b/src/tbb/src/tbb/task_group_context.cpp @@ -1,278 +1,134 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ + Copyright (c) 2005-2023 Intel Corporation -#include "scheduler.h" + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -#include "itt_notify.h" + http://www.apache.org/licenses/LICENSE-2.0 -namespace tbb { + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -#if __TBB_TASK_GROUP_CONTEXT +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/tbb_allocator.h" +#include "oneapi/tbb/task_group.h" +#include "governor.h" +#include "thread_data.h" +#include "scheduler_common.h" +#include "itt_notify.h" +#include "task_dispatcher.h" -using namespace internal; +#include <type_traits> + +namespace tbb { +namespace detail { +namespace r1 { //------------------------------------------------------------------------ -// captured_exception +// tbb_exception_ptr //------------------------------------------------------------------------ - -inline char* duplicate_string ( const char* src ) { - char* dst = NULL; - if ( src ) { - size_t len = strlen(src) + 1; - dst = (char*)allocate_via_handler_v3(len); - strncpy (dst, src, len); - } - return dst; +tbb_exception_ptr* tbb_exception_ptr::allocate() noexcept { + tbb_exception_ptr* eptr = (tbb_exception_ptr*)allocate_memory(sizeof(tbb_exception_ptr)); + return eptr ? new (eptr) tbb_exception_ptr(std::current_exception()) : nullptr; } -captured_exception::~captured_exception () throw() { - clear(); +void tbb_exception_ptr::destroy() noexcept { + this->~tbb_exception_ptr(); + deallocate_memory(this); } -void captured_exception::set ( const char* a_name, const char* info ) throw() { - my_exception_name = duplicate_string( a_name ); - my_exception_info = duplicate_string( info ); +void tbb_exception_ptr::throw_self() { + if (governor::rethrow_exception_broken()) fix_broken_rethrow(); + std::rethrow_exception(my_ptr); } -void captured_exception::clear () throw() { - deallocate_via_handler_v3 (const_cast<char*>(my_exception_name)); - deallocate_via_handler_v3 (const_cast<char*>(my_exception_info)); -} +//------------------------------------------------------------------------ +// task_group_context +//------------------------------------------------------------------------ -captured_exception* captured_exception::move () throw() { - captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception)); - if ( e ) { - ::new (e) captured_exception(); - e->my_exception_name = my_exception_name; - e->my_exception_info = my_exception_info; - e->my_dynamic = true; - my_exception_name = my_exception_info = NULL; - } - return e; -} +void task_group_context_impl::destroy(d1::task_group_context& ctx) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); -void captured_exception::destroy () throw() { - __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" ); - if ( my_dynamic ) { - this->captured_exception::~captured_exception(); - deallocate_via_handler_v3 (this); + if (ctx.my_context_list != nullptr) { + __TBB_ASSERT(ctx.my_state.load(std::memory_order_relaxed) == d1::task_group_context::state::bound, nullptr); + // The owner can be destroyed at any moment. Access the associate data with caution. 
+ ctx.my_context_list->remove(ctx.my_node); } -} + d1::cpu_ctl_env* ctl = reinterpret_cast<d1::cpu_ctl_env*>(&ctx.my_cpu_ctl_env); +#if _MSC_VER && _MSC_VER <= 1900 && !__INTEL_COMPILER + suppress_unused_warning(ctl); +#endif + ctl->~cpu_ctl_env(); -captured_exception* captured_exception::allocate ( const char* a_name, const char* info ) { - captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) ); - if ( e ) { - ::new (e) captured_exception(a_name, info); - e->my_dynamic = true; + auto exception = ctx.my_exception.load(std::memory_order_relaxed); + if (exception) { + exception->destroy(); } - return e; -} + ITT_STACK_DESTROY(ctx.my_itt_caller); -const char* captured_exception::name() const throw() { - return my_exception_name; -} + poison_pointer(ctx.my_parent); + poison_pointer(ctx.my_context_list); + poison_pointer(ctx.my_node.my_next_node); + poison_pointer(ctx.my_node.my_prev_node); + poison_pointer(ctx.my_exception); + poison_pointer(ctx.my_itt_caller); -const char* captured_exception::what() const throw() { - return my_exception_info; + ctx.my_state.store(d1::task_group_context::state::dead, std::memory_order_release); } - -//------------------------------------------------------------------------ -// tbb_exception_ptr -//------------------------------------------------------------------------ - -#if !TBB_USE_CAPTURED_EXCEPTION - -namespace internal { - -template<typename T> -tbb_exception_ptr* AllocateExceptionContainer( const T& src ) { - tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) ); - if ( eptr ) - new (eptr) tbb_exception_ptr(src); - return eptr; +void task_group_context_impl::initialize(d1::task_group_context& ctx) { + ITT_TASK_GROUP(&ctx, ctx.my_name, nullptr); + + ctx.my_node.my_next_node = &ctx.my_node; + ctx.my_node.my_prev_node = &ctx.my_node; + ctx.my_cpu_ctl_env = 0; + ctx.my_cancellation_requested = 0; + ctx.my_may_have_children.store(0, std::memory_order_relaxed); + // Set the created state to bound at the first usage. 
+ ctx.my_state.store(d1::task_group_context::state::created, std::memory_order_relaxed); + ctx.my_parent = nullptr; + ctx.my_context_list = nullptr; + ctx.my_exception.store(nullptr, std::memory_order_relaxed); + ctx.my_itt_caller = nullptr; + + static_assert(sizeof(d1::cpu_ctl_env) <= sizeof(ctx.my_cpu_ctl_env), "FPU settings storage does not fit to uint64_t"); + d1::cpu_ctl_env* ctl = new (&ctx.my_cpu_ctl_env) d1::cpu_ctl_env; + if (ctx.my_traits.fp_settings) + ctl->get_env(); } -tbb_exception_ptr* tbb_exception_ptr::allocate () { - return AllocateExceptionContainer( std::current_exception() ); -} +void task_group_context_impl::register_with(d1::task_group_context& ctx, thread_data* td) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); + __TBB_ASSERT(td, nullptr); + ctx.my_context_list = td->my_context_list; -tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) { - return AllocateExceptionContainer( std::current_exception() ); + ctx.my_context_list->push_front(ctx.my_node); } -tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) { - tbb_exception_ptr *res = AllocateExceptionContainer( src ); - src.destroy(); - return res; -} - -void tbb_exception_ptr::destroy () throw() { - this->tbb_exception_ptr::~tbb_exception_ptr(); - deallocate_via_handler_v3 (this); -} +void task_group_context_impl::bind_to_impl(d1::task_group_context& ctx, thread_data* td) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); + __TBB_ASSERT(ctx.my_state.load(std::memory_order_relaxed) == d1::task_group_context::state::locked, "The context can be bound only under the lock."); + __TBB_ASSERT(!ctx.my_parent, "Parent is set before initial binding"); -} // namespace internal -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ + ctx.my_parent = td->my_task_dispatcher->m_execute_data_ext.context; + __TBB_ASSERT(ctx.my_parent, nullptr); - -//------------------------------------------------------------------------ -// task_group_context -//------------------------------------------------------------------------ - -task_group_context::~task_group_context () { - if ( __TBB_load_relaxed(my_kind) == binding_completed ) { - if ( governor::is_set(my_owner) ) { - // Local update of the context list - uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch; - my_owner->my_local_ctx_list_update.store<relaxed>(1); - // Prevent load of nonlocal update flag from being hoisted before the - // store to local update flag. - atomic_fence(); - if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) { - spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex); - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - my_owner->my_local_ctx_list_update.store<relaxed>(0); - } - else { - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - // Release fence is necessary so that update of our neighbors in - // the context list was committed when possible concurrent destroyer - // proceeds after local update flag is reset by the following store. - my_owner->my_local_ctx_list_update.store<release>(0); - if ( local_count_snapshot != the_context_state_propagation_epoch ) { - // Another thread was propagating cancellation request when we removed - // ourselves from the list. We must ensure that it is not accessing us - // when this destructor finishes. We'll be able to acquire the lock - // below only after the other thread finishes with us. 
- spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex); - } - } - } - else { - // Nonlocal update of the context list - // Synchronizes with generic_scheduler::cleanup_local_context_list() - // TODO: evaluate and perhaps relax, or add some lock instead - if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) { - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - } - else { - //TODO: evaluate and perhaps relax - my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>(); - //TODO: evaluate and perhaps remove - spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u ); - my_owner->my_context_list_mutex.lock(); - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - my_owner->my_context_list_mutex.unlock(); - //TODO: evaluate and perhaps relax - my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>(); - } - } - } -#if __TBB_FP_CONTEXT - internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env(); -#endif - poison_value(my_version_and_traits); - if ( my_exception ) - my_exception->destroy(); - ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller); -} - -void task_group_context::init () { - __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" ); - __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" ); - __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" ); - __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, "Context can be created only as isolated or bound" ); - my_parent = NULL; - my_cancellation_requested = 0; - my_exception = NULL; - my_owner = NULL; - my_state = 0; - itt_caller = ITT_CALLER_NULL; -#if __TBB_TASK_PRIORITY - my_priority = normalized_normal_priority; -#endif /* __TBB_TASK_PRIORITY */ -#if __TBB_FP_CONTEXT - __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings are not equal sizeof(uint64_t)" ); - __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit to uint64_t" ); - suppress_unused_warning( my_cpu_ctl_env.space ); - - cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env); - new ( &ctl ) cpu_ctl_env; - if ( my_version_and_traits & fp_settings ) - ctl.get_env(); -#endif -} - -void task_group_context::register_with ( generic_scheduler *local_sched ) { - __TBB_ASSERT( local_sched, NULL ); - my_owner = local_sched; - // state propagation logic assumes new contexts are bound to head of the list - my_node.my_prev = &local_sched->my_context_list_head; - // Notify threads that may be concurrently destroying contexts registered - // in this scheduler's list that local list update is underway. - local_sched->my_local_ctx_list_update.store<relaxed>(1); - // Prevent load of global propagation epoch counter from being hoisted before - // speculative stores above, as well as load of nonlocal update flag from - // being hoisted before the store to local update flag. 
- atomic_fence(); - // Finalize local context list update - if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) { - spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex); - local_sched->my_context_list_head.my_next->my_prev = &my_node; - my_node.my_next = local_sched->my_context_list_head.my_next; - my_owner->my_local_ctx_list_update.store<relaxed>(0); - local_sched->my_context_list_head.my_next = &my_node; - } - else { - local_sched->my_context_list_head.my_next->my_prev = &my_node; - my_node.my_next = local_sched->my_context_list_head.my_next; - my_owner->my_local_ctx_list_update.store<release>(0); - // Thread-local list of contexts allows concurrent traversal by another thread - // while propagating state change. To ensure visibility of my_node's members - // to the concurrently traversing thread, the list's head is updated by means - // of store-with-release. - __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node); - } -} - -void task_group_context::bind_to ( generic_scheduler *local_sched ) { - __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" ); - __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" ); - my_parent = local_sched->my_innermost_running_task->prefix().context; -#if __TBB_FP_CONTEXT // Inherit FPU settings only if the context has not captured FPU settings yet. - if ( !(my_version_and_traits & fp_settings) ) - copy_fp_settings(*my_parent); -#endif + if (!ctx.my_traits.fp_settings) + copy_fp_settings(ctx, *ctx.my_parent); // Condition below prevents unnecessary thrashing parent context's cache line - if ( !(my_parent->my_state & may_have_children) ) - my_parent->my_state |= may_have_children; // full fence is below - if ( my_parent->my_parent ) { + if (ctx.my_parent->my_may_have_children.load(std::memory_order_relaxed) != d1::task_group_context::may_have_children) { + ctx.my_parent->my_may_have_children.store(d1::task_group_context::may_have_children, std::memory_order_relaxed); // full fence is below + } + if (ctx.my_parent->my_parent) { // Even if this context were made accessible for state change propagation - // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node) + // (by placing store_with_release(td->my_context_list_state.head.my_next, &ctx.my_node) // above), it still could be missed if state propagation from a grand-ancestor // was underway concurrently with binding. // Speculative propagation from the parent together with epoch counters @@ -282,220 +138,221 @@ void task_group_context::bind_to ( generic_scheduler *local_sched ) { // Acquire fence is necessary to prevent reordering subsequent speculative // loads of parent state data out of the scope where epoch counters comparison // can reliably validate it. - uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch ); + uintptr_t local_count_snapshot = ctx.my_parent->my_context_list->epoch.load(std::memory_order_acquire); // Speculative propagation of parent's state. The speculation will be // validated by the epoch counters check further on. 
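[Editor's note — illustration, not part of the diff.] The new bind_to logic above follows a "snapshot, speculate, publish with a full fence, re-validate" protocol against the propagation epoch counter: snapshot the epoch, speculatively copy the parent's cancellation state, register the context (which issues the full fence), and fall back to the propagation lock only if the epoch moved meanwhile. A minimal self-contained sketch of that protocol follows; all names here (global_epoch, propagation_mutex, Ctx, bind_speculatively) are invented for illustration and are not the library's API.

```cpp
#include <atomic>
#include <cstdint>
#include <mutex>

std::atomic<std::uintptr_t> global_epoch{0}; // bumped once per propagation pass
std::mutex propagation_mutex;                // serializes propagation passes

struct Ctx {
    std::atomic<std::uint32_t> cancelled{0};
    Ctx* parent = nullptr;
};

void bind_speculatively(Ctx& child, Ctx& parent) {
    child.parent = &parent;
    // 1. Snapshot the epoch; acquire keeps the speculative loads below from moving above it.
    std::uintptr_t snapshot = global_epoch.load(std::memory_order_acquire);
    // 2. Speculative copy of the parent's state.
    child.cancelled.store(parent.cancelled.load(std::memory_order_relaxed),
                          std::memory_order_relaxed);
    // 3. Publish the child to would-be propagators. In the real code, register_with()
    //    links the node into the thread's context list and issues the full fence.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // 4. If a propagation pass ran concurrently, the copy may be stale: redo it under the lock.
    if (snapshot != global_epoch.load(std::memory_order_relaxed)) {
        std::lock_guard<std::mutex> lock(propagation_mutex);
        child.cancelled.store(parent.cancelled.load(std::memory_order_relaxed),
                              std::memory_order_relaxed);
    }
}

int main() {
    Ctx parent, child;
    parent.cancelled.store(1, std::memory_order_relaxed); // parent already cancelled
    bind_speculatively(child, parent);                    // child inherits the flag
    return child.cancelled.load(std::memory_order_relaxed) == 1 ? 0 : 1;
}
```

The fence-then-recheck makes the race benign: either a concurrent propagator saw the published child, or the epoch comparison detects the pass and the copy is repeated under the lock.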
- my_cancellation_requested = my_parent->my_cancellation_requested; -#if __TBB_TASK_PRIORITY - my_priority = my_parent->my_priority; -#endif /* __TBB_TASK_PRIORITY */ - register_with( local_sched ); // Issues full fence + ctx.my_cancellation_requested.store(ctx.my_parent->my_cancellation_requested.load(std::memory_order_relaxed), std::memory_order_relaxed); + register_with(ctx, td); // Issues full fence // If no state propagation was detected by the following condition, the above // full fence guarantees that the parent had correct state during speculative // propagation before the fence. Otherwise the propagation from parent is // repeated under the lock. - if ( local_count_snapshot != the_context_state_propagation_epoch ) { + if (local_count_snapshot != the_context_state_propagation_epoch.load(std::memory_order_relaxed)) { // Another thread may be propagating state change right now. So resort to lock. context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex); - my_cancellation_requested = my_parent->my_cancellation_requested; -#if __TBB_TASK_PRIORITY - my_priority = my_parent->my_priority; -#endif /* __TBB_TASK_PRIORITY */ + ctx.my_cancellation_requested.store(ctx.my_parent->my_cancellation_requested.load(std::memory_order_relaxed), std::memory_order_relaxed); } - } - else { - register_with( local_sched ); // Issues full fence + } else { + register_with(ctx, td); // Issues full fence // As we do not have grand-ancestors, concurrent state propagation (if any) // may originate only from the parent context, and thus it is safe to directly // copy the state from it. - my_cancellation_requested = my_parent->my_cancellation_requested; -#if __TBB_TASK_PRIORITY - my_priority = my_parent->my_priority; -#endif /* __TBB_TASK_PRIORITY */ + ctx.my_cancellation_requested.store(ctx.my_parent->my_cancellation_requested.load(std::memory_order_relaxed), std::memory_order_relaxed); } - __TBB_store_relaxed(my_kind, binding_completed); } -#if __TBB_TASK_GROUP_CONTEXT -template <typename T> -void task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) { - if (this->*mptr_state == new_state) { - // Nothing to do, whether descending from "src" or not, so no need to scan. - // Hopefully this happens often thanks to earlier invocations. - // This optimization is enabled by LIFO order in the context lists: - // - new contexts are bound to the beginning of lists; - // - descendants are newer than ancestors; - // - earlier invocations are therefore likely to "paint" long chains. - } - else if (this == &src) { - // This clause is disjunct from the traversal below, which skips src entirely. - // Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again). - // Such interference is probably not frequent enough to aim for optimisation by writing new_state again (to make the other thread back down). - // Letting the other thread prevail may also be fairer. 
- } - else { - for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) { - __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted"); - if ( ancestor == &src ) { - for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent ) - ctx->*mptr_state = new_state; - break; +void task_group_context_impl::bind_to(d1::task_group_context& ctx, thread_data* td) { + d1::task_group_context::state state = ctx.my_state.load(std::memory_order_acquire); + if (state <= d1::task_group_context::state::locked) { + if (state == d1::task_group_context::state::created && +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1910 + ((std::atomic<typename std::underlying_type<d1::task_group_context::state>::type>&)ctx.my_state).compare_exchange_strong( + (typename std::underlying_type<d1::task_group_context::state>::type&)state, + (typename std::underlying_type<d1::task_group_context::state>::type)d1::task_group_context::state::locked) +#else + ctx.my_state.compare_exchange_strong(state, d1::task_group_context::state::locked) +#endif + ) { + // If we are in the outermost task dispatch loop of an external thread, then + // there is nothing to bind this context to, and we skip the binding part + // treating the context as isolated. + __TBB_ASSERT(td->my_task_dispatcher->m_execute_data_ext.context != nullptr, nullptr); + d1::task_group_context::state release_state{}; + if (td->my_task_dispatcher->m_execute_data_ext.context == td->my_arena->my_default_ctx || !ctx.my_traits.bound) { + if (!ctx.my_traits.fp_settings) { + copy_fp_settings(ctx, *td->my_arena->my_default_ctx); + } + release_state = d1::task_group_context::state::isolated; + } else { + bind_to_impl(ctx, td); + release_state = d1::task_group_context::state::bound; } + ITT_STACK_CREATE(ctx.my_itt_caller); + ctx.my_state.store(release_state, std::memory_order_release); } + spin_wait_while_eq(ctx.my_state, d1::task_group_context::state::locked); } + __TBB_ASSERT(ctx.my_state.load(std::memory_order_relaxed) != d1::task_group_context::state::created, nullptr); + __TBB_ASSERT(ctx.my_state.load(std::memory_order_relaxed) != d1::task_group_context::state::locked, nullptr); } -template <typename T> -void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) { - spin_mutex::scoped_lock lock(my_context_list_mutex); - // Acquire fence is necessary to ensure that the subsequent node->my_next load - // returned the correct value in case it was just inserted in another thread. - // The fence also ensures visibility of the correct my_parent value. - context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next); - while ( node != &my_context_list_head ) { - task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node); - if ( ctx.*mptr_state != new_state ) - ctx.propagate_task_group_state( mptr_state, src, new_state ); - node = node->my_next; - __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" ); - } - // Sync up local propagation epoch with the global one. Release fence prevents - // reordering of possible store to *mptr_state after the sync point. 
- __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch); -} - -template <typename T> -bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) { - if ( !(src.my_state & task_group_context::may_have_children) ) - return true; - // The whole propagation algorithm is under the lock in order to ensure correctness - // in case of concurrent state changes at the different levels of the context tree. - // See comment at the bottom of scheduler.cpp - context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex); - if ( src.*mptr_state != new_state ) - // Another thread has concurrently changed the state. Back down. - return false; - // Advance global state propagation epoch - __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1); - // Propagate to all workers and masters and sync up their local epochs with the global one - unsigned num_workers = my_num_workers; - for ( unsigned i = 0; i < num_workers; ++i ) { - generic_scheduler *s = my_workers[i]; - // If the worker is only about to be registered, skip it. - if ( s ) - s->propagate_task_group_state( mptr_state, src, new_state ); - } - // Propagate to all master threads (under my_arenas_list_mutex lock) - ForEachArena(a) { // uses lock on my_arenas_list_mutex - arena_slot &slot = a.my_slots[0]; - generic_scheduler *s = slot.my_scheduler; - // If the master is under construction, skip it. Otherwise make sure that it does not - // leave its arena and its scheduler get destroyed while we accessing its data. - if ( s && as_atomic(slot.my_scheduler).compare_and_swap(LockedMaster, s) == s ) { //TODO: remove need in lock - __TBB_ASSERT( slot.my_scheduler == LockedMaster, NULL ); - // The whole propagation sequence is locked, thus no contention is expected - __TBB_ASSERT( s != LockedMaster, NULL ); - s->propagate_task_group_state( mptr_state, src, new_state ); - __TBB_store_with_release( slot.my_scheduler, s ); +void task_group_context_impl::propagate_task_group_state(d1::task_group_context& ctx, std::atomic<std::uint32_t> d1::task_group_context::* mptr_state, d1::task_group_context& src, std::uint32_t new_state) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); + /* 1. if ((ctx.*mptr_state).load(std::memory_order_relaxed) == new_state): + Nothing to do, whether descending from "src" or not, so no need to scan. + Hopefully this happens often thanks to earlier invocations. + This optimization is enabled by LIFO order in the context lists: + - new contexts are bound to the beginning of lists; + - descendants are newer than ancestors; + - earlier invocations are therefore likely to "paint" long chains. + 2. if (&ctx == &src): + This clause is disjunct from the traversal below, which skips src entirely. + Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again). + Such interference is probably not frequent enough to aim for optimisation by writing new_state again (to make the other thread back down). + Letting the other thread prevail may also be fairer.
+ */ + if ((ctx.*mptr_state).load(std::memory_order_relaxed) != new_state && &ctx != &src) { + for (d1::task_group_context* ancestor = ctx.my_parent; ancestor != nullptr; ancestor = ancestor->my_parent) { + if (ancestor == &src) { + for (d1::task_group_context* c = &ctx; c != ancestor; c = c->my_parent) + (c->*mptr_state).store(new_state, std::memory_order_relaxed); + break; + } } - } EndForEach(); - return true; -} - -template <typename T> -bool arena::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) { - return my_market->propagate_task_group_state( mptr_state, src, new_state ); + } } -#endif /* __TBB_TASK_GROUP_CONTEXT */ -bool task_group_context::cancel_group_execution () { - __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state"); - if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) { +bool task_group_context_impl::cancel_group_execution(d1::task_group_context& ctx) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); + __TBB_ASSERT(ctx.my_cancellation_requested.load(std::memory_order_relaxed) <= 1, "The cancellation state can be either 0 or 1"); + if (ctx.my_cancellation_requested.load(std::memory_order_relaxed) || ctx.my_cancellation_requested.exchange(1)) { // This task group and any descendants have already been canceled. - // (A newly added descendant would inherit its parent's my_cancellation_requested, + // (A newly added descendant would inherit its parent's ctx.my_cancellation_requested, // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.) return false; } - governor::local_scheduler()->my_arena->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 ); + governor::get_thread_data()->my_arena->my_threading_control->propagate_task_group_state(&d1::task_group_context::my_cancellation_requested, ctx, uint32_t(1)); return true; } -bool task_group_context::is_group_execution_cancelled () const { - return my_cancellation_requested != 0; +bool task_group_context_impl::is_group_execution_cancelled(const d1::task_group_context& ctx) { + return ctx.my_cancellation_requested.load(std::memory_order_relaxed) != 0; } // IMPORTANT: It is assumed that this method is not used concurrently! -void task_group_context::reset () { +void task_group_context_impl::reset(d1::task_group_context& ctx) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); //! TODO: Add assertion that this context does not have children // No fences are necessary since this context can be accessed from another thread // only after stealing happened (which means necessary fences were used). - if ( my_exception ) { - my_exception->destroy(); - my_exception = NULL; + + auto exception = ctx.my_exception.load(std::memory_order_relaxed); + if (exception) { + exception->destroy(); + ctx.my_exception.store(nullptr, std::memory_order_relaxed); } - my_cancellation_requested = 0; + ctx.my_cancellation_requested = 0; } -#if __TBB_FP_CONTEXT // IMPORTANT: It is assumed that this method is not used concurrently! -void task_group_context::capture_fp_settings () { +void task_group_context_impl::capture_fp_settings(d1::task_group_context& ctx) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); //! 
TODO: Add assertion that this context does not have children // No fences are necessary since this context can be accessed from another thread // only after stealing happened (which means necessary fences were used). - cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env); - if ( !(my_version_and_traits & fp_settings) ) { - new ( &ctl ) cpu_ctl_env; - my_version_and_traits |= fp_settings; + d1::cpu_ctl_env* ctl = reinterpret_cast<d1::cpu_ctl_env*>(&ctx.my_cpu_ctl_env); + if (!ctx.my_traits.fp_settings) { + ctl = new (&ctx.my_cpu_ctl_env) d1::cpu_ctl_env; + ctx.my_traits.fp_settings = true; } - ctl.get_env(); + ctl->get_env(); } -void task_group_context::copy_fp_settings( const task_group_context &src ) { - __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." ); - __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." ); +void task_group_context_impl::copy_fp_settings(d1::task_group_context& ctx, const d1::task_group_context& src) { + __TBB_ASSERT(!is_poisoned(ctx.my_context_list), nullptr); + __TBB_ASSERT(!ctx.my_traits.fp_settings, "The context already has FPU settings."); + __TBB_ASSERT(src.my_traits.fp_settings, "The source context does not have FPU settings."); - cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env); - cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env); - new (&ctl) cpu_ctl_env( src_ctl ); - my_version_and_traits |= fp_settings; -} -#endif /* __TBB_FP_CONTEXT */ - -void task_group_context::register_pending_exception () { - if ( my_cancellation_requested ) - return; -#if TBB_USE_EXCEPTIONS - try { - throw; - } TbbCatchAll( this ); -#endif /* TBB_USE_EXCEPTIONS */ + const d1::cpu_ctl_env* src_ctl = reinterpret_cast<const d1::cpu_ctl_env*>(&src.my_cpu_ctl_env); + new (&ctx.my_cpu_ctl_env) d1::cpu_ctl_env(*src_ctl); + ctx.my_traits.fp_settings = true; } -#if __TBB_TASK_PRIORITY -void task_group_context::set_priority ( priority_t prio ) { - __TBB_ASSERT( prio == priority_low || prio == priority_normal || prio == priority_high, "Invalid priority level value" ); - intptr_t p = normalize_priority(prio); - if ( my_priority == p && !(my_state & task_group_context::may_have_children)) - return; - my_priority = p; - internal::generic_scheduler* s = governor::local_scheduler_if_initialized(); - if ( !s || !s->my_arena->propagate_task_group_state(&task_group_context::my_priority, *this, p) ) - return; - // Updating arena priority here does not eliminate necessity of checking each - // task priority and updating arena priority if necessary before the task execution. - // These checks will be necessary because: - // a) set_priority() may be invoked before any tasks from this task group are spawned; - // b) all spawned tasks from this task group are retrieved from the task pools. - // These cases create a time window when arena priority may be lowered. - s->my_market->update_arena_priority( *s->my_arena, p ); -} +/* + Comments: + +1. The premise of the cancellation support implementation is that cancellations are + not part of the hot path of the program execution. Therefore all changes in its + implementation in order to reduce the overhead of the cancellation control flow + should be done only in ways that do not increase overhead of the normal execution. + + In general, contexts are used by all threads and their descendants are created in + different threads as well. 
In order to minimize impact of the cross-thread tree + maintenance (first of all because of the synchronization), the tree of contexts + is split into pieces, each of which is handled by a single thread. Such pieces + are represented as lists of contexts, members of which are contexts that were + bound to their parents in the given thread. + + The context tree maintenance and cancellation propagation algorithms are designed + in such a manner that cross-thread access to a context list will take place only + when cancellation signal is sent (by user or when an exception happens), and + synchronization is necessary only then. Thus the normal execution flow (without + exceptions and cancellation) remains free from any synchronization done on + behalf of exception handling and cancellation support. + +2. Consider parallel cancellations at the different levels of the context tree: + + Ctx1 <- Cancelled by Thread1 |- Thread2 started processing + | | + Ctx2 |- Thread1 started processing + | T1 |- Thread2 finishes and syncs up local counters + Ctx3 <- Cancelled by Thread2 | + | |- Ctx5 is bound to Ctx2 + Ctx4 | + T2 |- Thread1 reaches Ctx2 + + Thread-propagator of each cancellation increments global counter. However the thread + propagating the cancellation from the outermost context (Thread1) may be the last + to finish. Which means that the local counters may be synchronized earlier (by Thread2, + at Time1) than it propagated cancellation into Ctx2 (at time Time2). If a new context + (Ctx5) is created and bound to Ctx2 between Time1 and Time2, checking its parent only + (Ctx2) may result in cancellation request being lost. + + This issue is solved by doing the whole propagation under the lock. + + If we need more concurrency while processing parallel cancellations, we could try + the following modification of the propagation algorithm: + + advance global counter and remember it + for each thread: + scan thread's list of contexts + for each thread: + sync up its local counter only if the global counter has not been changed + + However this version of the algorithm requires more analysis and verification. +*/ -priority_t task_group_context::priority () const { - return static_cast<priority_t>(priority_from_normalized_rep[my_priority]); +void __TBB_EXPORTED_FUNC initialize(d1::task_group_context& ctx) { + task_group_context_impl::initialize(ctx); +} +void __TBB_EXPORTED_FUNC destroy(d1::task_group_context& ctx) { + task_group_context_impl::destroy(ctx); +} +void __TBB_EXPORTED_FUNC reset(d1::task_group_context& ctx) { + task_group_context_impl::reset(ctx); +} +bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context& ctx) { + return task_group_context_impl::cancel_group_execution(ctx); +} +bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context& ctx) { + return task_group_context_impl::is_group_execution_cancelled(ctx); +} +void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context& ctx) { + task_group_context_impl::capture_fp_settings(ctx); } -#endif /* __TBB_TASK_PRIORITY */ - -#endif /* __TBB_TASK_GROUP_CONTEXT */ +} // namespace r1 +} // namespace detail } // namespace tbb + diff --git a/src/tbb/src/tbb/task_stream.h b/src/tbb/src/tbb/task_stream.h index dc1be2cb2..0aaace52b 100644 --- a/src/tbb/src/tbb/task_stream.h +++ b/src/tbb/src/tbb/task_stream.h @@ -1,164 +1,286 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_task_stream_H #define _TBB_task_stream_H -#include "tbb/tbb_stddef.h" +//! This file is a possible future replacement for the task_stream class implemented in +//! task_stream.h. It refactors the code and extends task_stream capabilities by moving lane +//! management during operations on caller side. Despite the fact that new implementation should not +//! affect performance of the original task stream, analysis on this subject was not made at the +//! time it was developed. In addition, it is not clearly seen at the moment that this container +//! would be suitable for critical tasks due to linear time complexity on its operations. + +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/cache_aligned_allocator.h" +#include "oneapi/tbb/mutex.h" + +#include "scheduler_common.h" +#include "misc.h" // for FastRandom + #include <deque> #include <climits> -#include "tbb/atomic.h" // for __TBB_Atomic* -#include "tbb/spin_mutex.h" -#include "tbb/tbb_allocator.h" -#include "scheduler_common.h" -#include "tbb_misc.h" // for FastRandom +#include <atomic> namespace tbb { -namespace internal { +namespace detail { +namespace r1 { //! Essentially, this is just a pair of a queue and a mutex to protect the queue. /** The reason std::pair is not used is that the code would look less clean if field names were replaced with 'first' and 'second'. 
**/ template< typename T, typename mutex_t > -struct queue_and_mutex { - typedef std::deque< T, tbb_allocator<T> > queue_base_t; - - queue_base_t my_queue; - mutex_t my_mutex; +struct alignas(max_nfs_size) queue_and_mutex { + typedef std::deque< T, cache_aligned_allocator<T> > queue_base_t; - queue_and_mutex () : my_queue(), my_mutex() {} - ~queue_and_mutex () {} + queue_base_t my_queue{}; + mutex_t my_mutex{}; }; -const uintptr_t one = 1; +using population_t = uintptr_t; +const population_t one = 1; -inline void set_one_bit( uintptr_t& dest, int pos ) { - __TBB_ASSERT( pos>=0, NULL ); - __TBB_ASSERT( pos<32, NULL ); - __TBB_AtomicOR( &dest, one<<pos ); +inline void set_one_bit( std::atomic<population_t>& dest, int pos ) { + __TBB_ASSERT( pos>=0, nullptr); + __TBB_ASSERT( pos<int(sizeof(population_t)*CHAR_BIT), nullptr); + dest.fetch_or( one<<pos ); } -inline void clear_one_bit( uintptr_t& dest, int pos ) { - __TBB_ASSERT( pos>=0, NULL ); - __TBB_ASSERT( pos<32, NULL ); - __TBB_AtomicAND( &dest, ~(one<<pos) ); +inline void clear_one_bit( std::atomic<population_t>& dest, int pos ) { + __TBB_ASSERT( pos>=0, nullptr); + __TBB_ASSERT( pos<int(sizeof(population_t)*CHAR_BIT), nullptr); + dest.fetch_and( ~(one<<pos) ); } -inline bool is_bit_set( uintptr_t val, int pos ) { - __TBB_ASSERT( pos>=0, NULL ); - __TBB_ASSERT( pos<32, NULL ); +inline bool is_bit_set( population_t val, int pos ) { + __TBB_ASSERT( pos>=0, nullptr); + __TBB_ASSERT( pos<int(sizeof(population_t)*CHAR_BIT), nullptr); return (val & (one<<pos)) != 0; } +struct random_lane_selector : +#if __INTEL_COMPILER == 1110 || __INTEL_COMPILER == 1500 + no_assign +#else + no_copy +#endif +{ + random_lane_selector( FastRandom& random ) : my_random( random ) {} + unsigned operator()( unsigned out_of ) const { + __TBB_ASSERT( ((out_of-1) & out_of) == 0, "number of lanes is not a power of two." ); + return my_random.get() & (out_of-1); + } +private: + FastRandom& my_random; +}; + +struct lane_selector_base : +#if __INTEL_COMPILER == 1110 || __INTEL_COMPILER == 1500 + no_assign +#else + no_copy +#endif +{ + unsigned& my_previous; + lane_selector_base( unsigned& previous ) : my_previous( previous ) {} +}; + +struct subsequent_lane_selector : lane_selector_base { + subsequent_lane_selector( unsigned& previous ) : lane_selector_base( previous ) {} + unsigned operator()( unsigned out_of ) const { + __TBB_ASSERT( ((out_of-1) & out_of) == 0, "number of lanes is not a power of two." ); + return (++my_previous &= out_of-1); + } +}; + +struct preceding_lane_selector : lane_selector_base { + preceding_lane_selector( unsigned& previous ) : lane_selector_base( previous ) {} + unsigned operator()( unsigned out_of ) const { + __TBB_ASSERT( ((out_of-1) & out_of) == 0, "number of lanes is not a power of two." ); + return (--my_previous &= (out_of-1)); + } +}; +
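[Editor's note — illustration, not part of the diff.] The helpers and selectors above rely on two ideas: the lane count is always a power of two, so masking with N-1 replaces a modulo, and a one-bit-per-lane "population" mask lets consumers skip empty lanes after a single atomic load. A compilable sketch of both, where set_lane_bit, next_lane, and the other names merely mirror the code above and are invented for this example:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

using population_t = std::uintptr_t;   // one bit per lane
constexpr population_t kOne = 1;

// Mirrors set_one_bit / clear_one_bit / is_bit_set above.
void set_lane_bit(std::atomic<population_t>& dest, int pos)   { dest.fetch_or(kOne << pos); }
void clear_lane_bit(std::atomic<population_t>& dest, int pos) { dest.fetch_and(~(kOne << pos)); }
bool lane_bit_set(population_t val, int pos) { return (val & (kOne << pos)) != 0; }

int main() {
    const unsigned N = 8;              // lane count: must be a power of two
    std::atomic<population_t> population{0};

    unsigned previous = 0;
    auto next_lane = [&previous, N]() {   // round-robin, like subsequent_lane_selector
        return (++previous &= N - 1);     // masking with N-1 replaces `% N`
    };

    set_lane_bit(population, static_cast<int>(next_lane()));  // lane 1 now "has work"
    set_lane_bit(population, static_cast<int>(next_lane()));  // lane 2 now "has work"

    // A consumer can skip empty lanes after one relaxed load of the mask.
    population_t snapshot = population.load(std::memory_order_relaxed);
    for (int lane = 0; lane < static_cast<int>(N); ++lane)
        if (lane_bit_set(snapshot, lane))
            std::cout << "lane " << lane << " has work\n";
}
```

In the real container below, the mask may be momentarily stale; that appears to be deliberate, since bits are set under the lane mutex after a push and cleared when a pop empties the lane, so a stale bit costs at most a wasted lock attempt, never a lost task.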
+//! Specializes the side of the underlying container from which elements are retrieved. Methods +//! must be called with the corresponding mutex locked. +template<task_stream_accessor_type accessor> +class task_stream_accessor : no_copy { +protected: + using lane_t = queue_and_mutex <d1::task*, mutex>; + d1::task* get_item( lane_t::queue_base_t& queue ) { + d1::task* result = queue.front(); + queue.pop_front(); + return result; + } +}; + +template<> +class task_stream_accessor< back_nonnull_accessor > : no_copy { +protected: + using lane_t = queue_and_mutex <d1::task*, mutex>; + d1::task* get_item( lane_t::queue_base_t& queue ) { + d1::task* result = nullptr; + __TBB_ASSERT(!queue.empty(), nullptr); + // Tasks taken out of the middle by look_specific() leave nullptr placeholders in the queue; skip them. + do { + result = queue.back(); + queue.pop_back(); + } while ( !result && !queue.empty() ); + return result; + } +}; + //! The container for "fairness-oriented" aka "enqueued" tasks. -class task_stream : no_copy { - typedef queue_and_mutex <task*, spin_mutex> lane_t; - uintptr_t population; - padded<lane_t>* lanes; - unsigned N; +template<task_stream_accessor_type accessor> +class task_stream : public task_stream_accessor< accessor > { + using lane_t = typename task_stream_accessor<accessor>::lane_t; + std::atomic<population_t> population{}; + lane_t* lanes{nullptr}; + unsigned N{}; public: - task_stream() : population(), lanes() - { - } + task_stream() = default; void initialize( unsigned n_lanes ) { - const unsigned max_lanes = -#if __TBB_MORE_FIFO_LANES - sizeof(population) * CHAR_BIT; -#else - 32; -#endif - N = n_lanes>=max_lanes ? max_lanes : n_lanes>2 ? 1<<(__TBB_Log2(n_lanes-1)+1) : 2; - __TBB_ASSERT( N==max_lanes || N>=n_lanes && ((N-1)&N)==0, "number of lanes miscalculated"); - __TBB_ASSERT( N <= sizeof(population) * CHAR_BIT, NULL ); - lanes = new padded<lane_t>[N]; - __TBB_ASSERT( !population, NULL ); + const unsigned max_lanes = sizeof(population_t) * CHAR_BIT; + + N = n_lanes >= max_lanes ? max_lanes : n_lanes > 2 ? 1 << (tbb::detail::log2(n_lanes - 1) + 1) : 2; + __TBB_ASSERT( N == max_lanes || (N >= n_lanes && ((N - 1) & N) == 0), "number of lanes miscalculated" ); + __TBB_ASSERT( N <= sizeof(population_t) * CHAR_BIT, nullptr); + lanes = static_cast<lane_t*>(cache_aligned_allocate(sizeof(lane_t) * N)); + for (unsigned i = 0; i < N; ++i) { + new (lanes + i) lane_t; + } + __TBB_ASSERT( !population.load(std::memory_order_relaxed), nullptr); } - ~task_stream() { if (lanes) delete[] lanes; } - - //! Push a task into a lane. - void push( task* source, FastRandom& random ) { - // Lane selection is random. Each thread should keep a separate seed value. - unsigned idx; - for( ; ; ) { - idx = random.get() & (N-1); - spin_mutex::scoped_lock lock; - if( lock.try_acquire(lanes[idx].my_mutex) ) { - lanes[idx].my_queue.push_back(source); - set_one_bit( population, idx ); //TODO: avoid atomic op if the bit is already set - break; + ~task_stream() { + if (lanes) { + for (unsigned i = 0; i < N; ++i) { + lanes[i].~lane_t(); } + cache_aligned_deallocate(lanes); } } - //! Try finding and popping a task. - task* pop( unsigned& last_used_lane ) { - task* result = NULL; - // Lane selection is round-robin. Each thread should keep its last used lane. - unsigned idx = (last_used_lane+1)&(N-1); - for( ; population; idx=(idx+1)&(N-1) ) { - if( is_bit_set( population, idx ) ) { + //! Push a task into a lane. Lane selection is performed by the passed functor. + template<typename lane_selector_t> + void push(d1::task* source, const lane_selector_t& next_lane ) { + bool succeed = false; + unsigned lane = 0; + do { + lane = next_lane( /*out_of=*/N ); + __TBB_ASSERT( lane < N, "Incorrect lane index."
); + } while( ! (succeed = try_push( source, lane )) ); + } + + //! Try finding and popping a task using passed functor for lane selection. Last used lane is + //! updated inside lane selector. + template<typename lane_selector_t> + d1::task* pop( const lane_selector_t& next_lane ) { + d1::task* popped = nullptr; + unsigned lane = 0; + for (atomic_backoff b; !empty() && !popped; b.pause()) { + lane = next_lane( /*out_of=*/N); + __TBB_ASSERT(lane < N, "Incorrect lane index."); + popped = try_pop(lane); + } + return popped; + } + + //! Try finding and popping a related task. + d1::task* pop_specific( unsigned& last_used_lane, isolation_type isolation ) { + d1::task* result = nullptr; + // Lane selection is round-robin in backward direction. + unsigned idx = last_used_lane & (N-1); + do { + if( is_bit_set( population.load(std::memory_order_relaxed), idx ) ) { lane_t& lane = lanes[idx]; - spin_mutex::scoped_lock lock; + mutex::scoped_lock lock; if( lock.try_acquire(lane.my_mutex) && !lane.my_queue.empty() ) { - result = lane.my_queue.front(); - lane.my_queue.pop_front(); + result = look_specific( lane.my_queue, isolation ); if( lane.my_queue.empty() ) clear_one_bit( population, idx ); - break; + if( result ) + break; } } - } + idx=(idx-1)&(N-1); + } while( !empty() && idx != last_used_lane ); last_used_lane = idx; return result; } //! Checks existence of a task. bool empty() { - return !population; + return !population.load(std::memory_order_relaxed); } - //! Destroys all remaining tasks in every lane. Returns the number of destroyed tasks. - /** Tasks are not executed, because it would potentially create more tasks at a late stage. - The scheduler is really expected to execute all tasks before task_stream destruction. */ - intptr_t drain() { - intptr_t result = 0; - for(unsigned i=0; i<N; ++i) { - lane_t& lane = lanes[i]; - spin_mutex::scoped_lock lock(lane.my_mutex); - for(lane_t::queue_base_t::iterator it=lane.my_queue.begin(); - it!=lane.my_queue.end(); ++it, ++result) - { - task* t = *it; - tbb::task::destroy(*t); - } - lane.my_queue.clear(); - clear_one_bit( population, i ); +private: + //! Returns true on successful push, otherwise - false. + bool try_push(d1::task* source, unsigned lane_idx ) { + mutex::scoped_lock lock; + if( lock.try_acquire( lanes[lane_idx].my_mutex ) ) { + lanes[lane_idx].my_queue.push_back( source ); + set_one_bit( population, lane_idx ); // TODO: avoid atomic op if the bit is already set + return true; + } + return false; + } + + //! Returns pointer to task on successful pop, otherwise - nullptr. + d1::task* try_pop( unsigned lane_idx ) { + if( !is_bit_set( population.load(std::memory_order_relaxed), lane_idx ) ) + return nullptr; + d1::task* result = nullptr; + lane_t& lane = lanes[lane_idx]; + mutex::scoped_lock lock; + if( lock.try_acquire( lane.my_mutex ) && !lane.my_queue.empty() ) { + result = this->get_item( lane.my_queue ); + if( lane.my_queue.empty() ) + clear_one_bit( population, lane_idx ); } return result; } + + // TODO: unify '*_specific' logic with 'pop' methods above + d1::task* look_specific( typename lane_t::queue_base_t& queue, isolation_type isolation ) { + __TBB_ASSERT( !queue.empty(), nullptr); + // TODO: add a worst-case performance test and consider an alternative container with better + // performance for isolation search. + typename lane_t::queue_base_t::iterator curr = queue.end(); + do { + // TODO: consider logic from get_task to simplify the code. 
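+ // [Editor's note, not in the original diff] The scan below walks the deque from the back.
+ // A matching task found in the interior is replaced with nullptr instead of being erased,
+ // which avoids an O(n) erase from the middle of the deque; back_nonnull_accessor::get_item()
+ // above skips those placeholders when it later pops from the back.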
+ d1::task* result = *--curr; + if( result && task_accessor::isolation(*result) == isolation ) { + if( queue.end() - curr == 1 ) + queue.pop_back(); // a little of housekeeping along the way + else + *curr = nullptr; // grabbing task with the same isolation + // TODO: move one of the container's ends instead if the task has been found there + return result; + } + } while( curr != queue.begin() ); + return nullptr; + } + }; // task_stream -} // namespace internal +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_task_stream_H */ diff --git a/src/tbb/src/tbb/tbb.rc b/src/tbb/src/tbb/tbb.rc new file mode 100644 index 000000000..57e9d391d --- /dev/null +++ b/src/tbb/src/tbb/tbb.rc @@ -0,0 +1,74 @@ +// Copyright (c) 2005-2024 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +///////////////////////////////////////////////////////////////////////////// +// +// Includes +// +#include <winresrc.h> +#include "../../include/oneapi/tbb/version.h" + +///////////////////////////////////////////////////////////////////////////// +// Neutral resources + +#ifdef _WIN32 +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +#pragma code_page(1252) +#endif //_WIN32 + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// +#define TBB_VERNUMBERS TBB_VERSION_MAJOR,TBB_VERSION_MINOR,TBB_VERSION_PATCH +#define TBB_VERSION TBB_VERSION_STRING + +VS_VERSION_INFO VERSIONINFO + FILEVERSION TBB_VERNUMBERS + PRODUCTVERSION TBB_VERNUMBERS + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004b0" + BEGIN + VALUE "CompanyName", "Intel Corporation\0" + VALUE "FileDescription", "oneAPI Threading Building Blocks (oneTBB) library\0" + VALUE "FileVersion", TBB_VERSION "\0" + VALUE "LegalCopyright", "Copyright 2005-2024 Intel Corporation. All Rights Reserved.\0" + VALUE "LegalTrademarks", "\0" +#ifndef TBB_USE_DEBUG + VALUE "OriginalFilename", "tbb12.dll\0" +#else + VALUE "OriginalFilename", "tbb12_debug.dll\0" +#endif + VALUE "ProductName", "oneAPI Threading Building Blocks (oneTBB)\0" + VALUE "ProductVersion", TBB_VERSION "\0" + VALUE "PrivateBuild", "\0" + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0, 1200 + END +END diff --git a/src/tbb/src/tbb/tbb_assert_impl.h b/src/tbb/src/tbb/tbb_assert_impl.h deleted file mode 100644 index f026c6b6a..000000000 --- a/src/tbb/src/tbb/tbb_assert_impl.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// IMPORTANT: To use assertion handling in TBB, exactly one of the TBB source files -// should #include tbb_assert_impl.h thus instantiating assertion handling routines. -// The intent of putting it to a separate file is to allow some tests to use it -// as well in order to avoid dependency on the library. - -// include headers for required function declarations -#include <cstdlib> -#include <stdio.h> -#include <string.h> -#include <stdarg.h> -#if _MSC_VER -#include <crtdbg.h> -#endif - -#if _MSC_VER >= 1400 -#define __TBB_EXPORTED_FUNC __cdecl -#else -#define __TBB_EXPORTED_FUNC -#endif - -using namespace std; - -#if __TBBMALLOC_BUILD -namespace rml { namespace internal { -#else -namespace tbb { -#endif - //! Type for an assertion handler - typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment ); - - static assertion_handler_type assertion_handler; - - assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ) { - assertion_handler_type old_handler = assertion_handler; - assertion_handler = new_handler; - return old_handler; - } - - void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ) { - if( assertion_handler_type a = assertion_handler ) { - (*a)(filename,line,expression,comment); - } else { - static bool already_failed; - if( !already_failed ) { - already_failed = true; - fprintf( stderr, "Assertion %s failed on line %d of file %s\n", - expression, line, filename ); - if( comment ) - fprintf( stderr, "Detailed description: %s\n", comment ); -#if _MSC_VER && _DEBUG - if(1 == _CrtDbgReport(_CRT_ASSERT, filename, line, "tbb_debug.dll", "%s\r\n%s", expression, comment?comment:"")) - _CrtDbgBreak(); -#else - fflush(stderr); - abort(); -#endif - } - } - } - -#if defined(_MSC_VER)&&_MSC_VER<1400 -# define vsnprintf _vsnprintf -#endif - -#if !__TBBMALLOC_BUILD - namespace internal { - //! Report a runtime warning. - void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... 
) - { - char str[1024]; memset(str, 0, 1024); - va_list args; va_start(args, format); - vsnprintf( str, 1024-1, format, args); - va_end(args); - fprintf( stderr, "TBB Warning: %s\n", str); - } - } // namespace internal -#endif - -#if __TBBMALLOC_BUILD -}} // namespaces rml::internal -#else -} // namespace tbb -#endif diff --git a/src/tbb/src/tbb/tbb_main.cpp b/src/tbb/src/tbb/tbb_main.cpp deleted file mode 100644 index 3442425a4..000000000 --- a/src/tbb/src/tbb/tbb_main.cpp +++ /dev/null @@ -1,403 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#include "tbb_main.h" -#include "governor.h" -#include "market.h" -#include "tbb_misc.h" -#include "itt_notify.h" - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// Begin shared data layout. -// The following global data items are mostly read-only after initialization. -//------------------------------------------------------------------------ - -//! Padding in order to prevent false sharing. -static const char _pad[NFS_MaxLineSize - sizeof(int)] = {}; - -//------------------------------------------------------------------------ -// governor data -basic_tls<generic_scheduler*> governor::theTLS; -unsigned governor::DefaultNumberOfThreads; -rml::tbb_factory governor::theRMLServerFactory; -bool governor::UsePrivateRML; -const task_scheduler_init *governor::BlockingTSI; -#if TBB_USE_ASSERT -bool governor::IsBlockingTerminationInProgress; -#endif -bool governor::is_speculation_enabled; - -//------------------------------------------------------------------------ -// market data -market* market::theMarket; -market::global_market_mutex_type market::theMarketMutex; - -//------------------------------------------------------------------------ -// One time initialization data - -//! Counter of references to global shared resources such as TLS. -atomic<int> __TBB_InitOnce::count; - -__TBB_atomic_flag __TBB_InitOnce::InitializationLock; - -//! Flag that is set to true after one-time initializations are done. 
-bool __TBB_InitOnce::InitializationDone; - -#if DO_ITT_NOTIFY - static bool ITT_Present; - static bool ITT_InitializationDone; -#endif - -#if !(_WIN32||_WIN64) || __TBB_SOURCE_DIRECTLY_INCLUDED - static __TBB_InitOnce __TBB_InitOnceHiddenInstance; -#endif - -//------------------------------------------------------------------------ -// generic_scheduler data - -//! Pointer to the scheduler factory function -generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index ); - -#if __TBB_OLD_PRIMES_RNG -//! Table of primes used by fast random-number generator (FastRandom). -/** Also serves to keep anything else from being placed in the same - cache line as the global data items preceding it. */ -static const unsigned Primes[] = { - 0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, - 0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b, - 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231, - 0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, - 0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801, - 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3, - 0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, - 0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b, - 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9, - 0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, - 0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7, - 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7, - 0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, - 0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b, - 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3, - 0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f -}; - -//------------------------------------------------------------------------ -// End of shared data layout -//------------------------------------------------------------------------ - -//------------------------------------------------------------------------ -// Shared data accessors -//------------------------------------------------------------------------ - -unsigned GetPrime ( unsigned seed ) { - return Primes[seed%(sizeof(Primes)/sizeof(Primes[0]))]; -} -#endif //__TBB_OLD_PRIMES_RNG - -//------------------------------------------------------------------------ -// __TBB_InitOnce -//------------------------------------------------------------------------ - -void __TBB_InitOnce::add_ref() { - if( ++count==1 ) - governor::acquire_resources(); -} - -void __TBB_InitOnce::remove_ref() { - int k = --count; - __TBB_ASSERT(k>=0,"removed __TBB_InitOnce ref that was not added?"); - if( k==0 ) { - governor::release_resources(); - ITT_FINI_ITTLIB(); - } -} - -//------------------------------------------------------------------------ -// One-time Initializations -//------------------------------------------------------------------------ - -//! Defined in cache_aligned_allocator.cpp -void initialize_cache_aligned_allocator(); - -//! Defined in scheduler.cpp -void Scheduler_OneTimeInitialization ( bool itt_present ); - -#if DO_ITT_NOTIFY - -#if __TBB_ITT_STRUCTURE_API - -static __itt_domain *fgt_domain = NULL; - -struct resource_string { - const char *str; - __itt_string_handle *itt_str_handle; -}; - -// -// populate resource strings -// -#define TBB_STRING_RESOURCE( index_name, str ) { str, NULL }, -static resource_string strings_for_itt[] = { - #include "tbb/internal/_tbb_strings.h" - { "num_resource_strings", NULL } -}; -#undef TBB_STRING_RESOURCE - -static __itt_string_handle *ITT_get_string_handle(int idx) { - __TBB_ASSERT(idx >= 0, NULL); - return idx < NUM_STRINGS ? 
strings_for_itt[idx].itt_str_handle : NULL; -} - -static void ITT_init_domains() { - fgt_domain = __itt_domain_create( _T("tbb.flow") ); - fgt_domain->flags = 1; -} - -static void ITT_init_strings() { - for ( int i = 0; i < NUM_STRINGS; ++i ) { -#if _WIN32||_WIN64 - strings_for_itt[i].itt_str_handle = __itt_string_handle_createA( strings_for_itt[i].str ); -#else - strings_for_itt[i].itt_str_handle = __itt_string_handle_create( strings_for_itt[i].str ); -#endif - } -} - -static void ITT_init() { - ITT_init_domains(); - ITT_init_strings(); -} - -#endif // __TBB_ITT_STRUCTURE_API - -/** Thread-unsafe lazy one-time initialization of tools interop. - Used by both dummy handlers and general TBB one-time initialization routine. **/ -void ITT_DoUnsafeOneTimeInitialization () { - if ( !ITT_InitializationDone ) { - ITT_Present = (__TBB_load_ittnotify()!=0); -#if __TBB_ITT_STRUCTURE_API - if (ITT_Present) ITT_init(); -#endif - ITT_InitializationDone = true; - ITT_SYNC_CREATE(&market::theMarketMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization); - } -} - -/** Thread-safe lazy one-time initialization of tools interop. - Used by dummy handlers only. **/ -extern "C" -void ITT_DoOneTimeInitialization() { - __TBB_InitOnce::lock(); - ITT_DoUnsafeOneTimeInitialization(); - __TBB_InitOnce::unlock(); -} -#endif /* DO_ITT_NOTIFY */ - -//! Performs thread-safe lazy one-time general TBB initialization. -void DoOneTimeInitializations() { - suppress_unused_warning(_pad); - __TBB_InitOnce::lock(); - // No fence required for load of InitializationDone, because we are inside a critical section. - if( !__TBB_InitOnce::InitializationDone ) { - __TBB_InitOnce::add_ref(); - if( GetBoolEnvironmentVariable("TBB_VERSION") ) - PrintVersion(); - bool itt_present = false; -#if DO_ITT_NOTIFY - ITT_DoUnsafeOneTimeInitialization(); - itt_present = ITT_Present; -#endif /* DO_ITT_NOTIFY */ - initialize_cache_aligned_allocator(); - governor::initialize_rml_factory(); - Scheduler_OneTimeInitialization( itt_present ); - // Force processor groups support detection - governor::default_num_threads(); - // Dump version data - governor::print_version_info(); - PrintExtraVersionInfo( "Tools support", itt_present ? "enabled" : "disabled" ); - __TBB_InitOnce::InitializationDone = true; - } - __TBB_InitOnce::unlock(); -} - -#if (_WIN32||_WIN64) && !__TBB_SOURCE_DIRECTLY_INCLUDED -//! Windows "DllMain" that handles startup and shutdown of dynamic library. -extern "C" bool WINAPI DllMain( HANDLE /*hinstDLL*/, DWORD reason, LPVOID /*lpvReserved*/ ) { - switch( reason ) { - case DLL_PROCESS_ATTACH: - __TBB_InitOnce::add_ref(); - break; - case DLL_PROCESS_DETACH: - __TBB_InitOnce::remove_ref(); - // It is assumed that InitializationDone is not set after DLL_PROCESS_DETACH, - // and thus no race on InitializationDone is possible. - if( __TBB_InitOnce::initialization_done() ) { - // Remove reference that we added in DoOneTimeInitializations. 
- __TBB_InitOnce::remove_ref(); - } - break; - case DLL_THREAD_DETACH: - governor::terminate_auto_initialized_scheduler(); - break; - } - return true; -} -#endif /* (_WIN32||_WIN64) && !__TBB_SOURCE_DIRECTLY_INCLUDED */ - -void itt_store_pointer_with_release_v3( void* dst, void* src ) { - ITT_NOTIFY(sync_releasing, dst); - __TBB_store_with_release(*static_cast<void**>(dst),src); -} - -void* itt_load_pointer_with_acquire_v3( const void* src ) { - void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src)); - ITT_NOTIFY(sync_acquired, const_cast<void*>(src)); - return result; -} - -#if DO_ITT_NOTIFY -void call_itt_notify_v5(int t, void *ptr) { - switch (t) { - case 0: ITT_NOTIFY(sync_prepare, ptr); break; - case 1: ITT_NOTIFY(sync_cancel, ptr); break; - case 2: ITT_NOTIFY(sync_acquired, ptr); break; - case 3: ITT_NOTIFY(sync_releasing, ptr); break; - } -} -#else -void call_itt_notify_v5(int /*t*/, void* /*ptr*/) {} -#endif - -#if __TBB_ITT_STRUCTURE_API - -#if DO_ITT_NOTIFY - -const __itt_id itt_null_id = {0, 0, 0}; - -static inline __itt_domain* get_itt_domain( itt_domain_enum idx ) { - return ( idx == ITT_DOMAIN_FLOW ) ? fgt_domain : NULL; -} - -static inline void itt_id_make(__itt_id *id, void* addr, unsigned long long extra) { - *id = __itt_id_make(addr, extra); -} - -static inline void itt_id_create(const __itt_domain *domain, __itt_id id) { - ITTNOTIFY_VOID_D1(id_create, domain, id); -} - -void itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra, - void *parent, unsigned long long parent_extra, string_index name_index ) { - if ( __itt_domain *d = get_itt_domain( domain ) ) { - __itt_id group_id = itt_null_id; - __itt_id parent_id = itt_null_id; - itt_id_make( &group_id, group, group_extra ); - itt_id_create( d, group_id ); - if ( parent ) { - itt_id_make( &parent_id, parent, parent_extra ); - } - __itt_string_handle *n = ITT_get_string_handle(name_index); - ITTNOTIFY_VOID_D3(task_group, d, group_id, parent_id, n); - } -} - -void itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra, - string_index key, const char *value ) { - if ( __itt_domain *d = get_itt_domain( domain ) ) { - __itt_id id = itt_null_id; - itt_id_make( &id, addr, addr_extra ); - __itt_string_handle *k = ITT_get_string_handle(key); - size_t value_length = strlen( value ); -#if _WIN32||_WIN64 - ITTNOTIFY_VOID_D4(metadata_str_addA, d, id, k, value, value_length); -#else - ITTNOTIFY_VOID_D4(metadata_str_add, d, id, k, value, value_length); -#endif - } -} - -void itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, - itt_relation relation, void *addr1, unsigned long long addr1_extra ) { - if ( __itt_domain *d = get_itt_domain( domain ) ) { - __itt_id id0 = itt_null_id; - __itt_id id1 = itt_null_id; - itt_id_make( &id0, addr0, addr0_extra ); - itt_id_make( &id1, addr1, addr1_extra ); - ITTNOTIFY_VOID_D3(relation_add, d, id0, (__itt_relation)relation, id1); - } -} - -void itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra, - void *parent, unsigned long long parent_extra, string_index /* name_index */ ) { - if ( __itt_domain *d = get_itt_domain( domain ) ) { - __itt_id task_id = itt_null_id; - __itt_id parent_id = itt_null_id; - itt_id_make( &task_id, task, task_extra ); - if ( parent ) { - itt_id_make( &parent_id, parent, parent_extra ); - } - ITTNOTIFY_VOID_D3(task_begin, d, task_id, parent_id, NULL ); - } -} - -void itt_task_end_v7( itt_domain_enum domain ) { - if ( 
__itt_domain *d = get_itt_domain( domain ) ) { - ITTNOTIFY_VOID_D0(task_end, d); - } -} - -#else // DO_ITT_NOTIFY - -void itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra, - void *parent, unsigned long long parent_extra, string_index name_index ) { } - -void itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra, - string_index key, const char *value ) { } - -void itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, - itt_relation relation, void *addr1, unsigned long long addr1_extra ) { } - -void itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra, - void * /*parent*/, unsigned long long /* parent_extra */, string_index /* name_index */ ) { } - -void itt_task_end_v7( itt_domain_enum domain ) { } - -#endif // DO_ITT_NOTIFY - -#endif // __TBB_ITT_STRUCTURE_API - -void* itt_load_pointer_v3( const void* src ) { - //TODO: replace this with __TBB_load_relaxed - void* result = *static_cast<void*const*>(src); - return result; -} - -void itt_set_sync_name_v3( void* obj, const tchar* name) { - ITT_SYNC_RENAME(obj, name); - suppress_unused_warning(obj && name); -} - - -} // namespace internal -} // namespace tbb diff --git a/src/tbb/src/tbb/tbb_main.h b/src/tbb/src/tbb/tbb_main.h deleted file mode 100644 index 30e1bd813..000000000 --- a/src/tbb/src/tbb/tbb_main.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _TBB_tbb_main_H -#define _TBB_tbb_main_H - -#include "tbb/atomic.h" - -namespace tbb { - -namespace internal { - -void DoOneTimeInitializations (); - -//------------------------------------------------------------------------ -// __TBB_InitOnce -//------------------------------------------------------------------------ - -//! Class that supports TBB initialization. -/** It handles acquisition and release of global resources (e.g. TLS) during startup and shutdown, - as well as synchronization for DoOneTimeInitializations. */ -class __TBB_InitOnce { - friend void DoOneTimeInitializations(); - friend void ITT_DoUnsafeOneTimeInitialization (); - - static atomic<int> count; - - //! Platform specific code to acquire resources. 
- static void acquire_resources(); - - //! Platform specific code to release resources. - static void release_resources(); - - //! Specifies if the one-time initializations has been done. - static bool InitializationDone; - - //! Global initialization lock - /** Scenarios are possible when tools interop has to be initialized before the - TBB itself. This imposes a requirement that the global initialization lock - has to support valid static initialization, and does not issue any tool - notifications in any build mode. **/ - static __TBB_atomic_flag InitializationLock; - -public: - static void lock() { __TBB_LockByte( InitializationLock ); } - - static void unlock() { __TBB_UnlockByte( InitializationLock ); } - - static bool initialization_done() { return __TBB_load_with_acquire(InitializationDone); } - - //! Add initial reference to resources. - /** We assume that dynamic loading of the library prevents any other threads - from entering the library until this constructor has finished running. **/ - __TBB_InitOnce() { add_ref(); } - - //! Remove the initial reference to resources. - /** This is not necessarily the last reference if other threads are still running. **/ - ~__TBB_InitOnce() { - remove_ref(); - // We assume that InitializationDone is not set after file-scope destructors - // start running, and thus no race on InitializationDone is possible. - if( initialization_done() ) { - // Remove an extra reference that was added in DoOneTimeInitializations. - remove_ref(); - } - } - //! Add reference to resources. If first reference added, acquire the resources. - static void add_ref(); - - //! Remove reference to resources. If last reference removed, release the resources. - static void remove_ref(); -}; // class __TBB_InitOnce - - -} // namespace internal - -} // namespace tbb - -#endif /* _TBB_tbb_main_H */ diff --git a/src/tbb/src/tbb/tbb_misc.cpp b/src/tbb/src/tbb/tbb_misc.cpp deleted file mode 100644 index 02f14a66f..000000000 --- a/src/tbb/src/tbb/tbb_misc.cpp +++ /dev/null @@ -1,283 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Source file for miscellaneous entities that are infrequently referenced by -// an executing program. 
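
[Editor's note] The __TBB_InitOnce class being removed above pairs one-time initialization with reference counting: the first add_ref() brings process-wide resources up, the last remove_ref() tears them down, and TBB relies on DLL load/unload serialization to make the 0-to-1 transition safe. A self-contained sketch of just the counting scheme, with stubbed resource hooks (illustrative names, not the library's API):

#include <atomic>
#include <cassert>
#include <cstdio>

static std::atomic<int> ref_count(0);

static void acquire_resources() { std::puts("resources up"); }   // stub
static void release_resources() { std::puts("resources down"); } // stub

void add_ref() {
    if (++ref_count == 1)   // first reference brings resources up
        acquire_resources();
}

void remove_ref() {
    int k = --ref_count;
    assert(k >= 0 && "removed a reference that was never added");
    if (k == 0)             // last reference tears them down
        release_resources();
}
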
- -#include "tbb/tbb_stddef.h" -#include "tbb_assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here. -#include "tbb/tbb_exception.h" -#include "tbb/tbb_machine.h" -#include "tbb_misc.h" -#include <cstdio> -#include <cstdlib> -#include <stdexcept> - -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" -#endif - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -using namespace std; - -namespace tbb { - -const char* bad_last_alloc::what() const throw() { return "bad allocation in previous or concurrent attempt"; } -const char* improper_lock::what() const throw() { return "attempted recursive lock on critical section or non-recursive mutex"; } -const char* user_abort::what() const throw() { return "User-initiated abort has terminated this operation"; } -const char* invalid_multiple_scheduling::what() const throw() { return "The same task_handle object cannot be executed more than once"; } -const char* missing_wait::what() const throw() { return "wait() was not called on the structured_task_group"; } - -namespace internal { - -#if TBB_USE_EXCEPTIONS - #define DO_THROW(exc, init_args) throw exc init_args; -#else /* !TBB_USE_EXCEPTIONS */ - #define PRINT_ERROR_AND_ABORT(exc_name, msg) \ - fprintf (stderr, "Exception %s with message %s would've been thrown, " \ - "if exception handling were not disabled. Aborting.\n", exc_name, msg); \ - fflush(stderr); \ - abort(); - #define DO_THROW(exc, init_args) PRINT_ERROR_AND_ABORT(#exc, #init_args) -#endif /* !TBB_USE_EXCEPTIONS */ - - -/* The "what" should be fairly short, not more than about 128 characters. - Because we control all the call sites to handle_perror, it is pointless - to bullet-proof it for very long strings. - - Design note: ADR put this routine off to the side in tbb_misc.cpp instead of - Task.cpp because the throw generates a pathetic lot of code, and ADR wanted - this large chunk of code to be placed on a cold page. */ -void handle_perror( int error_code, const char* what ) { - char buf[256]; -#if _MSC_VER - #define snprintf _snprintf -#endif - int written = snprintf(buf, sizeof(buf), "%s: %s", what, strerror( error_code )); - // On overflow, the returned value exceeds sizeof(buf) (for GLIBC) or is negative (for MSVC). - __TBB_ASSERT_EX( written>0 && written<(int)sizeof(buf), "Error description is too long" ); - // Ensure that buffer ends in terminator. 
- buf[sizeof(buf)-1] = 0; -#if TBB_USE_EXCEPTIONS - throw runtime_error(buf); -#else - PRINT_ERROR_AND_ABORT( "runtime_error", buf); -#endif /* !TBB_USE_EXCEPTIONS */ -} - -#if _WIN32||_WIN64 -void handle_win_error( int error_code ) { - char buf[512]; -#if !__TBB_WIN8UI_SUPPORT - FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, error_code, 0, buf, sizeof(buf), NULL ); -#else -//TODO: update with right replacement for FormatMessageA - sprintf_s((char*)&buf, 512, "error code %d", error_code); -#endif -#if TBB_USE_EXCEPTIONS - throw runtime_error(buf); -#else - PRINT_ERROR_AND_ABORT( "runtime_error", buf); -#endif /* !TBB_USE_EXCEPTIONS */ -} -#endif // _WIN32||_WIN64 - -void throw_bad_last_alloc_exception_v4() { - throw_exception_v4(eid_bad_last_alloc); -} - -void throw_exception_v4 ( exception_id eid ) { - __TBB_ASSERT ( eid > 0 && eid < eid_max, "Unknown exception ID" ); - switch ( eid ) { - case eid_bad_alloc: DO_THROW( bad_alloc, () ); - case eid_bad_last_alloc: DO_THROW( bad_last_alloc, () ); - case eid_nonpositive_step: DO_THROW( invalid_argument, ("Step must be positive") ); - case eid_out_of_range: DO_THROW( out_of_range, ("Index out of requested size range") ); - case eid_segment_range_error: DO_THROW( range_error, ("Index out of allocated segment slots") ); - case eid_index_range_error: DO_THROW( range_error, ("Index is not allocated") ); - case eid_missing_wait: DO_THROW( missing_wait, () ); - case eid_invalid_multiple_scheduling: DO_THROW( invalid_multiple_scheduling, () ); - case eid_improper_lock: DO_THROW( improper_lock, () ); - case eid_possible_deadlock: DO_THROW( runtime_error, ("Resource deadlock would occur") ); - case eid_operation_not_permitted: DO_THROW( runtime_error, ("Operation not permitted") ); - case eid_condvar_wait_failed: DO_THROW( runtime_error, ("Wait on condition variable failed") ); - case eid_invalid_load_factor: DO_THROW( out_of_range, ("Invalid hash load factor") ); - case eid_reserved: DO_THROW( out_of_range, ("[backward compatibility] Invalid number of buckets") ); - case eid_invalid_swap: DO_THROW( invalid_argument, ("swap() is invalid on non-equal allocators") ); - case eid_reservation_length_error: DO_THROW( length_error, ("reservation size exceeds permitted max size") ); - case eid_invalid_key: DO_THROW( out_of_range, ("invalid key") ); - case eid_user_abort: DO_THROW( user_abort, () ); - case eid_bad_tagged_msg_cast: DO_THROW( runtime_error, ("Illegal tagged_msg cast") ); -#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE - case eid_blocking_sch_init: DO_THROW( runtime_error, ("Nesting of blocking termination is impossible") ); -#endif - default: break; - } -#if !TBB_USE_EXCEPTIONS && __APPLE__ - out_of_range e1(""); - length_error e2(""); - range_error e3(""); - invalid_argument e4(""); -#endif /* !TBB_USE_EXCEPTIONS && __APPLE__ */ -} - -#if _XBOX || __TBB_WIN8UI_SUPPORT -bool GetBoolEnvironmentVariable( const char * ) { return false;} -#else /* _XBOX || __TBB_WIN8UI_SUPPORT */ -bool GetBoolEnvironmentVariable( const char * name ) { - if( const char* s = getenv(name) ) - return strcmp(s,"0") != 0; - return false; -} -#endif /* _XBOX || __TBB_WIN8UI_SUPPORT */ - -#include "tbb_version.h" - -/** The leading "\0" is here so that applying "strings" to the binary delivers a clean result. 
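
[Editor's note] A hedged sketch of the handle_perror() idea just deleted above: format errno into a bounded buffer and surface it as a std::runtime_error. With standard C99/C++11 snprintf the explicit terminator is redundant, but it mirrors the original's caution around MSVC's _snprintf. Function name is illustrative:

#include <cstdio>
#include <cstring>
#include <stdexcept>

void throw_perror(int error_code, const char* what) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), "%s: %s", what, std::strerror(error_code));
    buf[sizeof(buf) - 1] = '\0';  // belt-and-braces terminator
    throw std::runtime_error(buf);
}
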
*/ -static const char VersionString[] = "\0" TBB_VERSION_STRINGS; - -static bool PrintVersionFlag = false; - -void PrintVersion() { - PrintVersionFlag = true; - fputs(VersionString+1,stderr); -} - -void PrintExtraVersionInfo( const char* category, const char* format, ... ) { - if( PrintVersionFlag ) { - char str[1024]; memset(str, 0, 1024); - va_list args; va_start(args, format); - // Note: correct vsnprintf definition obtained from tbb_assert_impl.h - vsnprintf( str, 1024-1, format, args); - va_end(args); - fprintf(stderr, "TBB: %s\t%s\n", category, str ); - } -} - -void PrintRMLVersionInfo( void* arg, const char* server_info ) { - PrintExtraVersionInfo( server_info, (const char *)arg ); -} - -//! check for transaction support. -#if _MSC_VER -#include <intrin.h> // for __cpuid -#endif -bool cpu_has_speculation() { -#if __TBB_TSX_AVAILABLE -#if (__INTEL_COMPILER || __GNUC__ || _MSC_VER || __SUNPRO_CC) - bool result = false; - const int hle_ebx_mask = 1<<4; -#if _MSC_VER - int info[4] = {0,0,0,0}; - const int reg_ebx = 1; - __cpuidex(info, 7, 0); - result = (info[reg_ebx] & hle_ebx_mask)!=0; -#elif __GNUC__ || __SUNPRO_CC - int32_t reg_ebx = 0; - int32_t reg_eax = 7; - int32_t reg_ecx = 0; - __asm__ __volatile__ ( "movl %%ebx, %%esi\n" - "cpuid\n" - "movl %%ebx, %0\n" - "movl %%esi, %%ebx\n" - : "=a"(reg_ebx) : "0" (reg_eax), "c" (reg_ecx) : "esi", -#if __TBB_x86_64 - "ebx", -#endif - "edx" - ); - result = (reg_ebx & hle_ebx_mask)!=0 ; -#endif - return result; -#else - #error Speculation detection not enabled for compiler -#endif /* __INTEL_COMPILER || __GNUC__ || _MSC_VER */ -#else /* __TBB_TSX_AVAILABLE */ - return false; -#endif /* __TBB_TSX_AVAILABLE */ -} - -} // namespace internal - -extern "C" int TBB_runtime_interface_version() { - return TBB_INTERFACE_VERSION; -} - -} // namespace tbb - -#if !__TBB_RML_STATIC -#if __TBB_x86_32 - -#include "tbb/atomic.h" - -// in MSVC environment, int64_t defined in tbb::internal namespace only (see tbb_stddef.h) -#if _MSC_VER -using tbb::internal::int64_t; -#endif - -//! Warn about 8-byte store that crosses a cache line. -extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ) { - // Report run-time warning unless we have already recently reported warning for that address. - const unsigned n = 4; - static tbb::atomic<void*> cache[n]; - static tbb::atomic<unsigned> k; - for( unsigned i=0; i<n; ++i ) - if( ptr==cache[i] ) - goto done; - cache[(k++)%n] = const_cast<void*>(ptr); - tbb::internal::runtime_warning( "atomic store on misaligned 8-byte location %p is slow", ptr ); -done:; -} - -//! Handle 8-byte store that crosses a cache line. -extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ) { - for( tbb::internal::atomic_backoff b;;b.pause() ) { - int64_t tmp = *(int64_t*)ptr; - if( __TBB_machine_cmpswp8(ptr,value,tmp)==tmp ) - break; - } -} - -#endif /* __TBB_x86_32 */ -#endif /* !__TBB_RML_STATIC */ - -#if __TBB_ipf -/* It was found that on IA-64 architecture inlining of __TBB_machine_lockbyte leads - to serious performance regression with ICC. So keep it out-of-line. - */ -extern "C" intptr_t __TBB_machine_lockbyte( volatile unsigned char& flag ) { - tbb::internal::atomic_backoff backoff; - while( !__TBB_TryLockByte(flag) ) backoff.pause(); - return 0; -} -#endif diff --git a/src/tbb/src/tbb/tbb_misc.h b/src/tbb/src/tbb/tbb_misc.h deleted file mode 100644 index dcf8109e0..000000000 --- a/src/tbb/src/tbb/tbb_misc.h +++ /dev/null @@ -1,269 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. 
All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef _TBB_tbb_misc_H -#define _TBB_tbb_misc_H - -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_machine.h" -#include "tbb/atomic.h" // For atomic_xxx definitions - -#if __linux__ || __FreeBSD__ -#include <sys/param.h> // __FreeBSD_version -#if __FreeBSD_version >= 701000 -#include <sys/cpuset.h> -#endif -#endif - -// Does the operating system have a system call to pin a thread to a set of OS processors? -#define __TBB_OS_AFFINITY_SYSCALL_PRESENT ((__linux__ && !__ANDROID__) || (__FreeBSD_version >= 701000)) -// On IBM* Blue Gene* CNK nodes, the affinity API has restrictions that prevent its usability for TBB, -// and also sysconf(_SC_NPROCESSORS_ONLN) already takes process affinity into account. -#define __TBB_USE_OS_AFFINITY_SYSCALL (__TBB_OS_AFFINITY_SYSCALL_PRESENT && !__bg__) - -namespace tbb { -namespace internal { - -const size_t MByte = 1024*1024; - -#if __TBB_WIN8UI_SUPPORT -// In Win8UI mode, TBB uses a thread creation API that does not allow to specify the stack size. -// Still, the thread stack size value, either explicit or default, is used by the scheduler. -// So here we set the default value to match the platform's default of 1MB. -const size_t ThreadStackSize = 1*MByte; -#else -const size_t ThreadStackSize = (sizeof(uintptr_t) <= 4 ? 2 : 4 )*MByte; -#endif - -#ifndef __TBB_HardwareConcurrency - -//! Returns maximal parallelism level supported by the current OS configuration. -int AvailableHwConcurrency(); - -#else - -inline int AvailableHwConcurrency() { - int n = __TBB_HardwareConcurrency(); - return n > 0 ? n : 1; // Fail safety strap -} -#endif /* __TBB_HardwareConcurrency */ - - -#if _WIN32||_WIN64 - -//! Returns number of processor groups in the current OS configuration. -/** AvailableHwConcurrency must be called at least once before calling this method. **/ -int NumberOfProcessorGroups(); - -//! Retrieves index of processor group containing processor with the given index -int FindProcessorGroupIndex ( int processorIndex ); - -//! Affinitizes the thread to the specified processor group -void MoveThreadIntoProcessorGroup( void* hThread, int groupIndex ); - -#endif /* _WIN32||_WIN64 */ - -//! 
Throws std::runtime_error with what() returning error_code description prefixed with aux_info -void handle_win_error( int error_code ); - -//! True if environment variable with given name is set and not 0; otherwise false. -bool GetBoolEnvironmentVariable( const char * name ); - -//! Prints TBB version information on stderr -void PrintVersion(); - -//! Prints arbitrary extra TBB version information on stderr -void PrintExtraVersionInfo( const char* category, const char* format, ... ); - -//! A callback routine to print RML version information on stderr -void PrintRMLVersionInfo( void* arg, const char* server_info ); - -// For TBB compilation only; not to be used in public headers -#if defined(min) || defined(max) -#undef min -#undef max -#endif - -//! Utility template function returning lesser of the two values. -/** Provided here to avoid including not strict safe <algorithm>.\n - In case operands cause signed/unsigned or size mismatch warnings it is caller's - responsibility to do the appropriate cast before calling the function. **/ -template<typename T1, typename T2> -T1 min ( const T1& val1, const T2& val2 ) { - return val1 < val2 ? val1 : val2; -} - -//! Utility template function returning greater of the two values. -/** Provided here to avoid including not strict safe <algorithm>.\n - In case operands cause signed/unsigned or size mismatch warnings it is caller's - responsibility to do the appropriate cast before calling the function. **/ -template<typename T1, typename T2> -T1 max ( const T1& val1, const T2& val2 ) { - return val1 < val2 ? val2 : val1; -} - -//! Utility helper structure to ease overload resolution -template<int > struct int_to_type {}; - -//------------------------------------------------------------------------ -// FastRandom -//------------------------------------------------------------------------ - -/** Defined in tbb_main.cpp **/ -unsigned GetPrime ( unsigned seed ); - -//! A fast random number generator. -/** Uses linear congruential method. */ -class FastRandom { -private: -#if __TBB_OLD_PRIMES_RNG - unsigned x, a; - static const unsigned c = 1; -#else - unsigned x, c; - static const unsigned a = 0x9e3779b1; // a big prime number -#endif //__TBB_OLD_PRIMES_RNG -public: - //! Get a random number. - unsigned short get() { - return get(x); - } - //! Get a random number for the given seed; update the seed for next use. - unsigned short get( unsigned& seed ) { - unsigned short r = (unsigned short)(seed>>16); - __TBB_ASSERT(c&1, "c must be odd for big rng period"); - seed = seed*a+c; - return r; - } - //! Construct a random number generator. - FastRandom( void* unique_ptr ) { init(uintptr_t(unique_ptr)); } - FastRandom( uint32_t seed) { init(seed); } - FastRandom( uint64_t seed) { init(seed); } - template <typename T> - void init( T seed ) { - init(seed,int_to_type<sizeof(seed)>()); - } - void init( uint64_t seed , int_to_type<8> ) { - init(uint32_t((seed>>32)+seed), int_to_type<4>()); - } - void init( uint32_t seed, int_to_type<4> ) { -#if __TBB_OLD_PRIMES_RNG - x = seed; - a = GetPrime( seed ); -#else - // threads use different seeds for unique sequences - c = (seed|1)*0xba5703f5; // c must be odd, shuffle by a prime number - x = c^(seed>>1); // also shuffle x for the first get() invocation -#endif - } -}; - -//------------------------------------------------------------------------ -// Atomic extensions -//------------------------------------------------------------------------ - -//! 
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate -/** Return value semantics is the same as for CAS. **/ -template<typename T1, typename T2, class Pred> -T1 atomic_update ( tbb::atomic<T1>& dst, T2 newValue, Pred compare ) { - T1 oldValue = dst; - while ( compare(oldValue, newValue) ) { - if ( dst.compare_and_swap((T1)newValue, oldValue) == oldValue ) - break; - oldValue = dst; - } - return oldValue; -} - -//! One-time initialization states -enum do_once_state { - do_once_uninitialized = 0, ///< No execution attempts have been undertaken yet - do_once_pending, ///< A thread is executing associated do-once routine - do_once_executed, ///< Do-once routine has been executed - initialization_complete = do_once_executed ///< Convenience alias -}; - -//! One-time initialization function -/** /param initializer Pointer to function without arguments - The variant that returns bool is used for cases when initialization can fail - and it is OK to continue execution, but the state should be reset so that - the initialization attempt was repeated the next time. - /param state Shared state associated with initializer that specifies its - initialization state. Must be initially set to #uninitialized value - (e.g. by means of default static zero initialization). **/ -template <typename F> -void atomic_do_once ( const F& initializer, atomic<do_once_state>& state ) { - // tbb::atomic provides necessary acquire and release fences. - // The loop in the implementation is necessary to avoid race when thread T2 - // that arrived in the middle of initialization attempt by another thread T1 - // has just made initialization possible. - // In such a case T2 has to rely on T1 to initialize, but T1 may already be past - // the point where it can recognize the changed conditions. - while ( state != do_once_executed ) { - if( state == do_once_uninitialized ) { - if( state.compare_and_swap( do_once_pending, do_once_uninitialized ) == do_once_uninitialized ) { - run_initializer( initializer, state ); - break; - } - } - spin_wait_while_eq( state, do_once_pending ); - } -} - -// Run the initializer which can not fail -inline void run_initializer( void (*f)(), atomic<do_once_state>& state ) { - f(); - state = do_once_executed; -} - -// Run the initializer which can require repeated call -inline void run_initializer( bool (*f)(), atomic<do_once_state>& state ) { - state = f() ? do_once_executed : do_once_uninitialized; -} - -#if __TBB_USE_OS_AFFINITY_SYSCALL - #if __linux__ - typedef cpu_set_t basic_mask_t; - #elif __FreeBSD_version >= 701000 - typedef cpuset_t basic_mask_t; - #else - #error affinity_helper is not implemented in this OS - #endif - class affinity_helper : no_copy { - basic_mask_t* threadMask; - int is_changed; - public: - affinity_helper() : threadMask(NULL), is_changed(0) {} - ~affinity_helper(); - void protect_affinity_mask(); - }; -#else - class affinity_helper : no_copy { - public: - void protect_affinity_mask() {} - }; -#endif /* __TBB_USE_OS_AFFINITY_SYSCALL */ - -extern bool cpu_has_speculation(); - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_tbb_misc_H */ diff --git a/src/tbb/src/tbb/tbb_resource.rc b/src/tbb/src/tbb/tbb_resource.rc deleted file mode 100644 index b9100260d..000000000 --- a/src/tbb/src/tbb/tbb_resource.rc +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. 
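
[Editor's note] The atomic_do_once() state machine removed above (uninitialized -> pending -> executed) translates naturally to std::atomic. A sketch under the simplifying assumption that the initializer cannot fail; TBB's version also supports initializers that reset the state back to uninitialized on failure:

#include <atomic>
#include <thread>

enum do_once_state { uninitialized, pending, executed };

void atomic_do_once(void (*initializer)(), std::atomic<do_once_state>& state) {
    while (state.load(std::memory_order_acquire) != executed) {
        do_once_state expected = uninitialized;
        if (state.compare_exchange_strong(expected, pending)) {
            initializer();  // we won the race; run the one-time work
            state.store(executed, std::memory_order_release);
            break;
        }
        // Another thread is initializing: wait politely until it finishes.
        while (state.load(std::memory_order_acquire) == pending)
            std::this_thread::yield();
    }
}
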
Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - -// Microsoft Visual C++ generated resource script. -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NO_MFC 1 -#define _APS_NEXT_RESOURCE_VALUE 102 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1001 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource. -// -#include <winresrc.h> -#define ENDL "\r\n" -#include "tbb_version.h" - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// Neutral resources - -//#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU) -#ifdef _WIN32 -LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL -#pragma code_page(1252) -#endif //_WIN32 - -///////////////////////////////////////////////////////////////////////////// -// manifest integration -#ifdef TBB_MANIFEST -#include "winuser.h" -2 RT_MANIFEST tbbmanifest.exe.manifest -#endif - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION TBB_VERNUMBERS - PRODUCTVERSION TBB_VERNUMBERS - FILEFLAGSMASK 0x17L -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x40004L - FILETYPE 0x2L - FILESUBTYPE 0x0L -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004b0" - BEGIN - VALUE "CompanyName", "Intel Corporation\0" - VALUE "FileDescription", "Intel(R) Threading Building Blocks library\0" - VALUE "FileVersion", TBB_VERSION "\0" - VALUE "LegalCopyright", "Copyright 2005-2014 Intel Corporation. 
All Rights Reserved.\0" - VALUE "LegalTrademarks", "\0" -#ifndef TBB_USE_DEBUG - VALUE "OriginalFilename", "tbb.dll\0" -#else - VALUE "OriginalFilename", "tbb_debug.dll\0" -#endif - VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0" - VALUE "ProductVersion", TBB_VERSION "\0" - VALUE "PrivateBuild", "\0" - VALUE "SpecialBuild", "\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0, 1200 - END -END - -//#endif // Neutral resources -///////////////////////////////////////////////////////////////////////////// - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. -// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/src/tbb/src/tbb/tbb_statistics.cpp b/src/tbb/src/tbb/tbb_statistics.cpp deleted file mode 100644 index 1c8300693..000000000 --- a/src/tbb/src/tbb/tbb_statistics.cpp +++ /dev/null @@ -1,187 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb_statistics.h" - -#if __TBB_STATISTICS - -#include <climits> -#include <cstdarg> -#if __TBB_STATISTICS_STDOUT -#include <cstdio> -#endif - -#include "tbb/spin_mutex.h" - -namespace tbb { -namespace internal { - -//! Human readable titles of statistics groups defined by statistics_groups enum. -/** The order of this vector elements must correspond to the statistics_counters - structure layout. **/ -const char* StatGroupTitles[] = { - "task objects", "tasks executed", "stealing attempts", "task proxies", "arena", "market", "priority ops", "prio ops details" -}; - -//! Human readable titles of statistics elements defined by statistics_counters struct. -/** The order of this vector elements must correspond to the statistics_counters - structure layout (with NULLs interspersed to separate groups). 
**/ -const char* StatFieldTitles[] = { - /*task objects*/ "active", "freed", "big", NULL, - /*tasks executed*/ "total", "w/o spawn", NULL, - /*stealing attempts*/ "succeeded", "failed", "conflicts", "backoffs", NULL, - /*task proxies*/ "mailed", "revoked", "stolen", "bypassed", "ignored", NULL, - /*arena*/ "switches", "roundtrips", "avg.conc", "avg.allot", NULL, - /*market*/ "roundtrips", NULL, - /*priority ops*/ "ar.switch", "mkt.switch", "ar.reset", "ref.fixup", "avg.ar.pr", "avg.mkt.pr", NULL, - /*prio ops details*/ "winnows", "reloads", "orphaned", "winnowed", "reloaded", NULL -}; - -//! Class for logging statistics -/** There should be only one instance of this class. - Results are written to a file "statistics.txt" in tab-separated format. */ -class statistics_logger { -public: - statistics_logger () { - __TBB_ASSERT( sg_end - 1 == 1 << (sizeof(StatGroupTitles)/sizeof(*StatGroupTitles) - 1), NULL ); - - my_file = fopen("statistics.txt","w"); - if( !my_file ) - perror("fopen(\"statistics.txt\"\")"); - // Initialize groups dump layout info - group_start_field[0] = 0; - for ( size_t i = 0, j = 0; i < NumGroups; ++i, ++j ) { - __TBB_ASSERT( StatFieldTitles[j], "Empty group occurred" ); - while ( StatFieldTitles[j] ) - ++j; - group_start_field[i + 1] = j - i; // -i accounts for preceding NULL separators - } - __TBB_ASSERT( group_start_field[NumGroups] == statistics_counters::size(), - "Wrong number of elements in StatFieldTitles" ); - dump( "%-*s", IDColumnWidth, ""); - process_groups( &statistics_logger::print_group_title ); - dump( "%-*s", IDColumnWidth, "ID"); - process_groups( &statistics_logger::print_field_titles ); - } - - ~statistics_logger () { fclose(my_file); } - - void record( const statistics_counters& c, size_t id ) { - spin_mutex::scoped_lock lock(my_mutex); - counters_to_dump = &c; -#if __TBB_STATISTICS_TOTALS_ONLY - if ( id == arena_counters_total ) { - dump( "%-*s", IDColumnWidth, "Tot" ); - process_groups( &statistics_logger::print_field_values ); - } -#else /* !__TBB_STATISTICS_TOTALS_ONLY */ - const char* idString = NULL; - switch ( id ) { - case 0: - idString = "M"; break; - case workers_counters_total: - idString = "Wtot"; break; - case arena_counters_total: - idString = "Tot"; break; - default: - dump( "W%-*u", IDColumnWidth - 1, id ); - } - if ( idString ) - dump( "%-*s", IDColumnWidth, idString ); - process_groups( &statistics_logger::print_field_values ); -#endif /* !__TBB_STATISTICS_TOTALS_ONLY */ - } -private: - static const size_t IDColumnWidth = 5; - static const size_t StatisticsColumnWidth = 10; - static const size_t NumGroups = sizeof(StatGroupTitles)/sizeof(char*); - - //! File into which statistics are written. - FILE* my_file; - //! Mutex that serializes accesses to my_file - spin_mutex my_mutex; - //! Indices of the each group's first field in statistics_counters struct. - /** An extra element is used to track the total number of statistics fields. **/ - size_t group_start_field[NumGroups + 1]; - //! Currently processed set of counters. - const statistics_counters* counters_to_dump; - - static const size_t NumFields = sizeof(StatFieldTitles)/sizeof(*StatFieldTitles) - NumGroups; - bool averages_fields[NumFields]; - - void dump ( char const* fmt, ... 
) { - va_list args; - if ( my_file ) { - va_start( args, fmt ); - vfprintf( my_file, fmt, args ); - va_end( args ); - } -#if __TBB_STATISTICS_STDOUT - va_start( args, fmt ); - vprintf( fmt, args ); - va_end( args ); -#endif - } - - void process_groups ( void (statistics_logger::*per_group_action)(size_t group_idx) ) { - for ( size_t i = 0, group_flag = 1; i < NumGroups; ++i, group_flag <<= 1 ) { - __TBB_ASSERT( group_flag < sg_end, "StatGroupTitles contents is incompatible with statistics_groups definition" ); - if ( __TBB_ActiveStatisticsGroups & group_flag ) - (this->*per_group_action)( i ); - } - dump( "\n" ); - } - - void print_group_title ( size_t group_idx ) { - dump( "%-*s", (group_start_field[group_idx + 1] - group_start_field[group_idx]) * (StatisticsColumnWidth + 1), - StatGroupTitles[group_idx] ); - } - - void print_field_titles ( size_t group_idx ) { - // +group_idx accounts for preceding NULL separators - size_t i = group_start_field[group_idx] + group_idx; - while ( StatFieldTitles[i] ) { - averages_fields[i - group_idx] = strncmp(StatFieldTitles[i], "avg.", 4) == 0; - dump( "%-*s ", StatisticsColumnWidth, StatFieldTitles[i++] ); - } - } - - void print_field_values ( size_t group_idx ) { - size_t begin = group_start_field[group_idx], - end = group_start_field[group_idx + 1]; - for ( size_t i = begin; i < end; ++i ) { - if ( averages_fields[i] ) - dump( "%-*.2f ", StatisticsColumnWidth, (double)counters_to_dump->field(i)/counters_to_dump->tasks_executed ); - else - dump( "%-*ld ", StatisticsColumnWidth, counters_to_dump->field(i) ); - } - } -}; // class statistics_logger - -static statistics_logger the_statistics; - -void dump_statistics ( const statistics_counters& c, size_t id ) { - the_statistics.record(c, id); -} - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_STATISTICS */ diff --git a/src/tbb/src/tbb/tbb_statistics.h b/src/tbb/src/tbb/tbb_statistics.h deleted file mode 100644 index e42806a60..000000000 --- a/src/tbb/src/tbb/tbb_statistics.h +++ /dev/null @@ -1,240 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
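
[Editor's note] The statistics_logger constructor deleted above derives group_start_field from the NULL-separated StatFieldTitles table: j walks over every title plus i separators, so j - i is the cumulative field count. A compilable sketch with a toy two-group table (data and names are illustrative):

#include <cstddef>

static const char* field_titles[] = {
    /* group 0 */ "active", "freed", "big", nullptr,
    /* group 1 */ "total", "w/o spawn", nullptr,
};
const std::size_t num_groups = 2;
std::size_t group_start_field[num_groups + 1];

void compute_group_offsets() {
    group_start_field[0] = 0;
    for (std::size_t i = 0, j = 0; i < num_groups; ++i, ++j) {
        while (field_titles[j]) ++j;       // skip to the NULL separator
        group_start_field[i + 1] = j - i;  // subtract the i separators seen
    }
}

int main() {
    compute_group_offsets();
    // group_start_field is now {0, 3, 5}: group 0 owns fields [0,3), group 1 [3,5).
}
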
-*/ - -#ifndef _TBB_tbb_statistics_H -#define _TBB_tbb_statistics_H - -/** - This file defines parameters of the internal statistics collected by the TBB - library (currently by the task scheduler only). - - Statistics is accumulated separately in each thread and is dumped when - the scheduler instance associated with the given thread is destroyed. - For apps with multiple master threads or with the same master repeatedly - initializing and then deinitializing task scheduler this results in TBB - workers statistics getting inseparably mixed. - - Therefore statistics is accumulated in arena slots, and should be dumped - when arena is destroyed. This separates statistics collected for each - scheduler activity region in each master thread. - - With the current RML implementation (TBB 2.2, 3.0) to avoid complete loss of - statistics data during app shutdown (because of lazy workers deinitialization - logic) set __TBB_STATISTICS_EARLY_DUMP macro to write the statistics at the - moment a master thread deinitializes its scheduler. This may happen a little - earlier than the moment of arena destruction resulting in the following undesired - (though usually tolerable) effects: - - a few events related to unsuccessful stealing or thread pool activity may be lost, - - statistics may be substantially incomplete in case of FIFO tasks used in - the FAF mode. - - Macro __TBB_STATISTICS_STDOUT and global variable __TBB_ActiveStatisticsGroups - defined below can be used to configure the statistics output. - - To add new counter: - 1) Insert it into the appropriate group range in statistics_counters; - 2) Insert the corresponding field title into StatFieldTitles (preserving - relative order of the fields). - - To add new counters group: - 1) Insert new group bit flag into statistics_groups; - 2) Insert the new group title into StatGroupTitles (preserving - relative order of the groups). - 3) Add counter belonging to the new group as described above -**/ - -#include "tbb/tbb_stddef.h" - -#ifndef __TBB_STATISTICS -#define __TBB_STATISTICS 0 -#endif /* __TBB_STATISTICS */ - -#if __TBB_STATISTICS - -#include <string.h> // for memset - -//! Dump counters into stdout as well. -/** By default statistics counters are written to the file "statistics.txt" only. **/ -#define __TBB_STATISTICS_STDOUT 1 - -//! Dump only totals for all threads in the given arena -/** By default statistics counters for each arena slot are dumped separately, as - well as the subtotal for workers. **/ -#define __TBB_STATISTICS_TOTALS_ONLY 1 - -//! Dump statistics for an arena when its master completes -/** By default (when this macro is not set) the statistics is sent to output when - arena object is destroyed. But with the current lazy workers termination - logic default behavior may result in loosing all statistics output. **/ -#define __TBB_STATISTICS_EARLY_DUMP 1 - -#define GATHER_STATISTIC(x) (x) - -namespace tbb { -namespace internal { - -//! Groups of statistics counters. -/** The order of enumerators must be the same as the order of the corresponding - field groups in the statistics_counters structure. **/ -enum statistics_groups { - sg_task_allocation = 0x01, - sg_task_execution = 0x02, - sg_stealing = 0x04, - sg_affinity = 0x08, - sg_arena = 0x10, - sg_market = 0x20, - sg_prio = 0x40, - sg_prio_ex = 0x80, - // List end marker. Insert new groups only before it. - sg_end -}; - -//! Groups of counters to output -const uintptr_t __TBB_ActiveStatisticsGroups = sg_task_execution | sg_stealing | sg_affinity | sg_arena | sg_market; - -//! 
A set of various statistics counters that are updated by the library on per thread basis. -/** All the fields must be of the same type (statistics_counters::counter_type). - This is necessary to allow reinterpreting this structure as an array. **/ -struct statistics_counters { - typedef long counter_type; - - // Group: sg_task_allocation - // Counters in this group can have negative values as the tasks migrate across - // threads while the associated counters are updated in the current thread only - // to avoid data races - - //! Number of tasks allocated and not yet destroyed - counter_type active_tasks; - //! Number of task corpses stored for future reuse - counter_type free_list_length; - //! Number of big tasks allocated during the run - /** To find total number of tasks malloc'd, compute (big_tasks+my_small_task_count) */ - counter_type big_tasks; - - // Group: sg_task_execution - - //! Number of tasks executed - counter_type tasks_executed; - //! Number of elided spawns - counter_type spawns_bypassed; - - // Group: sg_stealing - - //! Number of tasks successfully stolen - counter_type steals_committed; - //! Number of failed stealing attempts - counter_type steals_failed; - //! Number of failed attempts to lock victim's task pool - counter_type thieves_conflicts; - //! Number of times thief backed off because of the collision with the owner - counter_type thief_backoffs; - - // Group: sg_affinity - - //! Number of tasks received from mailbox - counter_type mails_received; - //! Number of affinitized tasks executed by the owner - /** Goes as "revoked" in statistics printout. **/ - counter_type proxies_executed; - //! Number of affinitized tasks intercepted by thieves - counter_type proxies_stolen; - //! Number of proxy bypasses by thieves during stealing - counter_type proxies_bypassed; - //! Number of affinitized tasks executed by the owner via scheduler bypass mechanism - counter_type affinity_ignored; - - // Group: sg_arena - - //! Number of times the state of arena switched between "full" and "empty" - counter_type gate_switches; - //! Number of times workers left an arena and returned into the market - counter_type arena_roundtrips; - // !Average concurrency level of this arena - counter_type avg_arena_concurrency; - //! Average assigned priority - counter_type avg_assigned_workers; - - // Group: sg_market - - //! Number of times workers left the market and returned into RML - counter_type market_roundtrips; - - // Group; sg_prio - - //! Number of arena priority switches - counter_type arena_prio_switches; - //! Number of market priority switches - counter_type market_prio_switches; - //! Number of arena priority switches - counter_type arena_prio_resets; - //! Number of reference priority source fixups to avoid deadlock - counter_type prio_ref_fixups; - //! Average arena priority - counter_type avg_arena_prio; - //! Average market priority - counter_type avg_market_prio; - - // Group; sg_prio_ex - - //! Number of times local task pools were winnowed - counter_type prio_winnowings; - //! Number of times secondary task pools were searched for top priority tasks - counter_type prio_reloads; - //! Number of times secondary task pools were abandoned by quitting workers - counter_type prio_orphanings; - //! Number of tasks offloaded into secondary task pools - counter_type prio_tasks_offloaded; - //! 
Number of tasks reloaded from secondary task pools - counter_type prio_tasks_reloaded; - - // Constructor and helpers - - statistics_counters() { reset(); } - - void reset () { memset( this, 0, sizeof(statistics_counters) ); } - - counter_type& field ( size_t index ) { return reinterpret_cast<counter_type*>(this)[index]; } - - const counter_type& field ( size_t index ) const { return reinterpret_cast<const counter_type*>(this)[index]; } - - static size_t size () { return sizeof(statistics_counters) / sizeof(counter_type); } - - const statistics_counters& operator += ( const statistics_counters& rhs ) { - for ( size_t i = 0; i < size(); ++i ) - field(i) += rhs.field(i); - return *this; - } -}; // statistics_counters - -static const size_t workers_counters_total = (size_t)-1; -static const size_t arena_counters_total = (size_t)-2; - -void dump_statistics ( const statistics_counters& c, size_t id ); - -} // namespace internal -} // namespace tbb - -#else /* !__TBB_STATISTICS */ - -#define GATHER_STATISTIC(x) ((void)0) - -#endif /* !__TBB_STATISTICS */ - -#endif /* _TBB_tbb_statistics_H */ diff --git a/src/tbb/src/tbb/tbb_thread.cpp b/src/tbb/src/tbb/tbb_thread.cpp deleted file mode 100644 index 350e76055..000000000 --- a/src/tbb/src/tbb/tbb_thread.cpp +++ /dev/null @@ -1,195 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if _WIN32||_WIN64 -#include <process.h> // _beginthreadex() -#endif -#include <errno.h> -#include "tbb_misc.h" // handle_win_error(), ThreadStackSize -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_thread.h" -#include "tbb/tbb_allocator.h" -#include "governor.h" // default_num_threads() -#if __TBB_WIN8UI_SUPPORT -#include <thread> -#endif - -namespace tbb { -namespace internal { - -//! Allocate a closure -void* allocate_closure_v3( size_t size ) -{ - return allocate_via_handler_v3( size ); -} - -//! 
Free a closure allocated by allocate_closure_v3 -void free_closure_v3( void *ptr ) -{ - deallocate_via_handler_v3( ptr ); -} - -void tbb_thread_v3::join() -{ - if (!joinable()) - handle_perror( EINVAL, "tbb_thread::join" ); // Invalid argument - if (this_tbb_thread::get_id() == get_id()) - handle_perror( EDEADLK, "tbb_thread::join" ); // Resource deadlock avoided -#if _WIN32||_WIN64 -#if __TBB_WIN8UI_SUPPORT - std::thread* thread_tmp=(std::thread*)my_thread_id; - thread_tmp->join(); - delete thread_tmp; -#else // __TBB_WIN8UI_SUPPORT - DWORD status = WaitForSingleObjectEx( my_handle, INFINITE, FALSE ); - if ( status == WAIT_FAILED ) - handle_win_error( GetLastError() ); - BOOL close_stat = CloseHandle( my_handle ); - if ( close_stat == 0 ) - handle_win_error( GetLastError() ); - my_thread_id = 0; -#endif // __TBB_WIN8UI_SUPPORT -#else - int status = pthread_join( my_handle, NULL ); - if( status ) - handle_perror( status, "pthread_join" ); -#endif // _WIN32||_WIN64 - my_handle = 0; -} - -void tbb_thread_v3::detach() { - if (!joinable()) - handle_perror( EINVAL, "tbb_thread::detach" ); // Invalid argument -#if _WIN32||_WIN64 - BOOL status = CloseHandle( my_handle ); - if ( status == 0 ) - handle_win_error( GetLastError() ); - my_thread_id = 0; -#else - int status = pthread_detach( my_handle ); - if( status ) - handle_perror( status, "pthread_detach" ); -#endif // _WIN32||_WIN64 - my_handle = 0; -} - -void tbb_thread_v3::internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), - void* closure ) { -#if _WIN32||_WIN64 -#if __TBB_WIN8UI_SUPPORT - std::thread* thread_tmp=new std::thread(start_routine, closure); - my_handle = thread_tmp->native_handle(); -// TODO: to find out the way to find thread_id without GetThreadId and other -// desktop functions. -// Now tbb_thread does have its own thread_id that stores std::thread object - my_thread_id = (size_t)thread_tmp; -#else - unsigned thread_id; - // The return type of _beginthreadex is "uintptr_t" on new MS compilers, - // and 'unsigned long' on old MS compilers. uintptr_t works for both. 
- uintptr_t status = _beginthreadex( NULL, ThreadStackSize, start_routine, - closure, 0, &thread_id ); - if( status==0 ) - handle_perror(errno,"__beginthreadex"); - else { - my_handle = (HANDLE)status; - my_thread_id = thread_id; - } -#endif -#else - pthread_t thread_handle; - int status; - pthread_attr_t stack_size; - status = pthread_attr_init( &stack_size ); - if( status ) - handle_perror( status, "pthread_attr_init" ); - status = pthread_attr_setstacksize( &stack_size, ThreadStackSize ); - if( status ) - handle_perror( status, "pthread_attr_setstacksize" ); - - status = pthread_create( &thread_handle, &stack_size, start_routine, closure ); - if( status ) - handle_perror( status, "pthread_create" ); - status = pthread_attr_destroy( &stack_size ); - if( status ) - handle_perror( status, "pthread_attr_destroy" ); - - my_handle = thread_handle; -#endif // _WIN32||_WIN64 -} - -unsigned tbb_thread_v3::hardware_concurrency() __TBB_NOEXCEPT(true) { - return governor::default_num_threads(); -} - -tbb_thread_v3::id thread_get_id_v3() { -#if _WIN32||_WIN64 - return tbb_thread_v3::id( GetCurrentThreadId() ); -#else - return tbb_thread_v3::id( pthread_self() ); -#endif // _WIN32||_WIN64 -} - -void move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ) -{ - if (t1.joinable()) - t1.detach(); - t1.my_handle = t2.my_handle; - t2.my_handle = 0; -#if _WIN32||_WIN64 - t1.my_thread_id = t2.my_thread_id; - t2.my_thread_id = 0; -#endif // _WIN32||_WIN64 -} - -void thread_yield_v3() -{ - __TBB_Yield(); -} - -void thread_sleep_v3(const tick_count::interval_t &i) -{ -#if _WIN32||_WIN64 - tick_count t0 = tick_count::now(); - tick_count t1 = t0; - for(;;) { - double remainder = (i-(t1-t0)).seconds()*1e3; // milliseconds remaining to sleep - if( remainder<=0 ) break; - DWORD t = remainder>=INFINITE ? INFINITE-1 : DWORD(remainder); -#if !__TBB_WIN8UI_SUPPORT - Sleep( t ); -#else - std::chrono::milliseconds sleep_time( t ); - std::this_thread::sleep_for( sleep_time ); -#endif - t1 = tick_count::now(); - } -#else - struct timespec req; - double sec = i.seconds(); - - req.tv_sec = static_cast<long>(sec); - req.tv_nsec = static_cast<long>( (sec - req.tv_sec)*1e9 ); - nanosleep(&req, NULL); -#endif // _WIN32||_WIN64 -} - -} // internal -} // tbb diff --git a/src/tbb/src/tbb/tbb_version.h b/src/tbb/src/tbb/tbb_version.h deleted file mode 100644 index 5acf40b01..000000000 --- a/src/tbb/src/tbb/tbb_version.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
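
[Editor's note] On POSIX, tbb_thread_v3::internal_start() above sets the stack size explicitly (TBB defaults to 2 MB on 32-bit, 4 MB on 64-bit) through a pthread attribute object rather than accepting the platform default. A minimal sketch of that sequence; the worker function and names are illustrative, and stack_bytes must be at least PTHREAD_STACK_MIN:

#include <pthread.h>
#include <cstddef>

static void* worker(void*) { return nullptr; }  // illustrative entry point

// Create a thread with an explicit stack size. Returns 0 on success,
// otherwise the errno-style code from the failing pthread call.
int start_with_stack_size(pthread_t* out, std::size_t stack_bytes) {
    pthread_attr_t attr;
    int status = pthread_attr_init(&attr);
    if (status) return status;
    status = pthread_attr_setstacksize(&attr, stack_bytes);
    if (status == 0)
        status = pthread_create(out, &attr, worker, nullptr);
    pthread_attr_destroy(&attr);  // safe whether or not create succeeded
    return status;
}
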
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Please define version number in the file: -#include "tbb/tbb_stddef.h" - -// And don't touch anything below -#ifndef ENDL -#define ENDL "\n" -#endif -#include "version_string.ver" - -#ifndef __TBB_VERSION_STRINGS -#pragma message("Warning: version_string.ver isn't generated properly by version_info.sh script!") -// here is an example of macros value: -#define __TBB_VERSION_STRINGS \ -"TBB: BUILD_HOST\tUnknown\n" \ -"TBB: BUILD_ARCH\tUnknown\n" \ -"TBB: BUILD_OS\t\tUnknown\n" \ -"TBB: BUILD_CL\t\tUnknown\n" \ -"TBB: BUILD_COMPILER\tUnknown\n" \ -"TBB: BUILD_COMMAND\tUnknown\n" -#endif -#ifndef __TBB_DATETIME -#ifdef RC_INVOKED -#define __TBB_DATETIME "Unknown" -#else -#define __TBB_DATETIME __DATE__ __TIME__ -#endif -#endif - -#define __TBB_VERSION_NUMBER(N) #N ": VERSION\t\t" __TBB_STRING(TBB_VERSION_MAJOR.TBB_VERSION_MINOR) ENDL -#define __TBB_INTERFACE_VERSION_NUMBER(N) #N ": INTERFACE VERSION\t" __TBB_STRING(TBB_INTERFACE_VERSION) ENDL - -#define __TBB_VERSION_DATETIME(N) #N ": BUILD_DATE\t\t" __TBB_DATETIME ENDL -#ifndef TBB_USE_DEBUG - #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\tundefined" ENDL -#elif TBB_USE_DEBUG==0 - #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\t0" ENDL -#elif TBB_USE_DEBUG==1 - #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\t1" ENDL -#elif TBB_USE_DEBUG==2 - #define __TBB_VERSION_USE_DEBUG(N) #N ": TBB_USE_DEBUG\t2" ENDL -#else - #error Unexpected value for TBB_USE_DEBUG -#endif - -/* Make __TBB_VERSION_USE_ASSERT and __TBB_VERSION_DO_NOTIFY empty for rc - * because rc from VS2005 crashed with fatal error RC10056 for too complex - * macros (for example, when __TBB_CPF_BUILD is enabled). - * All information is available in BUILD_COMMAND anyway. 
- */ - -#ifdef RC_INVOKED - #define __TBB_VERSION_USE_ASSERT(N) -#else // RC_INVOKED -#ifndef TBB_USE_ASSERT - #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\tundefined" ENDL -#elif TBB_USE_ASSERT==0 - #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\t0" ENDL -#elif TBB_USE_ASSERT==1 - #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\t1" ENDL -#elif TBB_USE_ASSERT==2 - #define __TBB_VERSION_USE_ASSERT(N) #N ": TBB_USE_ASSERT\t2" ENDL -#else - #error Unexpected value for TBB_USE_ASSERT -#endif -#endif // RC_INVOKED - -#ifndef __TBB_CPF_BUILD - #define __TBB_VERSION_TBB_PREVIEW_BINARY(N) -#else - #define __TBB_VERSION_TBB_PREVIEW_BINARY(N) #N ": TBB_PREVIEW_BINARY\t1" ENDL -#endif - -#ifdef RC_INVOKED - #define __TBB_VERSION_DO_NOTIFY(N) -#else -#ifndef DO_ITT_NOTIFY - #define __TBB_VERSION_DO_NOTIFY(N) #N ": DO_ITT_NOTIFY\tundefined" ENDL -#elif DO_ITT_NOTIFY==1 - #define __TBB_VERSION_DO_NOTIFY(N) #N ": DO_ITT_NOTIFY\t1" ENDL -#elif DO_ITT_NOTIFY==0 - #define __TBB_VERSION_DO_NOTIFY(N) -#else - #error Unexpected value for DO_ITT_NOTIFY -#endif -#endif // RC_INVOKED - -#define TBB_VERSION_STRINGS_P(N) __TBB_VERSION_NUMBER(N) __TBB_INTERFACE_VERSION_NUMBER(N) __TBB_VERSION_DATETIME(N) __TBB_VERSION_STRINGS(N) __TBB_VERSION_USE_DEBUG(N) __TBB_VERSION_USE_ASSERT(N) __TBB_VERSION_TBB_PREVIEW_BINARY(N) __TBB_VERSION_DO_NOTIFY(N) - -#define TBB_VERSION_STRINGS TBB_VERSION_STRINGS_P(TBB) -#define TBBMALLOC_VERSION_STRINGS TBB_VERSION_STRINGS_P(TBBmalloc) - -// numbers -#ifndef __TBB_VERSION_YMD -#define __TBB_VERSION_YMD 0, 0 -#endif - -#define TBB_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD - -#define TBB_VERSION __TBB_STRING(TBB_VERNUMBERS) diff --git a/src/tbb/src/tbb/tcm.h b/src/tbb/src/tbb/tcm.h new file mode 100644 index 000000000..66ee18a2f --- /dev/null +++ b/src/tbb/src/tbb/tcm.h @@ -0,0 +1,174 @@ +/* + Copyright (c) 2023-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef _TBB_tcm_H +#define _TBB_tcm_H + +#include <stdint.h> +#include <stdbool.h> + +#ifdef __cplusplus +extern "C" { +#endif + +// Support for the TCM API return value + +typedef enum _tcm_result_t { + TCM_RESULT_SUCCESS = 0x0, + TCM_RESULT_ERROR_INVALID_ARGUMENT = 0x78000004, + TCM_RESULT_ERROR_UNKNOWN = 0x7ffffffe +} tcm_result_t; + +// Support for permit states + +enum tcm_permit_states_t { + TCM_PERMIT_STATE_VOID, + TCM_PERMIT_STATE_INACTIVE, + TCM_PERMIT_STATE_PENDING, + TCM_PERMIT_STATE_IDLE, + TCM_PERMIT_STATE_ACTIVE +}; + +typedef uint8_t tcm_permit_state_t; + +// Support for permit flags + +typedef struct _tcm_permit_flags_t { + uint32_t stale : 1; + uint32_t rigid_concurrency : 1; + uint32_t exclusive : 1; + uint32_t request_as_inactive : 1; + uint32_t reserved : 28; +} tcm_permit_flags_t; + +typedef struct _tcm_callback_flags_t { + uint32_t new_concurrency : 1; + uint32_t new_state : 1; + uint32_t reserved : 30; +} tcm_callback_flags_t; + +// Support for cpu masks + +struct hwloc_bitmap_s; +typedef struct hwloc_bitmap_s* hwloc_bitmap_t; +typedef hwloc_bitmap_t tcm_cpu_mask_t; + +// Support for ids + +typedef uint64_t tcm_client_id_t; + +// Support for permits + +typedef struct _tcm_permit_t { + uint32_t* concurrencies; + tcm_cpu_mask_t* cpu_masks; + uint32_t size; + tcm_permit_state_t state; + tcm_permit_flags_t flags; +} tcm_permit_t; + +// Support for permit handle + +typedef struct tcm_permit_rep_t* tcm_permit_handle_t; + +// Support for constraints + +typedef int32_t tcm_numa_node_t; +typedef int32_t tcm_core_type_t; + +const int8_t tcm_automatic = -1; +const int8_t tcm_any = -2; + +#define TCM_PERMIT_REQUEST_CONSTRAINTS_INITIALIZER {tcm_automatic, tcm_automatic, NULL, \ + tcm_automatic, tcm_automatic, tcm_automatic} + +typedef struct _tcm_cpu_constraints_t { + int32_t min_concurrency; + int32_t max_concurrency; + tcm_cpu_mask_t mask; + tcm_numa_node_t numa_id; + tcm_core_type_t core_type_id; + int32_t threads_per_core; +} tcm_cpu_constraints_t; + +// Support for priorities + +enum tcm_request_priorities_t { + TCM_REQUEST_PRIORITY_LOW = (INT32_MAX / 4) * 1, + TCM_REQUEST_PRIORITY_NORMAL = (INT32_MAX / 4) * 2, + TCM_REQUEST_PRIORITY_HIGH = (INT32_MAX / 4) * 3 +}; + +typedef int32_t tcm_request_priority_t; + +// Support for requests + +#define TCM_PERMIT_REQUEST_INITIALIZER {tcm_automatic, tcm_automatic, \ + NULL, 0, TCM_REQUEST_PRIORITY_NORMAL, {}, {}} + +typedef struct _tcm_permit_request_t { + int32_t min_sw_threads; + int32_t max_sw_threads; + tcm_cpu_constraints_t* cpu_constraints; + uint32_t constraints_size; + tcm_request_priority_t priority; + tcm_permit_flags_t flags; + char reserved[4]; +} tcm_permit_request_t; + +// Support for client callback + +typedef tcm_result_t (*tcm_callback_t)(tcm_permit_handle_t p, void* callback_arg, tcm_callback_flags_t); + +#if _WIN32 + #define __TCM_EXPORT __declspec(dllexport) +#else + #define __TCM_EXPORT +#endif + + +__TCM_EXPORT tcm_result_t tcmConnect(tcm_callback_t callback, + tcm_client_id_t *client_id); +__TCM_EXPORT tcm_result_t tcmDisconnect(tcm_client_id_t client_id); + +__TCM_EXPORT tcm_result_t tcmRequestPermit(tcm_client_id_t client_id, + tcm_permit_request_t request, + void* callback_arg, + tcm_permit_handle_t* permit_handle, + tcm_permit_t* permit); + +__TCM_EXPORT tcm_result_t tcmGetPermitData(tcm_permit_handle_t permit_handle, + tcm_permit_t* permit); + +__TCM_EXPORT tcm_result_t tcmReleasePermit(tcm_permit_handle_t permit); + +__TCM_EXPORT tcm_result_t tcmIdlePermit(tcm_permit_handle_t permit_handle); + 
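+// Note: judging by the declarations in this header (not from TCM documentation), the
+// two calls below, together with tcmIdlePermit() above, move a permit between the
+// IDLE, INACTIVE, and ACTIVE states of tcm_permit_states_t.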
+__TCM_EXPORT tcm_result_t tcmDeactivatePermit(tcm_permit_handle_t permit_handle); + +__TCM_EXPORT tcm_result_t tcmActivatePermit(tcm_permit_handle_t permit_handle); + +__TCM_EXPORT tcm_result_t tcmRegisterThread(tcm_permit_handle_t permit_handle); + +__TCM_EXPORT tcm_result_t tcmUnregisterThread(); + +__TCM_EXPORT tcm_result_t tcmGetVersionInfo(char* buffer, uint32_t buffer_size); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif /* _TBB_tcm_H */ diff --git a/src/tbb/src/tbb/tcm_adaptor.cpp b/src/tbb/src/tbb/tcm_adaptor.cpp new file mode 100644 index 000000000..85ca125b4 --- /dev/null +++ b/src/tbb/src/tbb/tcm_adaptor.cpp @@ -0,0 +1,327 @@ +/* + Copyright (c) 2023-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/detail/_intrusive_list_node.h" +#include "oneapi/tbb/detail/_template_helpers.h" +#include "oneapi/tbb/task_arena.h" + +#include "pm_client.h" +#include "dynamic_link.h" +#include "misc.h" +#include "tcm.h" +#include "tcm_adaptor.h" + +#include <iostream> + +namespace tbb { +namespace detail { +namespace r1 { + +namespace { +#if __TBB_WEAK_SYMBOLS_PRESENT +#pragma weak tcmConnect +#pragma weak tcmDisconnect +#pragma weak tcmRequestPermit +#pragma weak tcmGetPermitData +#pragma weak tcmReleasePermit +#pragma weak tcmIdlePermit +#pragma weak tcmDeactivatePermit +#pragma weak tcmActivatePermit +#pragma weak tcmRegisterThread +#pragma weak tcmUnregisterThread +#pragma weak tcmGetVersionInfo +#endif /* __TBB_WEAK_SYMBOLS_PRESENT */ + +tcm_result_t(*tcm_connect)(tcm_callback_t callback, tcm_client_id_t* client_id){nullptr}; +tcm_result_t(*tcm_disconnect)(tcm_client_id_t client_id){ nullptr }; +tcm_result_t(*tcm_request_permit)(tcm_client_id_t client_id, tcm_permit_request_t request, + void* callback_arg, tcm_permit_handle_t* permit_handle, tcm_permit_t* permit){nullptr}; +tcm_result_t(*tcm_get_permit_data)(tcm_permit_handle_t permit_handle, tcm_permit_t* permit){nullptr}; +tcm_result_t(*tcm_release_permit)(tcm_permit_handle_t permit){nullptr}; +tcm_result_t(*tcm_idle_permit)(tcm_permit_handle_t permit_handle){nullptr}; +tcm_result_t(*tcm_deactivate_permit)(tcm_permit_handle_t permit_handle){nullptr}; +tcm_result_t(*tcm_activate_permit)(tcm_permit_handle_t permit_handle){nullptr}; +tcm_result_t(*tcm_register_thread)(tcm_permit_handle_t permit_handle){nullptr}; +tcm_result_t(*tcm_unregister_thread)(){nullptr}; +tcm_result_t (*tcm_get_version_info)(char* buffer, uint32_t buffer_size){nullptr}; + +static const dynamic_link_descriptor tcm_link_table[] = { + DLD(tcmConnect, tcm_connect), + DLD(tcmDisconnect, tcm_disconnect), + DLD(tcmRequestPermit, tcm_request_permit), + DLD(tcmGetPermitData, tcm_get_permit_data), + DLD(tcmReleasePermit, tcm_release_permit), + DLD(tcmIdlePermit, tcm_idle_permit), + DLD(tcmDeactivatePermit, tcm_deactivate_permit), + DLD(tcmActivatePermit, tcm_activate_permit), + DLD(tcmRegisterThread, tcm_register_thread), + DLD(tcmUnregisterThread, tcm_unregister_thread), + DLD(tcmGetVersionInfo, tcm_get_version_info) +}; + +#if 
TBB_USE_DEBUG +#define DEBUG_SUFFIX "_debug" +#else +#define DEBUG_SUFFIX +#endif /* TBB_USE_DEBUG */ + +#if _WIN32 || _WIN64 +#define LIBRARY_EXTENSION ".dll" +#define LIBRARY_PREFIX +#elif __unix__ +#define LIBRARY_EXTENSION ".so.1" +#define LIBRARY_PREFIX "lib" +#else +#define LIBRARY_EXTENSION +#define LIBRARY_PREFIX +#endif /* __unix__ */ + +#define TCMLIB_NAME LIBRARY_PREFIX "tcm" DEBUG_SUFFIX LIBRARY_EXTENSION + +static bool tcm_functions_loaded{ false }; +} + +class tcm_client : public pm_client { + using tcm_client_mutex_type = d1::mutex; +public: + tcm_client(tcm_adaptor& adaptor, arena& a) : pm_client(a), my_tcm_adaptor(adaptor) {} + + ~tcm_client() { + if (my_permit_handle) { + __TBB_ASSERT(tcm_release_permit, nullptr); + auto res = tcm_release_permit(my_permit_handle); + __TBB_ASSERT_EX(res == TCM_RESULT_SUCCESS, nullptr); + } + } + + int update_concurrency(uint32_t concurrency) { + return my_arena.update_concurrency(concurrency); + } + + unsigned priority_level() { + return my_arena.priority_level(); + } + + tcm_permit_request_t& permit_request() { + return my_permit_request; + } + + tcm_permit_handle_t& permit_handle() { + return my_permit_handle; + } + + void actualize_permit() { + __TBB_ASSERT(tcm_get_permit_data, nullptr); + int delta{}; + { + tcm_client_mutex_type::scoped_lock lock(my_permit_mutex); + + uint32_t new_concurrency{}; + tcm_permit_t new_permit{ &new_concurrency, nullptr, 1, TCM_PERMIT_STATE_VOID, {} }; + auto res = tcm_get_permit_data(my_permit_handle, &new_permit); + __TBB_ASSERT_EX(res == TCM_RESULT_SUCCESS, nullptr); + + // The permit has changed during the reading, so the callback will be invoked soon one more time and + // we can just skip this renegotiation iteration. + if (!new_permit.flags.stale) { + // If there is no other demand in TCM, the permit may still have granted concurrency but + // be in the deactivated state thus we enforce 0 allotment to preserve arena invariants. + delta = update_concurrency(new_permit.state != TCM_PERMIT_STATE_INACTIVE ? new_concurrency : 0); + } + } + if (delta) { + my_tcm_adaptor.notify_thread_request(delta); + } + } + + void request_permit(tcm_client_id_t client_id) { + __TBB_ASSERT(tcm_request_permit, nullptr); + + my_permit_request.max_sw_threads = max_workers(); + my_permit_request.min_sw_threads = my_permit_request.max_sw_threads == 0 ? 
0 : min_workers(); + + if (my_permit_request.constraints_size > 0) { + my_permit_request.cpu_constraints->min_concurrency = my_permit_request.min_sw_threads; + my_permit_request.cpu_constraints->max_concurrency = my_permit_request.max_sw_threads; + } + + __TBB_ASSERT(my_permit_request.max_sw_threads >= my_permit_request.min_sw_threads, nullptr); + + tcm_result_t res = tcm_request_permit(client_id, my_permit_request, this, &my_permit_handle, nullptr); + __TBB_ASSERT_EX(res == TCM_RESULT_SUCCESS, nullptr); + } + + void deactivate_permit() { + __TBB_ASSERT(tcm_deactivate_permit, nullptr); + tcm_result_t res = tcm_deactivate_permit(my_permit_handle); + __TBB_ASSERT_EX(res == TCM_RESULT_SUCCESS, nullptr); + } + + void init(tcm_client_id_t client_id, d1::constraints& constraints) { + __TBB_ASSERT(tcm_request_permit, nullptr); + __TBB_ASSERT(tcm_deactivate_permit, nullptr); + + if (constraints.core_type != d1::task_arena::automatic || + constraints.numa_id != d1::task_arena::automatic || + constraints.max_threads_per_core != d1::task_arena::automatic) + { + my_permit_constraints.max_concurrency = constraints.max_concurrency; + my_permit_constraints.min_concurrency = 0; + my_permit_constraints.core_type_id = constraints.core_type; + my_permit_constraints.numa_id = constraints.numa_id; + my_permit_constraints.threads_per_core = constraints.max_threads_per_core; + + my_permit_request.cpu_constraints = &my_permit_constraints; + my_permit_request.constraints_size = 1; + } + + my_permit_request.min_sw_threads = 0; + my_permit_request.max_sw_threads = 0; + my_permit_request.flags.request_as_inactive = 1; + + tcm_result_t res = tcm_request_permit(client_id, my_permit_request, this, &my_permit_handle, nullptr); + __TBB_ASSERT_EX(res == TCM_RESULT_SUCCESS, nullptr); + + my_permit_request.flags.request_as_inactive = 0; + } + + void register_thread() override { + __TBB_ASSERT(tcm_register_thread, nullptr); + auto return_code = tcm_register_thread(my_permit_handle); + __TBB_ASSERT_EX(return_code == TCM_RESULT_SUCCESS, nullptr); + } + + void unregister_thread() override { + __TBB_ASSERT(tcm_unregister_thread, nullptr); + auto return_code = tcm_unregister_thread(); + __TBB_ASSERT_EX(return_code == TCM_RESULT_SUCCESS, nullptr); + } + +private: + tcm_cpu_constraints_t my_permit_constraints = TCM_PERMIT_REQUEST_CONSTRAINTS_INITIALIZER; + tcm_permit_request_t my_permit_request = TCM_PERMIT_REQUEST_INITIALIZER; + tcm_permit_handle_t my_permit_handle{}; + tcm_client_mutex_type my_permit_mutex; + tcm_adaptor& my_tcm_adaptor; +}; + +//------------------------------------------------------------------------ +// tcm_adaptor_impl +//------------------------------------------------------------------------ + +struct tcm_adaptor_impl { + using demand_mutex_type = d1::mutex; + demand_mutex_type my_demand_mutex; + tcm_client_id_t client_id{}; + + tcm_adaptor_impl(tcm_client_id_t id) : client_id(id) + {} +}; + +//------------------------------------------------------------------------ +// tcm_adaptor +//------------------------------------------------------------------------ + +tcm_result_t renegotiation_callback(tcm_permit_handle_t, void* client_ptr, tcm_callback_flags_t) { + __TBB_ASSERT(client_ptr, nullptr); + static_cast<tcm_client*>(client_ptr)->actualize_permit(); + return TCM_RESULT_SUCCESS; +} + +void tcm_adaptor::initialize() { + tcm_functions_loaded = dynamic_link(TCMLIB_NAME, tcm_link_table, /* tcm_link_table size = */ 11); +} + +bool tcm_adaptor::is_initialized() { + return tcm_functions_loaded; +} + +void 
tcm_adaptor::print_version() { + if (is_initialized()) { + __TBB_ASSERT(tcm_get_version_info, nullptr); + char buffer[1024]; + tcm_get_version_info(buffer, 1024); + std::fprintf(stderr, "%.*s", 1024, buffer); + } +} + +tcm_adaptor::tcm_adaptor() { + __TBB_ASSERT(tcm_connect, nullptr); + tcm_client_id_t client_id{}; + auto return_code = tcm_connect(renegotiation_callback, &client_id); + if (return_code == TCM_RESULT_SUCCESS) { + my_impl = make_cache_aligned_unique<tcm_adaptor_impl>(client_id); + } +} + +tcm_adaptor::~tcm_adaptor() { + if (my_impl) { + __TBB_ASSERT(tcm_disconnect, nullptr); + auto return_code = tcm_disconnect(my_impl->client_id); + __TBB_ASSERT_EX(return_code == TCM_RESULT_SUCCESS, nullptr); + my_impl = nullptr; + } +} + +bool tcm_adaptor::is_connected() { + return my_impl != nullptr; +} + +pm_client* tcm_adaptor::create_client(arena& a) { + return new (cache_aligned_allocate(sizeof(tcm_client))) tcm_client(*this, a); +} + +void tcm_adaptor::register_client(pm_client* c, d1::constraints& constraints) { + static_cast<tcm_client*>(c)->init(my_impl->client_id, constraints); +} + +void tcm_adaptor::unregister_and_destroy_client(pm_client& c) { + auto& client = static_cast<tcm_client&>(c); + + { + tcm_adaptor_impl::demand_mutex_type::scoped_lock lock(my_impl->my_demand_mutex); + client.~tcm_client(); + } + cache_aligned_deallocate(&client); +} + +void tcm_adaptor::set_active_num_workers(int) {} + + +void tcm_adaptor::adjust_demand(pm_client& c, int mandatory_delta, int workers_delta) { + __TBB_ASSERT(-1 <= mandatory_delta && mandatory_delta <= 1, nullptr); + + auto& client = static_cast<tcm_client&>(c); + { + tcm_adaptor_impl::demand_mutex_type::scoped_lock lock(my_impl->my_demand_mutex); + + // Update client's state + workers_delta = client.update_request(mandatory_delta, workers_delta); + if (workers_delta == 0) return; + + if (client.max_workers() == 0) { + client.deactivate_permit(); + } else { + client.request_permit(my_impl->client_id); + } + } + + client.actualize_permit(); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/tcm_adaptor.h b/src/tbb/src/tbb/tcm_adaptor.h new file mode 100644 index 000000000..f9f4d5277 --- /dev/null +++ b/src/tbb/src/tbb/tcm_adaptor.h @@ -0,0 +1,63 @@ +/* + Copyright (c) 2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef _TBB_tcm_adaptor_H +#define _TBB_tcm_adaptor_H + +#include "scheduler_common.h" + +#include "permit_manager.h" +#include "pm_client.h" + +namespace tbb { +namespace detail { +namespace r1 { + +struct tcm_adaptor_impl; + +//------------------------------------------------------------------------ +// Class tcm_adaptor +//------------------------------------------------------------------------ + +class tcm_adaptor : public permit_manager { +public: + tcm_adaptor(); + ~tcm_adaptor(); + + pm_client* create_client(arena& a) override; + void register_client(pm_client* client, d1::constraints& constraints) override; + void unregister_and_destroy_client(pm_client& c) override; + + void set_active_num_workers(int soft_limit) override; + + void adjust_demand(pm_client& c, int mandatory_delta, int workers_delta) override; + + bool is_connected(); + + static void initialize(); + static bool is_initialized(); + static void print_version(); +private: + cache_aligned_unique_ptr<tcm_adaptor_impl> my_impl; + + friend class tcm_client; +}; // class tcm_adaptor + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif /* _TBB_tcm_adaptor_H */ diff --git a/src/tbb/src/tbb/thread_control_monitor.h b/src/tbb/src/tbb/thread_control_monitor.h new file mode 100644 index 000000000..f9c3cacc9 --- /dev/null +++ b/src/tbb/src/tbb/thread_control_monitor.h @@ -0,0 +1,116 @@ +/* + Copyright (c) 2021-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_thread_control_monitor_H +#define __TBB_thread_control_monitor_H + +#include "concurrent_monitor.h" +#include "scheduler_common.h" + +#include <atomic> + +namespace tbb { +namespace detail { +namespace r1 { + +struct market_context { + market_context() = default; + + market_context(std::uintptr_t first_addr, arena* a) : + my_uniq_addr(first_addr), my_arena_addr(a) + {} + + std::uintptr_t my_uniq_addr{0}; + arena* my_arena_addr{nullptr}; +}; + +#if __TBB_RESUMABLE_TASKS +class resume_node : public wait_node<market_context> { + using base_type = wait_node<market_context>; +public: + resume_node(market_context ctx, execution_data_ext& ed_ext, task_dispatcher& target) + : base_type(ctx), my_curr_dispatcher(ed_ext.task_disp), my_target_dispatcher(&target) + , my_suspend_point(my_curr_dispatcher->get_suspend_point()) + {} + + ~resume_node() override { + if (this->my_skipped_wakeup) { + spin_wait_until_eq(this->my_notify_calls, 1); + } + + poison_pointer(my_curr_dispatcher); + poison_pointer(my_target_dispatcher); + poison_pointer(my_suspend_point); + } + + void init() override { + base_type::init(); + } + + void wait() override { + my_curr_dispatcher->resume(*my_target_dispatcher); + __TBB_ASSERT(!this->my_is_in_list.load(std::memory_order_relaxed), "Still in the queue?"); + } + + void reset() override { + base_type::reset(); + spin_wait_until_eq(this->my_notify_calls, 1); + my_notify_calls.store(0, std::memory_order_relaxed); + } + + // notify is called (perhaps, concurrently) twice from: + // - concurrent_monitor::notify + // - post_resume_action::register_waiter + // The second notify is called after thread switches the stack + // (Because we can not call resume while the stack is occupied) + // We need calling resume only when both notifications are performed. + void notify() override { + if (++my_notify_calls == 2) { + r1::resume(my_suspend_point); + } + } + +private: + friend class thread_data; + friend struct suspend_point_type::resume_task; + task_dispatcher* my_curr_dispatcher; + task_dispatcher* my_target_dispatcher; + suspend_point_type* my_suspend_point; + std::atomic<int> my_notify_calls{0}; +}; +#endif // __TBB_RESUMABLE_TASKS + +class thread_control_monitor : public concurrent_monitor_base<market_context> { + using base_type = concurrent_monitor_base<market_context>; +public: + using base_type::base_type; + + ~thread_control_monitor() { + destroy(); + } + + /** per-thread descriptor for concurrent_monitor */ + using thread_context = sleep_node<market_context>; +#if __TBB_RESUMABLE_TASKS + using resume_context = resume_node; +#endif +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_thread_control_monitor_H diff --git a/src/tbb/src/tbb/thread_data.h b/src/tbb/src/tbb/thread_data.h new file mode 100644 index 000000000..422ec694e --- /dev/null +++ b/src/tbb/src/tbb/thread_data.h @@ -0,0 +1,262 @@ +/* + Copyright (c) 2020-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#ifndef __TBB_thread_data_H
+#define __TBB_thread_data_H
+
+#include "oneapi/tbb/detail/_task.h"
+#include "oneapi/tbb/task.h"
+
+#include "rml_base.h" // rml::job
+
+#include "scheduler_common.h"
+#include "arena.h"
+#include "concurrent_monitor.h"
+#include "mailbox.h"
+#include "misc.h" // FastRandom
+#include "small_object_pool_impl.h"
+#include "intrusive_list.h"
+
+#include <atomic>
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+class task;
+class arena_slot;
+class task_group_context;
+class task_dispatcher;
+class thread_dispatcher_client;
+
+class context_list : public intrusive_list<d1::intrusive_list_node> {
+public:
+    bool orphaned{false};
+
+    //! Last state propagation epoch known to this thread
+    /** Together with the_context_state_propagation_epoch, this constitutes a synchronization
+        protocol that keeps the hot path of task group context construction and destruction
+        mostly lock-free.
+        When the local epoch equals the global one, the state of task group contexts
+        registered with this thread is consistent with that of the task group trees
+        they belong to. **/
+    std::atomic<std::uintptr_t> epoch{};
+
+    //! Mutex protecting access to the list of task group contexts.
+    d1::mutex m_mutex{};
+
+    void destroy() {
+        this->~context_list();
+        cache_aligned_deallocate(this);
+    }
+
+    void remove(d1::intrusive_list_node& val) {
+        mutex::scoped_lock lock(m_mutex);
+
+        intrusive_list<d1::intrusive_list_node>::remove(val);
+
+        if (orphaned && empty()) {
+            lock.release();
+            destroy();
+        }
+    }
+
+    void push_front(d1::intrusive_list_node& val) {
+        mutex::scoped_lock lock(m_mutex);
+
+        intrusive_list<d1::intrusive_list_node>::push_front(val);
+    }
+
+    void orphan() {
+        mutex::scoped_lock lock(m_mutex);
+
+        orphaned = true;
+        if (empty()) {
+            lock.release();
+            destroy();
+        }
+    }
+};
+
+//------------------------------------------------------------------------
+// Thread Data
+//------------------------------------------------------------------------
+class thread_data : public ::rml::job
+                  , public d1::intrusive_list_node
+                  , no_copy {
+public:
+    thread_data(unsigned short index, bool is_worker)
+        : my_arena_index{ index }
+        , my_is_worker{ is_worker }
+        , my_is_registered { false }
+        , my_task_dispatcher{ nullptr }
+        , my_arena{ nullptr }
+        , my_last_client{ nullptr }
+        , my_arena_slot{}
+        , my_random{ this }
+        , my_last_observer{ nullptr }
+        , my_small_object_pool{new (cache_aligned_allocate(sizeof(small_object_pool_impl))) small_object_pool_impl{}}
+        , my_context_list(new (cache_aligned_allocate(sizeof(context_list))) context_list{})
+#if __TBB_RESUMABLE_TASKS
+        , my_post_resume_action{ task_dispatcher::post_resume_action::none }
+        , my_post_resume_arg{nullptr}
+#endif /* __TBB_RESUMABLE_TASKS */
+    {
+        ITT_SYNC_CREATE(&my_context_list->m_mutex, SyncType_Scheduler, SyncObj_ContextsList);
+    }
+
+    ~thread_data() {
+        my_context_list->orphan();
+        my_small_object_pool->destroy();
+        poison_pointer(my_task_dispatcher);
+        poison_pointer(my_arena);
+        poison_pointer(my_arena_slot);
+        poison_pointer(my_last_observer);
+        poison_pointer(my_small_object_pool);
+        poison_pointer(my_context_list);
+#if __TBB_RESUMABLE_TASKS
+        poison_pointer(my_post_resume_arg);
+#endif /* __TBB_RESUMABLE_TASKS */
+    }
+
+    void attach_arena(arena& a, std::size_t index);
+    bool is_attached_to(arena*);
+    void attach_task_dispatcher(task_dispatcher&);
+    void detach_task_dispatcher();
+    void enter_task_dispatcher(task_dispatcher& task_disp, std::uintptr_t stealing_threshold);
+    void leave_task_dispatcher();
+    void propagate_task_group_state(std::atomic<std::uint32_t> d1::task_group_context::* mptr_state, d1::task_group_context& src, std::uint32_t new_state);
+
+    //! Index of the arena slot the scheduler occupies now, or occupied last time
+    unsigned short my_arena_index;
+
+    //! Indicates if the thread is created by RML
+    const bool my_is_worker;
+
+    bool my_is_registered;
+
+    //! The current task dispatcher
+    task_dispatcher* my_task_dispatcher;
+
+    //! The arena that I own (if external thread) or am servicing at the moment (if worker)
+    arena* my_arena;
+
+    thread_dispatcher_client* my_last_client;
+
+    //! Pointer to the slot in the arena we own at the moment
+    arena_slot* my_arena_slot;
+
+    //! The mailbox (affinity mechanism) the current thread attached to
+    mail_inbox my_inbox;
+
+    //! The random generator
+    FastRandom my_random;
+
+    //! Last observer in the observers list processed on this slot
+    observer_proxy* my_last_observer;
+
+    //! Pool of small objects for fast task allocation
+    small_object_pool_impl* my_small_object_pool;
+
+    context_list* my_context_list;
+#if __TBB_RESUMABLE_TASKS
+    //! Suspends the current coroutine (task_dispatcher).
+    void suspend(void* suspend_callback, void* user_callback);
+
+    //! Resumes the target task_dispatcher.
+    void resume(task_dispatcher& target);
+
+    //! Set the post resume action to perform after resume.
+    void set_post_resume_action(task_dispatcher::post_resume_action pra, void* arg) {
+        __TBB_ASSERT(my_post_resume_action == task_dispatcher::post_resume_action::none, "The post resume action must not already be set");
+        __TBB_ASSERT(!my_post_resume_arg, "The post resume action must not have an argument");
+        my_post_resume_action = pra;
+        my_post_resume_arg = arg;
+    }
+
+    void clear_post_resume_action() {
+        my_post_resume_action = task_dispatcher::post_resume_action::none;
+        my_post_resume_arg = nullptr;
+    }
+
+    //! The post resume action requested after the context swap.
+    task_dispatcher::post_resume_action my_post_resume_action;
+
+    //! The post resume action argument.
+    void* my_post_resume_arg;
+#endif /* __TBB_RESUMABLE_TASKS */
+
+    //! The default context
+    // TODO: consider using a common default context because it is used only to simplify
+    // the cancellation check.
+ d1::task_group_context my_default_context; +}; + +inline void thread_data::attach_arena(arena& a, std::size_t index) { + my_arena = &a; + my_arena_index = static_cast<unsigned short>(index); + my_arena_slot = a.my_slots + index; + // Read the current slot mail_outbox and attach it to the mail_inbox (remove inbox later maybe) + my_inbox.attach(my_arena->mailbox(index)); +} + +inline bool thread_data::is_attached_to(arena* a) { return my_arena == a; } + +inline void thread_data::attach_task_dispatcher(task_dispatcher& task_disp) { + __TBB_ASSERT(my_task_dispatcher == nullptr, nullptr); + __TBB_ASSERT(task_disp.m_thread_data == nullptr, nullptr); + task_disp.m_thread_data = this; + my_task_dispatcher = &task_disp; +} + +inline void thread_data::detach_task_dispatcher() { + __TBB_ASSERT(my_task_dispatcher != nullptr, nullptr); + __TBB_ASSERT(my_task_dispatcher->m_thread_data == this, nullptr); + my_task_dispatcher->m_thread_data = nullptr; + my_task_dispatcher = nullptr; +} + +inline void thread_data::enter_task_dispatcher(task_dispatcher& task_disp, std::uintptr_t stealing_threshold) { + task_disp.set_stealing_threshold(stealing_threshold); + attach_task_dispatcher(task_disp); +} + +inline void thread_data::leave_task_dispatcher() { + my_task_dispatcher->set_stealing_threshold(0); + detach_task_dispatcher(); +} + +inline void thread_data::propagate_task_group_state(std::atomic<std::uint32_t> d1::task_group_context::* mptr_state, d1::task_group_context& src, std::uint32_t new_state) { + mutex::scoped_lock lock(my_context_list->m_mutex); + // Acquire fence is necessary to ensure that the subsequent node->my_next load + // returned the correct value in case it was just inserted in another thread. + // The fence also ensures visibility of the correct ctx.my_parent value. + for (context_list::iterator it = my_context_list->begin(); it != my_context_list->end(); ++it) { + d1::task_group_context& ctx = __TBB_get_object_ref(d1::task_group_context, my_node, &(*it)); + if ((ctx.*mptr_state).load(std::memory_order_relaxed) != new_state) + task_group_context_impl::propagate_task_group_state(ctx, mptr_state, src, new_state); + } + // Sync up local propagation epoch with the global one. Release fence prevents + // reordering of possible store to *mptr_state after the sync point. + my_context_list->epoch.store(the_context_state_propagation_epoch.load(std::memory_order_relaxed), std::memory_order_release); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // __TBB_thread_data_H + diff --git a/src/tbb/src/tbb/thread_dispatcher.cpp b/src/tbb/src/tbb/thread_dispatcher.cpp new file mode 100644 index 000000000..69a108d6f --- /dev/null +++ b/src/tbb/src/tbb/thread_dispatcher.cpp @@ -0,0 +1,236 @@ +/* + Copyright (c) 2022-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#include "thread_dispatcher.h"
+#include "threading_control.h"
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+thread_dispatcher::thread_dispatcher(threading_control& tc, unsigned hard_limit, std::size_t stack_size)
+    : my_threading_control(tc)
+    , my_num_workers_hard_limit(hard_limit)
+    , my_stack_size(stack_size)
+{
+    my_server = governor::create_rml_server( *this );
+    __TBB_ASSERT( my_server, "Failed to create RML server" );
+}
+
+thread_dispatcher::~thread_dispatcher() {
+    poison_pointer(my_server);
+}
+
+thread_dispatcher_client* thread_dispatcher::select_next_client(thread_dispatcher_client* hint) {
+    unsigned next_client_priority_level = num_priority_levels;
+    if (hint) {
+        next_client_priority_level = hint->priority_level();
+    }
+
+    for (unsigned idx = 0; idx < next_client_priority_level; ++idx) {
+        if (!my_client_list[idx].empty()) {
+            return &*my_client_list[idx].begin();
+        }
+    }
+
+    return hint;
+}
+
+thread_dispatcher_client* thread_dispatcher::create_client(arena& a) {
+    return new (cache_aligned_allocate(sizeof(thread_dispatcher_client))) thread_dispatcher_client(a, my_clients_aba_epoch);
+}
+
+
+void thread_dispatcher::register_client(thread_dispatcher_client* client) {
+    client_list_mutex_type::scoped_lock lock(my_list_mutex);
+    insert_client(*client);
+}
+
+bool thread_dispatcher::try_unregister_client(thread_dispatcher_client* client, std::uint64_t aba_epoch, unsigned priority) {
+    __TBB_ASSERT(client, nullptr);
+    // We hold a reference to the server, so the market cannot be destroyed at this point.
+    __TBB_ASSERT(!is_poisoned(my_server), nullptr);
+    my_list_mutex.lock();
+    for (auto& it : my_client_list[priority]) {
+        if (client == &it) {
+            if (it.get_aba_epoch() == aba_epoch) {
+                // Client is alive.
+                // Acquire my_references to sync with threads that just left the arena.
+                // Note that references must be read before workers_requested because,
+                // if references is nonzero, some other thread might call adjust_demand
+                // and cause a race over workers_requested.
+                if (!client->references() && !client->has_request()) {
+                    // Client is abandoned. Destroy it.
+                    remove_client(*client);
+                    ++my_clients_aba_epoch;
+
+                    my_list_mutex.unlock();
+                    destroy_client(client);
+
+                    return true;
+                }
+            }
+            break;
+        }
+    }
+    my_list_mutex.unlock();
+    return false;
+}
+
+void thread_dispatcher::destroy_client(thread_dispatcher_client* client) {
+    client->~thread_dispatcher_client();
+    cache_aligned_deallocate(client);
+}
+
+// Should be called under the lock.
+void thread_dispatcher::insert_client(thread_dispatcher_client& client) {
+    __TBB_ASSERT(client.priority_level() < num_priority_levels, nullptr);
+    my_client_list[client.priority_level()].push_front(client);
+
+    __TBB_ASSERT(!my_next_client || my_next_client->priority_level() < num_priority_levels, nullptr);
+    my_next_client = select_next_client(my_next_client);
+}
+
+// Should be called under the lock.
+void thread_dispatcher::remove_client(thread_dispatcher_client& client) {
+    __TBB_ASSERT(client.priority_level() < num_priority_levels, nullptr);
+    my_client_list[client.priority_level()].remove(client);
+
+    if (my_next_client == &client) {
+        my_next_client = nullptr;
+    }
+    my_next_client = select_next_client(my_next_client);
+}
+
+bool thread_dispatcher::is_client_alive(thread_dispatcher_client* client) {
+    if (!client) {
+        return false;
+    }
+
+    // Still cannot access internals of the client since the object itself might be destroyed.
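+    // The loop below therefore only compares node addresses and never dereferences
+    // 'client', so the check remains safe even for an already-destroyed object.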
+    for (auto& priority_list : my_client_list) {
+        for (auto& c : priority_list) {
+            if (client == &c) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+thread_dispatcher_client* thread_dispatcher::client_in_need(client_list_type* clients, thread_dispatcher_client* hint) {
+    // TODO: make sure a client with higher priority is returned only if it has available slots.
+    hint = select_next_client(hint);
+    if (!hint) {
+        return nullptr;
+    }
+
+    client_list_type::iterator it = hint;
+    unsigned curr_priority_level = hint->priority_level();
+    __TBB_ASSERT(it != clients[curr_priority_level].end(), nullptr);
+    do {
+        thread_dispatcher_client& t = *it;
+        if (++it == clients[curr_priority_level].end()) {
+            do {
+                ++curr_priority_level %= num_priority_levels;
+            } while (clients[curr_priority_level].empty());
+            it = clients[curr_priority_level].begin();
+        }
+        if (t.try_join()) {
+            return &t;
+        }
+    } while (it != hint);
+    return nullptr;
+}
+
+thread_dispatcher_client* thread_dispatcher::client_in_need(thread_dispatcher_client* prev) {
+    client_list_mutex_type::scoped_lock lock(my_list_mutex, /*is_writer=*/false);
+    if (is_client_alive(prev)) {
+        return client_in_need(my_client_list, prev);
+    }
+    return client_in_need(my_client_list, my_next_client);
+}
+
+bool thread_dispatcher::is_any_client_in_need() {
+    client_list_mutex_type::scoped_lock lock(my_list_mutex, /*is_writer=*/false);
+    for (auto& priority_list : my_client_list) {
+        for (auto& client : priority_list) {
+            if (client.is_joinable()) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+void thread_dispatcher::adjust_job_count_estimate(int delta) {
+    my_server->adjust_job_count_estimate(delta);
+}
+
+void thread_dispatcher::release(bool blocking_terminate) {
+    my_join_workers = blocking_terminate;
+    my_server->request_close_connection();
+}
+
+void thread_dispatcher::process(job& j) {
+    thread_data& td = static_cast<thread_data&>(j);
+    // td.my_last_client can be dead; don't access it until client_in_need is called.
+    thread_dispatcher_client* client = td.my_last_client;
+    for (int i = 0; i < 2; ++i) {
+        while ((client = client_in_need(client)) ) {
+            td.my_last_client = client;
+            client->process(td);
+        }
+        // Workers leave the thread_dispatcher because there is no client in need. That can happen
+        // before adjust_job_count_estimate() decreases my_slack and before RML puts this thread to
+        // sleep, which might result in a busy loop that checks for my_slack < 0 and re-enters this
+        // method immediately. The yield below tempers this spinning.
+        if ( !i ) {
+            yield();
+        }
+    }
+}
+
+
+//! Used when RML asks for join mode during worker termination.
+bool thread_dispatcher::must_join_workers() const { return my_join_workers; }
+
+//! Returns the requested stack size of worker threads.
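+// The value is fixed at construction: threading_control_impl passes the active
+// global_control::thread_stack_size setting (see threading_control.cpp below).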
+std::size_t thread_dispatcher::worker_stack_size() const { return my_stack_size; } + +void thread_dispatcher::acknowledge_close_connection() { + my_threading_control.destroy(); +} + +::rml::job* thread_dispatcher::create_one_job() { + unsigned short index = ++my_first_unused_worker_idx; + __TBB_ASSERT(index > 0, nullptr); + ITT_THREAD_SET_NAME(_T("TBB Worker Thread")); + // index serves as a hint decreasing conflicts between workers when they migrate between arenas + thread_data* td = new (cache_aligned_allocate(sizeof(thread_data))) thread_data{ index, true }; + __TBB_ASSERT(index <= my_num_workers_hard_limit, nullptr); + my_threading_control.register_thread(*td); + return td; +} + +void thread_dispatcher::cleanup(job& j) { + my_threading_control.unregister_thread(static_cast<thread_data&>(j)); + governor::auto_terminate(&j); +} + +} // namespace r1 +} // namespace detail +} // namespace tbb diff --git a/src/tbb/src/tbb/thread_dispatcher.h b/src/tbb/src/tbb/thread_dispatcher.h new file mode 100644 index 000000000..e511e2b73 --- /dev/null +++ b/src/tbb/src/tbb/thread_dispatcher.h @@ -0,0 +1,107 @@ +/* + Copyright (c) 2022-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_thread_dispatcher_H +#define _TBB_thread_dispatcher_H + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/rw_mutex.h" +#include "oneapi/tbb/task_arena.h" + +#include "arena.h" +#include "governor.h" +#include "thread_data.h" +#include "rml_tbb.h" +#include "thread_dispatcher_client.h" + +namespace tbb { +namespace detail { +namespace r1 { + +class threading_control_impl; + +class thread_dispatcher : no_copy, rml::tbb_client { + using client_list_type = intrusive_list<thread_dispatcher_client>; + using client_list_mutex_type = d1::rw_mutex; +public: + thread_dispatcher(threading_control& tc, unsigned hard_limit, std::size_t stack_size); + ~thread_dispatcher(); + + thread_dispatcher_client* create_client(arena& a); + void register_client(thread_dispatcher_client* client); + bool try_unregister_client(thread_dispatcher_client* client, std::uint64_t aba_epoch, unsigned priority); + bool is_any_client_in_need(); + + void adjust_job_count_estimate(int delta); + void release(bool blocking_terminate); + void process(job& j) override; + //! Used when RML asks for join mode during workers termination. + bool must_join_workers() const; + //! Returns the requested stack size of worker threads. 
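+    // (Also exposed to RML through the min_stack_size() override below.)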
+ std::size_t worker_stack_size() const; + +private: + version_type version () const override { return 0; } + unsigned max_job_count () const override { return my_num_workers_hard_limit; } + std::size_t min_stack_size () const override { return worker_stack_size(); } + void cleanup(job& j) override; + void acknowledge_close_connection() override; + ::rml::job* create_one_job() override; + + thread_dispatcher_client* select_next_client(thread_dispatcher_client* hint); + void destroy_client(thread_dispatcher_client* client); + void insert_client(thread_dispatcher_client& client); + void remove_client(thread_dispatcher_client& client); + bool is_client_alive(thread_dispatcher_client* client); + thread_dispatcher_client* client_in_need(client_list_type* clients, thread_dispatcher_client* hint); + thread_dispatcher_client* client_in_need(thread_dispatcher_client* prev); + + friend class threading_control_impl; + static constexpr unsigned num_priority_levels = d1::num_priority_levels; + client_list_mutex_type my_list_mutex; + client_list_type my_client_list[num_priority_levels]; + + thread_dispatcher_client* my_next_client{nullptr}; + + //! Shutdown mode + bool my_join_workers{false}; + + threading_control& my_threading_control; + + //! ABA prevention marker to assign to newly created clients + std::atomic<std::uint64_t> my_clients_aba_epoch{0}; + + //! Maximal number of workers allowed for use by the underlying resource manager + /** It can't be changed after thread_dispatcher creation. **/ + unsigned my_num_workers_hard_limit{0}; + + //! Stack size of worker threads + std::size_t my_stack_size{0}; + + //! First unused index of worker + /** Used to assign indices to the new workers coming from RML **/ + std::atomic<unsigned> my_first_unused_worker_idx{0}; + + //! Pointer to the RML server object that services this TBB instance. + rml::tbb_server* my_server{nullptr}; +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // _TBB_thread_dispatcher_H diff --git a/src/tbb/src/tbb/thread_dispatcher_client.h b/src/tbb/src/tbb/thread_dispatcher_client.h new file mode 100644 index 000000000..f7c199cb8 --- /dev/null +++ b/src/tbb/src/tbb/thread_dispatcher_client.h @@ -0,0 +1,69 @@ +/* + Copyright (c) 2022-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef _TBB_thread_dispatcher_client_H +#define _TBB_thread_dispatcher_client_H + +#include "oneapi/tbb/detail/_intrusive_list_node.h" +#include "arena.h" + +namespace tbb { +namespace detail { +namespace r1 { + +class thread_dispatcher_client : public d1::intrusive_list_node /* Need for list in thread pool */ { +public: + thread_dispatcher_client(arena& a, std::uint64_t aba_epoch) : my_arena(a), my_aba_epoch(aba_epoch) {} + + // Interface of communication with thread pool + bool try_join() { + return my_arena.try_join(); + } + + bool is_joinable() { + return my_arena.is_joinable(); + } + + void process(thread_data& td) { + my_arena.process(td); + } + + unsigned priority_level() { + return my_arena.priority_level(); + } + + std::uint64_t get_aba_epoch() { + return my_aba_epoch; + } + + unsigned references() { + return my_arena.references(); + } + + bool has_request() { + return my_arena.has_request(); + } + +private: + arena& my_arena; + std::uint64_t my_aba_epoch; +}; + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // _TBB_thread_dispatcher_client_H diff --git a/src/tbb/src/tbb/thread_request_serializer.cpp b/src/tbb/src/tbb/thread_request_serializer.cpp new file mode 100644 index 000000000..41cf51b0b --- /dev/null +++ b/src/tbb/src/tbb/thread_request_serializer.cpp @@ -0,0 +1,139 @@ +/* + Copyright (c) 2022-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#include "misc.h"
+#include "thread_request_serializer.h"
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+thread_request_serializer::thread_request_serializer(thread_dispatcher& td, int soft_limit)
+    : my_thread_dispatcher(td)
+    , my_soft_limit(soft_limit)
+{}
+
+void thread_request_serializer::update(int delta) {
+    constexpr std::uint64_t delta_mask = (pending_delta_base << 1) - 1;
+    constexpr std::uint64_t counter_value = delta_mask + 1;
+
+    int prev_pending_delta = my_pending_delta.fetch_add(counter_value + delta);
+
+    // This acts as a pseudo request aggregator: only the thread that sees pending_delta_base
+    // in my_pending_delta enters the critical section and calls adjust_job_count_estimate.
+    if (prev_pending_delta == pending_delta_base) {
+        delta = int(my_pending_delta.exchange(pending_delta_base) & delta_mask) - int(pending_delta_base);
+        mutex_type::scoped_lock lock(my_mutex);
+        my_total_request.store(my_total_request.load(std::memory_order_relaxed) + delta, std::memory_order_relaxed);
+        delta = limit_delta(delta, my_soft_limit, my_total_request.load(std::memory_order_relaxed));
+        my_thread_dispatcher.adjust_job_count_estimate(delta);
+    }
+}
+
+void thread_request_serializer::set_active_num_workers(int soft_limit) {
+    mutex_type::scoped_lock lock(my_mutex);
+    int delta = soft_limit - my_soft_limit;
+    delta = limit_delta(delta, my_total_request.load(std::memory_order_relaxed), soft_limit);
+    my_thread_dispatcher.adjust_job_count_estimate(delta);
+    my_soft_limit = soft_limit;
+}
+
+int thread_request_serializer::limit_delta(int delta, int limit, int new_value) {
+    // This method can be described with the following pseudocode:
+    // bool above_limit = prev_value >= limit && new_value >= limit;
+    // bool below_limit = prev_value <= limit && new_value <= limit;
+    // enum request_type { ABOVE_LIMIT, CROSS_LIMIT, BELOW_LIMIT };
+    // request = above_limit ? ABOVE_LIMIT : below_limit ? BELOW_LIMIT : CROSS_LIMIT;
+
+    // switch (request) {
+    // case ABOVE_LIMIT:
+    //     delta = 0;
+    // case CROSS_LIMIT:
+    //     delta = delta > 0 ? limit - prev_value : new_value - limit;
+    // case BELOW_LIMIT:
+    //     // No changes to delta
+    // }
+
+    int prev_value = new_value - delta;
+
+    // The actual new_value and prev_value cannot exceed the limit.
+    new_value = min(limit, new_value);
+    prev_value = min(limit, prev_value);
+    return new_value - prev_value;
+}
+
+
+thread_request_serializer_proxy::thread_request_serializer_proxy(thread_dispatcher& td, int soft_limit) : my_serializer(td, soft_limit)
+{}
+
+void thread_request_serializer_proxy::register_mandatory_request(int mandatory_delta) {
+    if (mandatory_delta != 0) {
+        mutex_type::scoped_lock lock(my_mutex, /* is_write = */ false);
+        int prev_value = my_num_mandatory_requests.fetch_add(mandatory_delta);
+
+        const bool should_try_enable = mandatory_delta > 0 && prev_value == 0;
+        const bool should_try_disable = mandatory_delta < 0 && prev_value == 1;
+
+        if (should_try_enable) {
+            enable_mandatory_concurrency(lock);
+        } else if (should_try_disable) {
+            disable_mandatory_concurrency(lock);
+        }
+    }
+}
+
+void thread_request_serializer_proxy::set_active_num_workers(int soft_limit) {
+    mutex_type::scoped_lock lock(my_mutex, /* is_write = */ true);
+
+    if (soft_limit != 0) {
+        my_is_mandatory_concurrency_enabled = false;
+    } else if (my_num_mandatory_requests > 0) {
+        my_is_mandatory_concurrency_enabled = true;
+        soft_limit = 1;
+    }
+
+    my_serializer.set_active_num_workers(soft_limit);
+}
+
+int thread_request_serializer_proxy::num_workers_requested() { return my_serializer.num_workers_requested(); }
+
+void thread_request_serializer_proxy::update(int delta) { my_serializer.update(delta); }
+
+void thread_request_serializer_proxy::enable_mandatory_concurrency(mutex_type::scoped_lock& lock) {
+    lock.upgrade_to_writer();
+    bool still_should_enable = my_num_mandatory_requests.load(std::memory_order_relaxed) > 0 &&
+        !my_is_mandatory_concurrency_enabled && my_serializer.is_no_workers_avaliable();
+
+    if (still_should_enable) {
+        my_is_mandatory_concurrency_enabled = true;
+        my_serializer.set_active_num_workers(1);
+    }
+}
+
+void thread_request_serializer_proxy::disable_mandatory_concurrency(mutex_type::scoped_lock& lock) {
+    lock.upgrade_to_writer();
+    bool still_should_disable = my_num_mandatory_requests.load(std::memory_order_relaxed) <= 0 &&
+        my_is_mandatory_concurrency_enabled && !my_serializer.is_no_workers_avaliable();
+
+    if (still_should_disable) {
+        my_is_mandatory_concurrency_enabled = false;
+        my_serializer.set_active_num_workers(0);
+    }
+}
+
+} // r1
+} // detail
+} // tbb
diff --git a/src/tbb/src/tbb/thread_request_serializer.h b/src/tbb/src/tbb/thread_request_serializer.h
new file mode 100644
index 000000000..9dc9799e1
--- /dev/null
+++ b/src/tbb/src/tbb/thread_request_serializer.h
@@ -0,0 +1,84 @@
+/*
+    Copyright (c) 2022-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef _TBB_thread_serializer_handlers_H
+#define _TBB_thread_serializer_handlers_H
+
+#include "oneapi/tbb/mutex.h"
+#include "oneapi/tbb/rw_mutex.h"
+
+#include "thread_dispatcher.h"
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+class thread_request_observer {
+protected:
+    virtual ~thread_request_observer() {}
+public:
+    virtual void update(int delta) = 0;
+};
+
+
+class thread_request_serializer : public thread_request_observer {
+    using mutex_type = d1::mutex;
+public:
+    thread_request_serializer(thread_dispatcher& td, int soft_limit);
+    void set_active_num_workers(int soft_limit);
+    int num_workers_requested() { return my_total_request.load(std::memory_order_relaxed); }
+    bool is_no_workers_avaliable() { return my_soft_limit == 0; }
+
+private:
+    friend class thread_request_serializer_proxy;
+    void update(int delta) override;
+    static int limit_delta(int delta, int limit, int new_value);
+
+    thread_dispatcher& my_thread_dispatcher;
+    int my_soft_limit{ 0 };
+    std::atomic<int> my_total_request{ 0 };
+    // my_pending_delta is offset by pending_delta_base so that it can hold negative values;
+    // consider increasing the base if the thread count can exceed 1 << 15.
+    static constexpr std::uint64_t pending_delta_base = 1 << 15;
+    std::atomic<std::uint64_t> my_pending_delta{ pending_delta_base };
+    mutex_type my_mutex;
+};
+
+// Handles mandatory concurrency, i.e. enables worker threads for enqueued tasks
+class thread_request_serializer_proxy : public thread_request_observer {
+    using mutex_type = d1::rw_mutex;
+public:
+    thread_request_serializer_proxy(thread_dispatcher& td, int soft_limit);
+    void register_mandatory_request(int mandatory_delta);
+    void set_active_num_workers(int soft_limit);
+    int num_workers_requested();
+
+private:
+    void update(int delta) override;
+    void enable_mandatory_concurrency(mutex_type::scoped_lock& lock);
+    void disable_mandatory_concurrency(mutex_type::scoped_lock& lock);
+
+    std::atomic<int> my_num_mandatory_requests{0};
+    bool my_is_mandatory_concurrency_enabled{false};
+    thread_request_serializer my_serializer;
+    mutex_type my_mutex;
+};
+
+} // namespace r1
+} // namespace detail
+} // namespace tbb
+
+#endif // _TBB_thread_serializer_handlers_H
diff --git a/src/tbb/src/tbb/threading_control.cpp b/src/tbb/src/tbb/threading_control.cpp
new file mode 100644
index 000000000..7a62b337f
--- /dev/null
+++ b/src/tbb/src/tbb/threading_control.cpp
@@ -0,0 +1,411 @@
+/*
+    Copyright (c) 2022-2024 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/ + +#include "threading_control.h" +#include "permit_manager.h" +#include "market.h" +#include "tcm_adaptor.h" +#include "thread_dispatcher.h" +#include "governor.h" +#include "thread_dispatcher_client.h" + +namespace tbb { +namespace detail { +namespace r1 { + +// ---------------------------------------- threading_control_impl -------------------------------------------------------------- + +std::size_t global_control_active_value_unsafe(d1::global_control::parameter); + +std::pair<unsigned, unsigned> threading_control_impl::calculate_workers_limits() { + // Expecting that 4P is suitable for most applications. + // Limit to 2P for large thread counts. + // TODO: ask RML for max concurrency and possibly correct hard_limit + unsigned factor = governor::default_num_threads() <= 128 ? 4 : 2; + + // The requested number of threads is intentionally not considered in + // computation of the hard limit, in order to separate responsibilities + // and avoid complicated interactions between global_control and task_scheduler_init. + // The threading control guarantees that at least 256 threads can be created. + unsigned workers_app_limit = global_control_active_value_unsafe(global_control::max_allowed_parallelism); + unsigned workers_hard_limit = max(max(factor * governor::default_num_threads(), 256u), workers_app_limit); + unsigned workers_soft_limit = calc_workers_soft_limit(workers_hard_limit); + + return std::make_pair(workers_soft_limit, workers_hard_limit); +} + +unsigned threading_control_impl::calc_workers_soft_limit(unsigned workers_hard_limit) { + unsigned workers_soft_limit{}; + unsigned soft_limit = global_control_active_value_unsafe(global_control::max_allowed_parallelism); + + // if the user has not set a limit (yet), use the default value + workers_soft_limit = soft_limit != 0 ? 
soft_limit - 1 : governor::default_num_threads() - 1; + + if (workers_soft_limit >= workers_hard_limit) { + workers_soft_limit = workers_hard_limit - 1; + } + + return workers_soft_limit; +} + +cache_aligned_unique_ptr<permit_manager> threading_control_impl::make_permit_manager(unsigned workers_soft_limit) { + if (tcm_adaptor::is_initialized()) { + auto tcm = make_cache_aligned_unique<tcm_adaptor>(); + if (tcm->is_connected()) { + return tcm; + } + } + return make_cache_aligned_unique<market>(workers_soft_limit); +} + +cache_aligned_unique_ptr<thread_dispatcher> threading_control_impl::make_thread_dispatcher(threading_control& tc, + unsigned workers_soft_limit, + unsigned workers_hard_limit) +{ + stack_size_type stack_size = global_control_active_value_unsafe(global_control::thread_stack_size); + + cache_aligned_unique_ptr<thread_dispatcher> td = + make_cache_aligned_unique<thread_dispatcher>(tc, workers_hard_limit, stack_size); + // This check relies on the fact that for shared RML default_concurrency == max_concurrency + if (!governor::UsePrivateRML && td->my_server->default_concurrency() < workers_soft_limit) { + runtime_warning("RML might limit the number of workers to %u while %u is requested.\n", + td->my_server->default_concurrency(), workers_soft_limit); + } + + return td; +} + +threading_control_impl::threading_control_impl(threading_control* tc) { + unsigned workers_soft_limit{}, workers_hard_limit{}; + std::tie(workers_soft_limit, workers_hard_limit) = calculate_workers_limits(); + + my_permit_manager = make_permit_manager(workers_soft_limit); + my_thread_dispatcher = make_thread_dispatcher(*tc, workers_soft_limit, workers_hard_limit); + my_thread_request_serializer = + make_cache_aligned_unique<thread_request_serializer_proxy>(*my_thread_dispatcher, workers_soft_limit); + my_permit_manager->set_thread_request_observer(*my_thread_request_serializer); + + my_cancellation_disseminator = make_cache_aligned_unique<cancellation_disseminator>(); + my_waiting_threads_monitor = make_cache_aligned_unique<thread_control_monitor>(); +} + +void threading_control_impl::release(bool blocking_terminate) { + my_thread_dispatcher->release(blocking_terminate); +} + +void threading_control_impl::set_active_num_workers(unsigned soft_limit) { + __TBB_ASSERT(soft_limit <= my_thread_dispatcher->my_num_workers_hard_limit, nullptr); + my_thread_request_serializer->set_active_num_workers(soft_limit); + my_permit_manager->set_active_num_workers(soft_limit); +} + +threading_control_client threading_control_impl::create_client(arena& a) { + pm_client* pm_client = my_permit_manager->create_client(a); + thread_dispatcher_client* td_client = my_thread_dispatcher->create_client(a); + + return threading_control_client{pm_client, td_client}; +} + +threading_control_impl::client_snapshot threading_control_impl::prepare_client_destruction(threading_control_client client) { + auto td_client = client.get_thread_dispatcher_client(); + return {td_client->get_aba_epoch(), td_client->priority_level(), td_client, client.get_pm_client()}; +} + +bool threading_control_impl::try_destroy_client(threading_control_impl::client_snapshot snapshot) { + if (my_thread_dispatcher->try_unregister_client(snapshot.my_td_client, snapshot.aba_epoch, snapshot.priority_level)) { + my_permit_manager->unregister_and_destroy_client(*snapshot.my_pm_client); + return true; + } + return false; +} + +void threading_control_impl::publish_client(threading_control_client tc_client, d1::constraints& constraints) { + 
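+    // Editorial note (not upstream): a client is created unpublished via create_client()
+    // above and becomes visible here, where it is registered with both the permit manager
+    // (which arbitrates worker demand) and the thread dispatcher (which assigns workers).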
my_permit_manager->register_client(tc_client.get_pm_client(), constraints); + my_thread_dispatcher->register_client(tc_client.get_thread_dispatcher_client()); +} + +void threading_control_impl::register_thread(thread_data& td) { + my_cancellation_disseminator->register_thread(td); +} +void threading_control_impl::unregister_thread(thread_data& td) { + my_cancellation_disseminator->unregister_thread(td); +} + +void threading_control_impl::propagate_task_group_state(std::atomic<uint32_t> d1::task_group_context::*mptr_state, + d1::task_group_context& src, uint32_t new_state) +{ + my_cancellation_disseminator->propagate_task_group_state(mptr_state, src, new_state); +} + +std::size_t threading_control_impl::worker_stack_size() { + return my_thread_dispatcher->worker_stack_size(); +} + +unsigned threading_control_impl::max_num_workers() { + return my_thread_dispatcher->my_num_workers_hard_limit; +} + +void threading_control_impl::adjust_demand(threading_control_client tc_client, int mandatory_delta, int workers_delta) { + auto& c = *tc_client.get_pm_client(); + my_thread_request_serializer->register_mandatory_request(mandatory_delta); + my_permit_manager->adjust_demand(c, mandatory_delta, workers_delta); +} + +bool threading_control_impl::is_any_other_client_active() { + return my_thread_request_serializer->num_workers_requested() > 0 ? my_thread_dispatcher->is_any_client_in_need() : false; +} + +thread_control_monitor& threading_control_impl::get_waiting_threads_monitor() { + return *my_waiting_threads_monitor; +} + +// ---------------------------------------- threading_control ------------------------------------------------------------------- + +// Defined in global_control.cpp +void global_control_lock(); +void global_control_unlock(); + +void threading_control::add_ref(bool is_public) { + ++my_ref_count; + if (is_public) { + my_public_ref_count++; + } +} + +bool threading_control::remove_ref(bool is_public) { + if (is_public) { + __TBB_ASSERT(g_threading_control == this, "Global threading control instance was destroyed prematurely?"); + __TBB_ASSERT(my_public_ref_count.load(std::memory_order_relaxed), nullptr); + --my_public_ref_count; + } + + bool is_last_ref = --my_ref_count == 0; + if (is_last_ref) { + __TBB_ASSERT(!my_public_ref_count.load(std::memory_order_relaxed), nullptr); + g_threading_control = nullptr; + } + + return is_last_ref; +} + +threading_control* threading_control::get_threading_control(bool is_public) { + threading_control* control = g_threading_control; + if (control) { + control->add_ref(is_public); + } + + return control; +} + +threading_control* threading_control::create_threading_control() { + // Global control should be locked before threading_control_impl + global_control_lock(); + + threading_control* thr_control{ nullptr }; + try_call([&] { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + + thr_control = get_threading_control(/*public = */ true); + if (thr_control == nullptr) { + thr_control = new (cache_aligned_allocate(sizeof(threading_control))) threading_control(/*public_ref = */ 1, /*private_ref = */ 1); + thr_control->my_pimpl = make_cache_aligned_unique<threading_control_impl>(thr_control); + + __TBB_InitOnce::add_ref(); + + if (global_control_active_value_unsafe(global_control::scheduler_handle)) { + ++thr_control->my_public_ref_count; + ++thr_control->my_ref_count; + } + + g_threading_control = thr_control; + } + }).on_exception([&] { + global_control_unlock(); + + cache_aligned_deleter deleter{}; + deleter(thr_control); + }); + + 
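+    // Editorial note (not upstream): success path; this unlock pairs with the
+    // global_control_lock() taken at function entry, mirroring the unlock performed
+    // in the on_exception handler above.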
global_control_unlock(); + return thr_control; +} + +void threading_control::destroy () { + cache_aligned_deleter deleter; + deleter(this); + __TBB_InitOnce::remove_ref(); +} + +void threading_control::wait_last_reference(global_mutex_type::scoped_lock& lock) { + while (my_public_ref_count.load(std::memory_order_relaxed) == 1 && my_ref_count.load(std::memory_order_relaxed) > 1) { + lock.release(); + // To guarantee that request_close_connection() is called by the last external thread, we need to wait until all + // references are released. Re-read my_public_ref_count to limit waiting if new external threads are created. + // Theoretically, new private references to the threading control can be added during the wait, making it potentially + // endless. + // TODO: revise why the weak scheduler needs threading control's pointer and try to remove this wait. + // Note that the threading control should know about its schedulers for cancellation/exception/priority propagation, + // see e.g. task_group_context::cancel_group_execution() + while (my_public_ref_count.load(std::memory_order_acquire) == 1 && my_ref_count.load(std::memory_order_acquire) > 1) { + yield(); + } + lock.acquire(g_threading_control_mutex); + } +} + +bool threading_control::release(bool is_public, bool blocking_terminate) { + bool do_release = false; + { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + if (blocking_terminate) { + __TBB_ASSERT(is_public, "Only an object with a public reference can request the blocking terminate"); + wait_last_reference(lock); + } + do_release = remove_ref(is_public); + } + + if (do_release) { + __TBB_ASSERT(!my_public_ref_count.load(std::memory_order_relaxed), "No public references must remain if we remove the threading control."); + // inform RML that blocking termination is required + my_pimpl->release(blocking_terminate); + return blocking_terminate; + } + return false; +} + +threading_control::threading_control(unsigned public_ref, unsigned ref) : my_public_ref_count(public_ref), my_ref_count(ref) +{} + +threading_control* threading_control::register_public_reference() { + threading_control* control{nullptr}; + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + control = get_threading_control(/*public = */ true); + if (!control) { + // We are going to create threading_control_impl, so we should acquire the mutexes in the right order + lock.release(); + control = create_threading_control(); + } + + return control; +} + +bool threading_control::unregister_public_reference(bool blocking_terminate) { + __TBB_ASSERT(g_threading_control, "Threading control should exist until last public reference"); + __TBB_ASSERT(g_threading_control->my_public_ref_count.load(std::memory_order_relaxed), nullptr); + return g_threading_control->release(/*public = */ true, /*blocking_terminate = */ blocking_terminate); +} + +threading_control_client threading_control::create_client(arena& a) { + { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + add_ref(/*public = */ false); + } + + return my_pimpl->create_client(a); +} + +void threading_control::publish_client(threading_control_client client, d1::constraints& constraints) { + return my_pimpl->publish_client(client, constraints); +} + +threading_control::client_snapshot threading_control::prepare_client_destruction(threading_control_client client) { + return my_pimpl->prepare_client_destruction(client); +} + +bool threading_control::try_destroy_client(threading_control::client_snapshot deleter) { + bool res = 
my_pimpl->try_destroy_client(deleter); + if (res) { + release(/*public = */ false, /*blocking_terminate = */ false); + } + return res; +} + +void threading_control::set_active_num_workers(unsigned soft_limit) { + threading_control* thr_control{nullptr}; + { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + thr_control = get_threading_control(/*public = */ false); + } + + if (thr_control != nullptr) { + thr_control->my_pimpl->set_active_num_workers(soft_limit); + thr_control->release(/*is_public=*/false, /*blocking_terminate=*/false); + } +} + +bool threading_control::is_present() { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + return g_threading_control != nullptr; +} + +bool threading_control::register_lifetime_control() { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + return get_threading_control(/*public = */ true) != nullptr; +} + +bool threading_control::unregister_lifetime_control(bool blocking_terminate) { + threading_control* thr_control{nullptr}; + { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + thr_control = g_threading_control; + } + + bool released{true}; + if (thr_control) { + released = thr_control->release(/*public = */ true, /*blocking_terminate = */ blocking_terminate); + } + + return released; +} + +void threading_control::register_thread(thread_data& td) { + my_pimpl->register_thread(td); +} + +void threading_control::unregister_thread(thread_data& td) { + my_pimpl->unregister_thread(td); +} + +void threading_control::propagate_task_group_state(std::atomic<uint32_t> d1::task_group_context::*mptr_state, + d1::task_group_context& src, uint32_t new_state) +{ + my_pimpl->propagate_task_group_state(mptr_state, src, new_state); +} + +std::size_t threading_control::worker_stack_size() { + return my_pimpl->worker_stack_size(); +} + +unsigned threading_control::max_num_workers() { + global_mutex_type::scoped_lock lock(g_threading_control_mutex); + return g_threading_control ? g_threading_control->my_pimpl->max_num_workers() : 0; +} + +void threading_control::adjust_demand(threading_control_client client, int mandatory_delta, int workers_delta) { + my_pimpl->adjust_demand(client, mandatory_delta, workers_delta); +} + +bool threading_control::is_any_other_client_active() { + return my_pimpl->is_any_other_client_active(); +} + +thread_control_monitor& threading_control::get_waiting_threads_monitor() { + return my_pimpl->get_waiting_threads_monitor(); +} + +} // r1 +} // detail +} // tbb diff --git a/src/tbb/src/tbb/threading_control.h b/src/tbb/src/tbb/threading_control.h new file mode 100644 index 000000000..7381b2978 --- /dev/null +++ b/src/tbb/src/tbb/threading_control.h @@ -0,0 +1,154 @@ +/* + Copyright (c) 2022-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef _TBB_threading_control_H +#define _TBB_threading_control_H + +#include "oneapi/tbb/mutex.h" +#include "oneapi/tbb/global_control.h" + +#include "threading_control_client.h" +#include "intrusive_list.h" +#include "main.h" +#include "permit_manager.h" +#include "pm_client.h" +#include "thread_dispatcher.h" +#include "cancellation_disseminator.h" +#include "thread_request_serializer.h" +#include "scheduler_common.h" + +namespace tbb { +namespace detail { +namespace r1 { + +class arena; +class thread_data; + +class threading_control; + +class threading_control_impl { +public: + threading_control_impl(threading_control*); + +public: + void release(bool blocking_terminate); + + threading_control_client create_client(arena& a); + void publish_client(threading_control_client client, d1::constraints& constraints); + + struct client_snapshot { + std::uint64_t aba_epoch; + unsigned priority_level; + thread_dispatcher_client* my_td_client; + pm_client* my_pm_client; + }; + + client_snapshot prepare_client_destruction(threading_control_client client); + bool try_destroy_client(client_snapshot deleter); + + void register_thread(thread_data& td); + void unregister_thread(thread_data& td); + void propagate_task_group_state(std::atomic<uint32_t> d1::task_group_context::*mptr_state, + d1::task_group_context& src, uint32_t new_state); + + void set_active_num_workers(unsigned soft_limit); + std::size_t worker_stack_size(); + unsigned max_num_workers(); + + void adjust_demand(threading_control_client, int mandatory_delta, int workers_delta); + bool is_any_other_client_active(); + + thread_control_monitor& get_waiting_threads_monitor(); + +private: + static unsigned calc_workers_soft_limit(unsigned workers_hard_limit); + static std::pair<unsigned, unsigned> calculate_workers_limits(); + static cache_aligned_unique_ptr<permit_manager> make_permit_manager(unsigned workers_soft_limit); + static cache_aligned_unique_ptr<thread_dispatcher> make_thread_dispatcher(threading_control& control, + unsigned workers_soft_limit, + unsigned workers_hard_limit); + + // TODO: Consider allocating one chunk of memory and constructing the objects in it + cache_aligned_unique_ptr<permit_manager> my_permit_manager{nullptr}; + cache_aligned_unique_ptr<thread_dispatcher> my_thread_dispatcher{nullptr}; + cache_aligned_unique_ptr<thread_request_serializer_proxy> my_thread_request_serializer{nullptr}; + cache_aligned_unique_ptr<cancellation_disseminator> my_cancellation_disseminator{nullptr}; + cache_aligned_unique_ptr<thread_control_monitor> my_waiting_threads_monitor{nullptr}; +}; + + +class threading_control { + using global_mutex_type = d1::mutex; +public: + using client_snapshot = threading_control_impl::client_snapshot; + + static threading_control* register_public_reference(); + static bool unregister_public_reference(bool blocking_terminate); + + static bool is_present(); + static void set_active_num_workers(unsigned soft_limit); + static bool register_lifetime_control(); + static bool unregister_lifetime_control(bool blocking_terminate); + + threading_control_client create_client(arena& a); + void publish_client(threading_control_client client, d1::constraints& constraints); + client_snapshot prepare_client_destruction(threading_control_client client); + bool try_destroy_client(client_snapshot deleter); + + void register_thread(thread_data& td); + void unregister_thread(thread_data& td); + void propagate_task_group_state(std::atomic<uint32_t> d1::task_group_context::*mptr_state, + d1::task_group_context& src, uint32_t 
new_state); + + std::size_t worker_stack_size(); + static unsigned max_num_workers(); + + void adjust_demand(threading_control_client client, int mandatory_delta, int workers_delta); + bool is_any_other_client_active(); + + thread_control_monitor& get_waiting_threads_monitor(); + +private: + threading_control(unsigned public_ref, unsigned ref); + void add_ref(bool is_public); + bool remove_ref(bool is_public); + + static threading_control* get_threading_control(bool is_public); + static threading_control* create_threading_control(); + + bool release(bool is_public, bool blocking_terminate); + void wait_last_reference(global_mutex_type::scoped_lock& lock); + void destroy(); + + friend class thread_dispatcher; + + static threading_control* g_threading_control; + //! Mutex guarding creation/destruction of g_threading_control, insertions/deletions in my_arenas, and cancellation propagation + static global_mutex_type g_threading_control_mutex; + + cache_aligned_unique_ptr<threading_control_impl> my_pimpl{nullptr}; + //! Count of external threads attached + std::atomic<unsigned> my_public_ref_count{0}; + //! Reference count controlling threading_control object lifetime + std::atomic<unsigned> my_ref_count{0}; +}; + +} // r1 +} // detail +} // tbb + + +#endif // _TBB_threading_control_H diff --git a/src/tbb/src/tbb/threading_control_client.h b/src/tbb/src/tbb/threading_control_client.h new file mode 100644 index 000000000..4ff9359cf --- /dev/null +++ b/src/tbb/src/tbb/threading_control_client.h @@ -0,0 +1,58 @@ +/* + Copyright (c) 2022-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _TBB_threading_control_client_H +#define _TBB_threading_control_client_H + +#include "oneapi/tbb/detail/_assert.h" + +namespace tbb { +namespace detail { +namespace r1 { + +class pm_client; +class thread_dispatcher_client; + +class threading_control_client { +public: + threading_control_client() = default; + threading_control_client(const threading_control_client&) = default; + threading_control_client& operator=(const threading_control_client&) = default; + + threading_control_client(pm_client* p, thread_dispatcher_client* t) : my_pm_client(p), my_thread_dispatcher_client(t) { + __TBB_ASSERT(my_pm_client, nullptr); + __TBB_ASSERT(my_thread_dispatcher_client, nullptr); + } + + pm_client* get_pm_client() { + return my_pm_client; + } + + thread_dispatcher_client* get_thread_dispatcher_client() { + return my_thread_dispatcher_client; + } + +private: + pm_client* my_pm_client{nullptr}; + thread_dispatcher_client* my_thread_dispatcher_client{nullptr}; +}; + + +} +} +} + +#endif // _TBB_threading_control_client_H diff --git a/src/tbb/src/tbb/tls.h b/src/tbb/src/tbb/tls.h index 23ec1e751..e87a943aa 100644 --- a/src/tbb/src/tbb/tls.h +++ b/src/tbb/src/tbb/tls.h @@ -1,51 +1,49 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2022 Intel Corporation - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_tls_H #define _TBB_tls_H -#if USE_PTHREAD +#include "oneapi/tbb/detail/_config.h" + +#if __TBB_USE_POSIX #include <pthread.h> -#else /* assume USE_WINTHREAD */ -#include "tbb/machine/windows_api.h" +#else /* assume __TBB_USE_WINAPI */ +#include <windows.h> #endif namespace tbb { - -namespace internal { +namespace detail { +namespace r1 { typedef void (*tls_dtor_t)(void*); //! Basic cross-platform wrapper class for TLS operations. template <typename T> class basic_tls { -#if USE_PTHREAD +#if __TBB_USE_POSIX typedef pthread_key_t tls_key_t; public: - int create( tls_dtor_t dtor = NULL ) { + int create( tls_dtor_t dtor = nullptr ) { return pthread_key_create(&my_key, dtor); } int destroy() { return pthread_key_delete(my_key); } void set( T value ) { pthread_setspecific(my_key, (void*)value); } T get() { return (T)pthread_getspecific(my_key); } -#else /* USE_WINTHREAD */ +#else /* __TBB_USE_WINAPI */ typedef DWORD tls_key_t; public: #if !__TBB_WIN8UI_SUPPORT @@ -61,7 +59,7 @@ class basic_tls { T get() { return (T)TlsGetValue(my_key); } #else /*!__TBB_WIN8UI_SUPPORT*/ int create() { - tls_key_t tmp = FlsAlloc(NULL); + tls_key_t tmp = FlsAlloc(nullptr); if( tmp== (DWORD)0xFFFFFFFF ) return (DWORD)0xFFFFFFFF; my_key = tmp; @@ -71,54 +69,13 @@ class basic_tls { void set( T value ) { FlsSetValue(my_key, (LPVOID)value); } T get() { return (T)FlsGetValue(my_key); } #endif /* !__TBB_WIN8UI_SUPPORT */ -#endif /* USE_WINTHREAD */ +#endif /* __TBB_USE_WINAPI */ private: tls_key_t my_key; }; -//! More advanced TLS support template class. -/** It supports RAII and to some extent mimic __declspec(thread) variables. 
*/ -template <typename T> -class tls : public basic_tls<T> { - typedef basic_tls<T> base; -public: - tls() { base::create(); } - ~tls() { base::destroy(); } - T operator=(T value) { base::set(value); return value; } - operator T() { return base::get(); } -}; - -template <typename T> -class tls<T*> : basic_tls<T*> { - typedef basic_tls<T*> base; - static void internal_dtor(void* ptr) { - if (ptr) delete (T*)ptr; - } - T* internal_get() { - T* result = base::get(); - if (!result) { - result = new T; - base::set(result); - } - return result; - } -public: - tls() { -#if USE_PTHREAD - base::create( internal_dtor ); -#else - base::create(); -#endif - } - ~tls() { base::destroy(); } - T* operator=(T* value) { base::set(value); return value; } - operator T*() { return internal_get(); } - T* operator->() { return internal_get(); } - T& operator*() { return *internal_get(); } -}; - -} // namespace internal - +} // namespace r1 +} // namespace detail } // namespace tbb #endif /* _TBB_tls_H */ diff --git a/src/tbb/src/tbb/tools_api/disable_warnings.h b/src/tbb/src/tbb/tools_api/disable_warnings.h index 896d2b246..c5999a158 100644 --- a/src/tbb/src/tbb/tools_api/disable_warnings.h +++ b/src/tbb/src/tbb/tools_api/disable_warnings.h @@ -1,39 +1,39 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #include "ittnotify_config.h" #if ITT_PLATFORM==ITT_PLATFORM_WIN -#pragma warning (disable: 593) /* parameter "XXXX" was set but never used */ -#pragma warning (disable: 344) /* typedef name has already been declared (with same type) */ -#pragma warning (disable: 174) /* expression has no effect */ -#pragma warning (disable: 4127) /* conditional expression is constant */ -#pragma warning (disable: 4306) /* conversion from '?' to '?' of greater size */ +#if defined _MSC_VER + +// #pragma warning (disable: 593) /* parameter "XXXX" was set but never used */ +// #pragma warning (disable: 344) /* typedef name has already been declared (with same type) */ +// #pragma warning (disable: 174) /* expression has no effect */ +// #pragma warning (disable: 4127) /* conditional expression is constant */ +// #pragma warning (disable: 4306) /* conversion from '?' to '?' of greater size */ + +#endif #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if defined __INTEL_COMPILER -#pragma warning (disable: 869) /* parameter "XXXXX" was never referenced */ -#pragma warning (disable: 1418) /* external function definition with no prior declaration */ -#pragma warning (disable: 1419) /* external declaration in primary source file */ +// #pragma warning (disable: 869) /* parameter "XXXXX" was never referenced */ +// #pragma warning (disable: 1418) /* external function definition with no prior declaration */ +// #pragma warning (disable: 1419) /* external declaration in primary source file */ #endif /* __INTEL_COMPILER */ diff --git a/src/tbb/src/tbb/tools_api/internal/ittnotify.h b/src/tbb/src/tbb/tools_api/internal/ittnotify.h deleted file mode 100644 index 89d3bb372..000000000 --- a/src/tbb/src/tbb/tools_api/internal/ittnotify.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef _INTERNAL_ITTNOTIFY_H_ -#define _INTERNAL_ITTNOTIFY_H_ - -/** - * @file - * @brief Internal User API functions and types - */ - -/** @cond exclude_from_documentation */ -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM_MAC -# define ITT_PLATFORM_MAC 3 -#endif /* ITT_PLATFORM_MAC */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# elif ITT_OS==ITT_OS_MAC -# define ITT_PLATFORM ITT_PLATFORM_MAC -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif -#endif /* ITT_PLATFORM */ - -#if defined(_UNICODE) && !defined(UNICODE) -#define UNICODE -#endif - -#include <stddef.h> -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <tchar.h> -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#include <stdint.h> -#if defined(UNICODE) || defined(_UNICODE) -#include <wchar.h> -#endif /* UNICODE || _UNICODE */ -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#ifndef CDECL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define CDECL __attribute__ ((cdecl)) -# else /* _M_IX86 || __i386__ */ -# define CDECL /* actual only on x86 platform */ -# endif /* _M_IX86 || __i386__ */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define STDCALL __stdcall -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define STDCALL __attribute__ ((stdcall)) -# else /* _M_IX86 || __i386__ */ -# define STDCALL /* supported only on x86 platform */ -# endif /* _M_IX86 || __i386__ */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#define ITTAPI CDECL -#define LIBITTAPI CDECL - -/* TODO: Temporary for compatibility! */ -#define ITTAPI_CALL CDECL -#define LIBITTAPI_CALL CDECL - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -/* use __forceinline (VC++ specific) */ -#define ITT_INLINE __forceinline -#define ITT_INLINE_ATTRIBUTE /* nothing */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -/* - * Generally, functions are not inlined unless optimization is specified. - * For functions declared inline, this attribute inlines the function even - * if no optimization level was specified. 
- */ -#ifdef __STRICT_ANSI__ -#define ITT_INLINE static -#else /* __STRICT_ANSI__ */ -#define ITT_INLINE static inline -#endif /* __STRICT_ANSI__ */ -#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -/** @endcond */ - -/** @cond exclude_from_documentation */ -/* Helper macro for joining tokens */ -#define ITT_JOIN_AUX(p,n) p##n -#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) - -#ifdef ITT_MAJOR -#undef ITT_MAJOR -#endif -#ifdef ITT_MINOR -#undef ITT_MINOR -#endif -#define ITT_MAJOR 3 -#define ITT_MINOR 0 - -/* Standard versioning of a token with major and minor version numbers */ -#define ITT_VERSIONIZE(x) \ - ITT_JOIN(x, \ - ITT_JOIN(_, \ - ITT_JOIN(ITT_MAJOR, \ - ITT_JOIN(_, ITT_MINOR)))) - -#ifndef INTEL_ITTNOTIFY_PREFIX -# define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -# define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) -#define ITTNOTIFY_NAME(n) ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))) - -#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) - -#define ITTNOTIFY_VOID_D0(n,d) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_VOID_D1(n,d,x) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_VOID_D2(n,d,x,y) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) -#define ITTNOTIFY_DATA_D0(n,d) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_DATA_D1(n,d,x) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_DATA_D2(n,d,x,y) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) - -#ifdef ITT_STUB -#undef ITT_STUB -#endif -#ifdef ITT_STUBV -#undef ITT_STUBV -#endif -#define ITT_STUBV(api,type,name,args) \ - typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ - extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); -#define ITT_STUB ITT_STUBV -/** @endcond */ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#define INTEL_ITTNOTIFY_API_PRIVATE -#include "../ittnotify.h" - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* _INTERNAL_ITTNOTIFY_H_ */ diff --git a/src/tbb/src/tbb/tools_api/ittnotify.h b/src/tbb/src/tbb/tools_api/ittnotify.h index bf9606d0a..eb1571dc8 100644 --- a/src/tbb/src/tbb/tools_api/ittnotify.h +++ b/src/tbb/src/tbb/tools_api/ittnotify.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _ITTNOTIFY_H_ @@ -26,11 +22,11 @@ @brief Public User API functions and types @mainpage -The ITT API is used to annotate a user's program with additional information +The Instrumentation and Tracing Technology API (ITT API) is used to +annotate a user's program with additional information that can be used by correctness and performance tools. The user inserts calls in their program. Those calls generate information that is collected -at runtime, and used by tools such as Intel(R) Parallel Amplifier and -Intel(R) Parallel Inspector. +at runtime, and used by Intel(R) Threading Tools. @section API Concepts The following general concepts are used throughout the API. 
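(Editor's illustration, not upstream text: a minimal sketch of this annotation
model, assuming the POSIX-style entry points; on Windows the A/W variants and
UNICODE mapping defined later in this header apply instead:

    __itt_domain* domain = __itt_domain_create("Example.Module");
    __itt_string_handle* step = __itt_string_handle_create("compute_step");
    __itt_task_begin(domain, __itt_null, __itt_null, step);
    // ... code under measurement ...
    __itt_task_end(domain);

When no collector is attached, each of these calls reduces to a null-pointer
check via the ITTNOTIFY_* dispatch macros defined below.)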
@@ -101,11 +97,24 @@ The same ID may not be reused for different instances, unless a previous # define ITT_OS_MAC 3 #endif /* ITT_OS_MAC */ +#ifndef ITT_OS_FREEBSD +# define ITT_OS_FREEBSD 4 +#endif /* ITT_OS_FREEBSD */ + +#ifndef ITT_OS_OPENBSD +# define ITT_OS_OPENBSD 5 +#endif /* ITT_OS_OPENBSD */ + + #ifndef ITT_OS # if defined WIN32 || defined _WIN32 # define ITT_OS ITT_OS_WIN # elif defined( __APPLE__ ) && defined( __MACH__ ) # define ITT_OS ITT_OS_MAC +# elif defined( __FreeBSD__ ) +# define ITT_OS ITT_OS_FREEBSD +# elif defined( __OpenBSD__ ) +# define ITT_OS ITT_OS_OPENBSD # else # define ITT_OS ITT_OS_LINUX # endif @@ -123,11 +132,23 @@ The same ID may not be reused for different instances, unless a previous # define ITT_PLATFORM_MAC 3 #endif /* ITT_PLATFORM_MAC */ +#ifndef ITT_PLATFORM_FREEBSD +# define ITT_PLATFORM_FREEBSD 4 +#endif /* ITT_PLATFORM_FREEBSD */ + +#ifndef ITT_PLATFORM_OPENBSD +# define ITT_PLATFORM_OPENBSD 5 +#endif /* ITT_PLATFORM_OPENBSD */ + #ifndef ITT_PLATFORM # if ITT_OS==ITT_OS_WIN # define ITT_PLATFORM ITT_PLATFORM_WIN # elif ITT_OS==ITT_OS_MAC # define ITT_PLATFORM ITT_PLATFORM_MAC +# elif ITT_OS==ITT_OS_FREEBSD +# define ITT_PLATFORM ITT_PLATFORM_FREEBSD +# elif ITT_OS==ITT_OS_OPENBSD +# define ITT_PLATFORM ITT_PLATFORM_OPENBSD # else # define ITT_PLATFORM ITT_PLATFORM_POSIX # endif @@ -147,40 +168,45 @@ The same ID may not be reused for different instances, unless a previous #endif /* UNICODE || _UNICODE */ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#ifndef CDECL +#ifndef ITTAPI_CDECL # if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl +# define ITTAPI_CDECL __cdecl # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define CDECL __attribute__ ((cdecl)) +# if defined _M_IX86 || defined __i386__ +# define ITTAPI_CDECL __attribute__ ((cdecl)) # else /* _M_IX86 || __i386__ */ -# define CDECL /* actual only on x86 platform */ +# define ITTAPI_CDECL /* actual only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ +#endif /* ITTAPI_CDECL */ #ifndef STDCALL # if ITT_PLATFORM==ITT_PLATFORM_WIN # define STDCALL __stdcall # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # if defined _M_IX86 || defined __i386__ -# define STDCALL __attribute__ ((stdcall)) +# define STDCALL __attribute__ ((stdcall)) # else /* _M_IX86 || __i386__ */ # define STDCALL /* supported only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* STDCALL */ -#define ITTAPI CDECL -#define LIBITTAPI CDECL +#define ITTAPI ITTAPI_CDECL +#define LIBITTAPI ITTAPI_CDECL /* TODO: Temporary for compatibility! */ -#define ITTAPI_CALL CDECL -#define LIBITTAPI_CALL CDECL +#define ITTAPI_CALL ITTAPI_CDECL +#define LIBITTAPI_CALL ITTAPI_CDECL #if ITT_PLATFORM==ITT_PLATFORM_WIN /* use __forceinline (VC++ specific) */ -#define ITT_INLINE __forceinline +#if defined(__MINGW32__) && !defined(__cplusplus) +#define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__)) +#else +#define ITT_INLINE static __forceinline +#endif /* __MINGW32__ */ + #define ITT_INLINE_ATTRIBUTE /* nothing */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /* @@ -189,11 +215,12 @@ The same ID may not be reused for different instances, unless a previous * if no optimization level was specified. 
*/ #ifdef __STRICT_ANSI__ -#define ITT_INLINE static inline +#define ITT_INLINE static +#define ITT_INLINE_ATTRIBUTE __attribute__((unused)) #else /* __STRICT_ANSI__ */ #define ITT_INLINE static inline +#define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused)) #endif /* __STRICT_ANSI__ */ -#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @endcond */ @@ -201,8 +228,7 @@ The same ID may not be reused for different instances, unless a previous # if ITT_PLATFORM==ITT_PLATFORM_WIN # pragma message("WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro") # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -// #warning usage leads to ICC's compilation error -// # warning "Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro" +# warning "Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro" # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # include "legacy/ittnotify.h" #endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */ @@ -241,20 +267,20 @@ The same ID may not be reused for different instances, unless a previous #define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) #define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_VOID_D0(n,d) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_VOID_D1(n,d,x) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_VOID_D2(n,d,x,y) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) -#define ITTNOTIFY_DATA_D0(n,d) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_DATA_D1(n,d,x) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_DATA_D2(n,d,x,y) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) +#define ITTNOTIFY_VOID_D0(n,d) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_VOID_D1(n,d,x) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_VOID_D2(n,d,x,y) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? 
(void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) +#define ITTNOTIFY_DATA_D0(n,d) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_DATA_D1(n,d,x) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_DATA_D2(n,d,x,y) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) #ifdef ITT_STUB #undef ITT_STUB @@ -292,7 +318,7 @@ extern "C" { * only pauses tracing and analyzing memory access. * It does not pause tracing or analyzing threading APIs. * . - * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE: + * Intel(R) VTune(TM) Profiler: * - Does continue to record when new threads are started. * . * - Other effects: @@ -304,30 +330,146 @@ extern "C" { void ITTAPI __itt_pause(void); /** @brief Resume collection */ void ITTAPI __itt_resume(void); +/** @brief Detach collection */ +void ITTAPI __itt_detach(void); +/** + * @enum __itt_collection_scope + * @brief Enumerator for collection scopes + */ +typedef enum { + __itt_collection_scope_host = 1 << 0, + __itt_collection_scope_offload = 1 << 1, + __itt_collection_scope_all = 0x7FFFFFFF +} __itt_collection_scope; + +/** @brief Pause scoped collection */ +void ITTAPI __itt_pause_scoped(__itt_collection_scope); +/** @brief Resume scoped collection */ +void ITTAPI __itt_resume_scoped(__itt_collection_scope); + /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, pause, (void)) -ITT_STUBV(ITTAPI, void, resume, (void)) -#define __itt_pause ITTNOTIFY_VOID(pause) -#define __itt_pause_ptr ITTNOTIFY_NAME(pause) -#define __itt_resume ITTNOTIFY_VOID(resume) -#define __itt_resume_ptr ITTNOTIFY_NAME(resume) +ITT_STUBV(ITTAPI, void, pause, (void)) +ITT_STUBV(ITTAPI, void, pause_scoped, (__itt_collection_scope)) +ITT_STUBV(ITTAPI, void, resume, (void)) +ITT_STUBV(ITTAPI, void, resume_scoped, (__itt_collection_scope)) +ITT_STUBV(ITTAPI, void, detach, (void)) +#define __itt_pause ITTNOTIFY_VOID(pause) +#define __itt_pause_ptr ITTNOTIFY_NAME(pause) +#define __itt_pause_scoped ITTNOTIFY_VOID(pause_scoped) +#define __itt_pause_scoped_ptr ITTNOTIFY_NAME(pause_scoped) +#define __itt_resume ITTNOTIFY_VOID(resume) +#define __itt_resume_ptr ITTNOTIFY_NAME(resume) +#define __itt_resume_scoped ITTNOTIFY_VOID(resume_scoped) +#define __itt_resume_scoped_ptr ITTNOTIFY_NAME(resume_scoped) +#define __itt_detach ITTNOTIFY_VOID(detach) +#define __itt_detach_ptr 
ITTNOTIFY_NAME(detach) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_pause() -#define __itt_pause_ptr 0 +#define __itt_pause_ptr 0 +#define __itt_pause_scoped(scope) +#define __itt_pause_scoped_ptr 0 #define __itt_resume() -#define __itt_resume_ptr 0 +#define __itt_resume_ptr 0 +#define __itt_resume_scoped(scope) +#define __itt_resume_scoped_ptr 0 +#define __itt_detach() +#define __itt_detach_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ -#define __itt_pause_ptr 0 -#define __itt_resume_ptr 0 +#define __itt_pause_ptr 0 +#define __itt_pause_scoped_ptr 0 +#define __itt_resume_ptr 0 +#define __itt_resume_scoped_ptr 0 +#define __itt_detach_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} control group */ /** @endcond */ +/** + * @defgroup Intel Processor Trace control + * API from this group provides control over collection and analysis of Intel Processor Trace (Intel PT) data + * Information about Intel Processor Trace technology can be found here (Volume 3 chapter 35): + * https://github.com/tpn/pdfs/blob/master/Intel%2064%20and%20IA-32%20Architectures%20Software%20Developer's%20Manual%20-%20Combined%20Volumes%201-4%20-%20May%202018%20(325462-sdm-vol-1-2abcd-3abcd).pdf + * Use this API to mark particular code regions for loading detailed performance statistics. + * This mode makes your analysis faster and more accurate. + * @{ +*/ +typedef unsigned char __itt_pt_region; + +/** + * @brief function saves a region name marked with Intel PT API and returns a region id. + * Only 7 names can be registered. Attempts to register more names will be ignored and a region id with auto names will be returned. + * For automatic naming of regions pass NULL as function parameter +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_pt_region ITTAPI __itt_pt_region_createA(const char *name); +__itt_pt_region ITTAPI __itt_pt_region_createW(const wchar_t *name); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_pt_region_create __itt_pt_region_createW +#else /* UNICODE */ +# define __itt_pt_region_create __itt_pt_region_createA +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_pt_region ITTAPI __itt_pt_region_create(const char *name); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createA, (const char *name)) +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createW, (const wchar_t *name)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_create, (const char *name)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_pt_region_createA ITTNOTIFY_DATA(pt_region_createA) +#define __itt_pt_region_createA_ptr ITTNOTIFY_NAME(pt_region_createA) +#define __itt_pt_region_createW ITTNOTIFY_DATA(pt_region_createW) +#define __itt_pt_region_createW_ptr ITTNOTIFY_NAME(pt_region_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_pt_region_create ITTNOTIFY_DATA(pt_region_create) +#define __itt_pt_region_create_ptr ITTNOTIFY_NAME(pt_region_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_pt_region_createA(name) (__itt_pt_region)0 +#define __itt_pt_region_createA_ptr 0 +#define __itt_pt_region_createW(name) (__itt_pt_region)0 +#define __itt_pt_region_createW_ptr 0 +#else /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_pt_region_create(name) (__itt_pt_region)0 +#define __itt_pt_region_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_pt_region_createA_ptr 0 +#define __itt_pt_region_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_pt_region_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief function contains a special code pattern identified on the post-processing stage and + * marks the beginning of a code region targeted for Intel PT analysis + * @param[in] region - region id, 0 <= region < 8 +*/ +void __itt_mark_pt_region_begin(__itt_pt_region region); +/** + * @brief function contains a special code pattern identified on the post-processing stage and + * marks the end of a code region targeted for Intel PT analysis + * @param[in] region - region id, 0 <= region < 8 +*/ +void __itt_mark_pt_region_end(__itt_pt_region region); +/** @} Intel PT control group*/ + /** * @defgroup threads Threads * @ingroup public @@ -427,19 +569,19 @@ ITT_STUBV(ITTAPI, void, thread_ignore, (void)) *********************************************************************/ /** @{ */ /** - * @hideinitializer + * @hideinitializer * @brief possible value for suppression mask */ #define __itt_suppress_all_errors 0x7fffffff /** - * @hideinitializer + * @hideinitializer * @brief possible value for suppression mask (suppresses errors from threading analysis) */ #define __itt_suppress_threading_errors 0x000000ff /** - * @hideinitializer + * @hideinitializer * @brief possible value for suppression mask (suppresses errors from memory analysis) */ #define __itt_suppress_memory_errors 0x0000ff00 @@ -465,7 +607,7 @@ ITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask)) /** @endcond */ /** - * @brief Undo the effects of the matching call to __itt_suppress_push + * @brief Undo the effects of the matching call to __itt_suppress_push */ void ITTAPI __itt_suppress_pop(void); @@ -485,14 +627,26 @@ ITT_STUBV(ITTAPI, void, suppress_pop, (void)) /** @endcond */ /** - * @enum __itt_model_disable - * @brief Enumerator for the disable methods + * @enum __itt_suppress_mode + * @brief Enumerator for the suppressing modes */ typedef enum __itt_suppress_mode { __itt_unsuppress_range, __itt_suppress_range } __itt_suppress_mode_t; +/** + * @enum __itt_collection_state + * @brief Enumerator for collection state. + */ +typedef enum { + __itt_collection_uninitialized = 0, /* uninitialized */ + __itt_collection_init_fail = 1, /* failed to init */ + __itt_collection_collector_absent = 2, /* non work state collector is absent */ + __itt_collection_collector_exists = 3, /* work state collector exists */ + __itt_collection_init_successful = 4 /* success to init */ +} __itt_collection_state; + /** * @brief Mark a range of memory for error suppression or unsuppression for error types included in mask */ @@ -1440,7 +1594,7 @@ ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void** addr, /** @endcond */ /** - * @brief Record an free begin occurrence. + * @brief Record a free begin occurrence. */ void ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr); @@ -1460,7 +1614,7 @@ ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr)) /** @endcond */ /** - * @brief Record an free end occurrence. + * @brief Record a free end occurrence. 
*/ void ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr); @@ -1480,7 +1634,7 @@ ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr)) /** @endcond */ /** - * @brief Record an reallocation begin occurrence. + * @brief Record a reallocation begin occurrence. */ void ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized); @@ -1500,7 +1654,7 @@ ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* add /** @endcond */ /** - * @brief Record an reallocation end occurrence. + * @brief Record a reallocation end occurrence. */ void ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized); @@ -1595,13 +1749,13 @@ ITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void)) * @brief Specify the type of heap detection/reporting to modify. */ /** - * @hideinitializer + * @hideinitializer * @brief Report on memory leaks. */ #define __itt_heap_leaks 0x00000001 /** - * @hideinitializer + * @hideinitializer * @brief Report on memory growth. */ #define __itt_heap_growth 0x00000002 @@ -1678,7 +1832,7 @@ typedef struct ___itt_domain * @ingroup domains * @brief Create a domain. * Create domain using some domain name: the URI naming style is recommended. - * Because the set of domains is expected to be static over the application's + * Because the set of domains is expected to be static over the application's * execution time, there is no mechanism to destroy a domain. * Any domain can be accessed by any thread in the process, regardless of * which thread created the domain. This call is thread-safe. @@ -1762,7 +1916,7 @@ static const __itt_id __itt_null = { 0, 0, 0 }; * @ingroup ids * @brief A convenience function is provided to create an ID without domain control. * @brief This is a convenience function to initialize an __itt_id structure. This function - * does not affect the trace collector runtime in any way. After you make the ID with this + * does not affect the collector runtime in any way. After you make the ID with this * function, you still must create it with the __itt_id_create function before using the ID * to identify a named entity. * @param[in] addr The address of object; high QWORD of the ID value. @@ -1813,7 +1967,7 @@ ITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id)) * @brief Destroy an instance of identifier. * This ends the lifetime of the current instance of the given ID value in the trace. * Any relationships that are established after this lifetime ends are invalid. - * This call must be performed before the given ID value can be reused for a different + * This call must be performed before the given ID value can be reused for a different * named entity instance. * @param[in] domain The domain controlling the execution of this call. * @param[in] id The ID to destroy. @@ -1931,7 +2085,7 @@ ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create, (const char *na typedef unsigned long long __itt_timestamp; /** @endcond */ -static const __itt_timestamp __itt_timestamp_none = (__itt_timestamp)-1LL; +#define __itt_timestamp_none ((__itt_timestamp)-1LL) /** @cond exclude_from_gpa_documentation */ @@ -2055,7 +2209,7 @@ void ITTAPI __itt_frame_end_v3(const __itt_domain *domain, __itt_id *id); * take the current timestamp as the end timestamp. 
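+ *
+ * An illustrative sketch (not part of the upstream header), assuming
+ * __itt_get_timestamp() from this header and a hypothetical workload:
+ *
+ * @code
+ *     __itt_domain* d = __itt_domain_create("com.example.render");
+ *     __itt_timestamp begin = __itt_get_timestamp();
+ *     render_one_frame();                         // hypothetical workload
+ *     __itt_timestamp end = __itt_get_timestamp();
+ *     __itt_frame_submit_v3(d, NULL, begin, end); // NULL id: anonymous instance
+ * @endcode
+ *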
* @param[in] domain The domain for this frame instance * @param[in] id The instance ID for this frame instance or NULL - * @param[in] begin Timestamp of the beggining of the frame + * @param[in] begin Timestamp of the beginning of the frame * @param[in] end Timestamp of the end of the frame */ void ITTAPI __itt_frame_submit_v3(const __itt_domain *domain, __itt_id *id, @@ -2170,18 +2324,42 @@ void ITTAPI __itt_task_begin_fn(const __itt_domain *domain, __itt_id taskid, __i */ void ITTAPI __itt_task_end(const __itt_domain *domain); +/** + * @ingroup tasks + * @brief Begin an overlapped task instance. + * @param[in] domain The domain for this task. + * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null. + * @param[in] parentid The parent of this task, or __itt_null. + * @param[in] name The name of this task. + */ +void ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); + +/** + * @ingroup tasks + * @brief End an overlapped task instance. + * @param[in] domain The domain for this task + * @param[in] taskid Explicit ID of finished task + */ +void ITTAPI __itt_task_end_overlapped(const __itt_domain *domain, __itt_id taskid); + /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, task_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) ITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parentid, void* fn)) ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain)) +ITT_STUBV(ITTAPI, void, task_begin_overlapped, (const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id taskid)) #define __itt_task_begin(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin,d,x,y,z) #define __itt_task_begin_ptr ITTNOTIFY_NAME(task_begin) #define __itt_task_begin_fn(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_fn,d,x,y,z) #define __itt_task_begin_fn_ptr ITTNOTIFY_NAME(task_begin_fn) #define __itt_task_end(d) ITTNOTIFY_VOID_D0(task_end,d) #define __itt_task_end_ptr ITTNOTIFY_NAME(task_end) +#define __itt_task_begin_overlapped(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_overlapped,d,x,y,z) +#define __itt_task_begin_overlapped_ptr ITTNOTIFY_NAME(task_begin_overlapped) +#define __itt_task_end_overlapped(d,x) ITTNOTIFY_VOID_D1(task_end_overlapped,d,x) +#define __itt_task_end_overlapped_ptr ITTNOTIFY_NAME(task_end_overlapped) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_task_begin(domain,id,parentid,name) #define __itt_task_begin_ptr 0 @@ -2189,72 +2367,25 @@ ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain)) #define __itt_task_begin_fn_ptr 0 #define __itt_task_end(domain) #define __itt_task_end_ptr 0 +#define __itt_task_begin_overlapped(domain,taskid,parentid,name) +#define __itt_task_begin_overlapped_ptr 0 +#define __itt_task_end_overlapped(domain,taskid) +#define __itt_task_end_overlapped_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_task_begin_ptr 0 #define __itt_task_begin_fn_ptr 0 #define __itt_task_end_ptr 0 +#define __itt_task_begin_overlapped_ptr 0 +#define __itt_task_end_overlapped_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} tasks group */ -/** - * @defgroup counters Counters - * @ingroup public - * Counters are user-defined objects with a monotonically increasing - * value. 
Counter values are 64-bit unsigned integers. Counter values - * are tracked per-thread. Counters have names that can be displayed in - * the tools. - * @{ - */ - -/** - * @ingroup counters - * @brief Increment a counter by one. - * The first call with a given name creates a counter by that name and sets its - * value to zero on every thread. Successive calls increment the counter value - * on the thread on which the call is issued. - * @param[in] domain The domain controlling the call. Counter names are not domain specific. - * The domain argument is used only to enable or disable the API calls. - * @param[in] name The name of the counter - */ -void ITTAPI __itt_counter_inc_v3(const __itt_domain *domain, __itt_string_handle *name); - -/** - * @ingroup counters - * @brief Increment a counter by the value specified in delta. - * @param[in] domain The domain controlling the call. Counter names are not domain specific. - * The domain argument is used only to enable or disable the API calls. - * @param[in] name The name of the counter - * @param[in] delta The amount by which to increment the counter - */ -void ITTAPI __itt_counter_inc_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name)) -ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) -#define __itt_counter_inc_v3(d,x) ITTNOTIFY_VOID_D1(counter_inc_v3,d,x) -#define __itt_counter_inc_v3_ptr ITTNOTIFY_NAME(counter_inc_v3) -#define __itt_counter_inc_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_inc_delta_v3,d,x,y) -#define __itt_counter_inc_delta_v3_ptr ITTNOTIFY_NAME(counter_inc_delta_v3) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_counter_inc_v3(domain,name) -#define __itt_counter_inc_v3_ptr 0 -#define __itt_counter_inc_delta_v3(domain,name,delta) -#define __itt_counter_inc_delta_v3_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_counter_inc_v3_ptr 0 -#define __itt_counter_inc_delta_v3_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} counters group */ /** * @defgroup markers Markers - * Markers represent a single discreet event in time. Markers have a scope, + * Markers represent a single discrete event in time. Markers have a scope, * described by an enumerated type __itt_scope. Markers are created by * the API call __itt_marker. A marker instance can be given an ID for use in * adding metadata. 
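+ *
+ * An end-to-end sketch of the task and marker APIs above (illustrative only,
+ * not part of the upstream header). All names are placeholders; the calls
+ * compile to no-ops when no collector is attached:
+ *
+ * @code
+ *     __itt_domain*        d  = __itt_domain_create("com.example.app");
+ *     __itt_string_handle* sh = __itt_string_handle_create("phase-1");
+ *
+ *     __itt_task_begin(d, __itt_null, __itt_null, sh);
+ *     do_phase_one();                                    // hypothetical workload
+ *     __itt_marker(d, __itt_null, sh, __itt_scope_task);
+ *     __itt_task_end(d);
+ * @endcode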
@@ -2372,7 +2503,7 @@ ITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id, * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task * @param[in] key The name of the metadata * @param[in] data The metadata itself - * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated + * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_metadata_str_addA(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); @@ -2408,9 +2539,9 @@ ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_metadata_str_addA(d,x,y,z,a) +#define __itt_metadata_str_addA(d,x,y,z,a) #define __itt_metadata_str_addA_ptr 0 -#define __itt_metadata_str_addW(d,x,y,z,a) +#define __itt_metadata_str_addW(d,x,y,z,a) #define __itt_metadata_str_addW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add(d,x,y,z,a) @@ -2434,7 +2565,7 @@ ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id * @param[in] scope The scope of the instance to which the metadata is to be added * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task - + * @param[in] key The name of the metadata * @param[in] type The type of the metadata * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added. @@ -2467,7 +2598,7 @@ ITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __ * @param[in] key The name of the metadata * @param[in] data The metadata itself - * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated + * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_metadata_str_add_with_scopeA(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length); @@ -2503,9 +2634,9 @@ ITT_STUBV(ITTAPI, void, metadata_str_add_with_scope, (const __itt_domain *domain #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) +#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) #define __itt_metadata_str_add_with_scopeA_ptr 0 -#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) +#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) #define __itt_metadata_str_add_with_scopeW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add_with_scope(d,x,y,z,a) @@ -2659,7 +2790,7 @@ ITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info /** * @ingroup clockdomains - * @brief Recalculate clock domains frequences and clock base timestamps. + * @brief Recalculate clock domains frequencies and clock base timestamps. 
*/ void ITTAPI __itt_clock_domain_reset(void); @@ -2786,74 +2917,439 @@ ITT_STUBV(ITTAPI, void, task_end_ex, (const __itt_domain *domain, __itt /** @endcond */ /** - * @ingroup markers - * @brief Create a marker instance. - * @param[in] domain The domain for this marker - * @param[in] clock_domain The clock domain controlling the execution of this call. - * @param[in] timestamp The user defined timestamp. - * @param[in] id The instance ID for this marker, or __itt_null - * @param[in] name The name for this marker - * @param[in] scope The scope for this marker + * @defgroup counters Counters + * @ingroup public + * Counters are user-defined objects with a monotonically increasing + * value. Counter values are 64-bit unsigned integers. + * Counters have names that can be displayed in + * the tools. + * @{ */ -void ITTAPI __itt_marker_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope); +/** + * @brief opaque structure for counter identification + */ /** @cond exclude_from_documentation */ + +typedef struct ___itt_counter* __itt_counter; + +/** + * @brief Create an unsigned 64 bits integer counter with given name/domain + * + * After __itt_counter_create() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), + * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) + * can be used to change the value of the counter, where value_ptr is a pointer to an unsigned 64 bits integer + * + * The call is equal to __itt_counter_create_typed(name, domain, __itt_metadata_u64) + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_createA(const char *name, const char *domain); +__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_counter_create __itt_counter_createW +# define __itt_counter_create_ptr __itt_counter_createW_ptr +#else /* UNICODE */ +# define __itt_counter_create __itt_counter_createA +# define __itt_counter_create_ptr __itt_counter_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope)) -#define __itt_marker_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(marker_ex,d,x,y,z,a,b) -#define __itt_marker_ex_ptr ITTNOTIFY_NAME(marker_ex) +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain)) +ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA ITTNOTIFY_DATA(counter_createA) +#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA) +#define __itt_counter_createW ITTNOTIFY_DATA(counter_createW) +#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create ITTNOTIFY_DATA(counter_create) +#define 
__itt_counter_create_ptr ITTNOTIFY_NAME(counter_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_marker_ex(domain,clock_domain,timestamp,id,name,scope) -#define __itt_marker_ex_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA(name, domain) +#define __itt_counter_createA_ptr 0 +#define __itt_counter_createW(name, domain) +#define __itt_counter_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create(name, domain) +#define __itt_counter_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ -#define __itt_marker_ex_ptr 0 +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_createA_ptr 0 +#define __itt_counter_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** - * @ingroup clockdomain - * @brief Add a relation to the current task instance. - * The current task instance is the head of the relation. - * @param[in] domain The domain controlling this call - * @param[in] clock_domain The clock domain controlling the execution of this call. - * @param[in] timestamp The user defined timestamp. - * @param[in] relation The kind of relation - * @param[in] tail The ID for the tail of the relation - */ -void ITTAPI __itt_relation_add_to_current_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail); - -/** - * @ingroup clockdomain - * @brief Add a relation between two instance identifiers. - * @param[in] domain The domain controlling this call - * @param[in] clock_domain The clock domain controlling the execution of this call. - * @param[in] timestamp The user defined timestamp. 
- * @param[in] head The ID for the head of the relation - * @param[in] relation The kind of relation - * @param[in] tail The ID for the tail of the relation + * @brief Increment the unsigned 64 bits integer counter value + * + * Calling this function to non-unsigned 64 bits integer counters has no effect */ -void ITTAPI __itt_relation_add_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail); +void ITTAPI __itt_counter_inc(__itt_counter id); -/** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail)) -ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail)) -#define __itt_relation_add_to_current_ex(d,x,y,z,a) ITTNOTIFY_VOID_D4(relation_add_to_current_ex,d,x,y,z,a) -#define __itt_relation_add_to_current_ex_ptr ITTNOTIFY_NAME(relation_add_to_current_ex) -#define __itt_relation_add_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b) -#define __itt_relation_add_ex_ptr ITTNOTIFY_NAME(relation_add_ex) +ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id)) +#define __itt_counter_inc ITTNOTIFY_VOID(counter_inc) +#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc) #else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_relation_add_to_current_ex(domain,clock_domain,timestame,relation,tail) -#define __itt_relation_add_to_current_ex_ptr 0 -#define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail) -#define __itt_relation_add_ex_ptr 0 +#define __itt_counter_inc(id) +#define __itt_counter_inc_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ -#define __itt_relation_add_to_current_ex_ptr 0 -#define __itt_relation_add_ex_ptr 0 +#define __itt_counter_inc_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** + * @brief Increment the unsigned 64 bits integer counter value with x + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value)) +#define __itt_counter_inc_delta ITTNOTIFY_VOID(counter_inc_delta) +#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc_delta(id, value) +#define __itt_counter_inc_delta_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_delta_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Decrement the unsigned 64 bits integer counter value + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_dec(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec, (__itt_counter id)) +#define __itt_counter_dec ITTNOTIFY_VOID(counter_dec) +#define __itt_counter_dec_ptr ITTNOTIFY_NAME(counter_dec) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec(id) +#define __itt_counter_dec_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* 
INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** + * @brief Decrement the unsigned 64 bits integer counter value with x + * + * Calling this function to non-unsigned 64 bits integer counters has no effect + */ +void ITTAPI __itt_counter_dec_delta(__itt_counter id, unsigned long long value); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec_delta, (__itt_counter id, unsigned long long value)) +#define __itt_counter_dec_delta ITTNOTIFY_VOID(counter_dec_delta) +#define __itt_counter_dec_delta_ptr ITTNOTIFY_NAME(counter_dec_delta) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec_delta(id, value) +#define __itt_counter_dec_delta_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_delta_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup counters + * @brief Increment a counter by one. + * The first call with a given name creates a counter by that name and sets its + * value to zero. Successive calls increment the counter value. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + */ +void ITTAPI __itt_counter_inc_v3(const __itt_domain *domain, __itt_string_handle *name); + +/** + * @ingroup counters + * @brief Increment a counter by the value specified in delta. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + * @param[in] delta The amount by which to increment the counter + */ +void ITTAPI __itt_counter_inc_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) +#define __itt_counter_inc_v3(d,x) ITTNOTIFY_VOID_D1(counter_inc_v3,d,x) +#define __itt_counter_inc_v3_ptr ITTNOTIFY_NAME(counter_inc_v3) +#define __itt_counter_inc_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_inc_delta_v3,d,x,y) +#define __itt_counter_inc_delta_v3_ptr ITTNOTIFY_NAME(counter_inc_delta_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_inc_v3(domain,name) +#define __itt_counter_inc_v3_ptr 0 +#define __itt_counter_inc_delta_v3(domain,name,delta) +#define __itt_counter_inc_delta_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_inc_v3_ptr 0 +#define __itt_counter_inc_delta_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + + +/** + * @ingroup counters + * @brief Decrement a counter by one. + * The first call with a given name creates a counter by that name and sets its + * value to zero. Successive calls decrement the counter value. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. 
+ * @param[in] name The name of the counter + */ +void ITTAPI __itt_counter_dec_v3(const __itt_domain *domain, __itt_string_handle *name); + +/** + * @ingroup counters + * @brief Decrement a counter by the value specified in delta. + * @param[in] domain The domain controlling the call. Counter names are not domain specific. + * The domain argument is used only to enable or disable the API calls. + * @param[in] name The name of the counter + * @param[in] delta The amount by which to decrement the counter + */ +void ITTAPI __itt_counter_dec_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_dec_v3, (const __itt_domain *domain, __itt_string_handle *name)) +ITT_STUBV(ITTAPI, void, counter_dec_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) +#define __itt_counter_dec_v3(d,x) ITTNOTIFY_VOID_D1(counter_dec_v3,d,x) +#define __itt_counter_dec_v3_ptr ITTNOTIFY_NAME(counter_dec_v3) +#define __itt_counter_dec_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_dec_delta_v3,d,x,y) +#define __itt_counter_dec_delta_v3_ptr ITTNOTIFY_NAME(counter_dec_delta_v3) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_dec_v3(domain,name) +#define __itt_counter_dec_v3_ptr 0 +#define __itt_counter_dec_delta_v3(domain,name,delta) +#define __itt_counter_dec_delta_v3_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_dec_v3_ptr 0 +#define __itt_counter_dec_delta_v3_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @} counters group */ + + +/** + * @brief Set the counter value + */ +void ITTAPI __itt_counter_set_value(__itt_counter id, void *value_ptr); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value, (__itt_counter id, void *value_ptr)) +#define __itt_counter_set_value ITTNOTIFY_VOID(counter_set_value) +#define __itt_counter_set_value_ptr ITTNOTIFY_NAME(counter_set_value) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value(id, value_ptr) +#define __itt_counter_set_value_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Set the counter value + */ +void ITTAPI __itt_counter_set_value_ex(__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_set_value_ex, (__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr)) +#define __itt_counter_set_value_ex ITTNOTIFY_VOID(counter_set_value_ex) +#define __itt_counter_set_value_ex_ptr ITTNOTIFY_NAME(counter_set_value_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) +#define __itt_counter_set_value_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_set_value_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Create a typed counter with given name/domain + * + * After __itt_counter_create_typed() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), + * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) + * 
can be used to change the value of the counter + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_counter ITTAPI __itt_counter_create_typedA(const char *name, const char *domain, __itt_metadata_type type); +__itt_counter ITTAPI __itt_counter_create_typedW(const wchar_t *name, const wchar_t *domain, __itt_metadata_type type); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_counter_create_typed __itt_counter_create_typedW +# define __itt_counter_create_typed_ptr __itt_counter_create_typedW_ptr +#else /* UNICODE */ +# define __itt_counter_create_typed __itt_counter_create_typedA +# define __itt_counter_create_typed_ptr __itt_counter_create_typedA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_counter ITTAPI __itt_counter_create_typed(const char *name, const char *domain, __itt_metadata_type type); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedA, (const char *name, const char *domain, __itt_metadata_type type)) +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedW, (const wchar_t *name, const wchar_t *domain, __itt_metadata_type type)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create_typed, (const char *name, const char *domain, __itt_metadata_type type)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA ITTNOTIFY_DATA(counter_create_typedA) +#define __itt_counter_create_typedA_ptr ITTNOTIFY_NAME(counter_create_typedA) +#define __itt_counter_create_typedW ITTNOTIFY_DATA(counter_create_typedW) +#define __itt_counter_create_typedW_ptr ITTNOTIFY_NAME(counter_create_typedW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed ITTNOTIFY_DATA(counter_create_typed) +#define __itt_counter_create_typed_ptr ITTNOTIFY_NAME(counter_create_typed) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA(name, domain, type) +#define __itt_counter_create_typedA_ptr 0 +#define __itt_counter_create_typedW(name, domain, type) +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed(name, domain, type) +#define __itt_counter_create_typed_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_counter_create_typedA_ptr 0 +#define __itt_counter_create_typedW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_counter_create_typed_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create() or + * __itt_counter_create_typed() + */ +void ITTAPI __itt_counter_destroy(__itt_counter id); + +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id)) +#define __itt_counter_destroy ITTNOTIFY_VOID(counter_destroy) +#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_counter_destroy(id) +#define __itt_counter_destroy_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_counter_destroy_ptr 0 +#endif /* 
INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} counters group */ + +/** + * @ingroup markers + * @brief Create a marker instance. + * @param[in] domain The domain for this marker + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] id The instance ID for this marker, or __itt_null + * @param[in] name The name for this marker + * @param[in] scope The scope for this marker + */ +void ITTAPI __itt_marker_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope)) +#define __itt_marker_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(marker_ex,d,x,y,z,a,b) +#define __itt_marker_ex_ptr ITTNOTIFY_NAME(marker_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_marker_ex(domain,clock_domain,timestamp,id,name,scope) +#define __itt_marker_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_marker_ex_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @ingroup clockdomain + * @brief Add a relation to the current task instance. + * The current task instance is the head of the relation. + * @param[in] domain The domain controlling this call + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_to_current_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail); + +/** + * @ingroup clockdomain + * @brief Add a relation between two instance identifiers. + * @param[in] domain The domain controlling this call + * @param[in] clock_domain The clock domain controlling the execution of this call. + * @param[in] timestamp The user defined timestamp. 
+ * @param[in] head The ID for the head of the relation + * @param[in] relation The kind of relation + * @param[in] tail The ID for the tail of the relation + */ +void ITTAPI __itt_relation_add_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail)) +ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail)) +#define __itt_relation_add_to_current_ex(d,x,y,z,a) ITTNOTIFY_VOID_D4(relation_add_to_current_ex,d,x,y,z,a) +#define __itt_relation_add_to_current_ex_ptr ITTNOTIFY_NAME(relation_add_to_current_ex) +#define __itt_relation_add_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b) +#define __itt_relation_add_ex_ptr ITTNOTIFY_NAME(relation_add_ex) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_relation_add_to_current_ex(domain,clock_domain,timestame,relation,tail) +#define __itt_relation_add_to_current_ex_ptr 0 +#define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail) +#define __itt_relation_add_ex_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_relation_add_to_current_ex_ptr 0 +#define __itt_relation_add_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ @@ -3072,132 +3568,649 @@ ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event)) #define __itt_event_end ITTNOTIFY_DATA(event_end) #define __itt_event_end_ptr ITTNOTIFY_NAME(event_end) #else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_event_end(event) (int)0 -#define __itt_event_end_ptr 0 +#define __itt_event_end(event) (int)0 +#define __itt_event_end_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_event_end_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ +/** @} events group */ + + +/** + * @defgroup arrays Arrays Visualizer + * @ingroup public + * Visualize arrays + * @{ + */ + +/** + * @enum __itt_av_data_type + * @brief Defines types of arrays data (for C/C++ intrinsic types) + */ +typedef enum +{ + __itt_e_first = 0, + __itt_e_char = 0, /* 1-byte integer */ + __itt_e_uchar, /* 1-byte unsigned integer */ + __itt_e_int16, /* 2-byte integer */ + __itt_e_uint16, /* 2-byte unsigned integer */ + __itt_e_int32, /* 4-byte integer */ + __itt_e_uint32, /* 4-byte unsigned integer */ + __itt_e_int64, /* 8-byte integer */ + __itt_e_uint64, /* 8-byte unsigned integer */ + __itt_e_float, /* 4-byte floating */ + __itt_e_double, /* 8-byte floating */ + __itt_e_last = __itt_e_double +} __itt_av_data_type; + +/** + * @brief Save an array data to a file. + * Output format is defined by the file extension. The csv and bmp formats are supported (bmp - for 2-dimensional array only). + * @param[in] data - pointer to the array data + * @param[in] rank - the rank of the array + * @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions. 
+ * The size of dimensions must be equal to the rank + * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types) + * @param[in] filePath - the file path; the output format is defined by the file extension + * @param[in] columnOrder - defines how the array is stored in the linear memory. + * It should be 1 for column-major order (e.g. in FORTRAN) or 0 - for row-major order (e.g. in C). + */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +int ITTAPI __itt_av_saveA(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); +int ITTAPI __itt_av_saveW(void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_av_save __itt_av_saveW +# define __itt_av_save_ptr __itt_av_saveW_ptr +#else /* UNICODE */ +# define __itt_av_save __itt_av_saveA +# define __itt_av_save_ptr __itt_av_saveA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +int ITTAPI __itt_av_save(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) +ITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA ITTNOTIFY_DATA(av_saveA) +#define __itt_av_saveA_ptr ITTNOTIFY_NAME(av_saveA) +#define __itt_av_saveW ITTNOTIFY_DATA(av_saveW) +#define __itt_av_saveW_ptr ITTNOTIFY_NAME(av_saveW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save ITTNOTIFY_DATA(av_save) +#define __itt_av_save_ptr ITTNOTIFY_NAME(av_save) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA(name) +#define __itt_av_saveA_ptr 0 +#define __itt_av_saveW(name) +#define __itt_av_saveW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save(name) +#define __itt_av_save_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_av_saveA_ptr 0 +#define __itt_av_saveW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_av_save_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +void ITTAPI __itt_enable_attach(void); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, enable_attach, (void)) +#define __itt_enable_attach ITTNOTIFY_VOID(enable_attach) +#define __itt_enable_attach_ptr ITTNOTIFY_NAME(enable_attach) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_enable_attach() +#define __itt_enable_attach_ptr 0 +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#define __itt_enable_attach_ptr 0 +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** @cond exclude_from_gpa_documentation */ + +/** @} 
arrays group */ + +/** @endcond */ + +/** + * @brief Module load notification + * This API is used to report necessary information in case of bypassing default system loader. + * Notification should be done immediately after this module is loaded to process memory. + * @param[in] start_addr - module start address + * @param[in] end_addr - module end address + * @param[in] path - file system full path to the module + */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +void ITTAPI __itt_module_loadA(void *start_addr, void *end_addr, const char *path); +void ITTAPI __itt_module_loadW(void *start_addr, void *end_addr, const wchar_t *path); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_module_load __itt_module_loadW +# define __itt_module_load_ptr __itt_module_loadW_ptr +#else /* UNICODE */ +# define __itt_module_load __itt_module_loadA +# define __itt_module_load_ptr __itt_module_loadA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +void ITTAPI __itt_module_load(void *start_addr, void *end_addr, const char *path); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, void, module_loadA, (void *start_addr, void *end_addr, const char *path)) +ITT_STUB(ITTAPI, void, module_loadW, (void *start_addr, void *end_addr, const wchar_t *path)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, void, module_load, (void *start_addr, void *end_addr, const char *path)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA ITTNOTIFY_VOID(module_loadA) +#define __itt_module_loadA_ptr ITTNOTIFY_NAME(module_loadA) +#define __itt_module_loadW ITTNOTIFY_VOID(module_loadW) +#define __itt_module_loadW_ptr ITTNOTIFY_NAME(module_loadW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load ITTNOTIFY_VOID(module_load) +#define __itt_module_load_ptr ITTNOTIFY_NAME(module_load) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA(start_addr, end_addr, path) +#define __itt_module_loadA_ptr 0 +#define __itt_module_loadW(start_addr, end_addr, path) +#define __itt_module_loadW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load(start_addr, end_addr, path) +#define __itt_module_load_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_module_loadA_ptr 0 +#define __itt_module_loadW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_module_load_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Report module unload + * This API is used to report necessary information in case of bypassing default system loader. + * Notification should be done just before the module is unloaded from process memory. 
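+ *
+ * An illustrative sketch of the load/unload pairing for a loader that
+ * bypasses the system one (not part of the upstream header; the mapping
+ * helpers and the path are hypothetical):
+ *
+ * @code
+ *     size_t size = 0;
+ *     void* base = my_map_module("/opt/app/plugin.so", &size); // hypothetical
+ *     __itt_module_load(base, (char*)base + size, "/opt/app/plugin.so");
+ *     // ... use the module ...
+ *     __itt_module_unload(base);   // just before unmapping
+ *     my_unmap_module(base, size); // hypothetical
+ * @endcode
+ *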
+ * @param[in] addr - base address of loaded module
+ */
+void ITTAPI __itt_module_unload(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, module_unload, (void *addr))
+#define __itt_module_unload ITTNOTIFY_VOID(module_unload)
+#define __itt_module_unload_ptr ITTNOTIFY_NAME(module_unload)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_module_unload(addr)
+#define __itt_module_unload_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_module_unload_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+typedef enum
+{
+    __itt_module_type_unknown = 0,
+    __itt_module_type_elf,
+    __itt_module_type_coff
+} __itt_module_type;
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+typedef enum
+{
+    itt_section_type_unknown,
+    itt_section_type_bss,  /* notifies that the section contains uninitialized data. These are the relevant section types and the modules that contain them:
+                            * ELF module:  SHT_NOBITS section type
+                            * COFF module: IMAGE_SCN_CNT_UNINITIALIZED_DATA section type
+                            */
+    itt_section_type_data, /* notifies that the section contains initialized data. These are the relevant section types and the modules that contain them:
+                            * ELF module:  SHT_PROGBITS section type
+                            * COFF module: IMAGE_SCN_CNT_INITIALIZED_DATA section type
+                            */
+    itt_section_type_text  /* notifies that the section contains executable code. These are the relevant section types and the modules that contain them:
+                            * ELF module:  SHT_PROGBITS section type
+                            * COFF module: IMAGE_SCN_CNT_CODE section type
+                            */
+} __itt_section_type;
+/** @endcond */
+
+/**
+ * @hideinitializer
+ * @brief bit-mask, detects a section attribute that indicates whether a section can be executed as code:
+ * These are the relevant section attributes and the modules that contain them:
+ * ELF module:  PF_X section attribute
+ * COFF module: IMAGE_SCN_MEM_EXECUTE attribute
+ */
+#define __itt_section_exec 0x20000000
+
+/**
+ * @hideinitializer
+ * @brief bit-mask, detects a section attribute that indicates whether a section can be read.
+ * These are the relevant section attributes and the modules that contain them:
+ * ELF module:  PF_R attribute
+ * COFF module: IMAGE_SCN_MEM_READ attribute
+ */
+#define __itt_section_read 0x40000000
+
+/**
+ * @hideinitializer
+ * @brief bit-mask, detects a section attribute that indicates whether a section can be written to.
+ * These are the relevant section attributes and the modules that contain them:
+ * ELF module:  PF_W attribute
+ * COFF module: IMAGE_SCN_MEM_WRITE attribute
+ */
+#define __itt_section_write 0x80000000
+
+/** @cond exclude_from_documentation */
+#pragma pack(push, 8)
+
+typedef struct ___itt_section_info
+{
+    const char* name;        /*!< Section name in UTF8 */
+    __itt_section_type type; /*!< Section content and semantics description */
+    size_t flags;            /*!< Section bit flags that describe attributes using bit mask
+                              * Zero if disabled, non-zero if enabled
+                              */
+    void* start_addr;        /*!< Section load (relocated) start address */
+    size_t size;             /*!< Section size */
+    size_t file_offset;      /*!< Section file offset */
+} __itt_section_info;
+
+#pragma pack(pop)
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+#pragma pack(push, 8)
+
+typedef struct ___itt_module_object
+{
+    unsigned int version;              /*!< API version */
+    __itt_id module_id;                /*!< Unique identifier. This is unchanged for sections that belong to the same module */
+    __itt_module_type module_type;     /*!< Binary module format */
+    const char* module_name;           /*!< Unique module name or path to module in UTF8
+                                        * Contains module name when module_buffer and module_size exist
+                                        * Contains module path when module_buffer and module_size absent
+                                        * module_name remains the same for a given module_id
+                                        */
+    void* module_buffer;               /*!< Module buffer content */
+    size_t module_size;                /*!< Module buffer size */
+                                       /*!< If module_buffer and module_size exist, the binary module is dumped onto the system.
+                                        * If module_buffer and module_size do not exist,
+                                        * the binary module exists on the system already.
+                                        * The module_name parameter contains the path to the module.
+                                        */
+    __itt_section_info* section_array; /*!< Reference to section information */
+    size_t section_number;
+} __itt_module_object;
+
+#pragma pack(pop)
+/** @endcond */
+
+/**
+ * @brief Load module content and its loaded (relocated) sections.
+ * This API is useful to save a module, or specify its location on the system, and report information about loaded sections.
+ * The target module is saved on the system if module buffer content and size are available.
+ * If module buffer content and size are unavailable, the module name contains the path to the existing binary module.
+ * @param[in] module_obj - provides module and section information, along with unique module identifiers (name, module ID)
+ * which bind the binary module to particular sections.
+ */
+void ITTAPI __itt_module_load_with_sections(__itt_module_object* module_obj);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, module_load_with_sections, (__itt_module_object* module_obj))
+#define __itt_module_load_with_sections ITTNOTIFY_VOID(module_load_with_sections)
+#define __itt_module_load_with_sections_ptr ITTNOTIFY_NAME(module_load_with_sections)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_module_load_with_sections(module_obj)
+#define __itt_module_load_with_sections_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_module_load_with_sections_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Unload a module and its loaded (relocated) sections.
+ * This API notifies that the module and its sections were unloaded.
+ * @param[in] module_obj - provides module and sections information, along with unique module identifiers (name, module ID)
+ * which bind the binary module to particular sections.
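+ *
+ * An illustrative sketch of how the structures above might be filled in
+ * (not part of the upstream header; addresses, sizes and the path are
+ * placeholders):
+ *
+ * @code
+ *     __itt_section_info sec;
+ *     sec.name        = ".text";
+ *     sec.type        = itt_section_type_text;
+ *     sec.flags       = __itt_section_exec | __itt_section_read;
+ *     sec.start_addr  = text_addr;                // hypothetical values
+ *     sec.size        = text_size;
+ *     sec.file_offset = text_file_offset;
+ *
+ *     __itt_module_object mod = { 0 };
+ *     mod.module_type    = __itt_module_type_elf;
+ *     mod.module_name    = "/opt/app/plugin.so";  // module already on disk
+ *     mod.section_array  = &sec;
+ *     mod.section_number = 1;
+ *
+ *     __itt_module_load_with_sections(&mod);
+ *     // ... later, just before the module goes away ...
+ *     __itt_module_unload_with_sections(&mod);
+ * @endcode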
+ */
+void ITTAPI __itt_module_unload_with_sections(__itt_module_object* module_obj);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, module_unload_with_sections, (__itt_module_object* module_obj))
+#define __itt_module_unload_with_sections ITTNOTIFY_VOID(module_unload_with_sections)
+#define __itt_module_unload_with_sections_ptr ITTNOTIFY_NAME(module_unload_with_sections)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_module_unload_with_sections(module_obj)
+#define __itt_module_unload_with_sections_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_module_unload_with_sections_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+#pragma pack(push, 8)
+
+typedef struct ___itt_histogram
+{
+    const __itt_domain* domain; /*!< Domain of the histogram */
+    const char* nameA;          /*!< Name of the histogram */
+#if defined(UNICODE) || defined(_UNICODE)
+    const wchar_t* nameW;
+#else  /* UNICODE || _UNICODE */
+    void* nameW;
+#endif /* UNICODE || _UNICODE */
+    __itt_metadata_type x_type; /*!< Type of the histogram X axis */
+    __itt_metadata_type y_type; /*!< Type of the histogram Y axis */
+    int extra1;                 /*!< Reserved for the runtime */
+    void* extra2;               /*!< Reserved for the runtime */
+    struct ___itt_histogram* next;
+} __itt_histogram;
+
+#pragma pack(pop)
+/** @endcond */
+
+/**
+ * @brief Create a typed histogram instance with given name/domain.
+ * @param[in] domain The domain controlling the call.
+ * @param[in] name The name of the histogram.
+ * @param[in] x_type The type of the X axis in histogram (may be 0 to calculate batch statistics).
+ * @param[in] y_type The type of the Y axis in histogram.
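+ *
+ * For example (an illustrative sketch, not part of the upstream header):
+ *
+ * @code
+ *     __itt_domain* d = __itt_domain_create("com.example.net");
+ *     __itt_histogram* h = __itt_histogram_create(d, "packet-latency",
+ *                                                 __itt_metadata_u64,
+ *                                                 __itt_metadata_double);
+ * @endcode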
+*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +__itt_histogram* ITTAPI __itt_histogram_createA(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type); +__itt_histogram* ITTAPI __itt_histogram_createW(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type); +#if defined(UNICODE) || defined(_UNICODE) +# define __itt_histogram_create __itt_histogram_createW +# define __itt_histogram_create_ptr __itt_histogram_createW_ptr +#else /* UNICODE */ +# define __itt_histogram_create __itt_histogram_createA +# define __itt_histogram_create_ptr __itt_histogram_createA_ptr +#endif /* UNICODE */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +__itt_histogram* ITTAPI __itt_histogram_create(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_histogram*, histogram_createA, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) +ITT_STUB(ITTAPI, __itt_histogram*, histogram_createW, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_histogram*, histogram_create, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_histogram_createA ITTNOTIFY_DATA(histogram_createA) +#define __itt_histogram_createA_ptr ITTNOTIFY_NAME(histogram_createA) +#define __itt_histogram_createW ITTNOTIFY_DATA(histogram_createW) +#define __itt_histogram_createW_ptr ITTNOTIFY_NAME(histogram_createW) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_histogram_create ITTNOTIFY_DATA(histogram_create) +#define __itt_histogram_create_ptr ITTNOTIFY_NAME(histogram_create) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#else /* INTEL_NO_ITTNOTIFY_API */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_histogram_createA(domain, name, x_type, y_type) (__itt_histogram*)0 +#define __itt_histogram_createA_ptr 0 +#define __itt_histogram_createW(domain, name, x_type, y_type) (__itt_histogram*)0 +#define __itt_histogram_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_histogram_create(domain, name, x_type, y_type) (__itt_histogram*)0 +#define __itt_histogram_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_ITTNOTIFY_API */ +#else /* INTEL_NO_MACRO_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_histogram_createA_ptr 0 +#define __itt_histogram_createW_ptr 0 +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define __itt_histogram_create_ptr 0 +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* INTEL_NO_MACRO_BODY */ +/** @endcond */ + +/** + * @brief Submit statistics for a histogram instance. + * @param[in] histogram Pointer to the histogram instance to which the histogram statistic is to be dumped. + * @param[in] length The number of elements in dumped axis data array. + * @param[in] x_data The X axis dumped data itself (may be NULL to calculate batch statistics). + * @param[in] y_data The Y axis dumped data itself. 
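+ *
+ * Continuing the __itt_histogram_create() sketch above (illustrative only),
+ * one batch of three X/Y pairs with u64 X and double Y axes:
+ *
+ * @code
+ *     unsigned long long x[3] = { 1, 2, 3 };
+ *     double             y[3] = { 0.5, 1.5, 2.5 };
+ *     __itt_histogram_submit(h, 3, x, y);
+ * @endcode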
+*/ +void ITTAPI __itt_histogram_submit(__itt_histogram* histogram, size_t length, void* x_data, void* y_data); + +/** @cond exclude_from_documentation */ +#ifndef INTEL_NO_MACRO_BODY +#ifndef INTEL_NO_ITTNOTIFY_API +ITT_STUBV(ITTAPI, void, histogram_submit, (__itt_histogram* histogram, size_t length, void* x_data, void* y_data)) +#define __itt_histogram_submit ITTNOTIFY_VOID(histogram_submit) +#define __itt_histogram_submit_ptr ITTNOTIFY_NAME(histogram_submit) +#else /* INTEL_NO_ITTNOTIFY_API */ +#define __itt_histogram_submit(histogram, length, x_data, y_data) +#define __itt_histogram_submit_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ -#define __itt_event_end_ptr 0 +#define __itt_histogram_submit_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} events group */ - /** - * @defgroup arrays Arrays Visualizer - * @ingroup public - * Visualize arrays - * @{ - */ +* @brief function allows to obtain the current collection state at the moment +* @return collection state as a enum __itt_collection_state +*/ +__itt_collection_state __itt_get_collection_state(void); /** - * @enum __itt_av_data_type - * @brief Defines types of arrays data (for C/C++ intrinsic types) - */ -typedef enum -{ - __itt_e_first = 0, - __itt_e_char = 0, /* 1-byte integer */ - __itt_e_uchar, /* 1-byte unsigned integer */ - __itt_e_int16, /* 2-byte integer */ - __itt_e_uint16, /* 2-byte unsigned integer */ - __itt_e_int32, /* 4-byte integer */ - __itt_e_uint32, /* 4-byte unsigned integer */ - __itt_e_int64, /* 8-byte integer */ - __itt_e_uint64, /* 8-byte unsigned integer */ - __itt_e_float, /* 4-byte floating */ - __itt_e_double, /* 8-byte floating */ - __itt_e_last = __itt_e_double -} __itt_av_data_type; +* @brief function releases resources allocated by ITT API static part +* this API should be called from the library destructor +* @return void +*/ +void __itt_release_resources(void); +/** @endcond */ /** - * @brief Save an array data to a file. - * Output format is defined by the file extension. The csv and bmp formats are supported (bmp - for 2-dimensional array only). - * @param[in] data - pointer to the array data - * @param[in] rank - the rank of the array - * @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions. - * The size of dimensions must be equal to the rank - * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types) - * @param[in] filePath - the file path; the output format is defined by the file extension - * @param[in] columnOrder - defines how the array is stored in the linear memory. - * It should be 1 for column-major order (e.g. in FORTRAN) or 0 - for row-major order (e.g. in C). 
- */
-
+ * @brief Create a typed counter with given domain pointer, string name and counter type
+*/
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-int ITTAPI __itt_av_saveA(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder);
-int ITTAPI __itt_av_saveW(void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder);
+__itt_counter ITTAPI __itt_counter_createA_v3(const __itt_domain* domain, const char* name, __itt_metadata_type type);
+__itt_counter ITTAPI __itt_counter_createW_v3(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type);
 #if defined(UNICODE) || defined(_UNICODE)
-# define __itt_av_save __itt_av_saveW
-# define __itt_av_save_ptr __itt_av_saveW_ptr
+# define __itt_counter_create_v3 __itt_counter_createW_v3
+# define __itt_counter_create_v3_ptr __itt_counter_createW_v3_ptr
 #else /* UNICODE */
-# define __itt_av_save __itt_av_saveA
-# define __itt_av_save_ptr __itt_av_saveA_ptr
+# define __itt_counter_create_v3 __itt_counter_createA_v3
+# define __itt_counter_create_v3_ptr __itt_counter_createA_v3_ptr
 #endif /* UNICODE */
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-int ITTAPI __itt_av_save(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder);
+__itt_counter ITTAPI __itt_counter_create_v3(const __itt_domain* domain, const char* name, __itt_metadata_type type);
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-/** @cond exclude_from_documentation */
 #ifndef INTEL_NO_MACRO_BODY
 #ifndef INTEL_NO_ITTNOTIFY_API
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-ITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder))
-ITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder))
+ITT_STUB(ITTAPI, __itt_counter, counter_createA_v3, (const __itt_domain* domain, const char* name, __itt_metadata_type type))
+ITT_STUB(ITTAPI, __itt_counter, counter_createW_v3, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type))
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder))
+ITT_STUB(ITTAPI, __itt_counter, counter_create_v3, (const __itt_domain* domain, const char* name, __itt_metadata_type type))
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_av_saveA ITTNOTIFY_DATA(av_saveA)
-#define __itt_av_saveA_ptr ITTNOTIFY_NAME(av_saveA)
-#define __itt_av_saveW ITTNOTIFY_DATA(av_saveW)
-#define __itt_av_saveW_ptr ITTNOTIFY_NAME(av_saveW)
+#define __itt_counter_createA_v3 ITTNOTIFY_DATA(counter_createA_v3)
+#define __itt_counter_createA_v3_ptr ITTNOTIFY_NAME(counter_createA_v3)
+#define __itt_counter_createW_v3 ITTNOTIFY_DATA(counter_createW_v3)
+#define __itt_counter_createW_v3_ptr ITTNOTIFY_NAME(counter_createW_v3)
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#define __itt_av_save ITTNOTIFY_DATA(av_save)
-#define __itt_av_save_ptr ITTNOTIFY_NAME(av_save)
+#define __itt_counter_create_v3 ITTNOTIFY_DATA(counter_create_v3)
+#define __itt_counter_create_v3_ptr ITTNOTIFY_NAME(counter_create_v3)
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #else /* INTEL_NO_ITTNOTIFY_API */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_av_saveA(name)
-#define __itt_av_saveA_ptr 0
-#define __itt_av_saveW(name)
-#define __itt_av_saveW_ptr 0
+#define __itt_counter_createA_v3(domain, name, type) (__itt_counter)0
+#define __itt_counter_createA_v3_ptr 0
+#define __itt_counter_createW_v3(domain, name, type) (__itt_counter)0
+#define __itt_counter_create_typedW_ptr 0
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#define __itt_av_save(name)
-#define __itt_av_save_ptr 0
+#define __itt_counter_create_v3(domain, name, type) (__itt_counter)0
+#define __itt_counter_create_v3_ptr 0
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #endif /* INTEL_NO_ITTNOTIFY_API */
 #else /* INTEL_NO_MACRO_BODY */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_av_saveA_ptr 0
-#define __itt_av_saveW_ptr 0
+#define __itt_counter_createA_v3_ptr 0
+#define __itt_counter_createW_v3_ptr 0
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#define __itt_av_save_ptr 0
+#define __itt_counter_create_v3_ptr 0
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #endif /* INTEL_NO_MACRO_BODY */
 /** @endcond */
-void ITTAPI __itt_enable_attach(void);
+/**
+ * @brief Set the counter value api
+ */
+void ITTAPI __itt_counter_set_value_v3(__itt_counter counter, void *value_ptr);
-/** @cond exclude_from_documentation */
 #ifndef INTEL_NO_MACRO_BODY
 #ifndef INTEL_NO_ITTNOTIFY_API
-ITT_STUBV(ITTAPI, void, enable_attach, (void))
-#define __itt_enable_attach ITTNOTIFY_VOID(enable_attach)
-#define __itt_enable_attach_ptr ITTNOTIFY_NAME(enable_attach)
+ITT_STUBV(ITTAPI, void, counter_set_value_v3, (__itt_counter counter, void *value_ptr))
+#define __itt_counter_set_value_v3 ITTNOTIFY_VOID(counter_set_value_v3)
+#define __itt_counter_set_value_v3_ptr ITTNOTIFY_NAME(counter_set_value_v3)
 #else /* INTEL_NO_ITTNOTIFY_API */
-#define __itt_enable_attach()
-#define __itt_enable_attach_ptr 0
+#define __itt_counter_set_value_v3(counter, value_ptr)
+#define __itt_counter_set_value_v3_ptr 0
 #endif /* INTEL_NO_ITTNOTIFY_API */
 #else /* INTEL_NO_MACRO_BODY */
-#define __itt_enable_attach_ptr 0
+#define __itt_counter_set_value_v3_ptr 0
 #endif /* INTEL_NO_MACRO_BODY */
 /** @endcond */
-/** @cond exclude_from_gpa_documentation */
+/**
+ * @brief describes the type of context metadata
+*/
+typedef enum {
+ __itt_context_unknown = 0, /*!< Undefined type */
+ __itt_context_nameA, /*!< ASCII string char* type */
+ __itt_context_nameW, /*!< Unicode string wchar_t* type */
+ __itt_context_deviceA, /*!< ASCII string char* type */
+ __itt_context_deviceW, /*!< Unicode string wchar_t* type */
+ __itt_context_unitsA, /*!< ASCII string char* type */
+ __itt_context_unitsW, /*!< Unicode string wchar_t* type */
+ __itt_context_pci_addrA, /*!< ASCII string char* type */
+ __itt_context_pci_addrW, /*!< Unicode string wchar_t* type */
+ __itt_context_tid, /*!< Unsigned 64-bit integer type */
+ __itt_context_max_val, /*!< Unsigned 64-bit integer type */
+ __itt_context_bandwidth_flag, /*!< Unsigned 64-bit integer type */
+ __itt_context_latency_flag, /*!< Unsigned 64-bit integer type */
+ __itt_context_occupancy_flag, /*!< Unsigned 64-bit integer type */
+ __itt_context_on_thread_flag, /*!< Unsigned 64-bit integer type */
+ __itt_context_is_abs_val_flag, /*!< Unsigned 64-bit integer type */
+ __itt_context_cpu_instructions_flag, /*!< Unsigned 64-bit integer type */
+ __itt_context_cpu_cycles_flag /*!< Unsigned 64-bit integer type */
+} __itt_context_type;
-/** @} arrays group */
+#if defined(UNICODE) || defined(_UNICODE)
+# define __itt_context_name __itt_context_nameW
+# define __itt_context_device __itt_context_deviceW
+# define __itt_context_units __itt_context_unitsW
+# define __itt_context_pci_addr __itt_context_pci_addrW
+#else /* UNICODE || _UNICODE */
+# define __itt_context_name __itt_context_nameA
+# define __itt_context_device __itt_context_deviceA
+# define __itt_context_units __itt_context_unitsA
+# define __itt_context_pci_addr __itt_context_pci_addrA
+#endif /* UNICODE || _UNICODE */
+
+/** @cond exclude_from_documentation */
+#pragma pack(push, 8)
+
+typedef struct ___itt_context_metadata
+{
+ __itt_context_type type; /*!< Type of the context metadata value */
+ void* value; /*!< Pointer to context metadata value itself */
+} __itt_context_metadata;
+
+#pragma pack(pop)
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+#pragma pack(push, 8)
+
+typedef struct ___itt_counter_metadata
+{
+ __itt_counter counter; /*!< Associated context metadata counter */
+ __itt_context_type type; /*!< Type of the context metadata value */
+ const char* str_valueA; /*!< String context metadata value */
+#if defined(UNICODE) || defined(_UNICODE)
+ const wchar_t* str_valueW;
+#else /* UNICODE || _UNICODE */
+ void* str_valueW;
+#endif /* UNICODE || _UNICODE */
+ unsigned long long value; /*!< Numeric context metadata value */
+ int extra1; /*!< Reserved to the runtime */
+ void* extra2; /*!< Reserved to the runtime */
+ struct ___itt_counter_metadata* next;
+} __itt_counter_metadata;
+
+#pragma pack(pop)
 /** @endcond */
+/**
+ * @brief Bind context metadata to counter instance
+ * @param[in] counter Pointer to the counter instance to which the context metadata is to be associated.
+ * @param[in] length The number of elements in context metadata array.
+ * @param[in] metadata The context metadata itself.
+*/
+void ITTAPI __itt_bind_context_metadata_to_counter(__itt_counter counter, size_t length, __itt_context_metadata* metadata);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, bind_context_metadata_to_counter, (__itt_counter counter, size_t length, __itt_context_metadata* metadata))
+#define __itt_bind_context_metadata_to_counter ITTNOTIFY_VOID(bind_context_metadata_to_counter)
+#define __itt_bind_context_metadata_to_counter_ptr ITTNOTIFY_NAME(bind_context_metadata_to_counter)
+#else /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_bind_context_metadata_to_counter(counter, length, metadata)
+#define __itt_bind_context_metadata_to_counter_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else /* INTEL_NO_MACRO_BODY */
+#define __itt_bind_context_metadata_to_counter_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */

 #ifdef __cplusplus
 }
@@ -3214,16 +4227,6 @@ ITT_STUBV(ITTAPI, void, enable_attach, (void))
 extern "C" {
 #endif /* __cplusplus */

-/**
- * @ingroup tasks
- * @brief Begin an overlapped task instance.
- * @param[in] domain The domain for this task.
- * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null.
- * @param[in] parentid The parent of this task, or __itt_null.
- * @param[in] name The name of this task.
- */
-void ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id taskid, __itt_id parentid, __itt_string_handle* name);
-
 /**
 * @ingroup clockdomain
 * @brief Begin an overlapped task instance.
@@ -3236,14 +4239,6 @@ void ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id tas
 */
 void ITTAPI __itt_task_begin_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name);

-/**
- * @ingroup tasks
- * @brief End an overlapped task instance.
- * @param[in] domain The domain for this task
- * @param[in] taskid Explicit ID of finished task
- */
-void ITTAPI __itt_task_end_overlapped(const __itt_domain *domain, __itt_id taskid);
-
 /**
 * @ingroup clockdomain
 * @brief End an overlapped task instance.
@@ -3257,30 +4252,19 @@ void ITTAPI __itt_task_end_overlapped_ex(const __itt_domain* domain, __itt_clock
 /** @cond exclude_from_documentation */
 #ifndef INTEL_NO_MACRO_BODY
 #ifndef INTEL_NO_ITTNOTIFY_API
-ITT_STUBV(ITTAPI, void, task_begin_overlapped, (const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name))
 ITT_STUBV(ITTAPI, void, task_begin_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name))
-ITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id taskid))
 ITT_STUBV(ITTAPI, void, task_end_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid))
-#define __itt_task_begin_overlapped(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_overlapped,d,x,y,z)
-#define __itt_task_begin_overlapped_ptr ITTNOTIFY_NAME(task_begin_overlapped)
 #define __itt_task_begin_overlapped_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_overlapped_ex,d,x,y,z,a,b)
 #define __itt_task_begin_overlapped_ex_ptr ITTNOTIFY_NAME(task_begin_overlapped_ex)
-#define __itt_task_end_overlapped(d,x) ITTNOTIFY_VOID_D1(task_end_overlapped,d,x)
-#define __itt_task_end_overlapped_ptr ITTNOTIFY_NAME(task_end_overlapped)
 #define __itt_task_end_overlapped_ex(d,x,y,z) ITTNOTIFY_VOID_D3(task_end_overlapped_ex,d,x,y,z)
 #define __itt_task_end_overlapped_ex_ptr ITTNOTIFY_NAME(task_end_overlapped_ex)
 #else /* INTEL_NO_ITTNOTIFY_API */
-#define __itt_task_begin_overlapped(domain,taskid,parentid,name)
-#define __itt_task_begin_overlapped_ptr 0
 #define __itt_task_begin_overlapped_ex(domain,clock_domain,timestamp,taskid,parentid,name)
 #define __itt_task_begin_overlapped_ex_ptr 0
-#define __itt_task_end_overlapped(domain,taskid)
-#define __itt_task_end_overlapped_ptr 0
 #define __itt_task_end_overlapped_ex(domain,clock_domain,timestamp,taskid)
 #define __itt_task_end_overlapped_ex_ptr 0
 #endif /* INTEL_NO_ITTNOTIFY_API */
 #else /* INTEL_NO_MACRO_BODY */
-#define __itt_task_begin_overlapped_ptr 0
 #define __itt_task_begin_overlapped_ex_ptr 0
 #define __itt_task_end_overlapped_ptr 0
 #define __itt_task_end_overlapped_ex_ptr 0
@@ -3540,130 +4524,7 @@ ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt))
 * Counters group
 * @{
 */
-/**
- * @brief opaque structure for counter identification
- */
-typedef struct ___itt_counter *__itt_counter;
-
-/**
- * @brief Create a counter with given name/domain for the calling thread
- *
- * After __itt_counter_create() is called, __itt_counter_inc() / __itt_counter_inc_delta() can be used
- * to increment the counter on any thread
- */
-#if ITT_PLATFORM==ITT_PLATFORM_WIN
-__itt_counter ITTAPI __itt_counter_createA(const char *name, const char *domain);
-__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain);
-#if defined(UNICODE) || defined(_UNICODE)
-# define __itt_counter_create __itt_counter_createW
-# define __itt_counter_create_ptr __itt_counter_createW_ptr
-#else /* UNICODE */
-# define __itt_counter_create __itt_counter_createA
-# define __itt_counter_create_ptr __itt_counter_createA_ptr
-#endif /* UNICODE */
-#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain);
-#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-
-/** @cond exclude_from_documentation */
-#ifndef INTEL_NO_MACRO_BODY
-#ifndef INTEL_NO_ITTNOTIFY_API
-#if ITT_PLATFORM==ITT_PLATFORM_WIN
-ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain))
-ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain))
-#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain))
-#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_counter_createA ITTNOTIFY_DATA(counter_createA)
-#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA)
-#define __itt_counter_createW ITTNOTIFY_DATA(counter_createW)
-#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW)
-#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#define __itt_counter_create ITTNOTIFY_DATA(counter_create)
-#define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create)
-#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#else /* INTEL_NO_ITTNOTIFY_API */
-#if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_counter_createA(name, domain)
-#define __itt_counter_createA_ptr 0
-#define __itt_counter_createW(name, domain)
-#define __itt_counter_createW_ptr 0
-#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#define __itt_counter_create(name, domain)
-#define __itt_counter_create_ptr 0
-#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#endif /* INTEL_NO_ITTNOTIFY_API */
-#else /* INTEL_NO_MACRO_BODY */
-#if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_counter_createA_ptr 0
-#define __itt_counter_createW_ptr 0
-#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#define __itt_counter_create_ptr 0
-#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-#endif /* INTEL_NO_MACRO_BODY */
-/** @endcond */
-
-/**
- * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create()
- */
-void ITTAPI __itt_counter_destroy(__itt_counter id);
-
-/** @cond exclude_from_documentation */
-#ifndef INTEL_NO_MACRO_BODY
-#ifndef INTEL_NO_ITTNOTIFY_API
-ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id))
-#define __itt_counter_destroy ITTNOTIFY_VOID(counter_destroy)
-#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy)
-#else /* INTEL_NO_ITTNOTIFY_API */
-#define __itt_counter_destroy(id)
-#define __itt_counter_destroy_ptr 0
-#endif /* INTEL_NO_ITTNOTIFY_API */
-#else /* INTEL_NO_MACRO_BODY */
-#define __itt_counter_destroy_ptr 0
-#endif /* INTEL_NO_MACRO_BODY */
-/** @endcond */
-
-/**
- * @brief Increment the counter value
- */
-void ITTAPI __itt_counter_inc(__itt_counter id);
-
-/** @cond exclude_from_documentation */
-#ifndef INTEL_NO_MACRO_BODY
-#ifndef INTEL_NO_ITTNOTIFY_API
-ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id))
-#define __itt_counter_inc ITTNOTIFY_VOID(counter_inc)
-#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc)
-#else /* INTEL_NO_ITTNOTIFY_API */
-#define __itt_counter_inc(id)
-#define __itt_counter_inc_ptr 0
-#endif /* INTEL_NO_ITTNOTIFY_API */
-#else /* INTEL_NO_MACRO_BODY */
-#define __itt_counter_inc_ptr 0
-#endif /* INTEL_NO_MACRO_BODY */
-/** @endcond */
-
-/**
- * @brief Increment the counter value with x
- */
-void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value);
-/** @cond exclude_from_documentation */
-#ifndef INTEL_NO_MACRO_BODY
-#ifndef INTEL_NO_ITTNOTIFY_API
-ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value))
-#define __itt_counter_inc_delta ITTNOTIFY_VOID(counter_inc_delta)
-#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta)
-#else /* INTEL_NO_ITTNOTIFY_API */
-#define __itt_counter_inc_delta(id, value)
-#define __itt_counter_inc_delta_ptr 0
-#endif /* INTEL_NO_ITTNOTIFY_API */
-#else /* INTEL_NO_MACRO_BODY */
-#define __itt_counter_inc_delta_ptr 0
-#endif /* INTEL_NO_MACRO_BODY */
-/** @endcond */
-/** @} counters group */
 /**
 * @defgroup stitch Stack Stitching
@@ -3698,7 +4559,7 @@ ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void))
 /** @endcond */
 /**
- * @brief Destroy the inforamtion about stitch point identified by the pointer previously returned by __itt_stack_caller_create()
+ * @brief Destroy the information about stitch point identified by the pointer previously returned by __itt_stack_caller_create()
 */
 void ITTAPI __itt_stack_caller_destroy(__itt_caller id);
@@ -3769,7 +4630,7 @@ typedef enum __itt_error_code
 {
 __itt_error_success = 0, /*!< no error */
 __itt_error_no_module = 1, /*!< module can't be loaded */
- /* %1$s -- library name; win: %2$d -- system error code; unx: %2$s -- system error message. */
+ /* %1$s -- library name; win: %2$d -- system error code; unix: %2$s -- system error message. */
 __itt_error_no_symbol = 2, /*!< symbol not found */
 /* %1$s -- library name, %2$s -- symbol name. */
 __itt_error_unknown_group = 3, /*!< unknown group specified */
diff --git a/src/tbb/src/tbb/tools_api/ittnotify_config.h b/src/tbb/src/tbb/tools_api/ittnotify_config.h
index 8ddbdd005..001d42e0e 100644
--- a/src/tbb/src/tbb/tools_api/ittnotify_config.h
+++ b/src/tbb/src/tbb/tools_api/ittnotify_config.h
@@ -1,21 +1,17 @@
 /*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
- This file is part of Threading Building Blocks. Threading Building Blocks is free software;
- you can redistribute it and/or modify it under the terms of the GNU General Public License
- version 2 as published by the Free Software Foundation. Threading Building Blocks is
- distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details. You should have received a copy of
- the GNU General Public License along with Threading Building Blocks; if not, write to the
- Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
- As a special exception, you may use this file as part of a free software library without
- restriction. Specifically, if other files instantiate templates or use macros or inline
- functions from this file, or you compile this file and link it with other files to produce
- an executable, this file does not by itself cause the resulting executable to be covered
- by the GNU General Public License. This exception does not however invalidate any other
- reasons why the executable file might be covered by the GNU General Public License.
+ Copyright (c) 2005-2024 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _ITTNOTIFY_CONFIG_H_ @@ -34,11 +30,23 @@ # define ITT_OS_MAC 3 #endif /* ITT_OS_MAC */ +#ifndef ITT_OS_FREEBSD +# define ITT_OS_FREEBSD 4 +#endif /* ITT_OS_FREEBSD */ + +#ifndef ITT_OS_OPENBSD +# define ITT_OS_OPENBSD 5 +#endif /* ITT_OS_OPENBSD */ + #ifndef ITT_OS # if defined WIN32 || defined _WIN32 # define ITT_OS ITT_OS_WIN # elif defined( __APPLE__ ) && defined( __MACH__ ) # define ITT_OS ITT_OS_MAC +# elif defined( __FreeBSD__ ) +# define ITT_OS ITT_OS_FREEBSD +# elif defined( __OpenBSD__ ) +# define ITT_OS ITT_OS_OPENBSD # else # define ITT_OS ITT_OS_LINUX # endif @@ -56,11 +64,23 @@ # define ITT_PLATFORM_MAC 3 #endif /* ITT_PLATFORM_MAC */ +#ifndef ITT_PLATFORM_FREEBSD +# define ITT_PLATFORM_FREEBSD 4 +#endif /* ITT_PLATFORM_FREEBSD */ + +#ifndef ITT_PLATFORM_OPENBSD +# define ITT_PLATFORM_OPENBSD 5 +#endif /* ITT_PLATFORM_OPENBSD */ + #ifndef ITT_PLATFORM # if ITT_OS==ITT_OS_WIN # define ITT_PLATFORM ITT_PLATFORM_WIN # elif ITT_OS==ITT_OS_MAC # define ITT_PLATFORM ITT_PLATFORM_MAC +# elif ITT_OS==ITT_OS_FREEBSD +# define ITT_PLATFORM ITT_PLATFORM_FREEBSD +# elif ITT_OS==ITT_OS_OPENBSD +# define ITT_PLATFORM ITT_PLATFORM_OPENBSD # else # define ITT_PLATFORM ITT_PLATFORM_POSIX # endif @@ -80,40 +100,45 @@ #endif /* UNICODE || _UNICODE */ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#ifndef CDECL +#ifndef ITTAPI_CDECL # if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl +# define ITTAPI_CDECL __cdecl # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define CDECL __attribute__ ((cdecl)) +# if defined _M_IX86 || defined __i386__ +# define ITTAPI_CDECL __attribute__ ((cdecl)) # else /* _M_IX86 || __i386__ */ -# define CDECL /* actual only on x86 platform */ +# define ITTAPI_CDECL /* actual only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ +#endif /* ITTAPI_CDECL */ #ifndef STDCALL # if ITT_PLATFORM==ITT_PLATFORM_WIN # define STDCALL __stdcall # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # if defined _M_IX86 || defined __i386__ -# define STDCALL __attribute__ ((stdcall)) +# define STDCALL __attribute__ ((stdcall)) # else /* _M_IX86 || __i386__ */ # define STDCALL /* supported only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* STDCALL */ -#define ITTAPI CDECL -#define LIBITTAPI CDECL +#define ITTAPI ITTAPI_CDECL +#define LIBITTAPI ITTAPI_CDECL /* TODO: Temporary for compatibility! */ -#define ITTAPI_CALL CDECL -#define LIBITTAPI_CALL CDECL +#define ITTAPI_CALL ITTAPI_CDECL +#define LIBITTAPI_CALL ITTAPI_CDECL #if ITT_PLATFORM==ITT_PLATFORM_WIN /* use __forceinline (VC++ specific) */ -#define ITT_INLINE __forceinline +#if defined(__MINGW32__) && !defined(__cplusplus) +#define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__)) +#else +#define ITT_INLINE static __forceinline +#endif /* __MINGW32__ */ + #define ITT_INLINE_ATTRIBUTE /* nothing */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /* @@ -122,11 +147,12 @@ * if no optimization level was specified. 
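+ * (Editorial note, not part of the upstream source: under __STRICT_ANSI__,
+ * e.g. 'gcc -ansi', the 'inline' keyword is not available in C89 mode, so
+ * the fallback below drops it and keeps only 'static', relying on
+ * __attribute__((unused)) to silence unused-function warnings.)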
*/ #ifdef __STRICT_ANSI__ -#define ITT_INLINE static inline +#define ITT_INLINE static +#define ITT_INLINE_ATTRIBUTE __attribute__((unused)) #else /* __STRICT_ANSI__ */ #define ITT_INLINE static inline +#define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused)) #endif /* __STRICT_ANSI__ */ -#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @endcond */ @@ -138,10 +164,38 @@ # define ITT_ARCH_IA32E 2 #endif /* ITT_ARCH_IA32E */ +#ifndef ITT_ARCH_IA64 +# define ITT_ARCH_IA64 3 +#endif /* ITT_ARCH_IA64 */ + #ifndef ITT_ARCH_ARM # define ITT_ARCH_ARM 4 #endif /* ITT_ARCH_ARM */ +#ifndef ITT_ARCH_PPC64 +# define ITT_ARCH_PPC64 5 +#endif /* ITT_ARCH_PPC64 */ + +#ifndef ITT_ARCH_ARM64 +# define ITT_ARCH_ARM64 6 +#endif /* ITT_ARCH_ARM64 */ + +#ifndef ITT_ARCH_LOONGARCH64 +# define ITT_ARCH_LOONGARCH64 7 +#endif /* ITT_ARCH_LOONGARCH64 */ + +#ifndef ITT_ARCH_S390X +# define ITT_ARCH_S390X 8 +#endif /* ITT_ARCH_S390X */ + +#ifndef ITT_ARCH_HPPA +# define ITT_ARCH_HPPA 9 +#endif /* ITT_ARCH_HPPA */ + +#ifndef ITT_ARCH_RISCV64 +# define ITT_ARCH_RISCV64 10 +#endif /* ITT_ARCH_RISCV64 */ + #ifndef ITT_ARCH # if defined _M_IX86 || defined __i386__ # define ITT_ARCH ITT_ARCH_IA32 @@ -149,15 +203,32 @@ # define ITT_ARCH ITT_ARCH_IA32E # elif defined _M_IA64 || defined __ia64__ # define ITT_ARCH ITT_ARCH_IA64 -# elif defined _M_ARM || __arm__ +# elif defined _M_ARM || defined __arm__ # define ITT_ARCH ITT_ARCH_ARM +# elif defined __aarch64__ +# define ITT_ARCH ITT_ARCH_ARM64 +# elif defined __powerpc64__ +# define ITT_ARCH ITT_ARCH_PPC64 +# elif defined __loongarch__ +# define ITT_ARCH ITT_ARCH_LOONGARCH64 +# elif defined __s390__ || defined __s390x__ +# define ITT_ARCH ITT_ARCH_S390X +# elif defined __hppa__ +# define ITT_ARCH ITT_ARCH_HPPA +# elif defined __riscv && __riscv_xlen == 64 +# define ITT_ARCH ITT_ARCH_RISCV64 # endif + #endif #ifdef __cplusplus # define ITT_EXTERN_C extern "C" +# define ITT_EXTERN_C_BEGIN extern "C" { +# define ITT_EXTERN_C_END } #else # define ITT_EXTERN_C /* nothing */ +# define ITT_EXTERN_C_BEGIN /* nothing */ +# define ITT_EXTERN_C_END /* nothing */ #endif /* __cplusplus */ #define ITT_TO_STR_AUX(x) #x @@ -173,10 +244,10 @@ #define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 } /* Replace with snapshot date YYYYMMDD for promotion build. 
*/ -#define API_VERSION_BUILD 20111111 +#define API_VERSION_BUILD 20230630 #ifndef API_VERSION_NUM -#define API_VERSION_NUM 0.0.0 +#define API_VERSION_NUM 3.24.4 #endif /* API_VERSION_NUM */ #define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \ @@ -188,7 +259,11 @@ typedef HMODULE lib_t; typedef DWORD TIDT; typedef CRITICAL_SECTION mutex_t; +#ifdef __cplusplus +#define MUTEX_INITIALIZER {} +#else #define MUTEX_INITIALIZER { 0 } +#endif #define strong_alias(name, aliasname) /* empty for Windows */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #include <dlfcn.h> @@ -216,13 +291,13 @@ typedef pthread_mutex_t mutex_t; #define __itt_mutex_init(mutex) InitializeCriticalSection(mutex) #define __itt_mutex_lock(mutex) EnterCriticalSection(mutex) #define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex) +#define __itt_mutex_destroy(mutex) DeleteCriticalSection(mutex) #define __itt_load_lib(name) LoadLibraryA(name) #define __itt_unload_lib(handle) FreeLibrary(handle) #define __itt_system_error() (int)GetLastError() #define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2) -#define __itt_fstrlen(s) lstrlenA(s) -#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l) -#define __itt_fstrdup(s) _strdup(s) +#define __itt_fstrnlen(s, l) strnlen_s(s, l) +#define __itt_fstrcpyn(s1, b, s2, l) strncpy_s(s1, b, s2, l) #define __itt_thread_id() GetCurrentThreadId() #define __itt_thread_yield() SwitchToThread() #ifndef ITT_SIMPLE_INIT @@ -232,7 +307,18 @@ ITT_INLINE long __itt_interlocked_increment(volatile long* ptr) { return InterlockedIncrement(ptr); } +ITT_INLINE long +__itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand) ITT_INLINE_ATTRIBUTE; +ITT_INLINE long +__itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand) +{ + return InterlockedCompareExchange(ptr, exchange, comperand); +} #endif /* ITT_SIMPLE_INIT */ + +#define DL_SYMBOLS (1) +#define PTHREAD_SYMBOLS (1) + #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ #define __itt_get_proc(lib, name) dlsym(lib, name) #define __itt_mutex_init(mutex) {\ @@ -257,20 +343,40 @@ ITT_INLINE long __itt_interlocked_increment(volatile long* ptr) } #define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex) #define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex) +#define __itt_mutex_destroy(mutex) pthread_mutex_destroy(mutex) #define __itt_load_lib(name) dlopen(name, RTLD_LAZY) #define __itt_unload_lib(handle) dlclose(handle) #define __itt_system_error() errno #define __itt_fstrcmp(s1, s2) strcmp(s1, s2) -#define __itt_fstrlen(s) strlen(s) -#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l) -#define __itt_fstrdup(s) strdup(s) + +/* makes customer code define safe APIs for SDL_STRNLEN_S and SDL_STRNCPY_S */ +#ifdef SDL_STRNLEN_S +#define __itt_fstrnlen(s, l) SDL_STRNLEN_S(s, l) +#else +#define __itt_fstrnlen(s, l) strlen(s) +#endif /* SDL_STRNLEN_S */ +#ifdef SDL_STRNCPY_S +#define __itt_fstrcpyn(s1, b, s2, l) SDL_STRNCPY_S(s1, b, s2, l) +#else +#define __itt_fstrcpyn(s1, b, s2, l) { \ + if (b > 0) { \ + /* 'volatile' is used to suppress the warning that a destination */ \ + /* bound depends on the length of the source. */ \ + volatile size_t num_to_copy = (size_t)(b - 1) < (size_t)(l) ? 
\ + (size_t)(b - 1) : (size_t)(l); \ + strncpy(s1, s2, num_to_copy); \ + s1[num_to_copy] = 0; \ + } \ +} +#endif /* SDL_STRNCPY_S */ + #define __itt_thread_id() pthread_self() #define __itt_thread_yield() sched_yield() #if ITT_ARCH==ITT_ARCH_IA64 #ifdef __INTEL_COMPILER #define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val) #else /* __INTEL_COMPILER */ -/* TODO: Add Support for not Intel compilers for IA-64 architecture */ +#define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val) #endif /* __INTEL_COMPILER */ #elif ITT_ARCH==ITT_ARCH_IA32 || ITT_ARCH==ITT_ARCH_IA32E /* ITT_ARCH!=ITT_ARCH_IA64 */ ITT_INLINE long @@ -279,12 +385,12 @@ ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend) { long result; __asm__ __volatile__("lock\nxadd %0,%1" - : "=r"(result),"=m"(*(int*)ptr) - : "0"(addend), "m"(*(int*)ptr) + : "=r"(result),"=m"(*(volatile int*)ptr) + : "0"(addend), "m"(*(volatile int*)ptr) : "memory"); return result; } -#elif ITT_ARCH==ITT_ARCH_ARM +#else #define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val) #endif /* ITT_ARCH==ITT_ARCH_IA64 */ #ifndef ITT_SIMPLE_INIT @@ -294,13 +400,46 @@ ITT_INLINE long __itt_interlocked_increment(volatile long* ptr) { return __TBB_machine_fetchadd4(ptr, 1) + 1L; } +ITT_INLINE long +__itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand) ITT_INLINE_ATTRIBUTE; +ITT_INLINE long +__itt_interlocked_compare_exchange(volatile long* ptr, long exchange, long comperand) +{ + return __sync_val_compare_and_swap(ptr, exchange, comperand); +} #endif /* ITT_SIMPLE_INIT */ + +void* dlopen(const char*, int) __attribute__((weak)); +void* dlsym(void*, const char*) __attribute__((weak)); +int dlclose(void*) __attribute__((weak)); +#define DL_SYMBOLS (dlopen && dlsym && dlclose) + +int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak)); +int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak)); +int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak)); +int pthread_mutex_destroy(pthread_mutex_t*) __attribute__((weak)); +int pthread_mutexattr_init(pthread_mutexattr_t*) __attribute__((weak)); +int pthread_mutexattr_settype(pthread_mutexattr_t*, int) __attribute__((weak)); +int pthread_mutexattr_destroy(pthread_mutexattr_t*) __attribute__((weak)); +pthread_t pthread_self(void) __attribute__((weak)); +#define PTHREAD_SYMBOLS (pthread_mutex_init && pthread_mutex_lock && pthread_mutex_unlock && pthread_mutex_destroy && pthread_mutexattr_init && pthread_mutexattr_settype && pthread_mutexattr_destroy && pthread_self) + #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -typedef enum { - __itt_collection_normal = 0, - __itt_collection_paused = 1 -} __itt_collection_state; +/* strdup() is not included into C99 which results in a compiler warning about + * implicitly declared symbol. To avoid the issue strdup is implemented + * manually. + */ +#define ITT_STRDUP_MAX_STRING_SIZE 4096 +#define __itt_fstrdup(s, new_s) do { \ + if (s != NULL) { \ + size_t s_len = __itt_fstrnlen(s, ITT_STRDUP_MAX_STRING_SIZE); \ + new_s = (char *)malloc(s_len + 1); \ + if (new_s != NULL) { \ + __itt_fstrcpyn(new_s, s_len + 1, s, s_len); \ + } \ + } \ +} while(0) typedef enum { __itt_thread_normal = 0, @@ -343,8 +482,33 @@ typedef struct ___itt_api_info __itt_group_id group; } __itt_api_info; +typedef struct __itt_counter_info +{ + const char* nameA; /*!< Copy of original name in ASCII. 
*/ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* nameW; /*!< Copy of original name in UNICODE. */ +#else /* UNICODE || _UNICODE */ + void* nameW; +#endif /* UNICODE || _UNICODE */ + const char* domainA; /*!< Copy of original name in ASCII. */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* domainW; /*!< Copy of original name in UNICODE. */ +#else /* UNICODE || _UNICODE */ + void* domainW; +#endif /* UNICODE || _UNICODE */ + int type; + long index; + int extra1; /*!< Reserved to the runtime */ + void* extra2; /*!< Reserved to the runtime */ + struct __itt_counter_info* next; +} __itt_counter_info_t; + struct ___itt_domain; struct ___itt_string_handle; +struct ___itt_histogram; +struct ___itt_counter_metadata; + +#include "ittnotify.h" typedef struct ___itt_global { @@ -366,6 +530,10 @@ typedef struct ___itt_global struct ___itt_domain* domain_list; struct ___itt_string_handle* string_list; __itt_collection_state state; + __itt_counter_info_t* counter_list; + unsigned int ipt_collect_events; + struct ___itt_histogram* histogram_list; + struct ___itt_counter_metadata* counter_metadata_list; } __itt_global; #pragma pack(pop) @@ -391,7 +559,9 @@ typedef struct ___itt_global h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \ if (h != NULL) { \ h->tid = t; \ - h->nameA = n ? __itt_fstrdup(n) : NULL; \ + char *n_copy = NULL; \ + __itt_fstrdup(n, n_copy); \ + h->nameA = n_copy; \ h->nameW = NULL; \ h->state = s; \ h->extra1 = 0; /* reserved */ \ @@ -407,7 +577,7 @@ typedef struct ___itt_global #define NEW_DOMAIN_W(gptr,h,h_tail,name) { \ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \ if (h != NULL) { \ - h->flags = 0; /* domain is disabled by default */ \ + h->flags = 1; /* domain is enabled by default */ \ h->nameA = NULL; \ h->nameW = name ? _wcsdup(name) : NULL; \ h->extra1 = 0; /* reserved */ \ @@ -423,8 +593,10 @@ typedef struct ___itt_global #define NEW_DOMAIN_A(gptr,h,h_tail,name) { \ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \ if (h != NULL) { \ - h->flags = 0; /* domain is disabled by default */ \ - h->nameA = name ? __itt_fstrdup(name) : NULL; \ + h->flags = 1; /* domain is enabled by default */ \ + char *name_copy = NULL; \ + __itt_fstrdup(name, name_copy); \ + h->nameA = name_copy; \ h->nameW = NULL; \ h->extra1 = 0; /* reserved */ \ h->extra2 = NULL; /* reserved */ \ @@ -454,7 +626,9 @@ typedef struct ___itt_global #define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \ if (h != NULL) { \ - h->strA = name ? __itt_fstrdup(name) : NULL; \ + char *name_copy = NULL; \ + __itt_fstrdup(name, name_copy); \ + h->strA = name_copy; \ h->strW = NULL; \ h->extra1 = 0; /* reserved */ \ h->extra2 = NULL; /* reserved */ \ @@ -466,4 +640,136 @@ typedef struct ___itt_global } \ } +#define NEW_COUNTER_W(gptr,h,h_tail,name,domain,type) { \ + h = (__itt_counter_info_t*)malloc(sizeof(__itt_counter_info_t)); \ + if (h != NULL) { \ + h->nameA = NULL; \ + h->nameW = name ? _wcsdup(name) : NULL; \ + h->domainA = NULL; \ + h->domainW = domain ? 
_wcsdup(domain) : NULL; \ + h->type = type; \ + h->index = 0; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->counter_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_COUNTER_A(gptr,h,h_tail,name,domain,type) { \ + h = (__itt_counter_info_t*)malloc(sizeof(__itt_counter_info_t)); \ + if (h != NULL) { \ + char *name_copy = NULL; \ + __itt_fstrdup(name, name_copy); \ + h->nameA = name_copy; \ + h->nameW = NULL; \ + char *domain_copy = NULL; \ + __itt_fstrdup(domain, domain_copy); \ + h->domainA = domain_copy; \ + h->domainW = NULL; \ + h->type = type; \ + h->index = 0; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->counter_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_HISTOGRAM_W(gptr,h,h_tail,domain,name,x_type,y_type) { \ + h = (__itt_histogram*)malloc(sizeof(__itt_histogram)); \ + if (h != NULL) { \ + h->domain = domain; \ + h->nameA = NULL; \ + h->nameW = name ? _wcsdup(name) : NULL; \ + h->x_type = x_type; \ + h->y_type = y_type; \ + h->extra1 = 0; \ + h->extra2 = NULL; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->histogram_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_HISTOGRAM_A(gptr,h,h_tail,domain,name,x_type,y_type) { \ + h = (__itt_histogram*)malloc(sizeof(__itt_histogram)); \ + if (h != NULL) { \ + h->domain = domain; \ + char *name_copy = NULL; \ + __itt_fstrdup(name, name_copy); \ + h->nameA = name_copy; \ + h->nameW = NULL; \ + h->x_type = x_type; \ + h->y_type = y_type; \ + h->extra1 = 0; \ + h->extra2 = NULL; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->histogram_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_COUNTER_METADATA_NUM(gptr,h,h_tail,counter,type,value) { \ + h = (__itt_counter_metadata*)malloc(sizeof(__itt_counter_metadata)); \ + if (h != NULL) { \ + h->counter = counter; \ + h->type = type; \ + h->str_valueA = NULL; \ + h->str_valueW = NULL; \ + h->value = value; \ + h->extra1 = 0; \ + h->extra2 = NULL; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->counter_metadata_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_COUNTER_METADATA_STR_A(gptr,h,h_tail,counter,type,str_valueA) { \ + h = (__itt_counter_metadata*)malloc(sizeof(__itt_counter_metadata)); \ + if (h != NULL) { \ + h->counter = counter; \ + h->type = type; \ + char *str_value_copy = NULL; \ + __itt_fstrdup(str_valueA, str_value_copy); \ + h->str_valueA = str_value_copy; \ + h->str_valueW = NULL; \ + h->value = 0; \ + h->extra1 = 0; \ + h->extra2 = NULL; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->counter_metadata_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_COUNTER_METADATA_STR_W(gptr,h,h_tail,counter,type,str_valueW) { \ + h = (__itt_counter_metadata*)malloc(sizeof(__itt_counter_metadata)); \ + if (h != NULL) { \ + h->counter = counter; \ + h->type = type; \ + h->str_valueA = NULL; \ + h->str_valueW = str_valueW ? _wcsdup(str_valueW) : NULL; \ + h->value = 0; \ + h->extra1 = 0; \ + h->extra2 = NULL; \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->counter_metadata_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + #endif /* _ITTNOTIFY_CONFIG_H_ */ diff --git a/src/tbb/src/tbb/tools_api/ittnotify_static.c b/src/tbb/src/tbb/tools_api/ittnotify_static.c index fd9d6849a..c3a53bf0a 100644 --- a/src/tbb/src/tbb/tools_api/ittnotify_static.c +++ b/src/tbb/src/tbb/tools_api/ittnotify_static.c @@ -1,27 +1,27 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ +#define INTEL_NO_MACRO_BODY +#define INTEL_ITTNOTIFY_API_PRIVATE #include "ittnotify_config.h" #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if !defined(PATH_MAX) #define PATH_MAX 512 +#endif #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ #include <limits.h> #include <dlfcn.h> @@ -32,20 +32,56 @@ #include <stdarg.h> #include <string.h> -#define INTEL_NO_MACRO_BODY -#define INTEL_ITTNOTIFY_API_PRIVATE #include "ittnotify.h" #include "legacy/ittnotify.h" #include "disable_warnings.h" -static const char api_version[] = API_VERSION "\0\n@(#) $Revision: 336044 $\n"; +static const char api_version[] = API_VERSION "\0\n@(#) $Revision$\n"; #define _N_(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) +#ifndef HAS_CPP_ATTR +#if defined(__cplusplus) && defined(__has_cpp_attribute) +#define HAS_CPP_ATTR(X) __has_cpp_attribute(X) +#else +#define HAS_CPP_ATTR(X) 0 +#endif +#endif + +#ifndef HAS_C_ATTR +#if defined(__STDC__) && defined(__has_c_attribute) +#define HAS_C_ATTR(X) __has_c_attribute(X) +#else +#define HAS_C_ATTR(X) 0 +#endif +#endif + +#ifndef HAS_GNU_ATTR +#if defined(__has_attribute) +#define HAS_GNU_ATTR(X) __has_attribute(X) +#else +#define HAS_GNU_ATTR(X) 0 +#endif +#endif + +#ifndef ITT_ATTRIBUTE_FALLTHROUGH +#if (HAS_CPP_ATTR(fallthrough) || HAS_C_ATTR(fallthrough)) && (__cplusplus >= 201703L || _MSVC_LANG >= 201703L) +#define ITT_ATTRIBUTE_FALLTHROUGH [[fallthrough]] +#elif HAS_CPP_ATTR(gnu::fallthrough) +#define ITT_ATTRIBUTE_FALLTHROUGH [[gnu::fallthrough]] +#elif HAS_CPP_ATTR(clang::fallthrough) +#define ITT_ATTRIBUTE_FALLTHROUGH [[clang::fallthrough]] +#elif HAS_GNU_ATTR(fallthrough) && !__INTEL_COMPILER +#define ITT_ATTRIBUTE_FALLTHROUGH __attribute__((fallthrough)) +#else +#define ITT_ATTRIBUTE_FALLTHROUGH +#endif +#endif + #if ITT_OS==ITT_OS_WIN static const char* ittnotify_lib_name = "libittnotify.dll"; -#elif ITT_OS==ITT_OS_LINUX +#elif ITT_OS==ITT_OS_LINUX || ITT_OS==ITT_OS_FREEBSD|| ITT_OS==ITT_OS_OPENBSD static const char* ittnotify_lib_name = "libittnotify.so"; #elif ITT_OS==ITT_OS_MAC static const char* ittnotify_lib_name = "libittnotify.dylib"; @@ -76,7 +112,15 @@ static const char* ittnotify_lib_name = "libittnotify.dylib"; #endif /* default location of userapi collector on Android */ -#define ANDROID_ITTNOTIFY_DEFAULT_PATH "/data/data/com.intel.vtune/intel/libittnotify.so" +#define ANDROID_ITTNOTIFY_DEFAULT_PATH_MASK(x) "/data/data/com.intel.vtune/perfrun/lib" \ + #x "/runtime/libittnotify.so" + +#if ITT_ARCH==ITT_ARCH_IA32 || ITT_ARCH==ITT_ARCH_ARM +#define ANDROID_ITTNOTIFY_DEFAULT_PATH ANDROID_ITTNOTIFY_DEFAULT_PATH_MASK(32) +#else +#define ANDROID_ITTNOTIFY_DEFAULT_PATH ANDROID_ITTNOTIFY_DEFAULT_PATH_MASK(64) +#endif + #endif @@ -89,9 +133,11 @@ static const char* ittnotify_lib_name = "libittnotify.dylib"; #endif /* LIB_VAR_NAME */ #define ITT_MUTEX_INIT_AND_LOCK(p) { \ + if (PTHREAD_SYMBOLS) \ + { \ if (!p.mutex_initialized) \ { \ - if (__itt_interlocked_increment(&p.atomic_counter) == 1) \ + if (__itt_interlocked_compare_exchange(&p.atomic_counter, 1, 0) == 0) \ { \ __itt_mutex_init(&p.mutex); \ p.mutex_initialized = 1; \ @@ -101,9 +147,24 @@ static const char* ittnotify_lib_name = "libittnotify.dylib"; __itt_thread_yield(); \ } \ __itt_mutex_lock(&p.mutex); \ + } \ } -const int _N_(err) = 0; +#define ITT_MUTEX_DESTROY(p) { \ + if (PTHREAD_SYMBOLS) \ + { \ + if (p.mutex_initialized) \ + { \ + if (__itt_interlocked_compare_exchange(&p.atomic_counter, 0, 1) == 1) \ + { \ + __itt_mutex_destroy(&p.mutex); \ + p.mutex_initialized = 0; \ + } \ + } \ + } \ +} + +#define 
ITT_MODULE_OBJECT_VERSION 1 typedef int (__itt_init_ittlib_t)(const char*, __itt_group_id); @@ -123,16 +184,19 @@ static __itt_fini_ittlib_t* __itt_fini_ittlib_ptr = _N_(fini_ittlib); #define __itt_fini_ittlib_name __itt_fini_ittlib_ptr #endif /* __itt_fini_ittlib_name */ +extern __itt_global _N_(_ittapi_global); + /* building pointers to imported funcs */ #undef ITT_STUBV #undef ITT_STUB #define ITT_STUB(api,type,name,args,params,ptr,group,format) \ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\ typedef type api ITT_JOIN(_N_(name),_t) args; \ -ITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); } \ +ITT_EXTERN_C_BEGIN ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); ITT_EXTERN_C_END \ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args \ { \ - __itt_init_ittlib_name(NULL, __itt_group_all); \ + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) \ + __itt_init_ittlib_name(NULL, __itt_group_all); \ if (ITTNOTIFY_NAME(name) && ITTNOTIFY_NAME(name) != ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init))) \ return ITTNOTIFY_NAME(name) params; \ else \ @@ -142,10 +206,11 @@ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args \ #define ITT_STUBV(api,type,name,args,params,ptr,group,format) \ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\ typedef type api ITT_JOIN(_N_(name),_t) args; \ -ITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); } \ +ITT_EXTERN_C_BEGIN ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); ITT_EXTERN_C_END \ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args \ { \ - __itt_init_ittlib_name(NULL, __itt_group_all); \ + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) \ + __itt_init_ittlib_name(NULL, __itt_group_all); \ if (ITTNOTIFY_NAME(name) && ITTNOTIFY_NAME(name) != ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init))) \ ITTNOTIFY_NAME(name) params; \ else \ @@ -160,12 +225,12 @@ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args \ #define ITT_STUB(api,type,name,args,params,ptr,group,format) \ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\ typedef type api ITT_JOIN(_N_(name),_t) args; \ -ITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); } +ITT_EXTERN_C_BEGIN ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); ITT_EXTERN_C_END #define ITT_STUBV(api,type,name,args,params,ptr,group,format) \ static type api ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)) args;\ typedef type api ITT_JOIN(_N_(name),_t) args; \ -ITT_EXTERN_C { ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); } +ITT_EXTERN_C_BEGIN ITT_JOIN(_N_(name),_t)* ITTNOTIFY_NAME(name) = ITT_VERSIONIZE(ITT_JOIN(_N_(name),_init)); ITT_EXTERN_C_END #define __ITT_INTERNAL_INIT #include "ittnotify_static.h" @@ -191,8 +256,10 @@ static __itt_group_alias group_alias[] = { #pragma pack(pop) #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if _MSC_VER #pragma warning(push) #pragma warning(disable: 4054) /* warning C4054: 'type cast' : from function pointer 'XXX' to data pointer 'void *' */ +#endif #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ static __itt_api_info api_list[] = { @@ -214,52 +281,10 @@ static __itt_api_info api_list[] = { }; #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if _MSC_VER #pragma 
warning(pop) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/* private, init thread info item. used for internal purposes */ -static __itt_thread_info init_thread_info = { - (const char*)NULL, /* nameA */ -#if defined(UNICODE) || defined(_UNICODE) - (const wchar_t*)NULL, /* nameW */ -#else - (void*)NULL, /* nameW */ #endif - 0, /* tid */ - __itt_thread_normal, /* state */ - 0, /* extra1 */ - (void*)NULL, /* extra2 */ - (__itt_thread_info*)NULL /* next */ -}; - -/* private, NULL domain item. used for internal purposes */ -static __itt_domain null_domain = { - 0, /* flags: disabled by default */ - (const char*)NULL, /* nameA */ -#if defined(UNICODE) || defined(_UNICODE) - (const wchar_t*)NULL, /* nameW */ -#else - (void*)NULL, /* nameW */ -#endif - 0, /* extra1 */ - (void*)NULL, /* extra2 */ - (__itt_domain*)NULL /* next */ -}; - -/* private, NULL string handle item. used for internal purposes */ -static __itt_string_handle null_string_handle = { - (const char*)NULL, /* strA */ -#if defined(UNICODE) || defined(_UNICODE) - (const wchar_t*)NULL, /* strW */ -#else - (void*)NULL, /* strW */ -#endif - 0, /* extra1 */ - (void*)NULL, /* extra2 */ - (__itt_string_handle*)NULL /* next */ -}; - -static const char dll_path[PATH_MAX] = { 0 }; +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /* static part descriptor which handles. all notification api attributes. */ __itt_global _N_(_ittapi_global) = { @@ -271,18 +296,23 @@ __itt_global _N_(_ittapi_global) = { MUTEX_INITIALIZER, /* mutex */ NULL, /* dynamic library handle */ NULL, /* error_handler */ - (const char**)&dll_path, /* dll_path_ptr */ + NULL, /* dll_path_ptr */ (__itt_api_info*)&api_list, /* api_list_ptr */ NULL, /* next __itt_global */ - (__itt_thread_info*)&init_thread_info, /* thread_list */ - (__itt_domain*)&null_domain, /* domain_list */ - (__itt_string_handle*)&null_string_handle, /* string_list */ - __itt_collection_normal /* collection state */ + NULL, /* thread_list */ + NULL, /* domain_list */ + NULL, /* string_list */ + __itt_collection_uninitialized, /* collection state */ + NULL, /* counter_list */ + 0, /* ipt_collect_events */ + NULL, /* histogram_list */ + NULL /* counter_metadata_list */ }; typedef void (__itt_api_init_t)(__itt_global*, __itt_group_id); typedef void (__itt_api_fini_t)(__itt_global*); +static __itt_domain dummy_domain; /* ========================================================================= */ #ifdef ITT_NOTIFY_EXT_REPORT @@ -290,210 +320,766 @@ ITT_EXTERN_C void _N_(error_handler)(__itt_error_code, va_list args); #endif /* ITT_NOTIFY_EXT_REPORT */ #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if _MSC_VER #pragma warning(push) #pragma warning(disable: 4055) /* warning C4055: 'type cast' : from data pointer 'void *' to function pointer 'XXX' */ +#endif #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -static void __itt_report_error(__itt_error_code code, ...) +static void __itt_report_error(int code, ...) 
{ va_list args; va_start(args, code); if (_N_(_ittapi_global).error_handler != NULL) { __itt_error_handler_t* handler = (__itt_error_handler_t*)(size_t)_N_(_ittapi_global).error_handler; - handler(code, args); + handler((__itt_error_code)code, args); } #ifdef ITT_NOTIFY_EXT_REPORT - _N_(error_handler)(code, args); + _N_(error_handler)((__itt_error_code)code, args); #endif /* ITT_NOTIFY_EXT_REPORT */ va_end(args); } +static int __itt_is_collector_available(void); + #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if _MSC_VER #pragma warning(pop) +#endif #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN static __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createW),_init))(const wchar_t* name) { - __itt_domain *h_tail, *h; + __itt_domain *h_tail = NULL, *h = NULL; - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) { - __itt_init_ittlib_name(NULL, __itt_group_all); if (ITTNOTIFY_NAME(domain_createW) && ITTNOTIFY_NAME(domain_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createW),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); return ITTNOTIFY_NAME(domain_createW)(name); + } + else + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return &dummy_domain; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).domain_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameW != NULL && !wcscmp(h->nameW, name)) break; + } + if (h == NULL) + { + NEW_DOMAIN_W(&_N_(_ittapi_global), h, h_tail, name); + } + } + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return h; +} + +static __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createA),_init))(const char* name) +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +static __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_create),_init))(const char* name) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +{ + __itt_domain *h_tail = NULL, *h = NULL; + + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + if (ITTNOTIFY_NAME(domain_createA) && ITTNOTIFY_NAME(domain_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createA),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(domain_createA)(name); + } +#else + if (ITTNOTIFY_NAME(domain_create) && ITTNOTIFY_NAME(domain_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_create),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(domain_create)(name); + } +#endif + else + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#else + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#endif + return &dummy_domain; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).domain_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameA != NULL && !__itt_fstrcmp(h->nameA, name)) break; + } + if (h == NULL) + { + NEW_DOMAIN_A(&_N_(_ittapi_global), h, h_tail, name); + } + } + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return h; +} + +static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(module_load_with_sections),_init))(__itt_module_object* module_obj) +{ + if (!_N_(_ittapi_global).api_initialized && 
_N_(_ittapi_global).thread_list == NULL) + { + __itt_init_ittlib_name(NULL, __itt_group_all); + } + if (ITTNOTIFY_NAME(module_load_with_sections) && ITTNOTIFY_NAME(module_load_with_sections) != ITT_VERSIONIZE(ITT_JOIN(_N_(module_load_with_sections),_init))) + { + if(module_obj != NULL) + { + module_obj->version = ITT_MODULE_OBJECT_VERSION; + ITTNOTIFY_NAME(module_load_with_sections)(module_obj); + } + } +} + +static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(module_unload_with_sections),_init))(__itt_module_object* module_obj) +{ + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) + { + __itt_init_ittlib_name(NULL, __itt_group_all); + } + if (ITTNOTIFY_NAME(module_unload_with_sections) && ITTNOTIFY_NAME(module_unload_with_sections) != ITT_VERSIONIZE(ITT_JOIN(_N_(module_unload_with_sections),_init))) + { + if(module_obj != NULL) + { + module_obj->version = ITT_MODULE_OBJECT_VERSION; + ITTNOTIFY_NAME(module_unload_with_sections)(module_obj); + } + } +} + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createW),_init))(const wchar_t* name) +{ + __itt_string_handle *h_tail = NULL, *h = NULL; + + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { + if (ITTNOTIFY_NAME(string_handle_createW) && ITTNOTIFY_NAME(string_handle_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createW),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(string_handle_createW)(name); + } + else + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).string_list; h != NULL; h_tail = h, h = h->next) + { + if (h->strW != NULL && !wcscmp(h->strW, name)) break; + } + if (h == NULL) + { + NEW_STRING_HANDLE_W(&_N_(_ittapi_global), h, h_tail, name); + } + } + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return h; +} + +static __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createA),_init))(const char* name) +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +static __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_create),_init))(const char* name) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +{ + __itt_string_handle *h_tail = NULL, *h = NULL; + + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + if (ITTNOTIFY_NAME(string_handle_createA) && ITTNOTIFY_NAME(string_handle_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createA),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(string_handle_createA)(name); + } +#else + if (ITTNOTIFY_NAME(string_handle_create) && ITTNOTIFY_NAME(string_handle_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_create),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(string_handle_create)(name); + } +#endif + else + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#else + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#endif + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).string_list; h != NULL; h_tail = h, h = h->next) + { + if (h->strA != NULL && !__itt_fstrcmp(h->strA, name)) break; 
+ } + if (h == NULL) + { + NEW_STRING_HANDLE_A(&_N_(_ittapi_global), h, h_tail, name); + } + } + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return h; +} + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createW),_init))(const wchar_t *name, const wchar_t *domain) +{ + __itt_counter_info_t *h_tail = NULL, *h = NULL; + __itt_metadata_type type = __itt_metadata_u64; + + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { + if (ITTNOTIFY_NAME(counter_createW) && ITTNOTIFY_NAME(counter_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createW),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_createW)(name, domain); + } + else + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).counter_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameW != NULL && h->type == (int)type && !wcscmp(h->nameW, name) && ((h->domainW == NULL && domain == NULL) || + (h->domainW != NULL && domain != NULL && !wcscmp(h->domainW, domain)))) break; + + } + if (h == NULL) + { + NEW_COUNTER_W(&_N_(_ittapi_global), h, h_tail, name, domain, type); + } + } + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_counter)h; +} + +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createA),_init))(const char *name, const char *domain) +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create),_init))(const char *name, const char *domain) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +{ + __itt_counter_info_t *h_tail = NULL, *h = NULL; + __itt_metadata_type type = __itt_metadata_u64; + + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + if (ITTNOTIFY_NAME(counter_createA) && ITTNOTIFY_NAME(counter_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createA),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_createA)(name, domain); + } +#else + if (ITTNOTIFY_NAME(counter_create) && ITTNOTIFY_NAME(counter_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_create)(name, domain); + } +#endif + else + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#else + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#endif + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).counter_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameA != NULL && h->type == (int)type && !__itt_fstrcmp(h->nameA, name) && ((h->domainA == NULL && domain == NULL) || + (h->domainA != NULL && domain != NULL && !__itt_fstrcmp(h->domainA, domain)))) break; + } + if (h == NULL) + { + NEW_COUNTER_A(&_N_(_ittapi_global), h, h_tail, name, domain, type); + } + } + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_counter)h; +} + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_typedW),_init))(const wchar_t *name, const wchar_t *domain, __itt_metadata_type type) +{ + 
__itt_counter_info_t *h_tail = NULL, *h = NULL; + + if (name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { + if (ITTNOTIFY_NAME(counter_create_typedW) && ITTNOTIFY_NAME(counter_create_typedW) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_typedW),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_create_typedW)(name, domain, type); + } + else + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return NULL; + } } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).counter_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameW != NULL && h->type == (int)type && !wcscmp(h->nameW, name) && ((h->domainW == NULL && domain == NULL) || + (h->domainW != NULL && domain != NULL && !wcscmp(h->domainW, domain)))) break; + + } + if (h == NULL) + { + NEW_COUNTER_W(&_N_(_ittapi_global), h, h_tail, name, domain, type); + } + } + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_counter)h; +} + +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_typedA),_init))(const char *name, const char *domain, __itt_metadata_type type) +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_typed),_init))(const char *name, const char *domain, __itt_metadata_type type) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +{ + __itt_counter_info_t *h_tail = NULL, *h = NULL; if (name == NULL) - return _N_(_ittapi_global).domain_list; + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + if (ITTNOTIFY_NAME(counter_create_typedA) && ITTNOTIFY_NAME(counter_create_typedA) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_typedA),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_create_typedA)(name, domain, type); + } +#else + if (ITTNOTIFY_NAME(counter_create_typed) && ITTNOTIFY_NAME(counter_create_typed) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_typed),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_create_typed)(name, domain, type); + } +#endif + else + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#else + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#endif + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).counter_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameA != NULL && h->type == (int)type && !__itt_fstrcmp(h->nameA, name) && ((h->domainA == NULL && domain == NULL) || + (h->domainA != NULL && domain != NULL && !__itt_fstrcmp(h->domainA, domain)))) break; + } + if (h == NULL) + { + NEW_COUNTER_A(&_N_(_ittapi_global), h, h_tail, name, domain, type); + } + } + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_counter)h; +} + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static __itt_histogram* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(histogram_createW),_init))(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type) +{ + __itt_histogram *h_tail = NULL, *h = NULL; + + if (domain == NULL || name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { + if 
(ITTNOTIFY_NAME(histogram_createW) && ITTNOTIFY_NAME(histogram_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(histogram_createW),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(histogram_createW)(domain, name, x_type, y_type); + } + else + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).histogram_list; h != NULL; h_tail = h, h = h->next) + { + if (h->domain == NULL) continue; + else if (h->domain == domain && h->nameW != NULL && !wcscmp(h->nameW, name)) break; + } + if (h == NULL) + { + NEW_HISTOGRAM_W(&_N_(_ittapi_global), h, h_tail, domain, name, x_type, y_type); + } + } + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_histogram*)h; +} + +static __itt_histogram* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(histogram_createA),_init))(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type) +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +static __itt_histogram* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(histogram_create),_init))(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +{ + __itt_histogram *h_tail = NULL, *h = NULL; + + if (domain == NULL || name == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + if (ITTNOTIFY_NAME(histogram_createA) && ITTNOTIFY_NAME(histogram_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(histogram_createA),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(histogram_createA)(domain, name, x_type, y_type); + } +#else + if (ITTNOTIFY_NAME(histogram_create) && ITTNOTIFY_NAME(histogram_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(histogram_create),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(histogram_create)(domain, name, x_type, y_type); + } +#endif + else + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#else + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#endif + return NULL; + } + } + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).histogram_list; h != NULL; h_tail = h, h = h->next) + { + if (h->domain == NULL) continue; + else if (h->domain == domain && h->nameA != NULL && !__itt_fstrcmp(h->nameA, name)) break; + } + if (h == NULL) + { + NEW_HISTOGRAM_A(&_N_(_ittapi_global), h, h_tail, domain, name, x_type, y_type); + } + } + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_histogram*)h; +} + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createW_v3),_init))(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type) +{ + __itt_counter_info_t *h_tail = NULL, *h = NULL; + + if (name == NULL || domain == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) + { + if (ITTNOTIFY_NAME(counter_createW_v3) && ITTNOTIFY_NAME(counter_createW_v3) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createW_v3),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_createW_v3)(domain, name, type); + } + else + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return NULL; + } + } + if 
(__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).counter_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameW != NULL && h->type == (int)type && !wcscmp(h->nameW, name) && ((h->domainW == NULL && domain->nameW == NULL) || + (h->domainW != NULL && domain->nameW != NULL && !wcscmp(h->domainW, domain->nameW)))) break; - ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); - for (h_tail = NULL, h = _N_(_ittapi_global).domain_list; h != NULL; h_tail = h, h = h->next) - if (h->nameW != NULL && !wcscmp(h->nameW, name)) - break; - if (h == NULL) { - NEW_DOMAIN_W(&_N_(_ittapi_global),h,h_tail,name); + } + if (h == NULL) + { + NEW_COUNTER_W(&_N_(_ittapi_global),h,h_tail,name,domain->nameW,type); + } } __itt_mutex_unlock(&_N_(_ittapi_global).mutex); - return h; + return (__itt_counter)h; } -static __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createA),_init))(const char* name) +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createA_v3),_init))(const __itt_domain* domain, const char* name, __itt_metadata_type type) #else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -static __itt_domain* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(domain_create),_init))(const char* name) +static __itt_counter ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_v3),_init))(const __itt_domain* domain, const char* name, __itt_metadata_type type) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ { - __itt_domain *h_tail, *h; + __itt_counter_info_t *h_tail = NULL, *h = NULL; - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (name == NULL || domain == NULL) + { + return NULL; + } + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).api_initialized) { - __itt_init_ittlib_name(NULL, __itt_group_all); #if ITT_PLATFORM==ITT_PLATFORM_WIN - if (ITTNOTIFY_NAME(domain_createA) && ITTNOTIFY_NAME(domain_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_createA),_init))) - return ITTNOTIFY_NAME(domain_createA)(name); + if (ITTNOTIFY_NAME(counter_createA_v3) && ITTNOTIFY_NAME(counter_createA_v3) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_createA_v3),_init))) + { + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_createA_v3)(domain, name, type); + } #else - if (ITTNOTIFY_NAME(domain_create) && ITTNOTIFY_NAME(domain_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(domain_create),_init))) - return ITTNOTIFY_NAME(domain_create)(name); + if (ITTNOTIFY_NAME(counter_create_v3) && ITTNOTIFY_NAME(counter_create_v3) != ITT_VERSIONIZE(ITT_JOIN(_N_(counter_create_v3),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return ITTNOTIFY_NAME(counter_create_v3)(domain, name, type); + } +#endif + else + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#else + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); #endif + return NULL; + } } - - if (name == NULL) - return _N_(_ittapi_global).domain_list; - - ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); - for (h_tail = NULL, h = _N_(_ittapi_global).domain_list; h != NULL; h_tail = h, h = h->next) - if (h->nameA != NULL && !__itt_fstrcmp(h->nameA, name)) - break; - if (h == NULL) { - NEW_DOMAIN_A(&_N_(_ittapi_global),h,h_tail,name); + if (__itt_is_collector_available()) + { + for (h_tail = NULL, h = _N_(_ittapi_global).counter_list; h != NULL; h_tail = h, h = h->next) + { + if (h->nameA != NULL && h->type == (int)type && !__itt_fstrcmp(h->nameA, name) && ((h->domainA == NULL && domain->nameA == NULL) 
|| + (h->domainA != NULL && domain->nameA != NULL && !__itt_fstrcmp(h->domainA, domain->nameA)))) break; + } + if (h == NULL) + { + NEW_COUNTER_A(&_N_(_ittapi_global),h,h_tail,name,domain->nameA,type); + } } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); - return h; + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return (__itt_counter)h; } -#if ITT_PLATFORM==ITT_PLATFORM_WIN -static __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createW),_init))(const wchar_t* name) +static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(bind_context_metadata_to_counter),_init))(__itt_counter counter, size_t length, __itt_context_metadata* metadata) { - __itt_string_handle *h_tail, *h; + __itt_counter_metadata *h_tail = NULL, *h = NULL; - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (counter == NULL || length == 0 || metadata == NULL) { - __itt_init_ittlib_name(NULL, __itt_group_all); - if (ITTNOTIFY_NAME(string_handle_createW) && ITTNOTIFY_NAME(string_handle_createW) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createW),_init))) - return ITTNOTIFY_NAME(string_handle_createW)(name); + return; } - if (name == NULL) - return _N_(_ittapi_global).string_list; - ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); - for (h_tail = NULL, h = _N_(_ittapi_global).string_list; h != NULL; h_tail = h, h = h->next) - if (h->strW != NULL && !wcscmp(h->strW, name)) - break; - if (h == NULL) { - NEW_STRING_HANDLE_W(&_N_(_ittapi_global),h,h_tail,name); - } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); - return h; -} - -static __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createA),_init))(const char* name) -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -static __itt_string_handle* ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_create),_init))(const char* name) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -{ - __itt_string_handle *h_tail, *h; - - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (_N_(_ittapi_global).api_initialized) { - __itt_init_ittlib_name(NULL, __itt_group_all); + if (ITTNOTIFY_NAME(bind_context_metadata_to_counter) && ITTNOTIFY_NAME(bind_context_metadata_to_counter) != ITT_VERSIONIZE(ITT_JOIN(_N_(bind_context_metadata_to_counter),_init))) + { + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + ITTNOTIFY_NAME(bind_context_metadata_to_counter)(counter, length, metadata); + } + else + { #if ITT_PLATFORM==ITT_PLATFORM_WIN - if (ITTNOTIFY_NAME(string_handle_createA) && ITTNOTIFY_NAME(string_handle_createA) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_createA),_init))) - return ITTNOTIFY_NAME(string_handle_createA)(name); + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); #else - if (ITTNOTIFY_NAME(string_handle_create) && ITTNOTIFY_NAME(string_handle_create) != ITT_VERSIONIZE(ITT_JOIN(_N_(string_handle_create),_init))) - return ITTNOTIFY_NAME(string_handle_create)(name); + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); #endif + return; + } } + if (__itt_is_collector_available()) + { + size_t item; + char* str_valueA = NULL; +#if ITT_PLATFORM==ITT_PLATFORM_WIN + wchar_t* str_valueW = NULL; +#endif + unsigned long long value = 0; + __itt_context_type type = __itt_context_unknown; - if (name == NULL) - return _N_(_ittapi_global).string_list; - - ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); - for (h_tail = NULL, h = _N_(_ittapi_global).string_list; h != NULL; h_tail = h, h = h->next) - if (h->strA != NULL && 
!__itt_fstrcmp(h->strA, name)) - break; - if (h == NULL) { - NEW_STRING_HANDLE_A(&_N_(_ittapi_global),h,h_tail,name); + for (item = 0; item < length; item++) + { + type = metadata[item].type; + for (h_tail = NULL, h = _N_(_ittapi_global).counter_metadata_list; h != NULL; h_tail = h, h = h->next) + { + if (h->counter != NULL && h->counter == counter && h->type == type) break; + } + if (h == NULL && counter != NULL && type != __itt_context_unknown) + { + if (type == __itt_context_nameA || type == __itt_context_deviceA || type == __itt_context_unitsA || type == __itt_context_pci_addrA) + { + str_valueA = (char*)(metadata[item].value); + NEW_COUNTER_METADATA_STR_A(&_N_(_ittapi_global),h,h_tail,counter,type,str_valueA); + } +#if ITT_PLATFORM==ITT_PLATFORM_WIN + else if (type == __itt_context_nameW || type == __itt_context_deviceW || type == __itt_context_unitsW || type == __itt_context_pci_addrW) + { + str_valueW = (wchar_t*)(metadata[item].value); + NEW_COUNTER_METADATA_STR_W(&_N_(_ittapi_global),h,h_tail,counter,type,str_valueW); + } +#endif + else if (type >= __itt_context_tid && type <= __itt_context_cpu_cycles_flag) + { + value = *(unsigned long long*)(metadata[item].value); + NEW_COUNTER_METADATA_NUM(&_N_(_ittapi_global),h,h_tail,counter,type,value); + } + } + } } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); - return h; + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); } - /* -------------------------------------------------------------------------- */ static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(pause),_init))(void) { - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) { __itt_init_ittlib_name(NULL, __itt_group_all); - if (ITTNOTIFY_NAME(pause) && ITTNOTIFY_NAME(pause) != ITT_VERSIONIZE(ITT_JOIN(_N_(pause),_init))) - { - ITTNOTIFY_NAME(pause)(); - return; - } } - _N_(_ittapi_global).state = __itt_collection_paused; + if (ITTNOTIFY_NAME(pause) && ITTNOTIFY_NAME(pause) != ITT_VERSIONIZE(ITT_JOIN(_N_(pause),_init))) + { + ITTNOTIFY_NAME(pause)(); + } } static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(resume),_init))(void) { - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) { __itt_init_ittlib_name(NULL, __itt_group_all); - if (ITTNOTIFY_NAME(resume) && ITTNOTIFY_NAME(resume) != ITT_VERSIONIZE(ITT_JOIN(_N_(resume),_init))) - { - ITTNOTIFY_NAME(resume)(); - return; - } } - _N_(_ittapi_global).state = __itt_collection_normal; + if (ITTNOTIFY_NAME(resume) && ITTNOTIFY_NAME(resume) != ITT_VERSIONIZE(ITT_JOIN(_N_(resume),_init))) + { + ITTNOTIFY_NAME(resume)(); + } } -#if ITT_PLATFORM==ITT_PLATFORM_WIN -static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))(const wchar_t* name) +static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(pause_scoped),_init))(__itt_collection_scope scope) { - TIDT tid = __itt_thread_id(); - __itt_thread_info *h_tail, *h; + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) + { + __itt_init_ittlib_name(NULL, __itt_group_all); + } + if (ITTNOTIFY_NAME(pause_scoped) && ITTNOTIFY_NAME(pause_scoped) != ITT_VERSIONIZE(ITT_JOIN(_N_(pause_scoped),_init))) + { + ITTNOTIFY_NAME(pause_scoped)(scope); + } +} - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) +static void ITTAPI 
ITT_VERSIONIZE(ITT_JOIN(_N_(resume_scoped),_init))(__itt_collection_scope scope) +{ + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) { __itt_init_ittlib_name(NULL, __itt_group_all); - if (ITTNOTIFY_NAME(thread_set_nameW) && ITTNOTIFY_NAME(thread_set_nameW) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))) - { - ITTNOTIFY_NAME(thread_set_nameW)(name); - return; - } } + if (ITTNOTIFY_NAME(resume_scoped) && ITTNOTIFY_NAME(resume_scoped) != ITT_VERSIONIZE(ITT_JOIN(_N_(resume_scoped),_init))) + { + ITTNOTIFY_NAME(resume_scoped)(scope); + } +} - __itt_mutex_lock(&_N_(_ittapi_global).mutex); - for (h_tail = NULL, h = _N_(_ittapi_global).thread_list; h != NULL; h_tail = h, h = h->next) - if (h->tid == tid) - break; - if (h == NULL) { - NEW_THREAD_INFO_W(&_N_(_ittapi_global), h, h_tail, tid, __itt_thread_normal, name); +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))(const wchar_t* name) +{ + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) + { + __itt_init_ittlib_name(NULL, __itt_group_all); } - else + if (ITTNOTIFY_NAME(thread_set_nameW) && ITTNOTIFY_NAME(thread_set_nameW) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))) { - h->nameW = name ? _wcsdup(name) : NULL; + ITTNOTIFY_NAME(thread_set_nameW)(name); } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); } static int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_setW),_init))(const wchar_t* name, int namelen) { - namelen = namelen; + (void)namelen; ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameW),_init))(name); return 0; } @@ -503,51 +1089,34 @@ static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init))(const c static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init))(const char* name) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ { - TIDT tid = __itt_thread_id(); - __itt_thread_info *h_tail, *h; - - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) { __itt_init_ittlib_name(NULL, __itt_group_all); -#if ITT_PLATFORM==ITT_PLATFORM_WIN - if (ITTNOTIFY_NAME(thread_set_nameA) && ITTNOTIFY_NAME(thread_set_nameA) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init))) - { - ITTNOTIFY_NAME(thread_set_nameA)(name); - return; - } -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - if (ITTNOTIFY_NAME(thread_set_name) && ITTNOTIFY_NAME(thread_set_name) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init))) - { - ITTNOTIFY_NAME(thread_set_name)(name); - return; - } -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ } - - __itt_mutex_lock(&_N_(_ittapi_global).mutex); - for (h_tail = NULL, h = _N_(_ittapi_global).thread_list; h != NULL; h_tail = h, h = h->next) - if (h->tid == tid) - break; - if (h == NULL) { - NEW_THREAD_INFO_A(&_N_(_ittapi_global), h, h_tail, tid, __itt_thread_normal, name); +#if ITT_PLATFORM==ITT_PLATFORM_WIN + if (ITTNOTIFY_NAME(thread_set_nameA) && ITTNOTIFY_NAME(thread_set_nameA) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init))) + { + ITTNOTIFY_NAME(thread_set_nameA)(name); } - else +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + if (ITTNOTIFY_NAME(thread_set_name) && ITTNOTIFY_NAME(thread_set_name) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init))) { - h->nameA = name ? 
__itt_fstrdup(name) : NULL; + ITTNOTIFY_NAME(thread_set_name)(name); } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ } #if ITT_PLATFORM==ITT_PLATFORM_WIN static int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_setA),_init))(const char* name, int namelen) { - namelen = namelen; + (void)namelen; ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_nameA),_init))(name); return 0; } #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ static int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_set),_init))(const char* name, int namelen) { + (void)namelen; ITT_VERSIONIZE(ITT_JOIN(_N_(thread_set_name),_init))(name); return 0; } @@ -555,32 +1124,14 @@ static int ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_name_set),_init))(const char* static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thread_ignore),_init))(void) { - TIDT tid = __itt_thread_id(); - __itt_thread_info *h_tail, *h; - - if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list->tid == 0) + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) { __itt_init_ittlib_name(NULL, __itt_group_all); - if (ITTNOTIFY_NAME(thread_ignore) && ITTNOTIFY_NAME(thread_ignore) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_ignore),_init))) - { - ITTNOTIFY_NAME(thread_ignore)(); - return; - } - } - - __itt_mutex_lock(&_N_(_ittapi_global).mutex); - for (h_tail = NULL, h = _N_(_ittapi_global).thread_list; h != NULL; h_tail = h, h = h->next) - if (h->tid == tid) - break; - if (h == NULL) { - static const char* name = "unknown"; - NEW_THREAD_INFO_A(&_N_(_ittapi_global), h, h_tail, tid, __itt_thread_ignored, name); } - else + if (ITTNOTIFY_NAME(thread_ignore) && ITTNOTIFY_NAME(thread_ignore) != ITT_VERSIONIZE(ITT_JOIN(_N_(thread_ignore),_init))) { - h->state = __itt_thread_ignored; + ITTNOTIFY_NAME(thread_ignore)(); } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); } static void ITTAPI ITT_VERSIONIZE(ITT_JOIN(_N_(thr_ignore),_init))(void) @@ -659,7 +1210,7 @@ static const char* __itt_fsplit(const char* s, const char* sep, const char** out /* This function return value of env variable that placed into static buffer. * !!! The same static buffer is used for subsequent calls. !!! - * This was done to aviod dynamic allocation for few calls. + * This was done to avoid dynamic allocation for few calls. * Actually we need this function only four times. */ static const char* __itt_get_env_var(const char* name) @@ -683,7 +1234,7 @@ static const char* __itt_get_env_var(const char* name) } else { - /* If environment variable is empty, GetEnvirornmentVariables() + /* If environment variable is empty, GetEnvironmentVariables() * returns zero (number of characters (not including terminating null), * and GetLastError() returns ERROR_SUCCESS. 
*/ DWORD err = GetLastError(); @@ -697,12 +1248,12 @@ static const char* __itt_get_env_var(const char* name) char* env = getenv(name); if (env != NULL) { - size_t len = strlen(env); + size_t len = __itt_fstrnlen(env, MAX_ENV_VALUE_SIZE); size_t max_len = MAX_ENV_VALUE_SIZE - (size_t)(env_value - env_buff); if (len < max_len) { const char* ret = (const char*)env_value; - strncpy(env_value, env, len + 1); + __itt_fstrcpyn(env_value, max_len, env, len + 1); env_value += len + 1; return ret; } else @@ -720,10 +1271,25 @@ static const char* __itt_get_lib_name(void) #ifdef __ANDROID__ if (lib_name == NULL) { - const char* const system_wide_marker_filename = "/data/local/tmp/com.intel.itt.collector_lib"; - int itt_marker_file_fd = open(system_wide_marker_filename, O_RDONLY); + +#if ITT_ARCH==ITT_ARCH_IA32 || ITT_ARCH==ITT_ARCH_ARM + const char* const marker_filename = "com.intel.itt.collector_lib_32"; +#else + const char* const marker_filename = "com.intel.itt.collector_lib_64"; +#endif + + char system_wide_marker_filename[PATH_MAX] = {0}; + int itt_marker_file_fd = -1; ssize_t res = 0; + res = snprintf(system_wide_marker_filename, PATH_MAX - 1, "%s%s", "/data/local/tmp/", marker_filename); + if (res < 0) + { + ITT_ANDROID_LOGE("Unable to concatenate marker file string."); + return lib_name; + } + itt_marker_file_fd = open(system_wide_marker_filename, O_RDONLY); + if (itt_marker_file_fd == -1) { const pid_t my_pid = getpid(); @@ -733,7 +1299,13 @@ static const char* __itt_get_lib_name(void) int cmdline_fd = 0; ITT_ANDROID_LOGI("Unable to open system-wide marker file."); - snprintf(cmdline_path, PATH_MAX - 1, "/proc/%d/cmdline", my_pid); + res = snprintf(cmdline_path, PATH_MAX - 1, "/proc/%d/cmdline", my_pid); + if (res < 0) + { + ITT_ANDROID_LOGE("Unable to get cmdline path string."); + return lib_name; + } + ITT_ANDROID_LOGI("CMD file: %s\n", cmdline_path); cmdline_fd = open(cmdline_path, O_RDONLY); if (cmdline_fd == -1) @@ -759,7 +1331,13 @@ static const char* __itt_get_lib_name(void) return lib_name; } ITT_ANDROID_LOGI("Package name: %s\n", package_name); - snprintf(app_sandbox_file, PATH_MAX - 1, "/data/data/%s/com.intel.itt.collector_lib", package_name); + res = snprintf(app_sandbox_file, PATH_MAX - 1, "/data/data/%s/%s", package_name, marker_filename); + if (res < 0) + { + ITT_ANDROID_LOGE("Unable to concatenate marker file string."); + return lib_name; + } + ITT_ANDROID_LOGI("Lib marker file name: %s\n", app_sandbox_file); itt_marker_file_fd = open(app_sandbox_file, O_RDONLY); if (itt_marker_file_fd == -1) @@ -790,7 +1368,7 @@ static const char* __itt_get_lib_name(void) ITT_ANDROID_LOGE("Unable to close %s file!", itt_marker_file_fd); return lib_name; } - ITT_ANDROID_LOGI("Set env"); + ITT_ANDROID_LOGI("Set env %s to %s", ITT_TO_STR(LIB_VAR_NAME), itt_lib_name); res = setenv(ITT_TO_STR(LIB_VAR_NAME), itt_lib_name, 0); if (res == -1) { @@ -798,7 +1376,7 @@ static const char* __itt_get_lib_name(void) return lib_name; } lib_name = __itt_get_env_var(ITT_TO_STR(LIB_VAR_NAME)); - ITT_ANDROID_LOGI("ITT Lib path from env: %s", itt_lib_name); + ITT_ANDROID_LOGI("ITT Lib path from env: %s", lib_name); } } #endif @@ -806,9 +1384,8 @@ static const char* __itt_get_lib_name(void) return lib_name; } -#ifndef min -#define min(a,b) (a) < (b) ? (a) : (b) -#endif /* min */ +/* Avoid clashes with std::min */ +#define __itt_min(a,b) ((a) < (b) ? 
(a) : (b)) static __itt_group_id __itt_get_groups(void) { @@ -824,8 +1401,9 @@ static __itt_group_id __itt_get_groups(void) const char* chunk; while ((group_str = __itt_fsplit(group_str, ",; ", &chunk, &len)) != NULL) { - __itt_fstrcpyn(gr, chunk, sizeof(gr) - 1); - gr[min(len, (int)(sizeof(gr) - 1))] = 0; + int min_len = __itt_min(len, (int)(sizeof(gr) - 1)); + __itt_fstrcpyn(gr, sizeof(gr) - 1, chunk, min_len); + gr[min_len] = 0; for (i = 0; group_list[i].name != NULL; i++) { @@ -856,6 +1434,8 @@ static __itt_group_id __itt_get_groups(void) return res; } +#undef __itt_min + static int __itt_lib_version(lib_t lib) { if (lib == NULL) @@ -880,33 +1460,55 @@ static void __itt_reinit_all_pointers(void) static void __itt_nullify_all_pointers(void) { int i; - /* Nulify all pointers except domain_create and string_handle_create */ + /* Nulify all pointers except domain_create, string_handle_create and counter_create */ for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++) *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func; } +static int __itt_is_collector_available(void) +{ + int is_available; + + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + if (_N_(_ittapi_global).state == __itt_collection_uninitialized) + { + _N_(_ittapi_global).state = (NULL == __itt_get_lib_name()) ? __itt_collection_collector_absent : __itt_collection_collector_exists; + } + is_available = (_N_(_ittapi_global).state == __itt_collection_collector_exists || + _N_(_ittapi_global).state == __itt_collection_init_successful); + __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + return is_available; +} + #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if _MSC_VER #pragma warning(push) #pragma warning(disable: 4054) /* warning C4054: 'type cast' : from function pointer 'XXX' to data pointer 'void *' */ #pragma warning(disable: 4055) /* warning C4055: 'type cast' : from data pointer 'void *' to function pointer 'XXX' */ +#endif #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_EXTERN_C void _N_(fini_ittlib)(void) { - __itt_api_fini_t* __itt_api_fini_ptr; + __itt_api_fini_t* __itt_api_fini_ptr = NULL; static volatile TIDT current_thread = 0; if (_N_(_ittapi_global).api_initialized) { - __itt_mutex_lock(&_N_(_ittapi_global).mutex); + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); if (_N_(_ittapi_global).api_initialized) { if (current_thread == 0) { - current_thread = __itt_thread_id(); - __itt_api_fini_ptr = (__itt_api_fini_t*)(size_t)__itt_get_proc(_N_(_ittapi_global).lib, "__itt_api_fini"); + if (PTHREAD_SYMBOLS) current_thread = __itt_thread_id(); + if (_N_(_ittapi_global).lib != NULL) + { + __itt_api_fini_ptr = (__itt_api_fini_t*)(size_t)__itt_get_proc(_N_(_ittapi_global).lib, "__itt_api_fini"); + } if (__itt_api_fini_ptr) + { __itt_api_fini_ptr(&_N_(_ittapi_global)); + } __itt_nullify_all_pointers(); @@ -919,8 +1521,80 @@ ITT_EXTERN_C void _N_(fini_ittlib)(void) current_thread = 0; } } - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + } +} + +/* !!! this function should be called under mutex lock !!! 
*/ +static void __itt_free_allocated_resources(void) +{ + __itt_string_handle* current_string = _N_(_ittapi_global).string_list; + while (current_string != NULL) + { + __itt_string_handle* tmp = current_string->next; + free((char*)current_string->strA); +#if ITT_PLATFORM==ITT_PLATFORM_WIN + free((wchar_t*)current_string->strW); +#endif + free(current_string); + current_string = tmp; + } + _N_(_ittapi_global).string_list = NULL; + + __itt_domain* current_domain = _N_(_ittapi_global).domain_list; + while (current_domain != NULL) + { + __itt_domain* tmp = current_domain->next; + free((char*)current_domain->nameA); +#if ITT_PLATFORM==ITT_PLATFORM_WIN + free((wchar_t*)current_domain->nameW); +#endif + free(current_domain); + current_domain = tmp; + } + _N_(_ittapi_global).domain_list = NULL; + + __itt_counter_info_t* current_couter = _N_(_ittapi_global).counter_list; + while (current_couter != NULL) + { + __itt_counter_info_t* tmp = current_couter->next; + free((char*)current_couter->nameA); + free((char*)current_couter->domainA); +#if ITT_PLATFORM==ITT_PLATFORM_WIN + free((wchar_t*)current_couter->nameW); + free((wchar_t*)current_couter->domainW); +#endif + free(current_couter); + current_couter = tmp; + } + _N_(_ittapi_global).counter_list = NULL; + + __itt_histogram* current_histogram = _N_(_ittapi_global).histogram_list; + while (current_histogram != NULL) + { + __itt_histogram* tmp = current_histogram->next; + free((char*)current_histogram->nameA); +#if ITT_PLATFORM==ITT_PLATFORM_WIN + free((wchar_t*)current_histogram->nameW); +#endif + free(current_histogram); + current_histogram = tmp; + } + _N_(_ittapi_global).histogram_list = NULL; + + + __itt_counter_metadata* current_counter_metadata = _N_(_ittapi_global).counter_metadata_list; + while (current_counter_metadata != NULL) + { + __itt_counter_metadata* tmp = current_counter_metadata->next; + free((char*)current_counter_metadata->str_valueA); +#if ITT_PLATFORM==ITT_PLATFORM_WIN + free((wchar_t*)current_counter_metadata->str_valueW); +#endif + free(current_counter_metadata); + current_counter_metadata = tmp; } + _N_(_ittapi_global).counter_metadata_list = NULL; } ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_groups) @@ -942,23 +1616,27 @@ ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_grou { if (current_thread == 0) { - current_thread = __itt_thread_id(); - _N_(_ittapi_global).thread_list->tid = current_thread; + if (PTHREAD_SYMBOLS) current_thread = __itt_thread_id(); if (lib_name == NULL) + { lib_name = __itt_get_lib_name(); + } groups = __itt_get_groups(); - if (groups != __itt_group_none || lib_name != NULL) + if (DL_SYMBOLS && (groups != __itt_group_none || lib_name != NULL)) { _N_(_ittapi_global).lib = __itt_load_lib((lib_name == NULL) ? 
ittnotify_lib_name : lib_name); if (_N_(_ittapi_global).lib != NULL) { + _N_(_ittapi_global).state = __itt_collection_init_successful; __itt_api_init_t* __itt_api_init_ptr; int lib_version = __itt_lib_version(_N_(_ittapi_global).lib); - switch (lib_version) { + switch (lib_version) + { case 0: groups = __itt_group_legacy; + ITT_ATTRIBUTE_FALLTHROUGH; case 1: /* Fill all pointers from dynamic library */ for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++) @@ -1011,6 +1689,8 @@ ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_grou } else { + _N_(_ittapi_global).state = __itt_collection_init_fail; + __itt_free_allocated_resources(); __itt_nullify_all_pointers(); __itt_report_error(__itt_error_no_module, lib_name, @@ -1024,6 +1704,7 @@ ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_grou } else { + _N_(_ittapi_global).state = __itt_collection_collector_absent; __itt_nullify_all_pointers(); } _N_(_ittapi_global).api_initialized = 1; @@ -1034,15 +1715,19 @@ ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_grou } #ifndef ITT_SIMPLE_INIT - __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); #endif /* ITT_SIMPLE_INIT */ } /* Evaluating if any function ptr is non empty and it's in init_groups */ for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++) + { if (*_N_(_ittapi_global).api_list_ptr[i].func_ptr != _N_(_ittapi_global).api_list_ptr[i].null_func && _N_(_ittapi_global).api_list_ptr[i].group & init_groups) + { return 1; + } + } return 0; } @@ -1054,6 +1739,69 @@ ITT_EXTERN_C __itt_error_handler_t* _N_(set_error_handler)(__itt_error_handler_t } #if ITT_PLATFORM==ITT_PLATFORM_WIN +#if _MSC_VER #pragma warning(pop) +#endif #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/** __itt_mark_pt_region functions marks region of interest + * region parameter defines different regions. + * 0 <= region < 8 */ + +#if defined(ITT_API_IPT_SUPPORT) && (ITT_PLATFORM==ITT_PLATFORM_WIN || ITT_PLATFORM==ITT_PLATFORM_POSIX) && !defined(__ANDROID__) +void __itt_pt_mark(__itt_pt_region region); +void __itt_pt_mark_event(__itt_pt_region region); +#endif + +ITT_EXTERN_C void _N_(mark_pt_region_begin)(__itt_pt_region region) +{ +#if defined(ITT_API_IPT_SUPPORT) && (ITT_PLATFORM==ITT_PLATFORM_WIN || ITT_PLATFORM==ITT_PLATFORM_POSIX) && !defined(__ANDROID__) + if (_N_(_ittapi_global).ipt_collect_events == 1) + { + __itt_pt_mark_event(2*region); + } + else + { + __itt_pt_mark(2*region); + } +#else + (void)region; +#endif +} + +ITT_EXTERN_C void _N_(mark_pt_region_end)(__itt_pt_region region) +{ +#if defined(ITT_API_IPT_SUPPORT) && (ITT_PLATFORM==ITT_PLATFORM_WIN || ITT_PLATFORM==ITT_PLATFORM_POSIX) && !defined(__ANDROID__) + if (_N_(_ittapi_global).ipt_collect_events == 1) + { + __itt_pt_mark_event(2*region + 1); + } + else + { + __itt_pt_mark(2*region + 1); + } +#else + (void)region; +#endif +} + +ITT_EXTERN_C __itt_collection_state (_N_(get_collection_state))(void) +{ + if (!_N_(_ittapi_global).api_initialized && _N_(_ittapi_global).thread_list == NULL) + { + __itt_init_ittlib_name(NULL, __itt_group_all); + } + return _N_(_ittapi_global).state; +} + +/* !!! should be called from the library destructor !!! 
+ * this function destroys the mutex and frees resources + * allocated by ITT API static part + */ +ITT_EXTERN_C void (_N_(release_resources))(void) +{ + ITT_MUTEX_INIT_AND_LOCK(_N_(_ittapi_global)); + __itt_free_allocated_resources(); + if (PTHREAD_SYMBOLS) __itt_mutex_unlock(&_N_(_ittapi_global).mutex); + ITT_MUTEX_DESTROY(_N_(_ittapi_global)); +} diff --git a/src/tbb/src/tbb/tools_api/ittnotify_static.h b/src/tbb/src/tbb/tools_api/ittnotify_static.h index ba90501fb..7f5729140 100644 --- a/src/tbb/src/tbb/tools_api/ittnotify_static.h +++ b/src/tbb/src/tbb/tools_api/ittnotify_static.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #include "ittnotify_config.h" @@ -43,6 +39,9 @@ ITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name), (ITT_FORM ITT_STUB(ITTAPI, __itt_domain*, domain_create, (const char *name), (ITT_FORMAT name), domain_create, __itt_group_structure, "\"%s\"") #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, module_load_with_sections, (__itt_module_object* module_obj), (ITT_FORMAT module_obj), module_load_with_sections, __itt_group_module, "%p") +ITT_STUBV(ITTAPI, void, module_unload_with_sections, (__itt_module_object* module_obj), (ITT_FORMAT module_obj), module_unload_with_sections, __itt_group_module, "%p") + #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char *name), (ITT_FORMAT name), string_handle_createA, __itt_group_structure, "\"%s\"") ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name), (ITT_FORMAT name), string_handle_createW, __itt_group_structure, "\"%S\"") @@ -50,8 +49,25 @@ ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *na ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create, (const char *name), (ITT_FORMAT name), string_handle_create, __itt_group_structure, "\"%s\"") #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_createA, __itt_group_counter, "\"%s\", \"%s\"") +ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), counter_createW, __itt_group_counter, "\"%s\", \"%s\"") +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_create, __itt_group_counter, "\"%s\", \"%s\"") +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedA, (const char *name, const char *domain, __itt_metadata_type type), (ITT_FORMAT name, domain, type), counter_create_typedA, __itt_group_counter, "\"%s\", \"%s\", %d") +ITT_STUB(ITTAPI, __itt_counter, counter_create_typedW, (const wchar_t *name, const wchar_t *domain, __itt_metadata_type type), (ITT_FORMAT name, domain, type), counter_create_typedW, __itt_group_counter, "\"%s\", \"%s\", %d") +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create_typed, (const char *name, const char *domain, __itt_metadata_type type), (ITT_FORMAT name, domain, type), counter_create_typed, __itt_group_counter, "\"%s\", \"%s\", %d") +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + + ITT_STUBV(ITTAPI, void, pause, (void), (ITT_NO_PARAMS), pause, __itt_group_control | __itt_group_legacy, "no args") ITT_STUBV(ITTAPI, void, resume, (void), (ITT_NO_PARAMS), resume, __itt_group_control | __itt_group_legacy, "no args") +ITT_STUBV(ITTAPI, void, pause_scoped, (__itt_collection_scope scope), (ITT_FORMAT scope), pause_scoped, __itt_group_control, "%d") +ITT_STUBV(ITTAPI, void, resume_scoped, (__itt_collection_scope scope), (ITT_FORMAT scope), resume_scoped, __itt_group_control, "%d") #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name), (ITT_FORMAT name), thread_set_nameA, __itt_group_thread, "\"%s\"") @@ -68,12 +84,31 @@ ITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen), (IT ITT_STUB(LIBITTAPI, int, thr_name_set, (const 
char *name, int namelen), (ITT_FORMAT name, namelen), thr_name_set, __itt_group_thread | __itt_group_legacy, "\"%s\", %d") #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUBV(LIBITTAPI, void, thr_ignore, (void), (ITT_NO_PARAMS), thr_ignore, __itt_group_thread | __itt_group_legacy, "no args") + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_histogram*, histogram_createA, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type), (ITT_FORMAT domain, name, x_type, y_type), histogram_createA, __itt_group_structure, "%p, \"%s\", %d, %d") +ITT_STUB(ITTAPI, __itt_histogram*, histogram_createW, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type), (ITT_FORMAT domain, name, x_type, y_type), histogram_createW, __itt_group_structure, "%p, \"%s\", %d, %d") +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_histogram*, histogram_create, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type), (ITT_FORMAT domain, name, x_type, y_type), histogram_create, __itt_group_structure, "%p, \"%s\", %d, %d") +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + + #if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUB(ITTAPI, __itt_counter, counter_createA_v3, (const __itt_domain* domain, const char *name, __itt_metadata_type type), (ITT_FORMAT domain, name, type), counter_createA_v3, __itt_group_counter, "%p, \"%s\", %d") +ITT_STUB(ITTAPI, __itt_counter, counter_createW_v3, (const __itt_domain* domain, const wchar_t *name, __itt_metadata_type type), (ITT_FORMAT domain, name, type), counter_createW_v3, __itt_group_counter, "%p, \"%s\", %d") +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_counter, counter_create_v3, (const __itt_domain* domain, const char *name, __itt_metadata_type type), (ITT_FORMAT domain, name, type), counter_create_v3, __itt_group_counter, "%p, \"%s\", %d") +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +ITT_STUBV(ITTAPI, void, bind_context_metadata_to_counter, (__itt_counter counter, size_t length, __itt_context_metadata* metadata), (ITT_FORMAT counter, length, metadata), bind_context_metadata_to_counter, __itt_group_structure, "%p, %lu, %p") + #endif /* __ITT_INTERNAL_BODY */ ITT_STUBV(ITTAPI, void, enable_attach, (void), (ITT_NO_PARAMS), enable_attach, __itt_group_all, "no args") #else /* __ITT_INTERNAL_INIT */ +ITT_STUBV(ITTAPI, void, detach, (void), (ITT_NO_PARAMS), detach, __itt_group_control | __itt_group_legacy, "no args") + #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x") ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\", \"%S\", %x") @@ -177,6 +212,8 @@ ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain), ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name), (ITT_FORMAT domain, name), counter_inc_v3, __itt_group_structure, "%p, %p") ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long value), (ITT_FORMAT domain, name, value), counter_inc_delta_v3, __itt_group_structure, "%p, %p, %lu") 
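+/* The *_v3 counter entries below are the domain-scoped flavor of the counter
+ * API: they take an __itt_domain plus an __itt_string_handle rather than raw
+ * C strings. As elsewhere in this file, each ITT_STUBV line is an X-macro
+ * record; ittnotify_static.c re-includes this header under different
+ * ITT_STUB/ITT_STUBV definitions to emit the function pointers and their
+ * _init thunks. A rough usage sketch, assuming a domain and handle were
+ * created with __itt_domain_create()/__itt_string_handle_create() (both
+ * declared earlier in this table):
+ *
+ *     __itt_counter_inc_v3(domain, handle);           // increment by one
+ *     __itt_counter_dec_delta_v3(domain, handle, 8);  // decrement by eight
+ */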
+ITT_STUBV(ITTAPI, void, counter_dec_v3, (const __itt_domain *domain, __itt_string_handle *name), (ITT_FORMAT domain, name), counter_dec_v3, __itt_group_structure, "%p, %p") +ITT_STUBV(ITTAPI, void, counter_dec_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long value), (ITT_FORMAT domain, name, value), counter_dec_delta_v3, __itt_group_structure, "%p, %p, %lu") ITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope), (ITT_FORMAT domain, id, name, scope), marker, __itt_group_structure, "%p, %lu, %p, %d") @@ -239,21 +276,24 @@ ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (ITT_FORMA #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_frame, frame_create, (const char *domain), (ITT_FORMAT domain), frame_create, __itt_group_frame, "\"%s\"") #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* __ITT_INTERNAL_BODY */ -ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame), (ITT_FORMAT frame), frame_begin, __itt_group_frame, "%p") -ITT_STUBV(ITTAPI, void, frame_end, (__itt_frame frame), (ITT_FORMAT frame), frame_end, __itt_group_frame, "%p") -#ifndef __ITT_INTERNAL_BODY #if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_createA, __itt_group_counter, "\"%s\", \"%s\"") -ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), counter_createW, __itt_group_counter, "\"%s\", \"%s\"") -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_create, __itt_group_counter, "\"%s\", \"%s\"") +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createA, (const char *name), (ITT_FORMAT name), pt_region_createA, __itt_group_structure, "\"%s\"") +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createW, (const wchar_t *name), (ITT_FORMAT name), pt_region_createW, __itt_group_structure, "\"%S\"") +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +ITT_STUB(ITTAPI, __itt_pt_region, pt_region_create, (const char *name), (ITT_FORMAT name), pt_region_create, __itt_group_structure, "\"%s\"") #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* __ITT_INTERNAL_BODY */ -ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id), (ITT_FORMAT id), counter_destroy, __itt_group_counter, "%p") -ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id), (ITT_FORMAT id), counter_inc, __itt_group_counter, "%p") -ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_inc_delta, __itt_group_counter, "%p, %lu") +ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame), (ITT_FORMAT frame), frame_begin, __itt_group_frame, "%p") +ITT_STUBV(ITTAPI, void, frame_end, (__itt_frame frame), (ITT_FORMAT frame), frame_end, __itt_group_frame, "%p") + +ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id), (ITT_FORMAT id), counter_destroy, __itt_group_counter, "%p") +ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id), (ITT_FORMAT id), counter_inc, __itt_group_counter, "%p") +ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_inc_delta, __itt_group_counter, "%p, %lu") +ITT_STUBV(ITTAPI, void, counter_dec, (__itt_counter id), (ITT_FORMAT id), counter_dec, __itt_group_counter, "%p") +ITT_STUBV(ITTAPI, 
void, counter_dec_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_dec_delta, __itt_group_counter, "%p, %lu") +ITT_STUBV(ITTAPI, void, counter_set_value, (__itt_counter id, void *value_ptr), (ITT_FORMAT id, value_ptr), counter_set_value, __itt_group_counter, "%p, %p") +ITT_STUBV(ITTAPI, void, counter_set_value_ex, (__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr), (ITT_FORMAT id, clock_domain, timestamp, value_ptr), counter_set_value_ex, __itt_group_counter, "%p, %p, %llu, %p") #ifndef __ITT_INTERNAL_BODY #if ITT_PLATFORM==ITT_PLATFORM_WIN @@ -323,4 +363,16 @@ ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, in #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* __ITT_INTERNAL_BODY */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +ITT_STUBV(ITTAPI, void, module_loadA, (void *start_addr, void* end_addr, const char *path), (ITT_FORMAT start_addr, end_addr, path), module_loadA, __itt_group_module, "%p, %p, %p") +ITT_STUBV(ITTAPI, void, module_loadW, (void *start_addr, void* end_addr, const wchar_t *path), (ITT_FORMAT start_addr, end_addr, path), module_loadW, __itt_group_module, "%p, %p, %p") +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, module_load, (void *start_addr, void *end_addr, const char *path), (ITT_FORMAT start_addr, end_addr, path), module_load, __itt_group_module, "%p, %p, %p") +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +ITT_STUBV(ITTAPI, void, module_unload, (void *start_addr), (ITT_FORMAT start_addr), module_unload, __itt_group_module, "%p") + +ITT_STUBV(ITTAPI, void, histogram_submit, (__itt_histogram* histogram, size_t length, void* x_data, void* y_data), (ITT_FORMAT histogram, length, x_data, y_data), histogram_submit, __itt_group_structure, "%p, %lu, %p, %p") + +ITT_STUBV(ITTAPI, void, counter_set_value_v3, (__itt_counter counter, void *value_ptr), (ITT_FORMAT counter, value_ptr), counter_set_value_v3, __itt_group_counter, "%p, %p") + #endif /* __ITT_INTERNAL_INIT */ diff --git a/src/tbb/src/tbb/tools_api/ittnotify_types.h b/src/tbb/src/tbb/tools_api/ittnotify_types.h index 7e0cc3492..1c0fded40 100644 --- a/src/tbb/src/tbb/tools_api/ittnotify_types.h +++ b/src/tbb/src/tbb/tools_api/ittnotify_types.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. + Copyright (c) 2005-2022 Intel Corporation - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _ITTNOTIFY_TYPES_H_ @@ -23,25 +19,26 @@ typedef enum ___itt_group_id { - __itt_group_none = 0, - __itt_group_legacy = 1<<0, - __itt_group_control = 1<<1, - __itt_group_thread = 1<<2, - __itt_group_mark = 1<<3, - __itt_group_sync = 1<<4, - __itt_group_fsync = 1<<5, - __itt_group_jit = 1<<6, - __itt_group_model = 1<<7, - __itt_group_splitter_min = 1<<7, - __itt_group_counter = 1<<8, - __itt_group_frame = 1<<9, - __itt_group_stitch = 1<<10, - __itt_group_heap = 1<<11, - __itt_group_splitter_max = 1<<12, - __itt_group_structure = 1<<12, - __itt_group_suppress = 1<<13, - __itt_group_arrays = 1<<14, - __itt_group_all = -1 + __itt_group_none = 0, + __itt_group_legacy = 1<<0, + __itt_group_control = 1<<1, + __itt_group_thread = 1<<2, + __itt_group_mark = 1<<3, + __itt_group_sync = 1<<4, + __itt_group_fsync = 1<<5, + __itt_group_jit = 1<<6, + __itt_group_model = 1<<7, + __itt_group_splitter_min = 1<<7, + __itt_group_counter = 1<<8, + __itt_group_frame = 1<<9, + __itt_group_stitch = 1<<10, + __itt_group_heap = 1<<11, + __itt_group_splitter_max = 1<<12, + __itt_group_structure = 1<<12, + __itt_group_suppress = 1<<13, + __itt_group_arrays = 1<<14, + __itt_group_module = 1<<15, + __itt_group_all = -1 } __itt_group_id; #pragma pack(push, 8) @@ -71,6 +68,7 @@ typedef struct ___itt_group_list { __itt_group_structure, "structure" }, \ { __itt_group_suppress, "suppress" }, \ { __itt_group_arrays, "arrays" }, \ + { __itt_group_module, "module" }, \ { __itt_group_none, NULL } \ } diff --git a/src/tbb/src/tbb/tools_api/legacy/ittnotify.h b/src/tbb/src/tbb/tools_api/legacy/ittnotify.h index 22ec4d147..837bc4800 100644 --- a/src/tbb/src/tbb/tools_api/legacy/ittnotify.h +++ b/src/tbb/src/tbb/tools_api/legacy/ittnotify.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _LEGACY_ITTNOTIFY_H_ @@ -39,11 +35,23 @@ # define ITT_OS_MAC 3 #endif /* ITT_OS_MAC */ +#ifndef ITT_OS_FREEBSD +# define ITT_OS_FREEBSD 4 +#endif /* ITT_OS_FREEBSD */ + +#ifndef ITT_OS_OPENBSD +# define ITT_OS_OPENBSD 5 +#endif /* ITT_OS_OPENBSD */ + #ifndef ITT_OS # if defined WIN32 || defined _WIN32 # define ITT_OS ITT_OS_WIN # elif defined( __APPLE__ ) && defined( __MACH__ ) # define ITT_OS ITT_OS_MAC +# elif defined( __FreeBSD__ ) +# define ITT_OS ITT_OS_FREEBSD +# elif defined( __OpenBSD__ ) +# define ITT_OS ITT_OS_OPENBSD # else # define ITT_OS ITT_OS_LINUX # endif @@ -61,11 +69,23 @@ # define ITT_PLATFORM_MAC 3 #endif /* ITT_PLATFORM_MAC */ +#ifndef ITT_PLATFORM_FREEBSD +# define ITT_PLATFORM_FREEBSD 4 +#endif /* ITT_PLATFORM_FREEBSD */ + +#ifndef ITT_PLATFORM_OPENBSD +# define ITT_PLATFORM_OPENBSD 5 +#endif /* ITT_PLATFORM_OPENBSD */ + #ifndef ITT_PLATFORM # if ITT_OS==ITT_OS_WIN # define ITT_PLATFORM ITT_PLATFORM_WIN # elif ITT_OS==ITT_OS_MAC # define ITT_PLATFORM ITT_PLATFORM_MAC +# elif ITT_OS==ITT_OS_FREEBSD +# define ITT_PLATFORM ITT_PLATFORM_FREEBSD +# elif ITT_OS==ITT_OS_OPENBSD +# define ITT_PLATFORM ITT_PLATFORM_OPENBSD # else # define ITT_PLATFORM ITT_PLATFORM_POSIX # endif @@ -85,40 +105,45 @@ #endif /* UNICODE || _UNICODE */ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#ifndef CDECL +#ifndef ITTAPI_CDECL # if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl +# define ITTAPI_CDECL __cdecl # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define CDECL __attribute__ ((cdecl)) +# if defined _M_IX86 || defined __i386__ +# define ITTAPI_CDECL __attribute__ ((cdecl)) # else /* _M_IX86 || __i386__ */ -# define CDECL /* actual only on x86 platform */ +# define ITTAPI_CDECL /* actual only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ +#endif /* ITTAPI_CDECL */ #ifndef STDCALL # if ITT_PLATFORM==ITT_PLATFORM_WIN # define STDCALL __stdcall # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # if defined _M_IX86 || defined __i386__ -# define STDCALL __attribute__ ((stdcall)) +# define STDCALL __attribute__ ((stdcall)) # else /* _M_IX86 || __i386__ */ # define STDCALL /* supported only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* STDCALL */ -#define ITTAPI CDECL -#define LIBITTAPI CDECL +#define ITTAPI ITTAPI_CDECL +#define LIBITTAPI ITTAPI_CDECL /* TODO: Temporary for compatibility! 
*/ -#define ITTAPI_CALL CDECL -#define LIBITTAPI_CALL CDECL +#define ITTAPI_CALL ITTAPI_CDECL +#define LIBITTAPI_CALL ITTAPI_CDECL #if ITT_PLATFORM==ITT_PLATFORM_WIN /* use __forceinline (VC++ specific) */ -#define ITT_INLINE __forceinline +#if defined(__MINGW32__) && !defined(__cplusplus) +#define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__)) +#else +#define ITT_INLINE static __forceinline +#endif /* __MINGW32__ */ + #define ITT_INLINE_ATTRIBUTE /* nothing */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /* @@ -127,11 +152,12 @@ * if no optimization level was specified. */ #ifdef __STRICT_ANSI__ -#define ITT_INLINE static inline +#define ITT_INLINE static +#define ITT_INLINE_ATTRIBUTE __attribute__((unused)) #else /* __STRICT_ANSI__ */ #define ITT_INLINE static inline +#define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused)) #endif /* __STRICT_ANSI__ */ -#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @endcond */ @@ -169,20 +195,20 @@ #define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) #define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_VOID_D0(n,d) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_VOID_D1(n,d,x) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_VOID_D2(n,d,x,y) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) -#define ITTNOTIFY_DATA_D0(n,d) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_DATA_D1(n,d,x) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_DATA_D2(n,d,x,y) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) +#define ITTNOTIFY_VOID_D0(n,d) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_VOID_D1(n,d,x) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_VOID_D2(n,d,x,y) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? 
(void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) +#define ITTNOTIFY_DATA_D0(n,d) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) +#define ITTNOTIFY_DATA_D1(n,d,x) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) +#define ITTNOTIFY_DATA_D2(n,d,x,y) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) +#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) +#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) +#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) +#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) #ifdef ITT_STUB #undef ITT_STUB @@ -219,7 +245,7 @@ extern "C" { * only pauses tracing and analyzing memory access. * It does not pause tracing or analyzing threading APIs. * . - * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE: + * - Intel(R) VTune(TM) Profiler: * - Does continue to record when new threads are started. * . * - Other effects: @@ -232,25 +258,33 @@ extern "C" { void ITTAPI __itt_pause(void); /** @brief Resume collection */ void ITTAPI __itt_resume(void); +/** @brief Detach collection */ +void ITTAPI __itt_detach(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, pause, (void)) ITT_STUBV(ITTAPI, void, resume, (void)) +ITT_STUBV(ITTAPI, void, detach, (void)) #define __itt_pause ITTNOTIFY_VOID(pause) #define __itt_pause_ptr ITTNOTIFY_NAME(pause) #define __itt_resume ITTNOTIFY_VOID(resume) #define __itt_resume_ptr ITTNOTIFY_NAME(resume) +#define __itt_detach ITTNOTIFY_VOID(detach) +#define __itt_detach_ptr ITTNOTIFY_NAME(detach) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_pause() #define __itt_pause_ptr 0 #define __itt_resume() #define __itt_resume_ptr 0 +#define __itt_detach() +#define __itt_detach_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_pause_ptr 0 #define __itt_resume_ptr 0 +#define __itt_detach_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ #endif /* _ITTNOTIFY_H_ */ @@ -438,7 +472,7 @@ ITT_STUBV(ITTAPI, void, sync_set_name, (void *addr, const char *objtype, con * @param[in] objname - null-terminated object name string. If NULL, no name will be assigned * to the object -- you can use the __itt_sync_rename call later to assign * the name - * @param[in] typelen, namelen - a lenght of string for appropriate objtype and objname parameter + * @param[in] typelen, namelen - a length of string for appropriate objtype and objname parameter * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the * exact semantics of how prepare/acquired/releasing calls work. 
* @return __itt_err upon failure (name or namelen being null,name and namelen mismatched) @@ -947,9 +981,9 @@ ITT_STUB(ITTAPI, __itt_frame, frame_create, (const char *domain)) #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ -/** @brief Record an frame begin occurrence. */ +/** @brief Record a frame begin occurrence. */ void ITTAPI __itt_frame_begin(__itt_frame frame); -/** @brief Record an frame end occurrence. */ +/** @brief Record a frame end occurrence. */ void ITTAPI __itt_frame_end (__itt_frame frame); /** @cond exclude_from_documentation */ diff --git a/src/tbb/src/tbb/tools_api/prototype/ittnotify.h b/src/tbb/src/tbb/tools_api/prototype/ittnotify.h deleted file mode 100644 index 612a95288..000000000 --- a/src/tbb/src/tbb/tools_api/prototype/ittnotify.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef _PROTOTYPE_ITTNOTIFY_H_ -#define _PROTOTYPE_ITTNOTIFY_H_ - -/** - * @file - * @brief Prototype User API functions and types - */ - -/** @cond exclude_from_documentation */ -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM_MAC -# define ITT_PLATFORM_MAC 3 -#endif /* ITT_PLATFORM_MAC */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# elif ITT_OS==ITT_OS_MAC -# define ITT_PLATFORM ITT_PLATFORM_MAC -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif -#endif /* ITT_PLATFORM */ - -#if defined(_UNICODE) && !defined(UNICODE) -#define UNICODE -#endif - -#include <stddef.h> -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <tchar.h> -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#include <stdint.h> -#if defined(UNICODE) || defined(_UNICODE) -#include <wchar.h> -#endif /* UNICODE || _UNICODE */ -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#ifndef CDECL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define CDECL __attribute__ ((cdecl)) -# else /* _M_IX86 || __i386__ */ -# define CDECL /* actual only on x86 platform */ -# endif /* _M_IX86 || __i386__ */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define STDCALL __stdcall -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# if defined _M_IX86 || defined __i386__ -# define STDCALL __attribute__ ((stdcall)) -# else /* _M_IX86 || __i386__ */ -# define STDCALL /* supported only on x86 platform */ -# endif /* _M_IX86 || __i386__ */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#define ITTAPI CDECL -#define LIBITTAPI CDECL - -/* TODO: Temporary for compatibility! */ -#define ITTAPI_CALL CDECL -#define LIBITTAPI_CALL CDECL - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -/* use __forceinline (VC++ specific) */ -#define ITT_INLINE __forceinline -#define ITT_INLINE_ATTRIBUTE /* nothing */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -/* - * Generally, functions are not inlined unless optimization is specified. - * For functions declared inline, this attribute inlines the function even - * if no optimization level was specified. 
- */ -#ifdef __STRICT_ANSI__ -#define ITT_INLINE static -#else /* __STRICT_ANSI__ */ -#define ITT_INLINE static inline -#endif /* __STRICT_ANSI__ */ -#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline, unused)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -/** @endcond */ - -/** @cond exclude_from_documentation */ -/* Helper macro for joining tokens */ -#define ITT_JOIN_AUX(p,n) p##n -#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) - -#ifdef ITT_MAJOR -#undef ITT_MAJOR -#endif -#ifdef ITT_MINOR -#undef ITT_MINOR -#endif -#define ITT_MAJOR 3 -#define ITT_MINOR 0 - -/* Standard versioning of a token with major and minor version numbers */ -#define ITT_VERSIONIZE(x) \ - ITT_JOIN(x, \ - ITT_JOIN(_, \ - ITT_JOIN(ITT_MAJOR, \ - ITT_JOIN(_, ITT_MINOR)))) - -#ifndef INTEL_ITTNOTIFY_PREFIX -# define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -# define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) -#define ITTNOTIFY_NAME(n) ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))) - -#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) - -#define ITTNOTIFY_VOID_D0(n,d) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_VOID_D1(n,d,x) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_VOID_D2(n,d,x,y) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_VOID_D3(n,d,x,y,z) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) -#define ITTNOTIFY_DATA_D0(n,d) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) -#define ITTNOTIFY_DATA_D1(n,d,x) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) -#define ITTNOTIFY_DATA_D2(n,d,x,y) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) -#define ITTNOTIFY_DATA_D3(n,d,x,y,z) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) -#define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) -#define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) -#define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 
0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) - -#ifdef ITT_STUB -#undef ITT_STUB -#endif -#ifdef ITT_STUBV -#undef ITT_STUBV -#endif -#define ITT_STUBV(api,type,name,args) \ - typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ - extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); -#define ITT_STUB ITT_STUBV -/** @endcond */ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * @defgroup prototype Prototype API - * @{ - * @} - */ - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* _PROTOTYPE_ITTNOTIFY_H_ */ diff --git a/src/tbb/src/tbb/version.cpp b/src/tbb/src/tbb/version.cpp new file mode 100644 index 000000000..ca113372f --- /dev/null +++ b/src/tbb/src/tbb/version.cpp @@ -0,0 +1,26 @@ +/* + Copyright (c) 2020-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "oneapi/tbb/version.h" + +extern "C" int TBB_runtime_interface_version() { + return TBB_INTERFACE_VERSION; +} + +extern "C" const char* TBB_runtime_version() { + static const char version_str[] = TBB_VERSION_STRING; + return version_str; +} diff --git a/src/tbb/src/tbb/waiters.h b/src/tbb/src/tbb/waiters.h new file mode 100644 index 000000000..8ed431f85 --- /dev/null +++ b/src/tbb/src/tbb/waiters.h @@ -0,0 +1,214 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef _TBB_waiters_H +#define _TBB_waiters_H + +#include "oneapi/tbb/detail/_task.h" +#include "scheduler_common.h" +#include "arena.h" +#include "threading_control.h" + +namespace tbb { +namespace detail { +namespace r1 { + +inline d1::task* get_self_recall_task(arena_slot& slot); + +class waiter_base { +public: + waiter_base(arena& a, int yields_multiplier = 1) : my_arena(a), my_backoff(int(a.my_num_slots), yields_multiplier) {} + + bool pause() { + if (my_backoff.pause()) { + my_arena.out_of_work(); + return true; + } + + return false; + } + + void reset_wait() { + my_backoff.reset_wait(); + } + +protected: + arena& my_arena; + stealing_loop_backoff my_backoff; +}; + +class outermost_worker_waiter : public waiter_base { +public: + using waiter_base::waiter_base; + + bool continue_execution(arena_slot& slot, d1::task*& t) const { + __TBB_ASSERT(t == nullptr, nullptr); + + if (is_worker_should_leave(slot)) { + if (!governor::hybrid_cpu()) { + static constexpr std::chrono::microseconds worker_wait_leave_duration(1000); + static_assert(worker_wait_leave_duration > std::chrono::steady_clock::duration(1), "Clock resolution is not enough for measured interval."); + + for (auto t1 = std::chrono::steady_clock::now(), t2 = t1; + std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1) < worker_wait_leave_duration; + t2 = std::chrono::steady_clock::now()) + { + if (!my_arena.is_empty() && !my_arena.is_recall_requested()) { + return true; + } + + if (my_arena.my_threading_control->is_any_other_client_active()) { + break; + } + d0::yield(); + } + } + // Leave dispatch loop + return false; + } + + t = get_self_recall_task(slot); + return true; + } + + void pause(arena_slot&) { + waiter_base::pause(); + } + + + d1::wait_context* wait_ctx() { + return nullptr; + } + + static bool postpone_execution(d1::task&) { + return false; + } + +private: + using base_type = waiter_base; + + bool is_worker_should_leave(arena_slot& slot) const { + bool is_top_priority_arena = my_arena.is_top_priority(); + bool is_task_pool_empty = slot.task_pool.load(std::memory_order_relaxed) == EmptyTaskPool; + + if (is_top_priority_arena) { + // Worker in most priority arena do not leave arena, until all work in task_pool is done + if (is_task_pool_empty && my_arena.is_recall_requested()) { + return true; + } + } else { + if (my_arena.is_recall_requested()) { + // If worker has work in task pool, we must notify other threads, + // because can appear missed wake up of other threads + if (!is_task_pool_empty) { + my_arena.advertise_new_work<arena::wakeup>(); + } + return true; + } + } + + return false; + } +}; + +class sleep_waiter : public waiter_base { +protected: + using waiter_base::waiter_base; + + template <typename Pred> + void sleep(std::uintptr_t uniq_tag, Pred wakeup_condition) { + my_arena.get_waiting_threads_monitor().wait<thread_control_monitor::thread_context>(wakeup_condition, + market_context{uniq_tag, &my_arena}); + reset_wait(); + } +}; + +class external_waiter : public sleep_waiter { +public: + external_waiter(arena& a, d1::wait_context& wo) + : sleep_waiter(a, /*yields_multiplier*/10), my_wait_ctx(wo) + {} + + bool continue_execution(arena_slot& slot, d1::task*& t) const { + __TBB_ASSERT(t == nullptr, nullptr); + if (!my_wait_ctx.continue_execution()) + return false; + t = get_self_recall_task(slot); + return true; + } + + void pause(arena_slot&) { + if (!sleep_waiter::pause()) { + return; + } + + auto wakeup_condition = [&] { return !my_arena.is_empty() || !my_wait_ctx.continue_execution(); }; + 
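+        // Park this thread on the arena's waiting-threads monitor until the
+        // arena has work again or the awaited wait_context stops requiring
+        // execution; the address of my_wait_ctx is passed as the unique tag
+        // identifying this particular wait.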
+ sleep(std::uintptr_t(&my_wait_ctx), wakeup_condition); + } + + d1::wait_context* wait_ctx() { + return &my_wait_ctx; + } + + static bool postpone_execution(d1::task&) { + return false; + } + +private: + d1::wait_context& my_wait_ctx; +}; + +#if __TBB_RESUMABLE_TASKS + +class coroutine_waiter : public sleep_waiter { +public: + using sleep_waiter::sleep_waiter; + + bool continue_execution(arena_slot& slot, d1::task*& t) const { + __TBB_ASSERT(t == nullptr, nullptr); + t = get_self_recall_task(slot); + return true; + } + + void pause(arena_slot& slot) { + if (!sleep_waiter::pause()) { + return; + } + + suspend_point_type* sp = slot.default_task_dispatcher().m_suspend_point; + + auto wakeup_condition = [&] { return !my_arena.is_empty() || sp->m_is_owner_recalled.load(std::memory_order_relaxed); }; + + sleep(std::uintptr_t(sp), wakeup_condition); + } + + d1::wait_context* wait_ctx() { + return nullptr; + } + + static bool postpone_execution(d1::task& t) { + return task_accessor::is_resume_task(t); + } +}; + +#endif // __TBB_RESUMABLE_TASKS + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#endif // _TBB_waiters_H diff --git a/src/tbb/src/tbb/win32-tbb-export.def b/src/tbb/src/tbb/win32-tbb-export.def deleted file mode 100644 index 024024d74..000000000 --- a/src/tbb/src/tbb/win32-tbb-export.def +++ /dev/null @@ -1,28 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -EXPORTS - -#define __TBB_SYMBOL( sym ) sym -#if _M_ARM -#include "winrt-tbb-export.lst" -#else -#include "win32-tbb-export.lst" -#endif - - diff --git a/src/tbb/src/tbb/win32-tbb-export.lst b/src/tbb/src/tbb/win32-tbb-export.lst deleted file mode 100644 index 47db18110..000000000 --- a/src/tbb/src/tbb/win32-tbb-export.lst +++ /dev/null @@ -1,333 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. 
Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -#include "tbb/tbb_config.h" - -// Assembly-language support that is called directly by clients -// __TBB_SYMBOL( __TBB_machine_cmpswp1 ) -// __TBB_SYMBOL( __TBB_machine_cmpswp2 ) -// __TBB_SYMBOL( __TBB_machine_cmpswp4 ) -__TBB_SYMBOL( __TBB_machine_cmpswp8 ) -// __TBB_SYMBOL( __TBB_machine_fetchadd1 ) -// __TBB_SYMBOL( __TBB_machine_fetchadd2 ) -// __TBB_SYMBOL( __TBB_machine_fetchadd4 ) -__TBB_SYMBOL( __TBB_machine_fetchadd8 ) -// __TBB_SYMBOL( __TBB_machine_fetchstore1 ) -// __TBB_SYMBOL( __TBB_machine_fetchstore2 ) -// __TBB_SYMBOL( __TBB_machine_fetchstore4 ) -__TBB_SYMBOL( __TBB_machine_fetchstore8 ) -__TBB_SYMBOL( __TBB_machine_store8 ) -__TBB_SYMBOL( __TBB_machine_load8 ) -__TBB_SYMBOL( __TBB_machine_trylockbyte ) -__TBB_SYMBOL( __TBB_machine_try_lock_elided ) -__TBB_SYMBOL( __TBB_machine_unlock_elided ) -__TBB_SYMBOL( __TBB_machine_is_in_transaction ) - -// cache_aligned_allocator.cpp -__TBB_SYMBOL( ?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z ) -__TBB_SYMBOL( ?NFS_GetLineSize@internal@tbb@@YAIXZ ) -__TBB_SYMBOL( ?NFS_Free@internal@tbb@@YAXPAX@Z ) -__TBB_SYMBOL( ?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z ) -__TBB_SYMBOL( ?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z ) -__TBB_SYMBOL( ?is_malloc_used_v3@internal@tbb@@YA_NXZ ) - -// task.cpp v3 -__TBB_SYMBOL( ?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBEAAVtask@3@I@Z ) -__TBB_SYMBOL( ?allocate@allocate_child_proxy@internal@tbb@@QBEAAVtask@3@I@Z ) -__TBB_SYMBOL( ?allocate@allocate_continuation_proxy@internal@tbb@@QBEAAVtask@3@I@Z ) -__TBB_SYMBOL( ?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z ) -__TBB_SYMBOL( ?destroy@task_base@internal@interface5@tbb@@SAXAAVtask@4@@Z ) -__TBB_SYMBOL( ?free@allocate_additional_child_of_proxy@internal@tbb@@QBEXAAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_child_proxy@internal@tbb@@QBEXAAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_continuation_proxy@internal@tbb@@QBEXAAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z ) -__TBB_SYMBOL( ?internal_set_ref_count@task@tbb@@AAEXH@Z ) -__TBB_SYMBOL( ?internal_decrement_ref_count@task@tbb@@AAEHXZ ) -__TBB_SYMBOL( ?is_owned_by_current_thread@task@tbb@@QBE_NXZ ) -__TBB_SYMBOL( ?note_affinity@task@tbb@@UAEXG@Z ) -__TBB_SYMBOL( ?resize@affinity_partitioner_base_v3@internal@tbb@@AAEXI@Z ) -__TBB_SYMBOL( ?self@task@tbb@@SAAAV12@XZ ) -__TBB_SYMBOL( ?spawn_and_wait_for_all@task@tbb@@QAEXAAVtask_list@2@@Z ) -__TBB_SYMBOL( ?default_num_threads@task_scheduler_init@tbb@@SAHXZ ) -__TBB_SYMBOL( 
?initialize@task_scheduler_init@tbb@@QAEXHI@Z ) -__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAEXH@Z ) -__TBB_SYMBOL( ?terminate@task_scheduler_init@tbb@@QAEXXZ ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( ?observe@task_scheduler_observer_v3@internal@tbb@@QAEX_N@Z ) -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( ?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ ) -__TBB_SYMBOL( ?internal_initialize@task_arena_base@internal@interface7@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?internal_terminate@task_arena_base@internal@interface7@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?internal_enqueue@task_arena_base@internal@interface7@tbb@@IBEXAAVtask@4@H@Z ) -__TBB_SYMBOL( ?internal_execute@task_arena_base@internal@interface7@tbb@@IBEXAAVdelegate_base@234@@Z ) -__TBB_SYMBOL( ?internal_wait@task_arena_base@internal@interface7@tbb@@IBEXXZ ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -// task_v2.cpp -__TBB_SYMBOL( ?destroy@task@tbb@@QAEXAAV12@@Z ) -#endif - -// exception handling support -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( ?allocate@allocate_root_with_context_proxy@internal@tbb@@QBEAAVtask@3@I@Z ) -__TBB_SYMBOL( ?free@allocate_root_with_context_proxy@internal@tbb@@QBEXAAVtask@3@@Z ) -__TBB_SYMBOL( ?change_group@task@tbb@@QAEXAAVtask_group_context@2@@Z ) -__TBB_SYMBOL( ?is_group_execution_cancelled@task_group_context@tbb@@QBE_NXZ ) -__TBB_SYMBOL( ?cancel_group_execution@task_group_context@tbb@@QAE_NXZ ) -__TBB_SYMBOL( ?reset@task_group_context@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?capture_fp_settings@task_group_context@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?init@task_group_context@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?register_pending_exception@task_group_context@tbb@@QAEXXZ ) -__TBB_SYMBOL( ??1task_group_context@tbb@@QAE@XZ ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( ?set_priority@task_group_context@tbb@@QAEXW4priority_t@2@@Z ) -__TBB_SYMBOL( ?priority@task_group_context@tbb@@QBE?AW4priority_t@2@XZ ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( ?name@captured_exception@tbb@@UBEPBDXZ ) -__TBB_SYMBOL( ?what@captured_exception@tbb@@UBEPBDXZ ) -__TBB_SYMBOL( ??1captured_exception@tbb@@UAE@XZ ) -__TBB_SYMBOL( ?move@captured_exception@tbb@@UAEPAV12@XZ ) -__TBB_SYMBOL( ?destroy@captured_exception@tbb@@UAEXXZ ) -__TBB_SYMBOL( ?set@captured_exception@tbb@@QAEXPBD0@Z ) -__TBB_SYMBOL( ?clear@captured_exception@tbb@@QAEXXZ ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// Symbols for exceptions thrown from TBB -__TBB_SYMBOL( ?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ ) -__TBB_SYMBOL( ?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z ) -__TBB_SYMBOL( ?what@bad_last_alloc@tbb@@UBEPBDXZ ) -__TBB_SYMBOL( ?what@missing_wait@tbb@@UBEPBDXZ ) -__TBB_SYMBOL( ?what@invalid_multiple_scheduling@tbb@@UBEPBDXZ ) -__TBB_SYMBOL( ?what@improper_lock@tbb@@UBEPBDXZ ) -__TBB_SYMBOL( ?what@user_abort@tbb@@UBEPBDXZ ) - -// tbb_misc.cpp -__TBB_SYMBOL( ?assertion_failure@tbb@@YAXPBDH00@Z ) -__TBB_SYMBOL( ?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ ) -__TBB_SYMBOL( ?handle_perror@internal@tbb@@YAXHPBD@Z ) -__TBB_SYMBOL( ?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z ) -__TBB_SYMBOL( ?runtime_warning@internal@tbb@@YAXPBDZZ ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -// tbb_main.cpp -__TBB_SYMBOL( ?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z ) -__TBB_SYMBOL( ?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z ) -__TBB_SYMBOL( ?call_itt_notify_v5@internal@tbb@@YAXHPAX@Z ) -__TBB_SYMBOL( 
?itt_set_sync_name_v3@internal@tbb@@YAXPAXPB_W@Z ) -__TBB_SYMBOL( ?itt_load_pointer_v3@internal@tbb@@YAPAXPBX@Z ) -#if __TBB_ITT_STRUCTURE_API -__TBB_SYMBOL( ?itt_make_task_group_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_K12W4string_index@12@@Z ) -__TBB_SYMBOL( ?itt_metadata_str_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_KW4string_index@12@PBD@Z ) -__TBB_SYMBOL( ?itt_relation_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_KW4itt_relation@12@12@Z ) -__TBB_SYMBOL( ?itt_task_begin_v7@internal@tbb@@YAXW4itt_domain_enum@12@PAX_K12W4string_index@12@@Z ) -__TBB_SYMBOL( ?itt_task_end_v7@internal@tbb@@YAXW4itt_domain_enum@12@@Z ) -#endif - -// pipeline.cpp -__TBB_SYMBOL( ??0pipeline@tbb@@QAE@XZ ) -__TBB_SYMBOL( ??1filter@tbb@@UAE@XZ ) -__TBB_SYMBOL( ??1pipeline@tbb@@UAE@XZ ) -__TBB_SYMBOL( ??_7pipeline@tbb@@6B@ ) -__TBB_SYMBOL( ?add_filter@pipeline@tbb@@QAEXAAVfilter@2@@Z ) -__TBB_SYMBOL( ?clear@pipeline@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?inject_token@pipeline@tbb@@AAEXAAVtask@2@@Z ) -__TBB_SYMBOL( ?run@pipeline@tbb@@QAEXI@Z ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( ?run@pipeline@tbb@@QAEXIAAVtask_group_context@2@@Z ) -#endif -__TBB_SYMBOL( ?process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ ) -__TBB_SYMBOL( ?try_process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ ) -__TBB_SYMBOL( ?set_end_of_input@filter@tbb@@IAEXXZ ) - -// queuing_rw_mutex.cpp -__TBB_SYMBOL( ?internal_construct@queuing_rw_mutex@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAEXAAV23@_N@Z ) -__TBB_SYMBOL( ?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ ) -__TBB_SYMBOL( ?release@scoped_lock@queuing_rw_mutex@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ ) -__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAE_NAAV23@_N@Z ) - -// reader_writer_lock.cpp -__TBB_SYMBOL( ?try_lock_read@reader_writer_lock@interface5@tbb@@QAE_NXZ ) -__TBB_SYMBOL( ?try_lock@reader_writer_lock@interface5@tbb@@QAE_NXZ ) -__TBB_SYMBOL( ?unlock@reader_writer_lock@interface5@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?lock_read@reader_writer_lock@interface5@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?lock@reader_writer_lock@interface5@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?internal_construct@reader_writer_lock@interface5@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_destroy@reader_writer_lock@interface5@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z ) -__TBB_SYMBOL( ?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z ) -__TBB_SYMBOL( ?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXXZ ) - -#if !TBB_NO_LEGACY -// spin_rw_mutex.cpp v2 -__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -#endif - -// spin_rw_mutex v3 -__TBB_SYMBOL( ?internal_construct@spin_rw_mutex_v3@tbb@@AAEXXZ ) 
-__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex_v3@tbb@@AAE_NXZ ) -__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex_v3@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ ) -__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex_v3@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex_v3@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAE_NXZ ) -__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ ) - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( ?internal_construct@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_release@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXAAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( ?internal_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXAAVscoped_lock@1234@_N@Z ) -__TBB_SYMBOL( ?internal_acquire_reader@x86_rtm_rw_mutex@internal@interface8@tbb@@AAEXAAVscoped_lock@1234@_N@Z ) -__TBB_SYMBOL( ?internal_upgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AAE_NAAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( ?internal_downgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AAE_NAAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AAE_NAAVscoped_lock@1234@@Z ) - -// spin_mutex.cpp -__TBB_SYMBOL( ?internal_construct@spin_mutex@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?internal_acquire@scoped_lock@spin_mutex@tbb@@AAEXAAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@spin_mutex@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAE_NAAV23@@Z ) - -// mutex.cpp -__TBB_SYMBOL( ?internal_acquire@scoped_lock@mutex@tbb@@AAEXAAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@mutex@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@mutex@tbb@@AAE_NAAV23@@Z ) -__TBB_SYMBOL( ?internal_construct@mutex@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_destroy@mutex@tbb@@AAEXXZ ) - -// recursive_mutex.cpp -__TBB_SYMBOL( ?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAEXAAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@recursive_mutex@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAE_NAAV23@@Z ) -__TBB_SYMBOL( ?internal_construct@recursive_mutex@tbb@@AAEXXZ ) -__TBB_SYMBOL( ?internal_destroy@recursive_mutex@tbb@@AAEXXZ ) - -// queuing_mutex.cpp -__TBB_SYMBOL( ?internal_construct@queuing_mutex@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?acquire@scoped_lock@queuing_mutex@tbb@@QAEXAAV23@@Z ) -__TBB_SYMBOL( ?release@scoped_lock@queuing_mutex@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_mutex@tbb@@QAE_NAAV23@@Z ) - -// critical_section.cpp -__TBB_SYMBOL( ?internal_construct@critical_section_v4@internal@tbb@@QAEXXZ ) - -#if !TBB_NO_LEGACY -// concurrent_hash_map.cpp -__TBB_SYMBOL( ?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBE_NXZ ) - -// concurrent_queue.cpp v2 -__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base@internal@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base@internal@tbb@@IAEXABV123@@Z ) -__TBB_SYMBOL( ?internal_size@concurrent_queue_base@internal@tbb@@IBEHXZ ) -__TBB_SYMBOL( ??0concurrent_queue_base@internal@tbb@@IAE@I@Z ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base@internal@tbb@@IAE@ABVconcurrent_queue_base@12@@Z ) -__TBB_SYMBOL( ??1concurrent_queue_base@internal@tbb@@MAE@XZ ) -__TBB_SYMBOL( ??1concurrent_queue_iterator_base@internal@tbb@@IAE@XZ ) -__TBB_SYMBOL( 
?internal_pop@concurrent_queue_base@internal@tbb@@IAEXPAX@Z ) -__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAE_NPAX@Z ) -__TBB_SYMBOL( ?internal_push@concurrent_queue_base@internal@tbb@@IAEXPBX@Z ) -__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAE_NPBX@Z ) -__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAEXHI@Z ) -#endif - -// concurrent_queue v3 -__TBB_SYMBOL( ??1concurrent_queue_iterator_base_v3@internal@tbb@@IAE@XZ ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@@Z ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@I@Z ) -__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXABV123@@Z ) -__TBB_SYMBOL( ??0concurrent_queue_base_v3@internal@tbb@@IAE@I@Z ) -__TBB_SYMBOL( ??1concurrent_queue_base_v3@internal@tbb@@MAE@XZ ) -__TBB_SYMBOL( ?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAEXPAX@Z ) -__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAE_NPAX@Z ) -__TBB_SYMBOL( ?internal_abort@concurrent_queue_base_v3@internal@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?internal_push@concurrent_queue_base_v3@internal@tbb@@IAEXPBX@Z ) -__TBB_SYMBOL( ?internal_push_move@concurrent_queue_base_v8@internal@tbb@@IAEXPBX@Z ) -__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAE_NPBX@Z ) -__TBB_SYMBOL( ?internal_push_move_if_not_full@concurrent_queue_base_v8@internal@tbb@@IAE_NPBX@Z ) -__TBB_SYMBOL( ?internal_size@concurrent_queue_base_v3@internal@tbb@@IBEHXZ ) -__TBB_SYMBOL( ?internal_empty@concurrent_queue_base_v3@internal@tbb@@IBE_NXZ ) -__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAEXHI@Z ) -__TBB_SYMBOL( ?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAEXXZ ) -__TBB_SYMBOL( ?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBEXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_base_v3@internal@tbb@@IAEXABV123@@Z ) -__TBB_SYMBOL( ?move_content@concurrent_queue_base_v8@internal@tbb@@IAEXAAV123@@Z ) - -#if !TBB_NO_LEGACY -// concurrent_vector.cpp v2 -__TBB_SYMBOL( ?internal_assign@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z ) -__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base@internal@tbb@@IBEIXZ ) -__TBB_SYMBOL( ?internal_clear@concurrent_vector_base@internal@tbb@@IAEXP6AXPAXI@Z_N@Z ) -__TBB_SYMBOL( ?internal_copy@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z ) -__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base@internal@tbb@@IAEIIIP6AXPAXI@Z@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAEXIIP6AXPAXI@Z@Z ) -__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base@internal@tbb@@IAEPAXIAAI@Z ) -__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base@internal@tbb@@IAEXIII@Z ) -#endif - -// concurrent_vector v3 -__TBB_SYMBOL( ??1concurrent_vector_base_v3@internal@tbb@@IAE@XZ ) -__TBB_SYMBOL( ?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z ) -__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBEIXZ ) -__TBB_SYMBOL( ?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAEIP6AXPAXI@Z@Z ) -__TBB_SYMBOL( ?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z ) -__TBB_SYMBOL( 
?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAEXIIP6AXPAXPBXI@Z1@Z ) -__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAEPAXIAAI@Z ) -__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAEXIII@Z ) -__TBB_SYMBOL( ?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAEPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z ) -__TBB_SYMBOL( ?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAEXAAV123@@Z ) -__TBB_SYMBOL( ?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBEXI@Z ) -__TBB_SYMBOL( ?internal_resize@concurrent_vector_base_v3@internal@tbb@@IAEXIIIPBXP6AXPAXI@ZP6AX10I@Z@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z ) - -// tbb_thread -__TBB_SYMBOL( ?join@tbb_thread_v3@internal@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?detach@tbb_thread_v3@internal@tbb@@QAEXXZ ) -__TBB_SYMBOL( ?internal_start@tbb_thread_v3@internal@tbb@@AAEXP6GIPAX@Z0@Z ) -__TBB_SYMBOL( ?allocate_closure_v3@internal@tbb@@YAPAXI@Z ) -__TBB_SYMBOL( ?free_closure_v3@internal@tbb@@YAXPAX@Z ) -__TBB_SYMBOL( ?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ ) -__TBB_SYMBOL( ?thread_yield_v3@internal@tbb@@YAXXZ ) -__TBB_SYMBOL( ?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z ) -__TBB_SYMBOL( ?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z ) -__TBB_SYMBOL( ?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ ) - -// condition_variable -__TBB_SYMBOL( ?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAATcondvar_impl_t@123@PAVmutex@3@PBVinterval_t@tick_count@3@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/win64-gcc-tbb-export.def b/src/tbb/src/tbb/win64-gcc-tbb-export.def deleted file mode 100644 index 7bd89f675..000000000 --- a/src/tbb/src/tbb/win64-gcc-tbb-export.def +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - - -{ -global: - -#define __TBB_SYMBOL( sym ) sym; -#include "win64-gcc-tbb-export.lst" - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_msg_buf; -get_text_buf; -message_catalog; -print_buf; -irc__get_msg; -irc__print; - -}; - diff --git a/src/tbb/src/tbb/win64-gcc-tbb-export.lst b/src/tbb/src/tbb/win64-gcc-tbb-export.lst deleted file mode 100644 index 941334e34..000000000 --- a/src/tbb/src/tbb/win64-gcc-tbb-export.lst +++ /dev/null @@ -1,375 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_config.h" - -/* cache_aligned_allocator.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal12NFS_AllocateEyyPv ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal15NFS_GetLineSizeEv ) -__TBB_SYMBOL( _ZN3tbb8internal8NFS_FreeEPv ) -__TBB_SYMBOL( _ZN3tbb8internal23allocate_via_handler_v3Ey ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25deallocate_via_handler_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal17is_malloc_used_v3Ev ) - -/* task.cpp v3 */ -__TBB_SYMBOL( _ZN3tbb4task13note_affinityEt ) -__TBB_SYMBOL( _ZN3tbb4task22internal_set_ref_countEi ) -__TBB_SYMBOL( _ZN3tbb4task28internal_decrement_ref_countEv ) -__TBB_SYMBOL( _ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE ) -__TBB_SYMBOL( _ZN3tbb4task4selfEv ) -__TBB_SYMBOL( _ZN3tbb10interface58internal9task_base7destroyERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb4task26is_owned_by_current_threadEv ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8internal19allocate_root_proxy8allocateEy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal28affinity_partitioner_base_v36resizeEj ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal20allocate_child_proxy8allocateEy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal27allocate_continuation_proxy8allocateEy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZTIN3tbb4taskE ) -__TBB_SYMBOL( _ZTSN3tbb4taskE ) -__TBB_SYMBOL( _ZTVN3tbb4taskE ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init19default_num_threadsEv ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEiy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init10initializeEi ) -__TBB_SYMBOL( _ZN3tbb19task_scheduler_init9terminateEv ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( _ZN3tbb8internal26task_scheduler_observer_v37observeEb ) -#endif /* __TBB_SCHEDULER_OBSERVER */ -__TBB_SYMBOL( _ZN3tbb10empty_task7executeEv ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD0Ev ) -__TBB_SYMBOL( _ZN3tbb10empty_taskD1Ev ) -__TBB_SYMBOL( _ZTIN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTSN3tbb10empty_taskE ) -__TBB_SYMBOL( _ZTVN3tbb10empty_taskE ) - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base19internal_initializeEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base18internal_terminateEv ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_enqueueERNS_4taskEx ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base16internal_executeERNS1_13delegate_baseE ) -__TBB_SYMBOL( _ZNK3tbb10interface78internal15task_arena_base13internal_waitEv ) -__TBB_SYMBOL( _ZN3tbb10interface78internal15task_arena_base21internal_current_slotEv ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -__TBB_SYMBOL( _ZN3tbb4task7destroyERS0_ ) -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb4task12change_groupERNS_18task_group_contextE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context28is_group_execution_cancelledEv ) 
-__TBB_SYMBOL( _ZN3tbb18task_group_context22cancel_group_executionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context26register_pending_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context5resetEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context19capture_fp_settingsEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_context4initEv ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD1Ev ) -__TBB_SYMBOL( _ZN3tbb18task_group_contextD2Ev ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( _ZN3tbb18task_group_context12set_priorityENS_10priority_tE ) -__TBB_SYMBOL( _ZNK3tbb18task_group_context8priorityEv ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( _ZNK3tbb18captured_exception4nameEv ) -__TBB_SYMBOL( _ZNK3tbb18captured_exception4whatEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception10throw_selfEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception3setEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exception4moveEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception5clearEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception7destroyEv ) -__TBB_SYMBOL( _ZN3tbb18captured_exception8allocateEPKcS2_ ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD0Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD1Ev ) -__TBB_SYMBOL( _ZN3tbb18captured_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb18captured_exceptionE ) -__TBB_SYMBOL( _ZN3tbb13tbb_exceptionD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTSN3tbb13tbb_exceptionE ) -__TBB_SYMBOL( _ZTVN3tbb13tbb_exceptionE ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -__TBB_SYMBOL( _ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev ) -__TBB_SYMBOL( _ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD0Ev ) -__TBB_SYMBOL( _ZN3tbb14bad_last_allocD1Ev ) -__TBB_SYMBOL( _ZNK3tbb14bad_last_alloc4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTSN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZTVN3tbb14bad_last_allocE ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD0Ev ) -__TBB_SYMBOL( _ZN3tbb12missing_waitD1Ev ) -__TBB_SYMBOL( _ZNK3tbb12missing_wait4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTSN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZTVN3tbb12missing_waitE ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD0Ev ) -__TBB_SYMBOL( _ZN3tbb27invalid_multiple_schedulingD1Ev ) -__TBB_SYMBOL( _ZNK3tbb27invalid_multiple_scheduling4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTSN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZTVN3tbb27invalid_multiple_schedulingE ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD0Ev ) -__TBB_SYMBOL( _ZN3tbb13improper_lockD1Ev ) -__TBB_SYMBOL( _ZNK3tbb13improper_lock4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTSN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZTVN3tbb13improper_lockE ) -__TBB_SYMBOL( _ZN3tbb10user_abortD0Ev ) -__TBB_SYMBOL( _ZN3tbb10user_abortD1Ev ) -__TBB_SYMBOL( _ZNK3tbb10user_abort4whatEv ) -__TBB_SYMBOL( _ZTIN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTSN3tbb10user_abortE ) -__TBB_SYMBOL( _ZTVN3tbb10user_abortE ) - -/* tbb_misc.cpp */ -__TBB_SYMBOL( _ZN3tbb17assertion_failureEPKciS1_S1_ ) -__TBB_SYMBOL( _ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E ) -__TBB_SYMBOL( _ZN3tbb8internal36get_initial_auto_partitioner_divisorEv ) -__TBB_SYMBOL( _ZN3tbb8internal13handle_perrorEiPKc ) -__TBB_SYMBOL( _ZN3tbb8internal15runtime_warningEPKcz ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -/* tbb_main.cpp */ -__TBB_SYMBOL( 
_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv ) -__TBB_SYMBOL( _ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal18call_itt_notify_v5EiPv ) -__TBB_SYMBOL( _ZN3tbb8internal20itt_set_sync_name_v3EPvPKc ) -__TBB_SYMBOL( _ZN3tbb8internal19itt_load_pointer_v3EPKv ) - -/* pipeline.cpp */ -__TBB_SYMBOL( _ZTIN3tbb6filterE ) -__TBB_SYMBOL( _ZTSN3tbb6filterE ) -__TBB_SYMBOL( _ZTVN3tbb6filterE ) -__TBB_SYMBOL( _ZN3tbb6filterD2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipeline10add_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline12inject_tokenERNS_4taskE ) -__TBB_SYMBOL( _ZN3tbb8pipeline13remove_filterERNS_6filterE ) -__TBB_SYMBOL( _ZN3tbb8pipeline3runEy ) // MODIFIED LINUX ENTRY -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( _ZN3tbb8pipeline3runEyRNS_18task_group_contextE ) // MODIFIED LINUX ENTRY -#endif -__TBB_SYMBOL( _ZN3tbb8pipeline5clearEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter12process_itemEv ) -__TBB_SYMBOL( _ZN3tbb19thread_bound_filter16try_process_itemEv ) -__TBB_SYMBOL( _ZTIN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTSN3tbb8pipelineE ) -__TBB_SYMBOL( _ZTVN3tbb8pipelineE ) -__TBB_SYMBOL( _ZN3tbb8pipelineC1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineC2Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD0Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD1Ev ) -__TBB_SYMBOL( _ZN3tbb8pipelineD2Ev ) -__TBB_SYMBOL( _ZN3tbb6filter16set_end_of_inputEv ) - -/* queuing_rw_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b ) - -/* reader_writer_lock.cpp */ -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock13try_lock_readEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock4lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock6unlockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock8try_lockEv ) -__TBB_SYMBOL( _ZN3tbb10interface518reader_writer_lock9lock_readEv ) - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ ) -__TBB_SYMBOL( _ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ ) -#endif - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_constructEv ) -__TBB_SYMBOL( 
_ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_writerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex27internal_try_acquire_writerERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex23internal_acquire_readerERNS2_11scoped_lockEb ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_releaseERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex16internal_upgradeERNS2_11scoped_lockE ) -__TBB_SYMBOL( _ZN3tbb10interface88internal16x86_rtm_rw_mutex18internal_downgradeERNS2_11scoped_lockE ) - -/* spin_rw_mutex v3 */ -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v316internal_upgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v318internal_downgradeEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v323internal_release_writerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv ) -__TBB_SYMBOL( _ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv ) - -/* spin_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb10spin_mutex18internal_constructEv ) - -/* mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb5mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb5mutex18internal_constructEv ) - -/* recursive_mutex.cpp */ -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex16internal_destroyEv ) -__TBB_SYMBOL( _ZN3tbb15recursive_mutex18internal_constructEv ) - -/* QueuingMutex.cpp */ -__TBB_SYMBOL( _ZN3tbb13queuing_mutex18internal_constructEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock7releaseEv ) -__TBB_SYMBOL( _ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ ) - -/* critical_section.cpp */ -__TBB_SYMBOL( _ZN3tbb8internal19critical_section_v418internal_constructEv ) - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -__TBB_SYMBOL( _ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv ) - -/* concurrent_queue.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base12internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base21internal_set_capacityExy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseC2Ey ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal21concurrent_queue_baseD2Ev ) -__TBB_SYMBOL( _ZTIN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZTSN3tbb8internal21concurrent_queue_baseE ) 
-__TBB_SYMBOL( _ZTVN3tbb8internal21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE ) -__TBB_SYMBOL( _ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev ) -__TBB_SYMBOL( _ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv ) -#endif - -/* concurrent_queue v3 */ -/* constructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3C2Ey ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ey ) // MODIFIED LINUX ENTRY -/* destructors */ -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev ) -/* typeinfo */ -__TBB_SYMBOL( _ZTIN3tbb8internal24concurrent_queue_base_v3E ) -__TBB_SYMBOL( _ZTSN3tbb8internal24concurrent_queue_base_v3E ) -/* vtable */ -__TBB_SYMBOL( _ZTVN3tbb8internal24concurrent_queue_base_v3E ) -/* methods */ -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v818internal_push_moveEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v830internal_push_move_if_not_fullEPKv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v314internal_abortEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityExy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv ) -__TBB_SYMBOL( _ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ ) -__TBB_SYMBOL( _ZN3tbb8internal24concurrent_queue_base_v812move_contentERS1_ ) - - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_yPFvPvPKvyE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvyEb ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_ ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_grow_byEyyPFvPvyE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base16internal_reserveEyyy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base18internal_push_backEyRy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEyyPFvPvyE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv ) -#endif - -/* concurrent_vector v3 */ -__TBB_SYMBOL( 
_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_yPFvPvPKvyE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvyE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_ ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEyyPFvPvPKvyES4_ ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEyyy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEyRy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEyyPFvPvPKvyES4_ ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v316internal_compactEyPvPFvS2_yEPFvS2_PKvyE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ ) -__TBB_SYMBOL( _ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEy ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v3D2Ev ) -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEyyyPKvPFvPvyEPFvS4_S3_yE ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEyyPFvPvPKvyES4_ ) // MODIFIED LINUX ENTRY - -/* tbb_thread */ -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v36detachEv ) -__TBB_SYMBOL( _ZN3tbb8internal16thread_get_id_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15free_closure_v3EPv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v34joinEv ) -__TBB_SYMBOL( _ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_ ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal19allocate_closure_v3Ey ) // MODIFIED LINUX ENTRY -__TBB_SYMBOL( _ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_yield_v3Ev ) -__TBB_SYMBOL( _ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE ) - -/* condition_variable */ -__TBB_SYMBOL( _ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE ) -__TBB_SYMBOL( _ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/win64-tbb-export.def b/src/tbb/src/tbb/win64-tbb-export.def deleted file mode 100644 index 6135952d7..000000000 --- a/src/tbb/src/tbb/win64-tbb-export.def +++ /dev/null @@ -1,26 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. 
Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -; This file is organized with a section for each .cpp file. -; Each of these sections is in alphabetical order. - -EXPORTS - -#define __TBB_SYMBOL( sym ) sym -#include "win64-tbb-export.lst" - diff --git a/src/tbb/src/tbb/win64-tbb-export.lst b/src/tbb/src/tbb/win64-tbb-export.lst deleted file mode 100644 index b922c1477..000000000 --- a/src/tbb/src/tbb/win64-tbb-export.lst +++ /dev/null @@ -1,329 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -// This file is organized with a section for each .cpp file. -// Each of these sections is in alphabetical order. 
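For context on the mechanism being deleted here: the .def file above turns the shared .lst into a linker export table by defining __TBB_SYMBOL as an identity macro and then including the list, so a single symbol list can serve several consumers (the .def is run through the C preprocessor before being handed to the linker, as the file itself shows). A minimal sketch of the same pattern, with illustrative names rather than TBB's:

/* mylib-export.lst -- one entry per exported symbol */
EXPORT_SYMBOL( my_function_v1 )
EXPORT_SYMBOL( my_function_v2 )

/* mylib.def -- expands each entry to a bare name for the MSVC linker */
EXPORTS
#define EXPORT_SYMBOL( sym ) sym
#include "mylib-export.lst"
#undef EXPORT_SYMBOL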
- -#include "tbb/tbb_config.h" - -// Assembly-language support that is called directly by clients -__TBB_SYMBOL( __TBB_machine_cmpswp1 ) -__TBB_SYMBOL( __TBB_machine_fetchadd1 ) -__TBB_SYMBOL( __TBB_machine_fetchstore1 ) -__TBB_SYMBOL( __TBB_machine_cmpswp2 ) -__TBB_SYMBOL( __TBB_machine_fetchadd2 ) -__TBB_SYMBOL( __TBB_machine_fetchstore2 ) -__TBB_SYMBOL( __TBB_machine_pause ) -__TBB_SYMBOL( __TBB_machine_try_lock_elided ) -__TBB_SYMBOL( __TBB_machine_unlock_elided ) -__TBB_SYMBOL( __TBB_machine_is_in_transaction ) - -// cache_aligned_allocator.cpp -__TBB_SYMBOL( ?NFS_Allocate@internal@tbb@@YAPEAX_K0PEAX@Z ) -__TBB_SYMBOL( ?NFS_GetLineSize@internal@tbb@@YA_KXZ ) -__TBB_SYMBOL( ?NFS_Free@internal@tbb@@YAXPEAX@Z ) -__TBB_SYMBOL( ?allocate_via_handler_v3@internal@tbb@@YAPEAX_K@Z ) -__TBB_SYMBOL( ?deallocate_via_handler_v3@internal@tbb@@YAXPEAX@Z ) -__TBB_SYMBOL( ?is_malloc_used_v3@internal@tbb@@YA_NXZ ) - - -// task.cpp v3 -__TBB_SYMBOL( ?resize@affinity_partitioner_base_v3@internal@tbb@@AEAAXI@Z ) -__TBB_SYMBOL( ?allocate@allocate_additional_child_of_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z ) -__TBB_SYMBOL( ?allocate@allocate_child_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z ) -__TBB_SYMBOL( ?allocate@allocate_continuation_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z ) -__TBB_SYMBOL( ?allocate@allocate_root_proxy@internal@tbb@@SAAEAVtask@3@_K@Z ) -__TBB_SYMBOL( ?destroy@task_base@internal@interface5@tbb@@SAXAEAVtask@4@@Z ) -__TBB_SYMBOL( ?free@allocate_additional_child_of_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_child_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_continuation_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_root_proxy@internal@tbb@@SAXAEAVtask@3@@Z ) -__TBB_SYMBOL( ?internal_set_ref_count@task@tbb@@AEAAXH@Z ) -__TBB_SYMBOL( ?internal_decrement_ref_count@task@tbb@@AEAA_JXZ ) -__TBB_SYMBOL( ?is_owned_by_current_thread@task@tbb@@QEBA_NXZ ) -__TBB_SYMBOL( ?note_affinity@task@tbb@@UEAAXG@Z ) -__TBB_SYMBOL( ?self@task@tbb@@SAAEAV12@XZ ) -__TBB_SYMBOL( ?spawn_and_wait_for_all@task@tbb@@QEAAXAEAVtask_list@2@@Z ) -__TBB_SYMBOL( ?default_num_threads@task_scheduler_init@tbb@@SAHXZ ) -__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QEAAXH_K@Z ) -__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QEAAXH@Z ) -__TBB_SYMBOL( ?terminate@task_scheduler_init@tbb@@QEAAXXZ ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( ?observe@task_scheduler_observer_v3@internal@tbb@@QEAAX_N@Z ) -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( ?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ ) -__TBB_SYMBOL( ?internal_initialize@task_arena_base@internal@interface7@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ?internal_terminate@task_arena_base@internal@interface7@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ?internal_enqueue@task_arena_base@internal@interface7@tbb@@IEBAXAEAVtask@4@_J@Z ) -__TBB_SYMBOL( ?internal_execute@task_arena_base@internal@interface7@tbb@@IEBAXAEAVdelegate_base@234@@Z ) -__TBB_SYMBOL( ?internal_wait@task_arena_base@internal@interface7@tbb@@IEBAXXZ ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -// task_v2.cpp -__TBB_SYMBOL( ?destroy@task@tbb@@QEAAXAEAV12@@Z ) -#endif - -// Exception handling in task scheduler -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( ?allocate@allocate_root_with_context_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z ) -__TBB_SYMBOL( ?free@allocate_root_with_context_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z ) -__TBB_SYMBOL( 
?change_group@task@tbb@@QEAAXAEAVtask_group_context@2@@Z ) -__TBB_SYMBOL( ?is_group_execution_cancelled@task_group_context@tbb@@QEBA_NXZ ) -__TBB_SYMBOL( ?cancel_group_execution@task_group_context@tbb@@QEAA_NXZ ) -__TBB_SYMBOL( ?reset@task_group_context@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?capture_fp_settings@task_group_context@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?init@task_group_context@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ?register_pending_exception@task_group_context@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ??1task_group_context@tbb@@QEAA@XZ ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( ?set_priority@task_group_context@tbb@@QEAAXW4priority_t@2@@Z ) -__TBB_SYMBOL( ?priority@task_group_context@tbb@@QEBA?AW4priority_t@2@XZ ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( ?name@captured_exception@tbb@@UEBAPEBDXZ ) -__TBB_SYMBOL( ?what@captured_exception@tbb@@UEBAPEBDXZ ) -__TBB_SYMBOL( ??1captured_exception@tbb@@UEAA@XZ ) -__TBB_SYMBOL( ?move@captured_exception@tbb@@UEAAPEAV12@XZ ) -__TBB_SYMBOL( ?destroy@captured_exception@tbb@@UEAAXXZ ) -__TBB_SYMBOL( ?set@captured_exception@tbb@@QEAAXPEBD0@Z ) -__TBB_SYMBOL( ?clear@captured_exception@tbb@@QEAAXXZ ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// Symbols for exceptions thrown from TBB -__TBB_SYMBOL( ?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ ) -__TBB_SYMBOL( ?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z ) -__TBB_SYMBOL( ?what@bad_last_alloc@tbb@@UEBAPEBDXZ ) -__TBB_SYMBOL( ?what@missing_wait@tbb@@UEBAPEBDXZ ) -__TBB_SYMBOL( ?what@invalid_multiple_scheduling@tbb@@UEBAPEBDXZ ) -__TBB_SYMBOL( ?what@improper_lock@tbb@@UEBAPEBDXZ ) -__TBB_SYMBOL( ?what@user_abort@tbb@@UEBAPEBDXZ ) - -// tbb_misc.cpp -__TBB_SYMBOL( ?assertion_failure@tbb@@YAXPEBDH00@Z ) -__TBB_SYMBOL( ?get_initial_auto_partitioner_divisor@internal@tbb@@YA_KXZ ) -__TBB_SYMBOL( ?handle_perror@internal@tbb@@YAXHPEBD@Z ) -__TBB_SYMBOL( ?set_assertion_handler@tbb@@YAP6AXPEBDH00@ZP6AX0H00@Z@Z ) -__TBB_SYMBOL( ?runtime_warning@internal@tbb@@YAXPEBDZZ ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -// tbb_main.cpp -__TBB_SYMBOL( ?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPEAXPEBX@Z ) -__TBB_SYMBOL( ?itt_store_pointer_with_release_v3@internal@tbb@@YAXPEAX0@Z ) -__TBB_SYMBOL( ?call_itt_notify_v5@internal@tbb@@YAXHPEAX@Z ) -__TBB_SYMBOL( ?itt_load_pointer_v3@internal@tbb@@YAPEAXPEBX@Z ) -__TBB_SYMBOL( ?itt_set_sync_name_v3@internal@tbb@@YAXPEAXPEB_W@Z ) -#if __TBB_ITT_STRUCTURE_API -__TBB_SYMBOL( ?itt_make_task_group_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_K12W4string_index@12@@Z ) -__TBB_SYMBOL( ?itt_metadata_str_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_KW4string_index@12@PEBD@Z ) -__TBB_SYMBOL( ?itt_relation_add_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_KW4itt_relation@12@12@Z ) -__TBB_SYMBOL( ?itt_task_begin_v7@internal@tbb@@YAXW4itt_domain_enum@12@PEAX_K12W4string_index@12@@Z ) -__TBB_SYMBOL( ?itt_task_end_v7@internal@tbb@@YAXW4itt_domain_enum@12@@Z ) -#endif - -// pipeline.cpp -__TBB_SYMBOL( ??_7pipeline@tbb@@6B@ ) -__TBB_SYMBOL( ??0pipeline@tbb@@QEAA@XZ ) -__TBB_SYMBOL( ??1filter@tbb@@UEAA@XZ ) -__TBB_SYMBOL( ??1pipeline@tbb@@UEAA@XZ ) -__TBB_SYMBOL( ?add_filter@pipeline@tbb@@QEAAXAEAVfilter@2@@Z ) -__TBB_SYMBOL( ?clear@pipeline@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?inject_token@pipeline@tbb@@AEAAXAEAVtask@2@@Z ) -__TBB_SYMBOL( ?run@pipeline@tbb@@QEAAX_K@Z ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( ?run@pipeline@tbb@@QEAAX_KAEAVtask_group_context@2@@Z ) -#endif -__TBB_SYMBOL( ?process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ ) -__TBB_SYMBOL( 
?try_process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ ) -__TBB_SYMBOL( ?set_end_of_input@filter@tbb@@IEAAXXZ ) - -// queuing_rw_mutex.cpp -__TBB_SYMBOL( ?internal_construct@queuing_rw_mutex@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAAXAEAV23@_N@Z ) -__TBB_SYMBOL( ?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ ) -__TBB_SYMBOL( ?release@scoped_lock@queuing_rw_mutex@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ ) -__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NAEAV23@_N@Z ) - -// reader_writer_lock.cpp -__TBB_SYMBOL( ?try_lock_read@reader_writer_lock@interface5@tbb@@QEAA_NXZ ) -__TBB_SYMBOL( ?try_lock@reader_writer_lock@interface5@tbb@@QEAA_NXZ ) -__TBB_SYMBOL( ?unlock@reader_writer_lock@interface5@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?lock_read@reader_writer_lock@interface5@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?lock@reader_writer_lock@interface5@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?internal_construct@reader_writer_lock@interface5@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_destroy@reader_writer_lock@interface5@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z ) -__TBB_SYMBOL( ?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z ) -__TBB_SYMBOL( ?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXXZ ) - -#if !TBB_NO_LEGACY -// spin_rw_mutex.cpp v2 -__TBB_SYMBOL( ?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPEAV12@@Z ) -__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z ) -__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z ) -__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex@tbb@@CAXPEAV12@@Z ) -__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex@tbb@@CA_NPEAV12@@Z ) -__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z ) -__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex@tbb@@CAXPEAV12@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPEAV12@@Z ) -#endif - -// spin_rw_mutex v3 -__TBB_SYMBOL( ?internal_construct@spin_rw_mutex_v3@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex_v3@tbb@@AEAA_NXZ ) -__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex_v3@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ ) -__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex_v3@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AEAA_NXZ ) -__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ ) - -// x86_rtm_rw_mutex.cpp -__TBB_SYMBOL( ?internal_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXAEAVscoped_lock@1234@_N@Z ) -__TBB_SYMBOL( ?internal_acquire_reader@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXAEAVscoped_lock@1234@_N@Z ) -__TBB_SYMBOL( ?internal_upgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAA_NAEAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( ?internal_downgrade@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAA_NAEAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_writer@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAA_NAEAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( 
?internal_release@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXAEAVscoped_lock@1234@@Z ) -__TBB_SYMBOL( ?internal_construct@x86_rtm_rw_mutex@internal@interface8@tbb@@AEAAXXZ ) - -// spin_mutex.cpp -__TBB_SYMBOL( ?internal_construct@spin_mutex@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?internal_acquire@scoped_lock@spin_mutex@tbb@@AEAAXAEAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@spin_mutex@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AEAA_NAEAV23@@Z ) - -// mutex.cpp -__TBB_SYMBOL( ?internal_acquire@scoped_lock@mutex@tbb@@AEAAXAEAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@mutex@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@mutex@tbb@@AEAA_NAEAV23@@Z ) -__TBB_SYMBOL( ?internal_construct@mutex@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_destroy@mutex@tbb@@AEAAXXZ ) - -// recursive_mutex.cpp -__TBB_SYMBOL( ?internal_construct@recursive_mutex@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_destroy@recursive_mutex@tbb@@AEAAXXZ ) -__TBB_SYMBOL( ?internal_acquire@scoped_lock@recursive_mutex@tbb@@AEAAXAEAV23@@Z ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AEAA_NAEAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@recursive_mutex@tbb@@AEAAXXZ ) - -// queuing_mutex.cpp -__TBB_SYMBOL( ?internal_construct@queuing_mutex@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?acquire@scoped_lock@queuing_mutex@tbb@@QEAAXAEAV23@@Z ) -__TBB_SYMBOL( ?release@scoped_lock@queuing_mutex@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_mutex@tbb@@QEAA_NAEAV23@@Z ) - -//critical_section.cpp -__TBB_SYMBOL( ?internal_construct@critical_section_v4@internal@tbb@@QEAAXXZ ) - -#if !TBB_NO_LEGACY -// concurrent_hash_map.cpp -__TBB_SYMBOL( ?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QEBA_NXZ ) - -// concurrent_queue.cpp v2 -__TBB_SYMBOL( ??0concurrent_queue_base@internal@tbb@@IEAA@_K@Z ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base@internal@tbb@@IEAA@AEBVconcurrent_queue_base@12@@Z ) -__TBB_SYMBOL( ??1concurrent_queue_base@internal@tbb@@MEAA@XZ ) -__TBB_SYMBOL( ??1concurrent_queue_iterator_base@internal@tbb@@IEAA@XZ ) -__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base@internal@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base@internal@tbb@@IEAAXAEBV123@@Z ) -__TBB_SYMBOL( ?internal_pop@concurrent_queue_base@internal@tbb@@IEAAXPEAX@Z ) -__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IEAA_NPEAX@Z ) -__TBB_SYMBOL( ?internal_push@concurrent_queue_base@internal@tbb@@IEAAXPEBX@Z ) -__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IEAA_NPEBX@Z ) -__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base@internal@tbb@@IEAAX_J_K@Z ) -__TBB_SYMBOL( ?internal_size@concurrent_queue_base@internal@tbb@@IEBA_JXZ ) -#endif - -// concurrent_queue v3 -__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@@Z ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@_K@Z ) -__TBB_SYMBOL( ??1concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@XZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXAEBV123@@Z ) -__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ??0concurrent_queue_base_v3@internal@tbb@@IEAA@_K@Z ) -__TBB_SYMBOL( ??1concurrent_queue_base_v3@internal@tbb@@MEAA@XZ ) -__TBB_SYMBOL( ?internal_push@concurrent_queue_base_v3@internal@tbb@@IEAAXPEBX@Z ) -__TBB_SYMBOL( 
?internal_push_move@concurrent_queue_base_v8@internal@tbb@@IEAAXPEBX@Z ) -__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEBX@Z ) -__TBB_SYMBOL( ?internal_push_move_if_not_full@concurrent_queue_base_v8@internal@tbb@@IEAA_NPEBX@Z ) -__TBB_SYMBOL( ?internal_pop@concurrent_queue_base_v3@internal@tbb@@IEAAXPEAX@Z ) -__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEAX@Z ) -__TBB_SYMBOL( ?internal_abort@concurrent_queue_base_v3@internal@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ?internal_size@concurrent_queue_base_v3@internal@tbb@@IEBA_JXZ ) -__TBB_SYMBOL( ?internal_empty@concurrent_queue_base_v3@internal@tbb@@IEBA_NXZ ) -__TBB_SYMBOL( ?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IEAAXXZ ) -__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IEAAX_J_K@Z ) -__TBB_SYMBOL( ?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IEBAXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_base_v3@internal@tbb@@IEAAXAEBV123@@Z ) -__TBB_SYMBOL( ?move_content@concurrent_queue_base_v8@internal@tbb@@IEAAXAEAV123@@Z ) - -#if !TBB_NO_LEGACY -// concurrent_vector.cpp v2 -__TBB_SYMBOL( ?internal_assign@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z ) -__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base@internal@tbb@@IEBA_KXZ ) -__TBB_SYMBOL( ?internal_clear@concurrent_vector_base@internal@tbb@@IEAAXP6AXPEAX_K@Z_N@Z ) -__TBB_SYMBOL( ?internal_copy@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z ) -__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base@internal@tbb@@IEAA_K_K0P6AXPEAX0@Z@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IEAAX_K0P6AXPEAX0@Z@Z ) -__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base@internal@tbb@@IEAAPEAX_KAEA_K@Z ) -__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base@internal@tbb@@IEAAX_K00@Z ) -#endif - -// concurrent_vector v3 -__TBB_SYMBOL( ??1concurrent_vector_base_v3@internal@tbb@@IEAA@XZ ) -__TBB_SYMBOL( ?internal_assign@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z ) -__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IEBA_KXZ ) -__TBB_SYMBOL( ?internal_clear@concurrent_vector_base_v3@internal@tbb@@IEAA_KP6AXPEAX_K@Z@Z ) -__TBB_SYMBOL( ?internal_copy@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z ) -__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IEAAX_K0P6AXPEAXPEBX0@Z2@Z ) -__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KAEA_K@Z ) -__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00@Z ) -__TBB_SYMBOL( ?internal_compact@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KPEAXP6AX10@ZP6AX1PEBX0@Z@Z ) -__TBB_SYMBOL( ?internal_swap@concurrent_vector_base_v3@internal@tbb@@IEAAXAEAV123@@Z ) -__TBB_SYMBOL( ?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IEBAX_K@Z ) -__TBB_SYMBOL( ?internal_resize@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00PEBXP6AXPEAX0@ZP6AX210@Z@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z ) - -// tbb_thread -__TBB_SYMBOL( ?allocate_closure_v3@internal@tbb@@YAPEAX_K@Z ) -__TBB_SYMBOL( ?detach@tbb_thread_v3@internal@tbb@@QEAAXXZ ) -__TBB_SYMBOL( 
?free_closure_v3@internal@tbb@@YAXPEAX@Z ) -__TBB_SYMBOL( ?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ ) -__TBB_SYMBOL( ?internal_start@tbb_thread_v3@internal@tbb@@AEAAXP6AIPEAX@Z0@Z ) -__TBB_SYMBOL( ?join@tbb_thread_v3@internal@tbb@@QEAAXXZ ) -__TBB_SYMBOL( ?move_v3@internal@tbb@@YAXAEAVtbb_thread_v3@12@0@Z ) -__TBB_SYMBOL( ?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ ) -__TBB_SYMBOL( ?thread_sleep_v3@internal@tbb@@YAXAEBVinterval_t@tick_count@2@@Z ) -__TBB_SYMBOL( ?thread_yield_v3@internal@tbb@@YAXXZ ) - -// condition_variable -__TBB_SYMBOL( ?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAEATcondvar_impl_t@123@PEAVmutex@3@PEBVinterval_t@tick_count@3@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/winrt-tbb-export.lst b/src/tbb/src/tbb/winrt-tbb-export.lst deleted file mode 100644 index d1ffa358a..000000000 --- a/src/tbb/src/tbb/winrt-tbb-export.lst +++ /dev/null @@ -1,297 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. 
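All of these lists export the same C++ entry points; what differs is the name mangling, which is why each platform and toolchain needs its own .lst. For example, tbb::task::self() appears in three forms across the deleted files (each taken verbatim from the lists above and below):

// static tbb::task& tbb::task::self();
//
//   lin64 (Itanium C++ ABI)        : _ZN3tbb4task4selfEv
//   win64 (MSVC x64 decoration)    : ?self@task@tbb@@SAAEAV12@XZ
//   winrt (MSVC 32-bit decoration) : ?self@task@tbb@@SAAAV12@XZ
//
// c++filt demangles the first form; MSVC's undname tool decodes the others.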
- -#include "tbb/tbb_config.h" - -// cache_aligned_allocator.cpp -__TBB_SYMBOL( ?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z ) -__TBB_SYMBOL( ?NFS_GetLineSize@internal@tbb@@YAIXZ ) -__TBB_SYMBOL( ?NFS_Free@internal@tbb@@YAXPAX@Z ) -__TBB_SYMBOL( ?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z ) -__TBB_SYMBOL( ?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z ) -__TBB_SYMBOL( ?is_malloc_used_v3@internal@tbb@@YA_NXZ ) - -// task.cpp v3 -__TBB_SYMBOL( ?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBAAAVtask@3@I@Z ) -__TBB_SYMBOL( ?allocate@allocate_child_proxy@internal@tbb@@QBAAAVtask@3@I@Z ) -__TBB_SYMBOL( ?allocate@allocate_continuation_proxy@internal@tbb@@QBAAAVtask@3@I@Z ) -__TBB_SYMBOL( ?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z ) -__TBB_SYMBOL( ?destroy@task_base@internal@interface5@tbb@@SAXAAVtask@4@@Z ) -__TBB_SYMBOL( ?free@allocate_additional_child_of_proxy@internal@tbb@@QBAXAAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_child_proxy@internal@tbb@@QBAXAAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_continuation_proxy@internal@tbb@@QBAXAAVtask@3@@Z ) -__TBB_SYMBOL( ?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z ) -__TBB_SYMBOL( ?internal_set_ref_count@task@tbb@@AAAXH@Z ) -__TBB_SYMBOL( ?internal_decrement_ref_count@task@tbb@@AAAHXZ ) -__TBB_SYMBOL( ?is_owned_by_current_thread@task@tbb@@QBA_NXZ ) -__TBB_SYMBOL( ?note_affinity@task@tbb@@UAAXG@Z ) -__TBB_SYMBOL( ?resize@affinity_partitioner_base_v3@internal@tbb@@AAAXI@Z ) -__TBB_SYMBOL( ?self@task@tbb@@SAAAV12@XZ ) -__TBB_SYMBOL( ?spawn_and_wait_for_all@task@tbb@@QAAXAAVtask_list@2@@Z ) -__TBB_SYMBOL( ?default_num_threads@task_scheduler_init@tbb@@SAHXZ ) -__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAAXHI@Z ) -__TBB_SYMBOL( ?initialize@task_scheduler_init@tbb@@QAAXH@Z ) -__TBB_SYMBOL( ?terminate@task_scheduler_init@tbb@@QAAXXZ ) -#if __TBB_SCHEDULER_OBSERVER -__TBB_SYMBOL( ?observe@task_scheduler_observer_v3@internal@tbb@@QAAX_N@Z ) -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_TASK_ARENA -/* arena.cpp */ -__TBB_SYMBOL( ?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ ) -__TBB_SYMBOL( ?internal_initialize@task_arena_base@internal@interface7@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?internal_terminate@task_arena_base@internal@interface7@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?internal_enqueue@task_arena_base@internal@interface7@tbb@@IBAXAAVtask@4@H@Z ) -__TBB_SYMBOL( ?internal_execute@task_arena_base@internal@interface7@tbb@@IBAXAAVdelegate_base@234@@Z ) -__TBB_SYMBOL( ?internal_wait@task_arena_base@internal@interface7@tbb@@IBAXXZ ) -#endif /* __TBB_TASK_ARENA */ - -#if !TBB_NO_LEGACY -// task_v2.cpp -__TBB_SYMBOL( ?destroy@task@tbb@@QAAXAAV12@@Z ) -#endif - -// exception handling support -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( ?allocate@allocate_root_with_context_proxy@internal@tbb@@QBAAAVtask@3@I@Z ) -__TBB_SYMBOL( ?free@allocate_root_with_context_proxy@internal@tbb@@QBAXAAVtask@3@@Z ) -__TBB_SYMBOL( ?change_group@task@tbb@@QAAXAAVtask_group_context@2@@Z ) -__TBB_SYMBOL( ?is_group_execution_cancelled@task_group_context@tbb@@QBA_NXZ ) -__TBB_SYMBOL( ?cancel_group_execution@task_group_context@tbb@@QAA_NXZ ) -__TBB_SYMBOL( ?reset@task_group_context@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?capture_fp_settings@task_group_context@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?init@task_group_context@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?register_pending_exception@task_group_context@tbb@@QAAXXZ ) -__TBB_SYMBOL( ??1task_group_context@tbb@@QAA@XZ ) -#if __TBB_TASK_PRIORITY -__TBB_SYMBOL( 
?set_priority@task_group_context@tbb@@QAAXW4priority_t@2@@Z ) -__TBB_SYMBOL( ?priority@task_group_context@tbb@@QBA?AW4priority_t@2@XZ ) -#endif /* __TBB_TASK_PRIORITY */ -__TBB_SYMBOL( ?name@captured_exception@tbb@@UBAPBDXZ ) -__TBB_SYMBOL( ?what@captured_exception@tbb@@UBAPBDXZ ) -__TBB_SYMBOL( ??1captured_exception@tbb@@UAA@XZ ) -__TBB_SYMBOL( ?move@captured_exception@tbb@@UAAPAV12@XZ ) -__TBB_SYMBOL( ?destroy@captured_exception@tbb@@UAAXXZ ) -__TBB_SYMBOL( ?set@captured_exception@tbb@@QAAXPBD0@Z ) -__TBB_SYMBOL( ?clear@captured_exception@tbb@@QAAXXZ ) -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// Symbols for exceptions thrown from TBB -__TBB_SYMBOL( ?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ ) -__TBB_SYMBOL( ?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z ) -__TBB_SYMBOL( ?what@bad_last_alloc@tbb@@UBAPBDXZ ) -__TBB_SYMBOL( ?what@missing_wait@tbb@@UBAPBDXZ ) -__TBB_SYMBOL( ?what@invalid_multiple_scheduling@tbb@@UBAPBDXZ ) -__TBB_SYMBOL( ?what@improper_lock@tbb@@UBAPBDXZ ) -__TBB_SYMBOL( ?what@user_abort@tbb@@UBAPBDXZ ) - -// tbb_misc.cpp -__TBB_SYMBOL( ?assertion_failure@tbb@@YAXPBDH00@Z ) -__TBB_SYMBOL( ?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ ) -__TBB_SYMBOL( ?handle_perror@internal@tbb@@YAXHPBD@Z ) -__TBB_SYMBOL( ?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z ) -__TBB_SYMBOL( ?runtime_warning@internal@tbb@@YAXPBDZZ ) -__TBB_SYMBOL( TBB_runtime_interface_version ) - -// tbb_main.cpp -__TBB_SYMBOL( ?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z ) -__TBB_SYMBOL( ?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z ) -__TBB_SYMBOL( ?call_itt_notify_v5@internal@tbb@@YAXHPAX@Z ) -__TBB_SYMBOL( ?itt_set_sync_name_v3@internal@tbb@@YAXPAXPB_W@Z ) -__TBB_SYMBOL( ?itt_load_pointer_v3@internal@tbb@@YAPAXPBX@Z ) - -// pipeline.cpp -__TBB_SYMBOL( ??0pipeline@tbb@@QAA@XZ ) -__TBB_SYMBOL( ??1filter@tbb@@UAA@XZ ) -__TBB_SYMBOL( ??1pipeline@tbb@@UAA@XZ ) -__TBB_SYMBOL( ??_7pipeline@tbb@@6B@ ) -__TBB_SYMBOL( ?add_filter@pipeline@tbb@@QAAXAAVfilter@2@@Z ) -__TBB_SYMBOL( ?clear@pipeline@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?inject_token@pipeline@tbb@@AAAXAAVtask@2@@Z ) -__TBB_SYMBOL( ?run@pipeline@tbb@@QAAXI@Z ) -#if __TBB_TASK_GROUP_CONTEXT -__TBB_SYMBOL( ?run@pipeline@tbb@@QAAXIAAVtask_group_context@2@@Z ) -#endif -__TBB_SYMBOL( ?process_item@thread_bound_filter@tbb@@QAA?AW4result_type@12@XZ ) -__TBB_SYMBOL( ?try_process_item@thread_bound_filter@tbb@@QAA?AW4result_type@12@XZ ) -__TBB_SYMBOL( ?set_end_of_input@filter@tbb@@IAAXXZ ) - -// queuing_rw_mutex.cpp -__TBB_SYMBOL( ?internal_construct@queuing_rw_mutex@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAAXAAV23@_N@Z ) -__TBB_SYMBOL( ?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ ) -__TBB_SYMBOL( ?release@scoped_lock@queuing_rw_mutex@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ ) -__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAA_NAAV23@_N@Z ) - -// reader_writer_lock.cpp -__TBB_SYMBOL( ?try_lock_read@reader_writer_lock@interface5@tbb@@QAA_NXZ ) -__TBB_SYMBOL( ?try_lock@reader_writer_lock@interface5@tbb@@QAA_NXZ ) -__TBB_SYMBOL( ?unlock@reader_writer_lock@interface5@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?lock_read@reader_writer_lock@interface5@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?lock@reader_writer_lock@interface5@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?internal_construct@reader_writer_lock@interface5@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_destroy@reader_writer_lock@interface5@tbb@@AAAXXZ ) -__TBB_SYMBOL( 
?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AAAXAAV234@@Z ) -__TBB_SYMBOL( ?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAAXAAV234@@Z ) -__TBB_SYMBOL( ?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAAXXZ ) - -#if !TBB_NO_LEGACY -// spin_rw_mutex.cpp v2 -__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z ) -__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z ) -#endif - -// spin_rw_mutex v3 -__TBB_SYMBOL( ?internal_construct@spin_rw_mutex_v3@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_upgrade@spin_rw_mutex_v3@tbb@@AAA_NXZ ) -__TBB_SYMBOL( ?internal_downgrade@spin_rw_mutex_v3@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ ) -__TBB_SYMBOL( ?internal_release_reader@spin_rw_mutex_v3@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_release_writer@spin_rw_mutex_v3@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAA_NXZ ) -__TBB_SYMBOL( ?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ ) - -// spin_mutex.cpp -__TBB_SYMBOL( ?internal_construct@spin_mutex@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?internal_acquire@scoped_lock@spin_mutex@tbb@@AAAXAAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@spin_mutex@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAA_NAAV23@@Z ) - -// mutex.cpp -__TBB_SYMBOL( ?internal_acquire@scoped_lock@mutex@tbb@@AAAXAAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@mutex@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@mutex@tbb@@AAA_NAAV23@@Z ) -__TBB_SYMBOL( ?internal_construct@mutex@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_destroy@mutex@tbb@@AAAXXZ ) - -// recursive_mutex.cpp -__TBB_SYMBOL( ?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAAXAAV23@@Z ) -__TBB_SYMBOL( ?internal_release@scoped_lock@recursive_mutex@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAA_NAAV23@@Z ) -__TBB_SYMBOL( ?internal_construct@recursive_mutex@tbb@@AAAXXZ ) -__TBB_SYMBOL( ?internal_destroy@recursive_mutex@tbb@@AAAXXZ ) - -// queuing_mutex.cpp -__TBB_SYMBOL( ?internal_construct@queuing_mutex@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?acquire@scoped_lock@queuing_mutex@tbb@@QAAXAAV23@@Z ) -__TBB_SYMBOL( ?release@scoped_lock@queuing_mutex@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?try_acquire@scoped_lock@queuing_mutex@tbb@@QAA_NAAV23@@Z ) - -// critical_section.cpp -__TBB_SYMBOL( ?internal_construct@critical_section_v4@internal@tbb@@QAAXXZ ) - -#if !TBB_NO_LEGACY -// concurrent_hash_map.cpp -__TBB_SYMBOL( ?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBA_NXZ ) - -// concurrent_queue.cpp v2 -__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base@internal@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base@internal@tbb@@IAAXABV123@@Z ) -__TBB_SYMBOL( 
?internal_size@concurrent_queue_base@internal@tbb@@IBAHXZ ) -__TBB_SYMBOL( ??0concurrent_queue_base@internal@tbb@@IAA@I@Z ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base@internal@tbb@@IAA@ABVconcurrent_queue_base@12@@Z ) -__TBB_SYMBOL( ??1concurrent_queue_base@internal@tbb@@MAA@XZ ) -__TBB_SYMBOL( ??1concurrent_queue_iterator_base@internal@tbb@@IAA@XZ ) -__TBB_SYMBOL( ?internal_pop@concurrent_queue_base@internal@tbb@@IAAXPAX@Z ) -__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAA_NPAX@Z ) -__TBB_SYMBOL( ?internal_push@concurrent_queue_base@internal@tbb@@IAAXPBX@Z ) -__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAA_NPBX@Z ) -__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAAXHI@Z ) -#endif - -// concurrent_queue v3 -__TBB_SYMBOL( ??1concurrent_queue_iterator_base_v3@internal@tbb@@IAA@XZ ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@@Z ) -__TBB_SYMBOL( ??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@I@Z ) -__TBB_SYMBOL( ?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXABV123@@Z ) -__TBB_SYMBOL( ??0concurrent_queue_base_v3@internal@tbb@@IAA@I@Z ) -__TBB_SYMBOL( ??1concurrent_queue_base_v3@internal@tbb@@MAA@XZ ) -__TBB_SYMBOL( ?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAAXPAX@Z ) -__TBB_SYMBOL( ?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAA_NPAX@Z ) -__TBB_SYMBOL( ?internal_abort@concurrent_queue_base_v3@internal@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?internal_push@concurrent_queue_base_v3@internal@tbb@@IAAXPBX@Z ) -__TBB_SYMBOL( ?internal_push_move@concurrent_queue_base_v8@internal@tbb@@IAAXPBX@Z ) -__TBB_SYMBOL( ?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAA_NPBX@Z ) -__TBB_SYMBOL( ?internal_push_move_if_not_full@concurrent_queue_base_v8@internal@tbb@@IAA_NPBX@Z ) -__TBB_SYMBOL( ?internal_size@concurrent_queue_base_v3@internal@tbb@@IBAHXZ ) -__TBB_SYMBOL( ?internal_empty@concurrent_queue_base_v3@internal@tbb@@IBA_NXZ ) -__TBB_SYMBOL( ?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAAXHI@Z ) -__TBB_SYMBOL( ?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAAXXZ ) -__TBB_SYMBOL( ?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBAXXZ ) -__TBB_SYMBOL( ?assign@concurrent_queue_base_v3@internal@tbb@@IAAXABV123@@Z ) -__TBB_SYMBOL( ?move_content@concurrent_queue_base_v8@internal@tbb@@IAAXAAV123@@Z ) - -#if !TBB_NO_LEGACY -// concurrent_vector.cpp v2 -__TBB_SYMBOL( ?internal_assign@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z ) -__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base@internal@tbb@@IBAIXZ ) -__TBB_SYMBOL( ?internal_clear@concurrent_vector_base@internal@tbb@@IAAXP6AXPAXI@Z_N@Z ) -__TBB_SYMBOL( ?internal_copy@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z ) -__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base@internal@tbb@@IAAIIIP6AXPAXI@Z@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAAXIIP6AXPAXI@Z@Z ) -__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base@internal@tbb@@IAAPAXIAAI@Z ) -__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base@internal@tbb@@IAAXIII@Z ) -#endif - -// concurrent_vector v3 -__TBB_SYMBOL( ??1concurrent_vector_base_v3@internal@tbb@@IAA@XZ ) -__TBB_SYMBOL( 
?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z ) -__TBB_SYMBOL( ?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBAIXZ ) -__TBB_SYMBOL( ?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAAIP6AXPAXI@Z@Z ) -__TBB_SYMBOL( ?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z ) -__TBB_SYMBOL( ?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAAXIIP6AXPAXPBXI@Z1@Z ) -__TBB_SYMBOL( ?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAAPAXIAAI@Z ) -__TBB_SYMBOL( ?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAAXIII@Z ) -__TBB_SYMBOL( ?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAAPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z ) -__TBB_SYMBOL( ?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAAXAAV123@@Z ) -__TBB_SYMBOL( ?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBAXI@Z ) -__TBB_SYMBOL( ?internal_resize@concurrent_vector_base_v3@internal@tbb@@IAAXIIIPBXP6AXPAXI@ZP6AX10I@Z@Z ) -__TBB_SYMBOL( ?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z ) - -// tbb_thread -__TBB_SYMBOL( ?join@tbb_thread_v3@internal@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?detach@tbb_thread_v3@internal@tbb@@QAAXXZ ) -__TBB_SYMBOL( ?internal_start@tbb_thread_v3@internal@tbb@@AAAXP6AIPAX@Z0@Z ) -__TBB_SYMBOL( ?allocate_closure_v3@internal@tbb@@YAPAXI@Z ) -__TBB_SYMBOL( ?free_closure_v3@internal@tbb@@YAXPAX@Z ) -__TBB_SYMBOL( ?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ ) -__TBB_SYMBOL( ?thread_yield_v3@internal@tbb@@YAXXZ ) -__TBB_SYMBOL( ?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z ) -__TBB_SYMBOL( ?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z ) -__TBB_SYMBOL( ?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ ) - -// condition_variable -__TBB_SYMBOL( ?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAATcondvar_impl_t@123@PAVmutex@3@PBVinterval_t@tick_count@3@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) -__TBB_SYMBOL( ?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z ) - -#undef __TBB_SYMBOL diff --git a/src/tbb/src/tbb/x86_rtm_rw_mutex.cpp b/src/tbb/src/tbb/x86_rtm_rw_mutex.cpp deleted file mode 100644 index d3ed62469..000000000 --- a/src/tbb/src/tbb/x86_rtm_rw_mutex.cpp +++ /dev/null @@ -1,267 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#if __TBB_TSX_AVAILABLE -#include "tbb/spin_rw_mutex.h" -#include "tbb/tbb_machine.h" -#include "itt_notify.h" -#include "governor.h" -#include "tbb/atomic.h" - -// __TBB_RW_MUTEX_DELAY_TEST shifts the point where flags aborting speculation are -// added to the read-set of the operation. If 1, will add the test just before -// the transaction is ended. -#ifndef __TBB_RW_MUTEX_DELAY_TEST - #define __TBB_RW_MUTEX_DELAY_TEST 1 -#endif - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4244) -#endif - -namespace tbb { - -namespace interface8 { -namespace internal { - -// abort code for mutexes that detect a conflict with another thread. -// value is hexadecimal -enum { - speculation_transaction_aborted = 0x01, - speculation_can_retry = 0x02, - speculation_memadd_conflict = 0x04, - speculation_buffer_overflow = 0x08, - speculation_breakpoint_hit = 0x10, - speculation_nested_abort = 0x20, - speculation_xabort_mask = 0xFF000000, - speculation_xabort_shift = 24, - speculation_retry = speculation_transaction_aborted - | speculation_can_retry - | speculation_memadd_conflict -}; - -// maximum number of times to retry -static const int retry_threshold_read = 10; -static const int retry_threshold_write = 10; - -//! Release speculative mutex -void x86_rtm_rw_mutex::internal_release(x86_rtm_rw_mutex::scoped_lock& s) { - switch(s.transaction_state) { - case RTM_transacting_writer: - case RTM_transacting_reader: - { - __TBB_ASSERT(__TBB_machine_is_in_transaction(), "transaction_state && not speculating"); -#if __TBB_RW_MUTEX_DELAY_TEST - if(s.transaction_state == RTM_transacting_reader) { - if(this->w_flag) __TBB_machine_transaction_conflict_abort(); - } else { - if(this->state) __TBB_machine_transaction_conflict_abort(); - } -#endif - __TBB_machine_end_transaction(); - s.my_scoped_lock.internal_set_mutex(NULL); - } - break; - case RTM_real_reader: - __TBB_ASSERT(!this->w_flag, "w_flag set but read lock acquired"); - s.my_scoped_lock.release(); - break; - case RTM_real_writer: - __TBB_ASSERT(this->w_flag, "w_flag unset but write lock acquired"); - this->w_flag = false; - s.my_scoped_lock.release(); - break; - case RTM_not_in_mutex: - __TBB_ASSERT(false, "RTM_not_in_mutex, but in release"); - default: - __TBB_ASSERT(false, "invalid transaction_state"); - } - s.transaction_state = RTM_not_in_mutex; -} - -//! Acquire write lock on the given mutex. 
-void x86_rtm_rw_mutex::internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock& s, bool only_speculate) -{ - __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, "scoped_lock already in transaction"); - if(tbb::internal::governor::speculation_enabled()) { - int num_retries = 0; - unsigned int abort_code; - do { - tbb::internal::atomic_backoff backoff; - if(this->state) { - if(only_speculate) return; - do { - backoff.pause(); // test the spin_rw_mutex (real readers or writers) - } while(this->state); - } - // _xbegin returns -1 on success or the abort code, so capture it - if(( abort_code = __TBB_machine_begin_transaction()) == ~(unsigned int)(0) ) - { - // started speculation -#if !__TBB_RW_MUTEX_DELAY_TEST - if(this->state) { // add spin_rw_mutex to read-set. - // reader or writer grabbed the lock, so abort. - __TBB_machine_transaction_conflict_abort(); - } -#endif - s.transaction_state = RTM_transacting_writer; - s.my_scoped_lock.internal_set_mutex(this); // need mutex for release() - return; // successfully started speculation - } - ++num_retries; - } while( (abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_write) ); - } - - if(only_speculate) return; // should apply a real try_lock... - s.my_scoped_lock.acquire(*this, true); // kill transactional writers - __TBB_ASSERT(!w_flag, "After acquire for write, w_flag already true"); - w_flag = true; // kill transactional readers - s.transaction_state = RTM_real_writer; - return; -} - -//! Acquire read lock on given mutex. -// only_speculate : true if we are doing a try_acquire. If true and we fail to speculate, don't -// really acquire the lock, return and do a try_acquire on the contained spin_rw_mutex. If -// the lock is already held by a writer, just return. -void x86_rtm_rw_mutex::internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock& s, bool only_speculate) { - __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, "scoped_lock already in transaction"); - if(tbb::internal::governor::speculation_enabled()) { - int num_retries = 0; - unsigned int abort_code; - do { - tbb::internal::atomic_backoff backoff; - // if in try_acquire, and lock is held as writer, don't attempt to speculate. - if(w_flag) { - if(only_speculate) return; - do { - backoff.pause(); // test the spin_rw_mutex (real readers or writers) - } while(w_flag); - } - // _xbegin returns -1 on success or the abort code, so capture it - if((abort_code = __TBB_machine_begin_transaction()) == ~(unsigned int)(0) ) - { - // started speculation -#if !__TBB_RW_MUTEX_DELAY_TEST - if(w_flag) { // add w_flag to read-set. - __TBB_machine_transaction_conflict_abort(); // writer grabbed the lock, so abort. - } -#endif - s.transaction_state = RTM_transacting_reader; - s.my_scoped_lock.internal_set_mutex(this); // need mutex for release() - return; // successfully started speculation - } - // fallback path - // retry only if there is any hope of getting into a transaction soon - // Retry in the following cases (from Section 8.3.5 of Intel(R) - // Architecture Instruction Set Extensions Programming Reference): - // 1. abort caused by XABORT instruction (bit 0 of EAX register is set) - // 2. the transaction may succeed on a retry (bit 1 of EAX register is set) - // 3. 
if another logical processor conflicted with a memory address - // that was part of the transaction that aborted (bit 2 of EAX register is set) - // That is, retry if (abort_code & 0x7) is non-zero - ++num_retries; - } while( (abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_read) ); - } - - if(only_speculate) return; - s.my_scoped_lock.acquire( *this, false ); - s.transaction_state = RTM_real_reader; -} - -//! Upgrade reader to become a writer. -/** Returns whether the upgrade happened without releasing and re-acquiring the lock */ -bool x86_rtm_rw_mutex::internal_upgrade(x86_rtm_rw_mutex::scoped_lock& s) -{ - switch(s.transaction_state) { - case RTM_real_reader: { - s.transaction_state = RTM_real_writer; - bool no_release = s.my_scoped_lock.upgrade_to_writer(); - __TBB_ASSERT(!w_flag, "After upgrade_to_writer, w_flag already true"); - w_flag = true; - return no_release; - } - case RTM_transacting_reader: - s.transaction_state = RTM_transacting_writer; - // don't need to add w_flag to read_set even if __TBB_RW_MUTEX_DELAY_TEST - // because the this pointer (the spin_rw_mutex) will be sufficient on release. - return true; - default: - __TBB_ASSERT(false, "Invalid state for upgrade"); - return false; - } -} - -//! Downgrade writer to a reader. -bool x86_rtm_rw_mutex::internal_downgrade(x86_rtm_rw_mutex::scoped_lock& s) { - switch(s.transaction_state) { - case RTM_real_writer: - s.transaction_state = RTM_real_reader; - __TBB_ASSERT(w_flag, "Before downgrade_to_reader w_flag not true"); - w_flag = false; - return s.my_scoped_lock.downgrade_to_reader(); - case RTM_transacting_writer: -#if __TBB_RW_MUTEX_DELAY_TEST - if(this->state) { // a reader or writer has acquired mutex for real. - __TBB_machine_transaction_conflict_abort(); - } -#endif - s.transaction_state = RTM_transacting_reader; - return true; - default: - __TBB_ASSERT(false, "Invalid state for downgrade"); - return false; - } -} - -//! Try to acquire write lock on the given mutex. -// There may be reader(s) which acquired the spin_rw_mutex, as well as possibly -// transactional reader(s). If this is the case, the acquire will fail, and assigning -// w_flag will kill the transactors. So we only assign w_flag if we have successfully -// acquired the lock. -bool x86_rtm_rw_mutex::internal_try_acquire_writer(x86_rtm_rw_mutex::scoped_lock& s) -{ - internal_acquire_writer(s, /*only_speculate=*/true); - if(s.transaction_state == RTM_transacting_writer) { - return true; - } - __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, "Trying to acquire writer which is already allocated"); - // transacting write acquire failed. try_acquire the real mutex - bool result = s.my_scoped_lock.try_acquire(*this, true); - if(result) { - // only shoot down readers if we're not transacting ourselves - __TBB_ASSERT(!w_flag, "After try_acquire_writer, w_flag already true"); - w_flag = true; - s.transaction_state = RTM_real_writer; - } - return result; -} - -void x86_rtm_rw_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::x86_rtm_rw_mutex"), _T("")); -} - -} // namespace internal -} // namespace interface8 -} // namespace tbb - -#endif /* __TBB_TSX_AVAILABLE */ diff --git a/src/tbb/src/tbb/xbox360-tbb-export.def b/src/tbb/src/tbb/xbox360-tbb-export.def deleted file mode 100644 index fb9cb9c01..000000000 --- a/src/tbb/src/tbb/xbox360-tbb-export.def +++ /dev/null @@ -1,226 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -EXPORTS - -; Assembly-language support that is called directly by clients -;__TBB_machine_cmpswp1 -;__TBB_machine_cmpswp2 -;__TBB_machine_cmpswp4 -;__TBB_machine_cmpswp8 -;__TBB_machine_fetchadd1 -;__TBB_machine_fetchadd2 -;__TBB_machine_fetchadd4 -;__TBB_machine_fetchadd8 -;__TBB_machine_fetchstore1 -;__TBB_machine_fetchstore2 -;__TBB_machine_fetchstore4 -;__TBB_machine_fetchstore8 -;__TBB_machine_store8 -;__TBB_machine_load8 -;__TBB_machine_trylockbyte - -; cache_aligned_allocator.cpp -?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z @1 -?NFS_GetLineSize@internal@tbb@@YAIXZ @2 -?NFS_Free@internal@tbb@@YAXPAX@Z @3 -?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z @4 -?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z @5 -?is_malloc_used_v3@internal@tbb@@YA_NXZ @6 - -; task.cpp v3 -?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBAAAVtask@3@I@Z @7 -?allocate@allocate_child_proxy@internal@tbb@@QBAAAVtask@3@I@Z @8 -?allocate@allocate_continuation_proxy@internal@tbb@@QBAAAVtask@3@I@Z @9 -?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z @10 -?destroy@task@tbb@@QAAXAAV12@@Z @11 -?free@allocate_additional_child_of_proxy@internal@tbb@@QBAXAAVtask@3@@Z @12 -?free@allocate_child_proxy@internal@tbb@@QBAXAAVtask@3@@Z @13 -?free@allocate_continuation_proxy@internal@tbb@@QBAXAAVtask@3@@Z @14 -?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z @15 -?internal_set_ref_count@task@tbb@@AAAXH@Z @16 -?is_owned_by_current_thread@task@tbb@@QBA_NXZ @17 -?note_affinity@task@tbb@@UAAXG@Z @18 -?resize@affinity_partitioner_base_v3@internal@tbb@@AAAXI@Z @19 -?self@task@tbb@@SAAAV12@XZ @20 -?spawn_and_wait_for_all@task@tbb@@QAAXAAVtask_list@2@@Z @21 -?default_num_threads@task_scheduler_init@tbb@@SAHXZ @22 -?initialize@task_scheduler_init@tbb@@QAAXHI@Z @23 -?initialize@task_scheduler_init@tbb@@QAAXH@Z @24 -?terminate@task_scheduler_init@tbb@@QAAXXZ @25 -?observe@task_scheduler_observer_v3@internal@tbb@@QAAX_N@Z @26 - -; exception handling support -?allocate@allocate_root_with_context_proxy@internal@tbb@@QBAAAVtask@3@I@Z @27 -?free@allocate_root_with_context_proxy@internal@tbb@@QBAXAAVtask@3@@Z @28 -?is_group_execution_cancelled@task_group_context@tbb@@QBA_NXZ @29 -?cancel_group_execution@task_group_context@tbb@@QAA_NXZ @30 -?reset@task_group_context@tbb@@QAAXXZ @31 -?init@task_group_context@tbb@@IAAXXZ 
@32 -??1task_group_context@tbb@@QAA@XZ @33 -?name@captured_exception@tbb@@UBAPBDXZ @34 -?what@captured_exception@tbb@@UBAPBDXZ @35 -??1captured_exception@tbb@@UAA@XZ @36 - -; tbb_misc.cpp -?assertion_failure@tbb@@YAXPBDH00@Z @37 -?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ @38 -?handle_perror@internal@tbb@@YAXHPBD@Z @39 -?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z @40 -?runtime_warning@internal@tbb@@YAXPBDZZ @41 - -; tbb_main.cpp -?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z @42 -?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z @43 - -; pipeline.cpp -??0pipeline@tbb@@QAA@XZ @44 -??1filter@tbb@@UAA@XZ @45 -??1pipeline@tbb@@UAA@XZ @46 -??_7pipeline@tbb@@6B@ @47 -?add_filter@pipeline@tbb@@QAAXAAVfilter@2@@Z @48 -?clear@pipeline@tbb@@QAAXXZ @49 -?inject_token@pipeline@tbb@@AAAXAAVtask@2@@Z @50 -?run@pipeline@tbb@@QAAXI@Z @51 - -; queuing_rw_mutex.cpp -?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAAXAAV23@_N@Z @52 -?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @53 -?release@scoped_lock@queuing_rw_mutex@tbb@@QAAXXZ @54 -?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @55 -?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAA_NAAV23@_N@Z @56 - -#if !TBB_NO_LEGACY -; spin_rw_mutex.cpp v2 -?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @57 -?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @58 -?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z @59 -?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z @60 -?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @61 -?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z @62 -?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z @63 -?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @64 -?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z @65 -#endif - -; spin_rw_mutex v3 -?internal_upgrade@spin_rw_mutex_v3@tbb@@AAA_NXZ @66 -?internal_downgrade@spin_rw_mutex_v3@tbb@@AAAXXZ @67 -?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @68 -?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @69 -?internal_release_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @70 -?internal_release_writer@spin_rw_mutex_v3@tbb@@AAAXXZ @71 -?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAA_NXZ @72 -?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @73 - -; spin_mutex.cpp -?internal_acquire@scoped_lock@spin_mutex@tbb@@AAAXAAV23@@Z @74 -?internal_release@scoped_lock@spin_mutex@tbb@@AAAXXZ @75 -?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAA_NAAV23@@Z @76 - -; mutex.cpp -?internal_acquire@scoped_lock@mutex@tbb@@AAAXAAV23@@Z @77 -?internal_release@scoped_lock@mutex@tbb@@AAAXXZ @78 -?internal_try_acquire@scoped_lock@mutex@tbb@@AAA_NAAV23@@Z @79 -?internal_construct@mutex@tbb@@AAAXXZ @80 -?internal_destroy@mutex@tbb@@AAAXXZ @81 - -; recursive_mutex.cpp -?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAAXAAV23@@Z @82 -?internal_release@scoped_lock@recursive_mutex@tbb@@AAAXXZ @83 -?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAA_NAAV23@@Z @84 -?internal_construct@recursive_mutex@tbb@@AAAXXZ @85 -?internal_destroy@recursive_mutex@tbb@@AAAXXZ @86 - -; queuing_mutex.cpp -?acquire@scoped_lock@queuing_mutex@tbb@@QAAXAAV23@@Z @87 -?release@scoped_lock@queuing_mutex@tbb@@QAAXXZ @88 -?try_acquire@scoped_lock@queuing_mutex@tbb@@QAA_NAAV23@@Z @89 - -; concurrent_hash_map.cpp -?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBA_NXZ @90 - -#if !TBB_NO_LEGACY -; concurrent_queue.cpp v2 -?advance@concurrent_queue_iterator_base@internal@tbb@@IAAXXZ 
@91 -?assign@concurrent_queue_iterator_base@internal@tbb@@IAAXABV123@@Z @92 -?internal_size@concurrent_queue_base@internal@tbb@@IBAHXZ @93 -??0concurrent_queue_base@internal@tbb@@IAA@I@Z @94 -??0concurrent_queue_iterator_base@internal@tbb@@IAA@ABVconcurrent_queue_base@12@@Z @95 -??1concurrent_queue_base@internal@tbb@@MAA@XZ @96 -??1concurrent_queue_iterator_base@internal@tbb@@IAA@XZ @97 -?internal_pop@concurrent_queue_base@internal@tbb@@IAAXPAX@Z @98 -?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAA_NPAX@Z @99 -?internal_push@concurrent_queue_base@internal@tbb@@IAAXPBX@Z @100 -?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAA_NPBX@Z @101 -?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAAXHI@Z @102 -#endif - -; concurrent_queue v3 -??1concurrent_queue_iterator_base_v3@internal@tbb@@IAA@XZ @103 -??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@@Z @104 -?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXXZ @105 -?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXABV123@@Z @106 -??0concurrent_queue_base_v3@internal@tbb@@IAA@I@Z @107 -??1concurrent_queue_base_v3@internal@tbb@@MAA@XZ @108 -?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAAXPAX@Z @109 -?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAA_NPAX@Z @110 -?internal_push@concurrent_queue_base_v3@internal@tbb@@IAAXPBX@Z @111 -?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAA_NPBX@Z @112 -?internal_size@concurrent_queue_base_v3@internal@tbb@@IBAHXZ @113 -?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAAXHI@Z @114 -?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAAXXZ @115 -?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBAXXZ @116 - -#if !TBB_NO_LEGACY -; concurrent_vector.cpp v2 -?internal_assign@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @117 -?internal_capacity@concurrent_vector_base@internal@tbb@@IBAIXZ @118 -?internal_clear@concurrent_vector_base@internal@tbb@@IAAXP6AXPAXI@Z_N@Z @119 -?internal_copy@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @120 -?internal_grow_by@concurrent_vector_base@internal@tbb@@IAAIIIP6AXPAXI@Z@Z @121 -?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAAXIIP6AXPAXI@Z@Z @122 -?internal_push_back@concurrent_vector_base@internal@tbb@@IAAPAXIAAI@Z @123 -?internal_reserve@concurrent_vector_base@internal@tbb@@IAAXIII@Z @124 -#endif - -; concurrent_vector v3 -??1concurrent_vector_base_v3@internal@tbb@@IAA@XZ @125 -?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @126 -?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBAIXZ @127 -?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAAIP6AXPAXI@Z@Z @128 -?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @129 -?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z @130 -?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAAXIIP6AXPAXPBXI@Z1@Z @131 -?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAAPAXIAAI@Z @132 -?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAAXIII@Z @133 -?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAAPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z @134 -?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAAXAAV123@@Z @135 -?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBAXI@Z @136 - -; tbb_thread 
-?join@tbb_thread_v3@internal@tbb@@QAAXXZ @137 -?detach@tbb_thread_v3@internal@tbb@@QAAXXZ @138 -?internal_start@tbb_thread_v3@internal@tbb@@AAAXP6AIPAX@Z0@Z @139 -?allocate_closure_v3@internal@tbb@@YAPAXI@Z @140 -?free_closure_v3@internal@tbb@@YAXPAX@Z @141 -?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ @142 -?thread_yield_v3@internal@tbb@@YAXXZ @143 -?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z @144 -?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z @145 -?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ @146 diff --git a/src/tbb/src/tbbbind/CMakeLists.txt b/src/tbb/src/tbbbind/CMakeLists.txt new file mode 100644 index 000000000..b5e9261ab --- /dev/null +++ b/src/tbb/src/tbbbind/CMakeLists.txt @@ -0,0 +1,103 @@ +# Copyright (c) 2020-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set(CMAKE_SKIP_BUILD_RPATH TRUE) + +function(tbbbind_build TBBBIND_NAME REQUIRED_HWLOC_TARGET) + if (NOT TARGET ${REQUIRED_HWLOC_TARGET}) + message(STATUS "HWLOC target ${REQUIRED_HWLOC_TARGET} doesn't exist." + " The ${TBBBIND_NAME} target cannot be created") + return() + endif() + add_library(${TBBBIND_NAME} tbb_bind.cpp) + + if (WIN32) + target_sources(${TBBBIND_NAME} PRIVATE tbb_bind.rc) + endif() + + add_library(TBB::${TBBBIND_NAME} ALIAS ${TBBBIND_NAME}) + + target_compile_definitions(${TBBBIND_NAME} + PUBLIC + $<$<CONFIG:DEBUG>:TBB_USE_DEBUG> + PRIVATE + __TBBBIND_BUILD) + target_include_directories(${TBBBIND_NAME} + PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../../include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}> + ${HWLOC_INCLUDE_DIRS} # pkg-config defined + ) + + target_compile_options(${TBBBIND_NAME} + PRIVATE + ${TBB_CXX_STD_FLAG} # TODO: consider making it PUBLIC. + ${TBB_MMD_FLAG} + ${TBB_DSE_FLAG} + ${TBB_WARNING_LEVEL} + ${TBB_LIB_COMPILE_FLAGS} + ${TBB_COMMON_COMPILE_FLAGS} + ) + + # Avoid use of target_link_libraries here as it changes /DEF option to \DEF on Windows. + set_target_properties(${TBBBIND_NAME} PROPERTIES + DEFINE_SYMBOL "" + ) + + tbb_handle_ipo(${TBBBIND_NAME}) + + if (TBB_DEF_FILE_PREFIX) # If there's no prefix, assume we're using export directives + set_target_properties(${TBBBIND_NAME} PROPERTIES + LINK_FLAGS "${TBB_LINK_DEF_FILE_FLAG}\"${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbbbind.def\"" + LINK_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbbbind.def" + ) + endif() + + # Prefer using target_link_options instead of target_link_libraries to specify link options because + # target_link_libraries may incorrectly handle some options (on Windows, for example). 
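+    # (target_link_options is available only since CMake 3.13, hence the COMMAND check
+    # below; on older CMake the same flags are routed through target_link_libraries instead.)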
+ if (COMMAND target_link_options) + target_link_options(${TBBBIND_NAME} + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) + else() + target_link_libraries(${TBBBIND_NAME} + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) + endif() + + target_link_libraries(${TBBBIND_NAME} + PUBLIC + ${REQUIRED_HWLOC_TARGET} + PRIVATE + ${TBB_LIB_LINK_LIBS} + ${TBB_COMMON_LINK_LIBS} + ) + + tbb_install_target(${TBBBIND_NAME}) + +endfunction() + +if (NOT DEFINED HWLOC_TARGET_EXPLICITLY_DEFINED AND TARGET PkgConfig::HWLOC) + message(STATUS "The ${TBBBIND_LIBRARY_NAME} target will be configured using the HWLOC ${HWLOC_VERSION}") + tbbbind_build(${TBBBIND_LIBRARY_NAME} PkgConfig::HWLOC) +else() + tbbbind_build(tbbbind HWLOC::hwloc_1_11) + tbbbind_build(tbbbind_2_0 HWLOC::hwloc_2 ) + tbbbind_build(tbbbind_2_5 HWLOC::hwloc_2_5 ) +endif() + diff --git a/src/tbb/src/tbbbind/def/lin32-tbbbind.def b/src/tbb/src/tbbbind/def/lin32-tbbbind.def new file mode 100644 index 000000000..221dc8467 --- /dev/null +++ b/src/tbb/src/tbbbind/def/lin32-tbbbind.def @@ -0,0 +1,26 @@ +/* + Copyright (c) 2019-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +{ +global: +__TBB_internal_initialize_system_topology; +__TBB_internal_apply_affinity; +__TBB_internal_restore_affinity; +__TBB_internal_allocate_binding_handler; +__TBB_internal_deallocate_binding_handler; +__TBB_internal_get_default_concurrency; +__TBB_internal_destroy_system_topology; +}; diff --git a/src/tbb/src/tbbbind/def/lin64-tbbbind.def b/src/tbb/src/tbbbind/def/lin64-tbbbind.def new file mode 100644 index 000000000..221dc8467 --- /dev/null +++ b/src/tbb/src/tbbbind/def/lin64-tbbbind.def @@ -0,0 +1,26 @@ +/* + Copyright (c) 2019-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +{ +global: +__TBB_internal_initialize_system_topology; +__TBB_internal_apply_affinity; +__TBB_internal_restore_affinity; +__TBB_internal_allocate_binding_handler; +__TBB_internal_deallocate_binding_handler; +__TBB_internal_get_default_concurrency; +__TBB_internal_destroy_system_topology; +}; diff --git a/src/tbb/src/tbbbind/def/mac64-tbbbind.def b/src/tbb/src/tbbbind/def/mac64-tbbbind.def new file mode 100755 index 000000000..be72bcf9a --- /dev/null +++ b/src/tbb/src/tbbbind/def/mac64-tbbbind.def @@ -0,0 +1,18 @@ +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +___TBB_internal_initialize_system_topology +___TBB_internal_get_default_concurrency +___TBB_internal_destroy_system_topology + diff --git a/src/tbb/src/tbbbind/def/win32-tbbbind.def b/src/tbb/src/tbbbind/def/win32-tbbbind.def new file mode 100644 index 000000000..be4f714ed --- /dev/null +++ b/src/tbb/src/tbbbind/def/win32-tbbbind.def @@ -0,0 +1,23 @@ +; Copyright (c) 2019-2021 Intel Corporation +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. + +EXPORTS + +__TBB_internal_initialize_system_topology +__TBB_internal_apply_affinity +__TBB_internal_restore_affinity +__TBB_internal_allocate_binding_handler +__TBB_internal_deallocate_binding_handler +__TBB_internal_get_default_concurrency +__TBB_internal_destroy_system_topology diff --git a/src/tbb/src/tbbbind/def/win64-tbbbind.def b/src/tbb/src/tbbbind/def/win64-tbbbind.def new file mode 100644 index 000000000..be4f714ed --- /dev/null +++ b/src/tbb/src/tbbbind/def/win64-tbbbind.def @@ -0,0 +1,23 @@ +; Copyright (c) 2019-2021 Intel Corporation +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. + +EXPORTS + +__TBB_internal_initialize_system_topology +__TBB_internal_apply_affinity +__TBB_internal_restore_affinity +__TBB_internal_allocate_binding_handler +__TBB_internal_deallocate_binding_handler +__TBB_internal_get_default_concurrency +__TBB_internal_destroy_system_topology diff --git a/src/tbb/src/tbbbind/tbb_bind.cpp b/src/tbb/src/tbbbind/tbb_bind.cpp new file mode 100644 index 000000000..a15a37ac9 --- /dev/null +++ b/src/tbb/src/tbbbind/tbb_bind.cpp @@ -0,0 +1,541 @@ +/* + Copyright (c) 2019-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#include <vector>
+#include <mutex>
+
+#include "../tbb/assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here.
+#include "oneapi/tbb/detail/_assert.h"
+#include "oneapi/tbb/detail/_config.h"
+
+#if _MSC_VER && !__INTEL_COMPILER && !__clang__
+// #pragma warning( push )
+// #pragma warning( disable : 4100 )
+#elif _MSC_VER && __clang__
+// #pragma GCC diagnostic push
+// #pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+#include <hwloc.h>
+#if _MSC_VER && !__INTEL_COMPILER && !__clang__
+// #pragma warning( pop )
+#elif _MSC_VER && __clang__
+// #pragma GCC diagnostic pop
+#endif
+
+#define __TBBBIND_HWLOC_HYBRID_CPUS_INTERFACES_PRESENT (HWLOC_API_VERSION >= 0x20400)
+#define __TBBBIND_HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING_PRESENT (HWLOC_API_VERSION >= 0x20500)
+
+// Most hwloc calls return a negative exit code on error.
+// This macro tracks error codes that are returned from the hwloc interfaces.
+#define assertion_hwloc_wrapper(command, ...) \
+    __TBB_ASSERT_EX( (command(__VA_ARGS__)) >= 0, "Error occurred during call to hwloc API.");
+
+namespace tbb {
+namespace detail {
+namespace r1 {
+
+//------------------------------------------------------------------------
+// Information about the machine's hardware that TBB happens to be running on
+//------------------------------------------------------------------------
+class system_topology {
+    friend class binding_handler;
+
+    // Common topology members
+    hwloc_topology_t topology{nullptr};
+    hwloc_cpuset_t process_cpu_affinity_mask{nullptr};
+    hwloc_nodeset_t process_node_affinity_mask{nullptr};
+    std::size_t number_of_processors_groups{1};
+
+    // NUMA API related topology members
+    std::vector<hwloc_cpuset_t> numa_affinity_masks_list{};
+    std::vector<int> numa_indexes_list{};
+    int numa_nodes_count{0};
+
+    // Hybrid CPUs API related topology members
+    std::vector<hwloc_cpuset_t> core_types_affinity_masks_list{};
+    std::vector<int> core_types_indexes_list{};
+
+    enum init_stages { uninitialized,
+                       started,
+                       topology_allocated,
+                       topology_loaded,
+                       topology_parsed } initialization_state;
+
+    // Binding threads that live in other Windows processor groups
+    // is allowed only if the machine topology contains several Windows processor groups
+    // and the process affinity mask wasn't limited manually (an affinity mask cannot violate
+    // processor group boundaries).
+    bool intergroup_binding_allowed(std::size_t groups_num) { return groups_num > 1; }
+
+private:
+    void topology_initialization(std::size_t groups_num) {
+        initialization_state = started;
+
+        // Parse topology
+        if ( hwloc_topology_init( &topology ) == 0 ) {
+            initialization_state = topology_allocated;
+#if __TBBBIND_HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING_PRESENT
+            unsigned long flags = 0;
+            if (groups_num > 1) {
+                // The HWLOC x86 backend might interfere with the process affinity mask on
+                // Windows systems with multiple processor groups.
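+                // (HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING asks hwloc not to temporarily
+                // re-bind threads while it discovers the topology.)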
+                flags = HWLOC_TOPOLOGY_FLAG_DONT_CHANGE_BINDING;
+            } else {
+                flags = HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM | HWLOC_TOPOLOGY_FLAG_RESTRICT_TO_CPUBINDING;
+            }
+            if (hwloc_topology_set_flags(topology, flags) != 0) {
+                return;
+            }
+#endif
+            if ( hwloc_topology_load( topology ) == 0 ) {
+                initialization_state = topology_loaded;
+            }
+        }
+        if ( initialization_state != topology_loaded )
+            return;
+
+#if __TBB_CPUBIND_PRESENT
+        // Get the process affinity mask
+        if ( intergroup_binding_allowed(groups_num) ) {
+            process_cpu_affinity_mask = hwloc_bitmap_dup(hwloc_topology_get_complete_cpuset (topology));
+            process_node_affinity_mask = hwloc_bitmap_dup(hwloc_topology_get_complete_nodeset(topology));
+        } else {
+            process_cpu_affinity_mask = hwloc_bitmap_alloc();
+            process_node_affinity_mask = hwloc_bitmap_alloc();
+
+            assertion_hwloc_wrapper(hwloc_get_cpubind, topology, process_cpu_affinity_mask, 0);
+            hwloc_cpuset_to_nodeset(topology, process_cpu_affinity_mask, process_node_affinity_mask);
+        }
+#else
+        process_cpu_affinity_mask = hwloc_bitmap_dup(hwloc_topology_get_complete_cpuset (topology));
+        process_node_affinity_mask = hwloc_bitmap_dup(hwloc_topology_get_complete_nodeset(topology));
+#endif
+
+        number_of_processors_groups = groups_num;
+    }
+
+    void numa_topology_parsing() {
+        // Fill parameters with stubs if topology parsing is broken.
+        if ( initialization_state != topology_loaded ) {
+            numa_nodes_count = 1;
+            numa_indexes_list.push_back(-1);
+            return;
+        }
+
+        // If the system contains no NUMA nodes, HWLOC 1.11 returns an infinitely filled bitmap.
+        // hwloc_bitmap_weight() returns a negative value for such bitmaps, so we use this check
+        // to change the way the topology is initialized.
+        numa_nodes_count = hwloc_bitmap_weight(process_node_affinity_mask);
+        if (numa_nodes_count <= 0) {
+            // numa_nodes_count may be zero if the process affinity mask is empty too (an invalid case),
+            // or negative if some internal HWLOC error occurred.
+            // So we place -1 as the index in this case.
+            numa_indexes_list.push_back(numa_nodes_count == 0 ? -1 : 0);
+            numa_nodes_count = 1;
+
+            numa_affinity_masks_list.push_back(hwloc_bitmap_dup(process_cpu_affinity_mask));
+        } else {
+            // Get NUMA logical indexes list
+            unsigned counter = 0;
+            int i = 0;
+            int max_numa_index = -1;
+            numa_indexes_list.resize(numa_nodes_count);
+            hwloc_obj_t node_buffer;
+            hwloc_bitmap_foreach_begin(i, process_node_affinity_mask) {
+                node_buffer = hwloc_get_numanode_obj_by_os_index(topology, i);
+                numa_indexes_list[counter] = static_cast<int>(node_buffer->logical_index);
+
+                if ( numa_indexes_list[counter] > max_numa_index ) {
+                    max_numa_index = numa_indexes_list[counter];
+                }
+
+                counter++;
+            } hwloc_bitmap_foreach_end();
+            __TBB_ASSERT(max_numa_index >= 0, "Maximal NUMA index must not be negative");
+
+            // Fill concurrency and affinity masks lists
+            numa_affinity_masks_list.resize(max_numa_index + 1);
+            int index = 0;
+            hwloc_bitmap_foreach_begin(i, process_node_affinity_mask) {
+                node_buffer = hwloc_get_numanode_obj_by_os_index(topology, i);
+                index = static_cast<int>(node_buffer->logical_index);
+
+                hwloc_cpuset_t& current_mask = numa_affinity_masks_list[index];
+                current_mask = hwloc_bitmap_dup(node_buffer->cpuset);
+
+                hwloc_bitmap_and(current_mask, current_mask, process_cpu_affinity_mask);
+                __TBB_ASSERT(!hwloc_bitmap_iszero(current_mask), "hwloc detected unavailable NUMA node");
+            } hwloc_bitmap_foreach_end();
+        }
+    }
+
+    void core_types_topology_parsing() {
+        // Fill parameters with stubs if topology parsing is broken.
+        if ( initialization_state != topology_loaded ) {
+            core_types_indexes_list.push_back(-1);
+            return;
+        }
+#if __TBBBIND_HWLOC_HYBRID_CPUS_INTERFACES_PRESENT
+        __TBB_ASSERT(hwloc_get_api_version() >= 0x20400, "Hybrid CPU support interfaces require HWLOC >= 2.4");
+        // Parsing the hybrid CPU topology
+        int core_types_number = hwloc_cpukinds_get_nr(topology, 0);
+        bool core_types_parsing_broken = core_types_number <= 0;
+        if (!core_types_parsing_broken) {
+            core_types_affinity_masks_list.resize(core_types_number);
+            int efficiency{-1};
+
+            for (int core_type = 0; core_type < core_types_number; ++core_type) {
+                hwloc_cpuset_t& current_mask = core_types_affinity_masks_list[core_type];
+                current_mask = hwloc_bitmap_alloc();
+
+                if (!hwloc_cpukinds_get_info(topology, core_type, current_mask, &efficiency, nullptr, nullptr, 0)
+                    && efficiency >= 0
+                ) {
+                    hwloc_bitmap_and(current_mask, current_mask, process_cpu_affinity_mask);
+
+                    if (hwloc_bitmap_weight(current_mask) > 0) {
+                        core_types_indexes_list.push_back(core_type);
+                    }
+                    __TBB_ASSERT(hwloc_bitmap_weight(current_mask) >= 0, "Infinitely filled core type mask");
+                } else {
+                    core_types_parsing_broken = true;
+                    break;
+                }
+            }
+        }
+#else /*!__TBBBIND_HWLOC_HYBRID_CPUS_INTERFACES_PRESENT*/
+        bool core_types_parsing_broken{true};
+#endif /*__TBBBIND_HWLOC_HYBRID_CPUS_INTERFACES_PRESENT*/
+
+        if (core_types_parsing_broken) {
+            for (auto& core_type_mask : core_types_affinity_masks_list) {
+                hwloc_bitmap_free(core_type_mask);
+            }
+            core_types_affinity_masks_list.resize(1);
+            core_types_indexes_list.resize(1);
+
+            core_types_affinity_masks_list[0] = hwloc_bitmap_dup(process_cpu_affinity_mask);
+            core_types_indexes_list[0] = -1;
+        }
+    }
+
+    void enforce_hwloc_2_5_runtime_linkage() {
+        // Without the call to this function, HWLOC 2.4 could be loaded successfully while tbbbind_2_5 is loading.
+        // That is possible because tbbbind_2_5 doesn't use any of the new entry points introduced in HWLOC 2.5.
+        // But tbbbind_2_5 is compiled against the HWLOC 2.5 headers, so such a situation would require binary
+        // forward compatibility, which the HWLOC library does not guarantee. To enforce that tbbbind_2_5 links
+        // only with HWLOC >= 2.5, this function calls an interface that is available only in HWLOC 2.5.
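+        // (hwloc_get_obj_with_same_locality(), called below, is one such 2.5-only entry point.
+        // As a sketch of the intent, a purely runtime guard would look like:
+        //     if (hwloc_get_api_version() < 0x20500) { /* refuse this HWLOC build */ }
+        // but resolving a 2.5-only symbol at load time achieves the same effect earlier.)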
+#if HWLOC_API_VERSION >= 0x20500
+        auto some_core = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_CORE, nullptr);
+        hwloc_get_obj_with_same_locality(topology, some_core, HWLOC_OBJ_CORE, nullptr, nullptr, 0);
+#endif
+    }
+
+
+    void initialize( std::size_t groups_num ) {
+        if ( initialization_state != uninitialized )
+            return;
+
+        topology_initialization(groups_num);
+        numa_topology_parsing();
+        core_types_topology_parsing();
+
+        enforce_hwloc_2_5_runtime_linkage();
+
+        if (initialization_state == topology_loaded)
+            initialization_state = topology_parsed;
+    }
+
+    static system_topology* instance_ptr;
+public:
+    typedef hwloc_cpuset_t affinity_mask;
+    typedef hwloc_const_cpuset_t const_affinity_mask;
+
+    bool is_topology_parsed() { return initialization_state == topology_parsed; }
+
+    static void construct( std::size_t groups_num ) {
+        if (instance_ptr == nullptr) {
+            instance_ptr = new system_topology();
+            instance_ptr->initialize(groups_num);
+        }
+    }
+
+    static system_topology& instance() {
+        __TBB_ASSERT(instance_ptr != nullptr, "Getting instance of non-constructed topology");
+        return *instance_ptr;
+    }
+
+    static void destroy() {
+        __TBB_ASSERT(instance_ptr != nullptr, "Destroying non-constructed topology");
+        delete instance_ptr;
+    }
+
+    ~system_topology() {
+        if ( is_topology_parsed() ) {
+            for (auto& numa_node_mask : numa_affinity_masks_list) {
+                hwloc_bitmap_free(numa_node_mask);
+            }
+
+            for (auto& core_type_mask : core_types_affinity_masks_list) {
+                hwloc_bitmap_free(core_type_mask);
+            }
+
+            hwloc_bitmap_free(process_node_affinity_mask);
+            hwloc_bitmap_free(process_cpu_affinity_mask);
+        }
+
+        if ( initialization_state >= topology_allocated ) {
+            hwloc_topology_destroy(topology);
+        }
+
+        initialization_state = uninitialized;
+    }
+
+    void fill_topology_information(
+        int& _numa_nodes_count, int*& _numa_indexes_list,
+        int& _core_types_count, int*& _core_types_indexes_list
+    ) {
+        __TBB_ASSERT(is_topology_parsed(), "Trying to get access to uninitialized system_topology");
+        _numa_nodes_count = numa_nodes_count;
+        _numa_indexes_list = numa_indexes_list.data();
+
+        _core_types_count = (int)core_types_indexes_list.size();
+        _core_types_indexes_list = core_types_indexes_list.data();
+    }
+
+    void fill_constraints_affinity_mask(affinity_mask input_mask, int numa_node_index, int core_type_index, int max_threads_per_core) {
+        __TBB_ASSERT(is_topology_parsed(), "Trying to get access to uninitialized system_topology");
+        __TBB_ASSERT(numa_node_index < (int)numa_affinity_masks_list.size(), "Wrong NUMA node id");
+        __TBB_ASSERT(core_type_index < (int)core_types_affinity_masks_list.size(), "Wrong core type id");
+        __TBB_ASSERT(max_threads_per_core == -1 || max_threads_per_core > 0, "Wrong max_threads_per_core");
+
+        hwloc_cpuset_t constraints_mask = hwloc_bitmap_alloc();
+        hwloc_cpuset_t core_mask = hwloc_bitmap_alloc();
+
+        hwloc_bitmap_copy(constraints_mask, process_cpu_affinity_mask);
+        if (numa_node_index >= 0) {
+            hwloc_bitmap_and(constraints_mask, constraints_mask, numa_affinity_masks_list[numa_node_index]);
+        }
+        if (core_type_index >= 0) {
+            hwloc_bitmap_and(constraints_mask, constraints_mask, core_types_affinity_masks_list[core_type_index]);
+        }
+        if (max_threads_per_core > 0) {
+            // clear input mask
+            hwloc_bitmap_zero(input_mask);
+
+            hwloc_obj_t current_core = nullptr;
+            while ((current_core = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_CORE, current_core)) != nullptr) {
+                hwloc_bitmap_and(core_mask, constraints_mask, current_core->cpuset);
+
+                // fit the core mask to the required number of bits
+                int current_threads_per_core = 0;
+                for (int id = hwloc_bitmap_first(core_mask); id != -1; id = hwloc_bitmap_next(core_mask, id)) {
+                    if (++current_threads_per_core > max_threads_per_core) {
+                        hwloc_bitmap_clr(core_mask, id);
+                    }
+                }
+
+                hwloc_bitmap_or(input_mask, input_mask, core_mask);
+            }
+        } else {
+            hwloc_bitmap_copy(input_mask, constraints_mask);
+        }
+
+        hwloc_bitmap_free(core_mask);
+        hwloc_bitmap_free(constraints_mask);
+    }
+
+    void fit_num_threads_per_core(affinity_mask result_mask, affinity_mask current_mask, affinity_mask constraints_mask) {
+        hwloc_bitmap_zero(result_mask);
+        hwloc_obj_t current_core = nullptr;
+        while ((current_core = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_CORE, current_core)) != nullptr) {
+            if (hwloc_bitmap_intersects(current_mask, current_core->cpuset)) {
+                hwloc_bitmap_or(result_mask, result_mask, current_core->cpuset);
+            }
+        }
+        hwloc_bitmap_and(result_mask, result_mask, constraints_mask);
+    }
+
+    int get_default_concurrency(int numa_node_index, int core_type_index, int max_threads_per_core) {
+        __TBB_ASSERT(is_topology_parsed(), "Trying to get access to uninitialized system_topology");
+
+        hwloc_cpuset_t constraints_mask = hwloc_bitmap_alloc();
+        fill_constraints_affinity_mask(constraints_mask, numa_node_index, core_type_index, max_threads_per_core);
+
+        int default_concurrency = hwloc_bitmap_weight(constraints_mask);
+        hwloc_bitmap_free(constraints_mask);
+        return default_concurrency;
+    }
+
+    affinity_mask allocate_process_affinity_mask() {
+        __TBB_ASSERT(is_topology_parsed(), "Trying to get access to uninitialized system_topology");
+        return hwloc_bitmap_dup(process_cpu_affinity_mask);
+    }
+
+    void free_affinity_mask( affinity_mask mask_to_free ) {
+        hwloc_bitmap_free(mask_to_free); // If bitmap is nullptr, no operation is performed.
+    }
+
+    void store_current_affinity_mask( affinity_mask current_mask ) {
+        assertion_hwloc_wrapper(hwloc_get_cpubind, topology, current_mask, HWLOC_CPUBIND_THREAD);
+
+        hwloc_bitmap_and(current_mask, current_mask, process_cpu_affinity_mask);
+        __TBB_ASSERT(!hwloc_bitmap_iszero(current_mask),
+            "Current affinity mask must intersect with the process affinity mask");
+    }
+
+    void set_affinity_mask( const_affinity_mask mask ) {
+        if (hwloc_bitmap_weight(mask) > 0) {
+            assertion_hwloc_wrapper(hwloc_set_cpubind, topology, mask, HWLOC_CPUBIND_THREAD);
+        }
+    }
+};
+
+system_topology* system_topology::instance_ptr{nullptr};
+
+class binding_handler {
+    // The following vector saves each thread's affinity mask on scheduler entry so that it can be
+    // restored for that thread on scheduler exit.
+    typedef std::vector<system_topology::affinity_mask> affinity_masks_container;
+    affinity_masks_container affinity_backup;
+    system_topology::affinity_mask handler_affinity_mask;
+
+#ifdef _WIN32
+    affinity_masks_container affinity_buffer;
+    int my_numa_node_id;
+    int my_core_type_id;
+    int my_max_threads_per_core;
+#endif
+
+public:
+    binding_handler( std::size_t size, int numa_node_id, int core_type_id, int max_threads_per_core )
+        : affinity_backup(size)
+#ifdef _WIN32
+        , affinity_buffer(size)
+        , my_numa_node_id(numa_node_id)
+        , my_core_type_id(core_type_id)
+        , my_max_threads_per_core(max_threads_per_core)
+#endif
+    {
+        for (std::size_t i = 0; i < size; ++i) {
+            affinity_backup[i] = system_topology::instance().allocate_process_affinity_mask();
+#ifdef _WIN32
+            affinity_buffer[i] = system_topology::instance().allocate_process_affinity_mask();
+#endif
+        }
+        handler_affinity_mask = system_topology::instance().allocate_process_affinity_mask();
+        system_topology::instance().fill_constraints_affinity_mask
+            (handler_affinity_mask, numa_node_id, core_type_id, max_threads_per_core);
+    }
+
+    ~binding_handler() {
+        for (std::size_t i = 0; i < affinity_backup.size(); ++i) {
+            system_topology::instance().free_affinity_mask(affinity_backup[i]);
+#ifdef _WIN32
+            system_topology::instance().free_affinity_mask(affinity_buffer[i]);
+#endif
+        }
+        system_topology::instance().free_affinity_mask(handler_affinity_mask);
+    }
+
+    void apply_affinity( unsigned slot_num ) {
+        auto& topology = system_topology::instance();
+        __TBB_ASSERT(slot_num < affinity_backup.size(),
+            "The slot number is greater than the number of slots in the arena");
+        __TBB_ASSERT(topology.is_topology_parsed(),
+            "Trying to get access to uninitialized system_topology");
+
+        topology.store_current_affinity_mask(affinity_backup[slot_num]);
+
+#ifdef _WIN32
+        // TBBBind supports only systems where NUMA nodes and core types do not cross the border
+        // between several processor groups. So if a certain NUMA node or core type constraint is
+        // specified, the constraints affinity mask will not cross the processor groups' border.
+
+        // But if we have a constraint based only on the max_threads_per_core setting, the
+        // constraints affinity mask may cross the border between several processor groups
+        // on machines with more than 64 hardware threads. That is why we need to use the special
+        // function, which regulates the number of threads in the current affinity mask.
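+        // (The special path below is therefore taken only on multi-group systems where
+        // max_threads_per_core is constrained and no effective NUMA or core type constraint
+        // already confines the mask to a single group.)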
+ if (topology.number_of_processors_groups > 1 && my_max_threads_per_core != -1 && + (my_numa_node_id == -1 || topology.numa_indexes_list.size() == 1) && + (my_core_type_id == -1 || topology.core_types_indexes_list.size() == 1) + ) { + topology.fit_num_threads_per_core(affinity_buffer[slot_num], affinity_backup[slot_num], handler_affinity_mask); + topology.set_affinity_mask(affinity_buffer[slot_num]); + return; + } +#endif + topology.set_affinity_mask(handler_affinity_mask); + } + + void restore_previous_affinity_mask( unsigned slot_num ) { + auto& topology = system_topology::instance(); + __TBB_ASSERT(topology.is_topology_parsed(), + "Trying to get access to uninitialized system_topology"); + topology.set_affinity_mask(affinity_backup[slot_num]); + }; + +}; + +extern "C" { // exported to TBB interfaces + +TBBBIND_EXPORT void __TBB_internal_initialize_system_topology( + std::size_t groups_num, + int& numa_nodes_count, int*& numa_indexes_list, + int& core_types_count, int*& core_types_indexes_list +) { + system_topology::construct(groups_num); + system_topology::instance().fill_topology_information( + numa_nodes_count, numa_indexes_list, + core_types_count, core_types_indexes_list + ); +} + +TBBBIND_EXPORT binding_handler* __TBB_internal_allocate_binding_handler(int number_of_slots, int numa_id, int core_type_id, int max_threads_per_core) { + __TBB_ASSERT(number_of_slots > 0, "Trying to create numa handler for 0 threads."); + return new binding_handler(number_of_slots, numa_id, core_type_id, max_threads_per_core); +} + +TBBBIND_EXPORT void __TBB_internal_deallocate_binding_handler(binding_handler* handler_ptr) { + __TBB_ASSERT(handler_ptr != nullptr, "Trying to deallocate nullptr pointer."); + delete handler_ptr; +} + +TBBBIND_EXPORT void __TBB_internal_apply_affinity(binding_handler* handler_ptr, int slot_num) { + __TBB_ASSERT(handler_ptr != nullptr, "Trying to get access to uninitialized metadata."); + handler_ptr->apply_affinity(slot_num); +} + +TBBBIND_EXPORT void __TBB_internal_restore_affinity(binding_handler* handler_ptr, int slot_num) { + __TBB_ASSERT(handler_ptr != nullptr, "Trying to get access to uninitialized metadata."); + handler_ptr->restore_previous_affinity_mask(slot_num); +} + +TBBBIND_EXPORT int __TBB_internal_get_default_concurrency(int numa_id, int core_type_id, int max_threads_per_core) { + return system_topology::instance().get_default_concurrency(numa_id, core_type_id, max_threads_per_core); +} + +void __TBB_internal_destroy_system_topology() { + return system_topology::destroy(); +} + +} // extern "C" + +} // namespace r1 +} // namespace detail +} // namespace tbb + +#undef assertion_hwloc_wrapper diff --git a/src/tbb/src/tbbbind/tbb_bind.rc b/src/tbb/src/tbbbind/tbb_bind.rc new file mode 100644 index 000000000..2d2b806e2 --- /dev/null +++ b/src/tbb/src/tbbbind/tbb_bind.rc @@ -0,0 +1,74 @@ +// Copyright (c) 2005-2024 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
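+
+// This resource script only embeds version metadata (VERSIONINFO) into the tbbbind DLL.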
+ +///////////////////////////////////////////////////////////////////////////// +// +// Includes +// +#include <winresrc.h> +#include "../../include/oneapi/tbb/version.h" + +///////////////////////////////////////////////////////////////////////////// +// Neutral resources + +#ifdef _WIN32 +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +#pragma code_page(1252) +#endif //_WIN32 + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// +#define TBB_VERNUMBERS TBB_VERSION_MAJOR,TBB_VERSION_MINOR,TBB_VERSION_PATCH +#define TBB_VERSION TBB_VERSION_STRING + +VS_VERSION_INFO VERSIONINFO + FILEVERSION TBB_VERNUMBERS + PRODUCTVERSION TBB_VERNUMBERS + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004b0" + BEGIN + VALUE "CompanyName", "Intel Corporation\0" + VALUE "FileDescription", "oneAPI Threading Building Blocks (oneTBB) library\0" + VALUE "FileVersion", TBB_VERSION "\0" + VALUE "LegalCopyright", "Copyright 2005-2024 Intel Corporation. All Rights Reserved.\0" + VALUE "LegalTrademarks", "\0" +#ifndef TBB_USE_DEBUG + VALUE "OriginalFilename", "tbbbind.dll\0" +#else + VALUE "OriginalFilename", "tbbbind_debug.dll\0" +#endif + VALUE "ProductName", "oneAPI Threading Building Blocks (oneTBB)\0" + VALUE "ProductVersion", TBB_VERSION "\0" + VALUE "PrivateBuild", "\0" + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0, 1200 + END +END diff --git a/src/tbb/src/tbbmalloc/CMakeLists.txt b/src/tbb/src/tbbmalloc/CMakeLists.txt new file mode 100644 index 000000000..26c1891d7 --- /dev/null +++ b/src/tbb/src/tbbmalloc/CMakeLists.txt @@ -0,0 +1,121 @@ +# Copyright (c) 2020-2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
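+
+# tbbmalloc is built from the allocator sources listed below plus ../tbb/itt_notify.cpp,
+# which is shared with the main tbb runtime target.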
+ +add_library(tbbmalloc + backend.cpp + backref.cpp + frontend.cpp + large_objects.cpp + tbbmalloc.cpp + ../tbb/itt_notify.cpp) + +if (WIN32) + target_sources(tbbmalloc PRIVATE tbbmalloc.rc) +endif() + +add_library(TBB::tbbmalloc ALIAS tbbmalloc) + +target_compile_definitions(tbbmalloc + PUBLIC + $<$<CONFIG:DEBUG>:TBB_USE_DEBUG> + PRIVATE + __TBBMALLOC_BUILD + $<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:__TBB_DYNAMIC_LOAD_ENABLED=0> + $<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:__TBB_SOURCE_DIRECTLY_INCLUDED=1>) + +if (NOT ("${CMAKE_SYSTEM_PROCESSOR}" MATCHES "(armv7-a|aarch64|mips|arm64|riscv)" OR + "${CMAKE_OSX_ARCHITECTURES}" MATCHES "arm64" OR + WINDOWS_STORE OR + TBB_WINDOWS_DRIVER OR + TBB_SANITIZE MATCHES "thread")) + target_compile_definitions(tbbmalloc PRIVATE __TBB_USE_ITT_NOTIFY) +endif() + +target_include_directories(tbbmalloc + PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../../include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>) + +# TODO: fix warnings +if (MSVC) + # signed unsigned mismatch, declaration hides class member + set(TBB_WARNING_SUPPRESS ${TBB_WARNING_SUPPRESS} /wd4267 /wd4244 /wd4245 /wd4458) +endif() + +target_compile_options(tbbmalloc + PRIVATE + ${TBB_CXX_STD_FLAG} # TODO: consider making it PUBLIC. + ${TBB_MMD_FLAG} + ${TBB_DSE_FLAG} + ${TBB_WARNING_LEVEL} + ${TBB_WARNING_SUPPRESS} + ${TBB_LIB_COMPILE_FLAGS} + ${TBBMALLOC_LIB_COMPILE_FLAGS} + ${TBB_COMMON_COMPILE_FLAGS} +) + +enable_language(C) + +# Avoid use of target_link_libraries here as it changes /DEF option to \DEF on Windows. +set_target_properties(tbbmalloc PROPERTIES + DEFINE_SYMBOL "" + LINKER_LANGUAGE C +) + +tbb_handle_ipo(tbbmalloc) + +if (TBB_DEF_FILE_PREFIX) # If there's no prefix, assume we're using export directives + set_target_properties(tbbmalloc PROPERTIES + LINK_FLAGS "${TBB_LINK_DEF_FILE_FLAG}\"${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbbmalloc.def\"" + LINK_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-tbbmalloc.def" + ) +endif() + +set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "") + +# Prefer using target_link_options instead of target_link_libraries to specify link options because +# target_link_libraries may incorrectly handle some options (on Windows, for example). +if (COMMAND target_link_options) + target_link_options(tbbmalloc + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) +else() + target_link_libraries(tbbmalloc + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) +endif() + +target_link_libraries(tbbmalloc + PRIVATE + Threads::Threads + ${TBB_LIB_LINK_LIBS} + ${TBB_COMMON_LINK_LIBS} +) + +if(TBB_BUILD_APPLE_FRAMEWORKS) + set_target_properties(tbbmalloc PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION ${TBBMALLOC_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION} + XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER com.intel.tbbmalloc + MACOSX_FRAMEWORK_IDENTIFIER com.intel.tbbmalloc + MACOSX_FRAMEWORK_BUNDLE_VERSION ${TBBMALLOC_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION} + MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${TBBMALLOC_BINARY_VERSION} + ) +endif() + +tbb_install_target(tbbmalloc) diff --git a/src/tbb/src/tbbmalloc/Customize.h b/src/tbb/src/tbbmalloc/Customize.h index 39df40ce3..fdb616432 100644 --- a/src/tbb/src/tbbmalloc/Customize.h +++ b/src/tbb/src/tbbmalloc/Customize.h @@ -1,148 +1,76 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _TBB_malloc_Customize_H_ #define _TBB_malloc_Customize_H_ // customizing MALLOC_ASSERT macro -#include "tbb/tbb_stddef.h" #define MALLOC_ASSERT(assertion, message) __TBB_ASSERT(assertion, message) +#define MALLOC_ASSERT_EX(assertion, message) __TBB_ASSERT_EX(assertion, message) #ifndef MALLOC_DEBUG #define MALLOC_DEBUG TBB_USE_DEBUG #endif -#include "tbb/tbb_machine.h" +#include "oneapi/tbb/detail/_utils.h" +#include "oneapi/tbb/detail/_assert.h" + +#include "Synchronize.h" -#if DO_ITT_NOTIFY -#include "tbb/itt_notify.h" +#if __TBB_USE_ITT_NOTIFY +#include "../tbb/itt_notify.h" #define MALLOC_ITT_SYNC_PREPARE(pointer) ITT_NOTIFY(sync_prepare, (pointer)) #define MALLOC_ITT_SYNC_ACQUIRED(pointer) ITT_NOTIFY(sync_acquired, (pointer)) #define MALLOC_ITT_SYNC_RELEASING(pointer) ITT_NOTIFY(sync_releasing, (pointer)) #define MALLOC_ITT_SYNC_CANCEL(pointer) ITT_NOTIFY(sync_cancel, (pointer)) #define MALLOC_ITT_FINI_ITTLIB() ITT_FINI_ITTLIB() +#define MALLOC_ITT_RELEASE_RESOURCES() ITT_RELEASE_RESOURCES() #else #define MALLOC_ITT_SYNC_PREPARE(pointer) ((void)0) #define MALLOC_ITT_SYNC_ACQUIRED(pointer) ((void)0) #define MALLOC_ITT_SYNC_RELEASING(pointer) ((void)0) #define MALLOC_ITT_SYNC_CANCEL(pointer) ((void)0) #define MALLOC_ITT_FINI_ITTLIB() ((void)0) +#define MALLOC_ITT_RELEASE_RESOURCES() ((void)0) #endif -//! Stripped down version of spin_mutex. -/** Instances of MallocMutex must be declared in memory that is zero-initialized. - There are no constructors. This is a feature that lets it be - used in situations where the mutex might be used while file-scope constructors - are running. - - There are no methods "acquire" or "release". The scoped_lock must be used - in a strict block-scoped locking pattern. 
Omitting these methods permitted - further simplification. */ -class MallocMutex : tbb::internal::no_copy { - __TBB_atomic_flag flag; - -public: - class scoped_lock : tbb::internal::no_copy { - MallocMutex& mutex; - bool taken; - public: - scoped_lock( MallocMutex& m ) : mutex(m), taken(true) { __TBB_LockByte(m.flag); } - scoped_lock( MallocMutex& m, bool block, bool *locked ) : mutex(m), taken(false) { - if (block) { - __TBB_LockByte(m.flag); - taken = true; - } else { - taken = __TBB_TryLockByte(m.flag); - } - if (locked) *locked = taken; - } - ~scoped_lock() { - if (taken) __TBB_UnlockByte(mutex.flag); - } - }; - friend class scoped_lock; -}; - -// TODO: use signed/unsigned in atomics more consistently -inline intptr_t AtomicIncrement( volatile intptr_t& counter ) { - return __TBB_FetchAndAddW( &counter, 1 )+1; -} - -inline uintptr_t AtomicAdd( volatile intptr_t& counter, intptr_t value ) { - return __TBB_FetchAndAddW( &counter, value ); -} - -inline intptr_t AtomicCompareExchange( volatile intptr_t& location, intptr_t new_value, intptr_t comparand) { - return __TBB_CompareAndSwapW( &location, new_value, comparand ); -} - -inline uintptr_t AtomicFetchStore(volatile void* location, uintptr_t value) { - return __TBB_FetchAndStoreW(location, value); -} - -inline void AtomicOr(volatile void *operand, uintptr_t addend) { - __TBB_AtomicOR(operand, addend); -} - -inline void AtomicAnd(volatile void *operand, uintptr_t addend) { - __TBB_AtomicAND(operand, addend); -} - -inline intptr_t FencedLoad( const volatile intptr_t &location ) { - return __TBB_load_with_acquire(location); -} - -inline void FencedStore( volatile intptr_t &location, intptr_t value ) { - __TBB_store_with_release(location, value); -} - -inline void SpinWaitWhileEq(const volatile intptr_t &location, const intptr_t value) { - tbb::internal::spin_wait_while_eq(location, value); -} - -inline void SpinWaitUntilEq(const volatile intptr_t &location, const intptr_t value) { - tbb::internal::spin_wait_until_eq(location, value); -} - inline intptr_t BitScanRev(uintptr_t x) { - return !x? -1 : __TBB_Log2(x); + return x == 0 ? -1 : static_cast<intptr_t>(tbb::detail::log2(x)); } template<typename T> static inline bool isAligned(T* arg, uintptr_t alignment) { - return tbb::internal::is_aligned(arg,alignment); + return tbb::detail::is_aligned(arg,alignment); } static inline bool isPowerOfTwo(uintptr_t arg) { - return tbb::internal::is_power_of_two(arg); + return tbb::detail::is_power_of_two(arg); } -static inline bool isPowerOfTwoMultiple(uintptr_t arg, uintptr_t divisor) { - return arg && tbb::internal::is_power_of_two_factor(arg,divisor); +static inline bool isPowerOfTwoAtLeast(uintptr_t arg, uintptr_t power2) { + return arg && tbb::detail::is_power_of_two_at_least(arg,power2); } -#define MALLOC_STATIC_ASSERT(condition,msg) __TBB_STATIC_ASSERT(condition,msg) +inline void do_yield() { + tbb::detail::yield(); +} #define USE_DEFAULT_MEMORY_MAPPING 1 // To support malloc replacement -#include "proxy.h" +#include "../tbbmalloc_proxy/proxy.h" #if MALLOC_UNIXLIKE_OVERLOAD_ENABLED #define malloc_proxy __TBB_malloc_proxy @@ -163,62 +91,49 @@ namespace internal { // Need these to work regardless of tools support. 
namespace tbb { - namespace internal { - - enum notify_type {prepare=0, cancel, acquired, releasing}; - -#if TBB_USE_THREADING_TOOLS - inline void call_itt_notify(notify_type t, void *ptr) { - switch ( t ) { - case prepare: - MALLOC_ITT_SYNC_PREPARE( ptr ); - break; - case cancel: - MALLOC_ITT_SYNC_CANCEL( ptr ); - break; - case acquired: - MALLOC_ITT_SYNC_ACQUIRED( ptr ); - break; - case releasing: - MALLOC_ITT_SYNC_RELEASING( ptr ); - break; - } +namespace detail { +namespace d1 { + + enum notify_type {prepare=0, cancel, acquired, releasing}; + +#if TBB_USE_PROFILING_TOOLS + inline void call_itt_notify(notify_type t, void *ptr) { + // unreferenced formal parameter warning + detail::suppress_unused_warning(ptr); + switch ( t ) { + case prepare: + MALLOC_ITT_SYNC_PREPARE( ptr ); + break; + case cancel: + MALLOC_ITT_SYNC_CANCEL( ptr ); + break; + case acquired: + MALLOC_ITT_SYNC_ACQUIRED( ptr ); + break; + case releasing: + MALLOC_ITT_SYNC_RELEASING( ptr ); + break; } + } #else - inline void call_itt_notify(notify_type /*t*/, void * /*ptr*/) {} -#endif // TBB_USE_THREADING_TOOLS - - template <typename T> - inline void itt_store_word_with_release(T& dst, T src) { -#if TBB_USE_THREADING_TOOLS - call_itt_notify(releasing, &dst); -#endif // TBB_USE_THREADING_TOOLS - FencedStore(*(intptr_t*)&dst, src); - } + inline void call_itt_notify(notify_type /*t*/, void * /*ptr*/) {} +#endif // TBB_USE_PROFILING_TOOLS - template <typename T> - inline T itt_load_word_with_acquire(T& src) { - T result = FencedLoad(*(intptr_t*)&src); -#if TBB_USE_THREADING_TOOLS - call_itt_notify(acquired, &src); -#endif // TBB_USE_THREADING_TOOLS - return result; - - } - } // namespace internal +} // namespace d1 +} // namespace detail } // namespace tbb -#include "tbb/internal/_aggregator_impl.h" +#include "oneapi/tbb/detail/_aggregator.h" template <typename OperationType> struct MallocAggregator { - typedef tbb::internal::aggregator_generic<OperationType> type; + typedef tbb::detail::d1::aggregator_generic<OperationType> type; }; //! aggregated_operation base class template <typename Derived> struct MallocAggregatedOperation { - typedef tbb::internal::aggregated_operation<Derived> type; + typedef tbb::detail::d1::aggregated_operation<Derived> type; }; #endif /* _TBB_malloc_Customize_H_ */ diff --git a/src/tbb/src/tbbmalloc/MapMemory.h b/src/tbb/src/tbbmalloc/MapMemory.h index ea5df2af7..63b4bc375 100644 --- a/src/tbb/src/tbbmalloc/MapMemory.h +++ b/src/tbb/src/tbbmalloc/MapMemory.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _itt_shared_malloc_MapMemory_H @@ -23,16 +19,7 @@ #include <stdlib.h> -void *ErrnoPreservingMalloc(size_t bytes) -{ - int prevErrno = errno; - void *ret = malloc( bytes ); - if (!ret) - errno = prevErrno; - return ret; -} - -#if __linux__ || __APPLE__ || __sun || __FreeBSD__ +#if __unix__ || __APPLE__ || __sun || __FreeBSD__ #if __sun && !defined(_XPG4_2) // To have void* as mmap's 1st argument @@ -41,9 +28,9 @@ void *ErrnoPreservingMalloc(size_t bytes) #endif #include <sys/mman.h> -#if __linux__ +#if __unix__ /* __TBB_MAP_HUGETLB is MAP_HUGETLB from system header linux/mman.h. - The header do not included here, as on some Linux flavors inclusion of + The header is not included here, as on some Linux flavors inclusion of linux/mman.h leads to compilation error, while changing of MAP_HUGETLB is highly unexpected. */ @@ -57,20 +44,108 @@ void *ErrnoPreservingMalloc(size_t bytes) #undef XPG4_WAS_DEFINED #endif -#define MEMORY_MAPPING_USES_MALLOC 0 -void* MapMemory (size_t bytes, bool hugePages) -{ - void* result = 0; - int prevErrno = errno; +inline void* mmap_impl(size_t map_size, void* map_hint = nullptr, int map_flags = 0) { #ifndef MAP_ANONYMOUS -// OS X* defines MAP_ANON, which is deprecated in Linux*. +// macOS* defines MAP_ANON, which is deprecated in Linux*. #define MAP_ANONYMOUS MAP_ANON #endif /* MAP_ANONYMOUS */ - int addFlags = hugePages? __TBB_MAP_HUGETLB : 0; - result = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|addFlags, -1, 0); - if (result==MAP_FAILED) + return mmap(map_hint, map_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | map_flags, -1, 0); +} + +inline void* mmapTHP(size_t bytes) { + // Initializes in zero-initialized data section + static void* hint; + + // Optimistically try to use a last huge page aligned region end + // as a hint for mmap. + hint = hint ? (void*)((uintptr_t)hint - bytes) : hint; + void* result = mmap_impl(bytes, hint); + + // Something went wrong + if (result == MAP_FAILED) { + hint = nullptr; + return MAP_FAILED; + } + + // Otherwise, fall back to the slow path - map oversized region + // and trim excess parts. 
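+    // For illustration, assuming 2 MiB huge pages: if the fallback below maps an
+    // oversized region at 0x7f0000100000, the head trim is
+    //   offset = 0x200000 - (0x7f0000100000 & 0x1fffff) = 0x100000,
+    // so the aligned region starts at 0x7f0000200000, and the remaining
+    // HUGE_PAGE_SIZE - offset bytes past result + bytes are unmapped as the tail,
+    // leaving exactly 'bytes' huge-page-aligned bytes.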
+    if (!isAligned(result, HUGE_PAGE_SIZE)) {
+        // Undo previous try
+        munmap(result, bytes);
+
+        // Map a region oversized by one huge page
+        result = mmap_impl(bytes + HUGE_PAGE_SIZE);
+
+        // Something went wrong
+        if (result == MAP_FAILED) {
+            hint = nullptr;
+            return MAP_FAILED;
+        }
+
+        // Misalignment offset
+        uintptr_t offset = 0;
+
+        if (!isAligned(result, HUGE_PAGE_SIZE)) {
+            // Trim the excess head of the region if it is not aligned
+            offset = HUGE_PAGE_SIZE - ((uintptr_t)result & (HUGE_PAGE_SIZE - 1));
+            munmap(result, offset);
+
+            // New region beginning
+            result = (void*)((uintptr_t)result + offset);
+        }
+
+        // Trim the excess tail of the region
+        munmap((void*)((uintptr_t)result + bytes), HUGE_PAGE_SIZE - offset);
+    }
+
+    // Assume that mmap'ed virtual addresses grow down by default,
+    // so record the last successful allocation as the hint
+    // and then use it minus the requested size as the next mapping point.
+    // TODO: Atomic store is meant here, fence not needed, but
+    // currently we don't have such a function.
+    hint = result;
+
+    MALLOC_ASSERT(isAligned(result, HUGE_PAGE_SIZE), "Mapped address is not aligned on huge page size.");
+
+    return result;
+}
+
+#define MEMORY_MAPPING_USES_MALLOC 0
+void* MapMemory (size_t bytes, PageType pageType)
+{
+    void* result = nullptr;
+    int prevErrno = errno;
+
+    switch (pageType) {
+        case REGULAR:
+        {
+            result = mmap_impl(bytes);
+            break;
+        }
+        case PREALLOCATED_HUGE_PAGE:
+        {
+            MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size");
+            result = mmap_impl(bytes, nullptr, __TBB_MAP_HUGETLB);
+            break;
+        }
+        case TRANSPARENT_HUGE_PAGE:
+        {
+            MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size");
+            result = mmapTHP(bytes);
+            break;
+        }
+        default:
+        {
+            MALLOC_ASSERT(false, "Unknown page type");
+        }
+    }
+
+    if (result == MAP_FAILED) {
         errno = prevErrno;
-    return result==MAP_FAILED? 0: result;
+        return nullptr;
+    }
+
+    return result;
 }
 
 int UnmapMemory(void *area, size_t bytes)
@@ -82,14 +157,14 @@ int UnmapMemory(void *area, size_t bytes)
     return ret;
 }
 
-#elif (_WIN32 || _WIN64) && !_XBOX && !__TBB_WIN8UI_SUPPORT
+#elif (_WIN32 || _WIN64) && !__TBB_WIN8UI_SUPPORT
 
 #include <windows.h>
 
 #define MEMORY_MAPPING_USES_MALLOC 0
-void* MapMemory (size_t bytes, bool)
+void* MapMemory (size_t bytes, PageType)
 {
     /* Is VirtualAlloc thread safe? */
-    return VirtualAlloc(NULL, bytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+    return VirtualAlloc(nullptr, bytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
 }
 
 int UnmapMemory(void *area, size_t /*bytes*/)
@@ -100,8 +175,17 @@ int UnmapMemory(void *area, size_t /*bytes*/)
 
 #else
 
+void *ErrnoPreservingMalloc(size_t bytes)
+{
+    int prevErrno = errno;
+    void *ret = malloc( bytes );
+    if (!ret)
+        errno = prevErrno;
+    return ret;
+}
+
 #define MEMORY_MAPPING_USES_MALLOC 1
-void* MapMemory (size_t bytes, bool)
+void* MapMemory (size_t bytes, PageType)
 {
     return ErrnoPreservingMalloc( bytes );
 }
diff --git a/src/tbb/src/tbbmalloc/Statistics.h b/src/tbb/src/tbbmalloc/Statistics.h
index 15f9e16b4..a8767a819 100644
--- a/src/tbb/src/tbbmalloc/Statistics.h
+++ b/src/tbb/src/tbbmalloc/Statistics.h
@@ -1,21 +1,17 @@
 /*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+    Copyright (c) 2005-2022 Intel Corporation
 
-    This file is part of Threading Building Blocks.
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #define MAX_THREADS 1024 @@ -67,7 +63,7 @@ static inline int STAT_increment(int thread, int bin, int ctr) static inline void initStatisticsCollection() { #if defined(MALLOCENV_COLLECT_STATISTICS) - if (NULL != getenv(MALLOCENV_COLLECT_STATISTICS)) + if (nullptr != getenv(MALLOCENV_COLLECT_STATISTICS)) reportAllocationStatistics = true; #endif } diff --git a/src/tbb/src/tbbmalloc/Synchronize.h b/src/tbb/src/tbbmalloc/Synchronize.h new file mode 100644 index 000000000..b25d7e245 --- /dev/null +++ b/src/tbb/src/tbbmalloc/Synchronize.h @@ -0,0 +1,97 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_malloc_Synchronize_H_ +#define __TBB_malloc_Synchronize_H_ + +#include "oneapi/tbb/detail/_utils.h" + +#include <atomic> + +//! Stripped down version of spin_mutex. +/** Instances of MallocMutex must be declared in memory that is zero-initialized. + There are no constructors. This is a feature that lets it be + used in situations where the mutex might be used while file-scope constructors + are running. + + There are no methods "acquire" or "release". The scoped_lock must be used + in a strict block-scoped locking pattern. Omitting these methods permitted + further simplification. 
*/ +class MallocMutex : tbb::detail::no_copy { + std::atomic_flag m_flag = ATOMIC_FLAG_INIT; + + void lock() { + tbb::detail::atomic_backoff backoff; + while (m_flag.test_and_set()) backoff.pause(); + } + bool try_lock() { + return !m_flag.test_and_set(); + } + void unlock() { + m_flag.clear(std::memory_order_release); + } + +public: + class scoped_lock : tbb::detail::no_copy { + MallocMutex& m_mutex; + bool m_taken; + + public: + scoped_lock(MallocMutex& m) : m_mutex(m), m_taken(true) { + m.lock(); + } + scoped_lock(MallocMutex& m, bool block, bool *locked) : m_mutex(m), m_taken(false) { + if (block) { + m.lock(); + m_taken = true; + } else { + m_taken = m.try_lock(); + } + if (locked) *locked = m_taken; + } + + scoped_lock(scoped_lock& other) = delete; + scoped_lock& operator=(scoped_lock&) = delete; + + ~scoped_lock() { + if (m_taken) { + m_mutex.unlock(); + } + } + }; + friend class scoped_lock; +}; + +inline void SpinWaitWhileEq(const std::atomic<intptr_t>& location, const intptr_t value) { + tbb::detail::spin_wait_while_eq(location, value); +} + +#if USE_PTHREAD && __TBB_SOURCE_DIRECTLY_INCLUDED + +inline void SpinWaitUntilEq(const std::atomic<intptr_t>& location, const intptr_t value) { + tbb::detail::spin_wait_until_eq(location, value); +} + +#endif + +class AtomicBackoff { + tbb::detail::atomic_backoff backoff; +public: + AtomicBackoff() {} + void pause() { backoff.pause(); } +}; + +#endif /* __TBB_malloc_Synchronize_H_ */ diff --git a/src/tbb/src/tbbmalloc/TypeDefinitions.h b/src/tbb/src/tbbmalloc/TypeDefinitions.h index 76e063a0b..bfadf61d6 100644 --- a/src/tbb/src/tbbmalloc/TypeDefinitions.h +++ b/src/tbb/src/tbbmalloc/TypeDefinitions.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. */ #ifndef _itt_shared_malloc_TypeDefinitions_H_ @@ -29,7 +25,7 @@ # define __ARCH_ipf 1 # elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support # define __ARCH_x86_32 1 -# elif defined(_M_ARM) +# elif defined(_M_ARM) || defined(_M_ARM64) || defined(__aarch64__) // the latter for MinGW support # define __ARCH_other 1 # else # error Unknown processor architecture for Windows diff --git a/src/tbb/src/tbbmalloc/backend.cpp b/src/tbb/src/tbbmalloc/backend.cpp index 4f5a9deee..87531f816 100644 --- a/src/tbb/src/tbbmalloc/backend.cpp +++ b/src/tbb/src/tbbmalloc/backend.cpp @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #include <string.h> /* for memset */ @@ -43,115 +39,123 @@ namespace internal { /* assume MapMemory and UnmapMemory are customized */ #endif -void* getRawMemory (size_t size, bool hugePages) { - return MapMemory(size, hugePages); +void* getRawMemory (size_t size, PageType pageType) { + return MapMemory(size, pageType); } -bool freeRawMemory (void *object, size_t size) { +int freeRawMemory (void *object, size_t size) { return UnmapMemory(object, size); } -void HugePagesStatus::registerAllocation(bool gotPage) -{ - if (gotPage) { - if (!wasObserved) - FencedStore(wasObserved, 1); - } else - FencedStore(enabled, 0); - // reports huge page status only once - if (needActualStatusPrint - && AtomicCompareExchange(needActualStatusPrint, 0, 1)) - doPrintStatus(gotPage, "available"); -} +#if CHECK_ALLOCATION_RANGE -void HugePagesStatus::registerReleasing(size_t size) +void Backend::UsedAddressRange::registerAlloc(uintptr_t left, uintptr_t right) { - // We: 1) got huge page at least once, - // 2) something that looks like a huge page is been released, - // and 3) user requested huge pages, - // so a huge page might be available at next allocation. - // TODO: keep page status in regions and use exact check here - // Use isPowerOfTwoMultiple because it's faster then generic reminder. - if (FencedLoad(wasObserved) && isPowerOfTwoMultiple(size, pageSize)) - FencedStore(enabled, requestedMode.get()); -} - -void HugePagesStatus::printStatus() { - doPrintStatus(requestedMode.get(), "requested"); - if (requestedMode.get()) { // report actual status iff requested - if (pageSize) - FencedStore(needActualStatusPrint, 1); - else - doPrintStatus(/*state=*/false, "available"); - } + MallocMutex::scoped_lock lock(mutex); + if (left < leftBound.load(std::memory_order_relaxed)) + leftBound.store(left, std::memory_order_relaxed); + if (right > rightBound.load(std::memory_order_relaxed)) + rightBound.store(right, std::memory_order_relaxed); + MALLOC_ASSERT(leftBound.load(std::memory_order_relaxed), ASSERT_TEXT); + MALLOC_ASSERT(leftBound.load(std::memory_order_relaxed) < rightBound.load(std::memory_order_relaxed), ASSERT_TEXT); + MALLOC_ASSERT(leftBound.load(std::memory_order_relaxed) <= left && right <= rightBound.load(std::memory_order_relaxed), ASSERT_TEXT); } -void HugePagesStatus::doPrintStatus(bool state, const char *stateName) +void Backend::UsedAddressRange::registerFree(uintptr_t left, uintptr_t right) { - // Under OS X* fprintf/snprintf acquires an internal lock, so when - // 1st allocation is done under the lock, we got a deadlock. - // Do not use fprintf etc during initialization. 
- fputs("TBBmalloc: huge pages\t", stderr); - if (!state) - fputs("not ", stderr); - fputs(stateName, stderr); - fputs("\n", stderr); + MallocMutex::scoped_lock lock(mutex); + if (leftBound.load(std::memory_order_relaxed) == left) { + if (rightBound.load(std::memory_order_relaxed) == right) { + leftBound.store(ADDRESS_UPPER_BOUND, std::memory_order_relaxed); + rightBound.store(0, std::memory_order_relaxed); + } else + leftBound.store(right, std::memory_order_relaxed); + } else if (rightBound.load(std::memory_order_relaxed) == right) + rightBound.store(left, std::memory_order_relaxed); + MALLOC_ASSERT((!rightBound.load(std::memory_order_relaxed) && leftBound.load(std::memory_order_relaxed) == ADDRESS_UPPER_BOUND) + || leftBound.load(std::memory_order_relaxed) < rightBound.load(std::memory_order_relaxed), ASSERT_TEXT); } +#endif // CHECK_ALLOCATION_RANGE + +// Initialized in frontend inside defaultMemPool +extern HugePagesStatus hugePages; -void *Backend::allocRawMem(size_t &size) const +void *Backend::allocRawMem(size_t &size) { - void *res = NULL; - size_t allocSize; + void *res = nullptr; + size_t allocSize = 0; if (extMemPool->userPool()) { + if (extMemPool->fixedPool && bootsrapMemDone == bootsrapMemStatus.load(std::memory_order_acquire)) + return nullptr; + MALLOC_ASSERT(bootsrapMemStatus != bootsrapMemNotDone, + "Backend::allocRawMem() called prematurely?"); + // TODO: support for raw mem not aligned at sizeof(uintptr_t) // memory from fixed pool is asked once and only once - if (!extMemPool->fixedPool || !rawMemReceived) { - allocSize = alignUpGeneric(size, extMemPool->granularity); - res = (*extMemPool->rawAlloc)(extMemPool->poolId, allocSize); - if (extMemPool->fixedPool) - const_cast<bool&>(rawMemReceived) = true; - } + allocSize = alignUpGeneric(size, extMemPool->granularity); + res = (*extMemPool->rawAlloc)(extMemPool->poolId, allocSize); } else { - // try to get them at 1st allocation and still use, if successful - // if 1st try is unsuccessful, no more trying - if (FencedLoad(hugePages.enabled)) { - allocSize = alignUpGeneric(size, hugePages.getSize()); - res = getRawMemory(allocSize, /*hugePages=*/true); - hugePages.registerAllocation(res); + // Align allocation on page size + size_t pageSize = hugePages.isEnabled ? hugePages.getGranularity() : extMemPool->granularity; + MALLOC_ASSERT(pageSize, "Page size cannot be zero."); + allocSize = alignUpGeneric(size, pageSize); + + // If user requested huge pages and they are available, try to use preallocated ones firstly. + // If there are none, lets check transparent huge pages support and use them instead. 
+ if (hugePages.isEnabled) { + if (hugePages.isHPAvailable) { + res = getRawMemory(allocSize, PREALLOCATED_HUGE_PAGE); + } + if (!res && hugePages.isTHPAvailable) { + res = getRawMemory(allocSize, TRANSPARENT_HUGE_PAGE); + } } - if ( !res ) { - allocSize = alignUpGeneric(size, extMemPool->granularity); - res = getRawMemory(allocSize, /*hugePages=*/false); + if (!res) { + res = getRawMemory(allocSize, REGULAR); } } - if ( res ) { + if (res) { + MALLOC_ASSERT(allocSize > 0, "Invalid size of an allocated region."); size = allocSize; - AtomicAdd((intptr_t&)totalMemSize, size); + if (!extMemPool->userPool()) + usedAddrRange.registerAlloc((uintptr_t)res, (uintptr_t)res+size); +#if MALLOC_DEBUG + volatile size_t curTotalSize = totalMemSize; // to read global value once + MALLOC_ASSERT(curTotalSize+size > curTotalSize, "Overflow allocation size."); +#endif + totalMemSize.fetch_add(size); } return res; } -void Backend::freeRawMem(void *object, size_t size) const +bool Backend::freeRawMem(void *object, size_t size) { - AtomicAdd((intptr_t&)totalMemSize, -size); + bool fail; +#if MALLOC_DEBUG + volatile size_t curTotalSize = totalMemSize; // to read global value once + MALLOC_ASSERT(curTotalSize-size < curTotalSize, "Negative allocation size."); +#endif + totalMemSize.fetch_sub(size); if (extMemPool->userPool()) { MALLOC_ASSERT(!extMemPool->fixedPool, "No free for fixed-size pools."); - (*extMemPool->rawFree)(extMemPool->poolId, object, size); + fail = (*extMemPool->rawFree)(extMemPool->poolId, object, size); } else { - hugePages.registerReleasing(size); - freeRawMemory(object, size); + usedAddrRange.registerFree((uintptr_t)object, (uintptr_t)object + size); + fail = freeRawMemory(object, size); } + // TODO: use result in all freeRawMem() callers + return !fail; } /********* End memory acquisition code ********************************/ // Protected object size. After successful locking returns size of locked block, // and releasing requires setting block size. 
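 // Values up to MAX_SPEC_VAL are special markers (a value <= MAX_LOCKED_VAL means
 // the size is currently locked); anything larger is the real block size, which
 // unlock() publishes with a release store.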
-class GuardedSize : tbb::internal::no_copy { - uintptr_t value; +class GuardedSize : tbb::detail::no_copy { + std::atomic<uintptr_t> value; public: enum State { LOCKED, @@ -162,30 +166,30 @@ class GuardedSize : tbb::internal::no_copy { MAX_SPEC_VAL = LAST_REGION_BLOCK }; - void initLocked() { value = LOCKED; } + void initLocked() { value.store(LOCKED, std::memory_order_release); } // TBB_REVAMP_TODO: was relaxed void makeCoalscing() { - MALLOC_ASSERT(value == LOCKED, ASSERT_TEXT); - value = COAL_BLOCK; + MALLOC_ASSERT(value.load(std::memory_order_relaxed) == LOCKED, ASSERT_TEXT); + value.store(COAL_BLOCK, std::memory_order_release); // TBB_REVAMP_TODO: was relaxed } size_t tryLock(State state) { - size_t szVal, sz; MALLOC_ASSERT(state <= MAX_LOCKED_VAL, ASSERT_TEXT); + size_t sz = value.load(std::memory_order_acquire); for (;;) { - sz = FencedLoad((intptr_t&)value); - if (sz <= MAX_LOCKED_VAL) + if (sz <= MAX_LOCKED_VAL) { break; - szVal = AtomicCompareExchange((intptr_t&)value, state, sz); - - if (szVal==sz) + } + if (value.compare_exchange_strong(sz, state)) { break; + } } return sz; } void unlock(size_t size) { - MALLOC_ASSERT(value <= MAX_LOCKED_VAL, "The lock is not locked"); + MALLOC_ASSERT(value.load(std::memory_order_relaxed) <= MAX_LOCKED_VAL, "The lock is not locked"); MALLOC_ASSERT(size > MAX_LOCKED_VAL, ASSERT_TEXT); - FencedStore((intptr_t&)value, size); + value.store(size, std::memory_order_release); } + bool isLastRegionBlock() const { return value.load(std::memory_order_relaxed) == LAST_REGION_BLOCK; } friend void Backend::IndexedBins::verify(); }; @@ -193,7 +197,7 @@ struct MemRegion { MemRegion *next, // keep all regions in any pool to release all them on *prev; // pool destroying, 2-linked list to release individual // regions. - size_t allocSz, // got from poll callback + size_t allocSz, // got from pool callback blockSz; // initial and maximal inner block size MemRegionType type; }; @@ -216,7 +220,7 @@ class FreeBlock : BlockMutexes { // valid only when block is in processing, i.e. 
one is not free and not
     size_t       sizeTmp; // used outside of backend
     int          myBin;   // bin that is owner of the block
-    bool         aligned;
+    bool         slabAligned;
     bool         blockInBin; // this block in myBin already
 
     FreeBlock *rightNeig(size_t sz) const {
@@ -231,6 +235,7 @@ class FreeBlock : BlockMutexes {
     void initHeader() { myL.initLocked(); leftL.initLocked(); }
     void setMeFree(size_t size) { myL.unlock(size); }
     size_t trySetMeUsed(GuardedSize::State s) { return myL.tryLock(s); }
+    bool isLastRegionBlock() const { return myL.isLastRegionBlock(); }
 
     void setLeftFree(size_t sz) { leftL.unlock(sz); }
     size_t trySetLeftUsed(GuardedSize::State s) { return leftL.tryLock(s); }
@@ -252,12 +257,12 @@ class FreeBlock : BlockMutexes {
         myL.makeCoalscing();
         rightNeig(blockSz)->leftL.makeCoalscing();
         sizeTmp = blockSz;
-        nextToFree = NULL;
+        nextToFree = nullptr;
     }
     void markUsed() {
         myL.initLocked();
         rightNeig(sizeTmp)->leftL.initLocked();
-        nextToFree = NULL;
+        nextToFree = nullptr;
     }
     static void markBlocks(FreeBlock *fBlock, int num, size_t size) {
         for (int i=1; i<num; i++) {
@@ -276,63 +281,124 @@ struct LastFreeBlock : public FreeBlock {
 
 const size_t FreeBlock::minBlockSize = sizeof(FreeBlock);
 
+inline bool BackendSync::waitTillBlockReleased(intptr_t startModifiedCnt)
+{
+    AtomicBackoff backoff;
+#if __TBB_MALLOC_BACKEND_STAT
+    class ITT_Guard {
+        void *ptr;
+    public:
+        ITT_Guard(void *p) : ptr(p) {
+            MALLOC_ITT_SYNC_PREPARE(ptr);
+        }
+        ~ITT_Guard() {
+            MALLOC_ITT_SYNC_ACQUIRED(ptr);
+        }
+    };
+    ITT_Guard ittGuard(&inFlyBlocks);
+#endif
+    intptr_t myBinsInFlyBlocks = inFlyBlocks.load(std::memory_order_acquire);
+    intptr_t myCoalescQInFlyBlocks = backend->blocksInCoalescing();
+    while (true) {
+        MALLOC_ASSERT(myBinsInFlyBlocks>=0 && myCoalescQInFlyBlocks>=0, nullptr);
+
+        intptr_t currBinsInFlyBlocks = inFlyBlocks.load(std::memory_order_acquire);
+        intptr_t currCoalescQInFlyBlocks = backend->blocksInCoalescing();
+        WhiteboxTestingYield();
+        // Stop waiting iff:
+
+        // 1) blocks were removed from processing, not added
+        if (myBinsInFlyBlocks > currBinsInFlyBlocks
+        // 2) blocks were released from the delayed coalescing queue
+            || myCoalescQInFlyBlocks > currCoalescQInFlyBlocks)
+            break;
+        // 3) if there are blocks in coalescing, and no progress in their processing,
+        //    try to scan the coalescing queue and stop waiting if changes were made
+        //    (if there are no changes and in-fly blocks exist, we continue
+        //    waiting so as not to increase the load on coalescQ)
+        if (currCoalescQInFlyBlocks > 0 && backend->scanCoalescQ(/*forceCoalescQDrop=*/false))
+            break;
+        // 4) when there are no blocks
+        if (!currBinsInFlyBlocks && !currCoalescQInFlyBlocks) {
+            // re-scanning makes sense only if the bins were modified since they were scanned
+            auto pool = backend->extMemPool;
+            if (pool->hardCachesCleanupInProgress.load(std::memory_order_acquire) ||
+                pool->softCachesCleanupInProgress.load(std::memory_order_acquire)) {
+                backoff.pause();
+                continue;
+            }
+
+            return startModifiedCnt != getNumOfMods();
+        }
+        myBinsInFlyBlocks = currBinsInFlyBlocks;
+        myCoalescQInFlyBlocks = currCoalescQInFlyBlocks;
+        backoff.pause();
+    }
+    return true;
+}
+
 void CoalRequestQ::putBlock(FreeBlock *fBlock)
 {
     MALLOC_ASSERT(fBlock->sizeTmp >= FreeBlock::minBlockSize, ASSERT_TEXT);
     fBlock->markUsed();
+    // the block is in the queue, do not forget that it's here
+    inFlyBlocks++;
 
+    FreeBlock *myBlToFree = blocksToFree.load(std::memory_order_acquire);
     for (;;) {
-        FreeBlock *myBlToFree = (FreeBlock*)FencedLoad((intptr_t&)blocksToFree);
-
         fBlock->nextToFree = myBlToFree;
-        if (myBlToFree ==
-
(FreeBlock*)AtomicCompareExchange((intptr_t&)blocksToFree, - (intptr_t)fBlock, - (intptr_t)myBlToFree)) + if (blocksToFree.compare_exchange_strong(myBlToFree, fBlock)) { return; + } } } FreeBlock *CoalRequestQ::getAll() { for (;;) { - FreeBlock *myBlToFree = (FreeBlock*)FencedLoad((intptr_t&)blocksToFree); - - if (!myBlToFree) - return NULL; - else { - if (myBlToFree == - (FreeBlock*)AtomicCompareExchange((intptr_t&)blocksToFree, - 0, (intptr_t)myBlToFree)) + FreeBlock *myBlToFree = blocksToFree.load(std::memory_order_acquire); + + if (!myBlToFree) { + return nullptr; + } else { + if (blocksToFree.compare_exchange_strong(myBlToFree, nullptr)) { return myBlToFree; - else + } else { continue; + } } } } +inline void CoalRequestQ::blockWasProcessed() +{ + bkndSync->binsModified(); + int prev = inFlyBlocks.fetch_sub(1); + tbb::detail::suppress_unused_warning(prev); + MALLOC_ASSERT(prev > 0, ASSERT_TEXT); +} + // Try to get a block from a bin. // If the remaining free space would stay in the same bin, // split the block without removing it. // If the free space should go to other bin(s), remove the block. // alignedBin is true, if all blocks in the bin have slab-aligned right side. -FreeBlock *Backend::IndexedBins::getFromBin(int binIdx, BackendSync *sync, - size_t size, bool needAlignedRes, bool alignedBin, bool wait, - int *binLocked) +FreeBlock *Backend::IndexedBins::getFromBin(int binIdx, BackendSync *sync, size_t size, + bool needAlignedRes, bool alignedBin, bool wait, int *binLocked) { Bin *b = &freeBins[binIdx]; try_next: - FreeBlock *fBlock = NULL; - if (b->head) { - bool locked; + FreeBlock *fBlock = nullptr; + if (!b->empty()) { + bool locked = false; MallocMutex::scoped_lock scopedLock(b->tLock, wait, &locked); if (!locked) { if (binLocked) (*binLocked)++; - return NULL; + return nullptr; } - for (FreeBlock *curr = b->head; curr; curr = curr->next) { + for (FreeBlock *curr = b->head.load(std::memory_order_relaxed); curr; curr = curr->next) { size_t szBlock = curr->tryLockBlock(); if (!szBlock) { // block is locked, re-do bin lock, as there is no place to spin @@ -340,50 +406,36 @@ FreeBlock *Backend::IndexedBins::getFromBin(int binIdx, BackendSync *sync, goto try_next; } + // GENERAL CASE if (alignedBin || !needAlignedRes) { size_t splitSz = szBlock - size; - // If we got a block as split result, - // it must have a room for control structures. - if (szBlock >= size && (splitSz >= FreeBlock::minBlockSize || - !splitSz)) + // If we got a block as split result, it must have a room for control structures. + if (szBlock >= size && (splitSz >= FreeBlock::minBlockSize || !splitSz)) fBlock = curr; } else { + // SPECIAL CASE, to get aligned block from unaligned bin we have to cut the middle of a block + // and return remaining left and right part. Possible only in fixed pool scenario, assert for this + // is set inside splitBlock() function. 
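+                // For illustration, assuming 16 KB slabs: for an unaligned free block
+                // at 0x102000 of size 0xA000, alignUp gives newB = 0x104000; a 0x4000
+                // request fits as [0x104000, 0x108000), leaving a 0x2000 head and a
+                // 0x4000 tail, both big enough to remain as free blocks.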
+ void *newB = alignUp(curr, slabSize); uintptr_t rightNew = (uintptr_t)newB + size; uintptr_t rightCurr = (uintptr_t)curr + szBlock; - // appropriate size, and left and right split results - // are either big enough or non-existent + // Check if the block size is sufficient, + // and also left and right split results are either big enough or non-existent if (rightNew <= rightCurr - && (newB==curr || - (uintptr_t)newB-(uintptr_t)curr >= FreeBlock::minBlockSize) - && (rightNew==rightCurr || - rightCurr - rightNew >= FreeBlock::minBlockSize)) + && (newB == curr || ((uintptr_t)newB - (uintptr_t)curr) >= FreeBlock::minBlockSize) + && (rightNew == rightCurr || (rightCurr - rightNew) >= FreeBlock::minBlockSize)) fBlock = curr; } + if (fBlock) { - // consume must be called before result of removing from a bin - // is visible externally. + // consume must be called before result of removing from a bin is visible externally. sync->blockConsumed(); - if (alignedBin && needAlignedRes && - Backend::sizeToBin(szBlock-size) == Backend::sizeToBin(szBlock)) { - // free remainder of fBlock stay in same bin, - // so no need to remove it from the bin - // TODO: add more "still here" cases - FreeBlock *newFBlock = fBlock; - // return block from right side of fBlock - fBlock = (FreeBlock*)((uintptr_t)newFBlock + szBlock - size); - MALLOC_ASSERT(isAligned(fBlock, slabSize), "Invalid free block"); - fBlock->initHeader(); - fBlock->setLeftFree(szBlock - size); - newFBlock->setMeFree(szBlock - size); - - fBlock->sizeTmp = size; - } else { - b->removeBlock(fBlock); - if (freeBins[binIdx].empty()) - bitMask.set(binIdx, false); - fBlock->sizeTmp = szBlock; - } + // TODO: think about cases when block stays in the same bin + b->removeBlock(fBlock); + if (freeBins[binIdx].empty()) + bitMask.set(binIdx, false); + fBlock->sizeTmp = szBlock; break; } else { // block size is not valid, search for next block in the bin curr->setMeFree(szBlock); @@ -397,14 +449,14 @@ FreeBlock *Backend::IndexedBins::getFromBin(int binIdx, BackendSync *sync, bool Backend::IndexedBins::tryReleaseRegions(int binIdx, Backend *backend) { Bin *b = &freeBins[binIdx]; - FreeBlock *fBlockList = NULL; + FreeBlock *fBlockList = nullptr; // got all blocks from the bin and re-do coalesce on them // to release single-block regions try_next: - if (b->head) { + if (!b->empty()) { MallocMutex::scoped_lock binLock(b->tLock); - for (FreeBlock *curr = b->head; curr; ) { + for (FreeBlock *curr = b->head.load(std::memory_order_relaxed); curr; ) { size_t szBlock = curr->tryLockBlock(); if (!szBlock) goto try_next; @@ -418,15 +470,16 @@ bool Backend::IndexedBins::tryReleaseRegions(int binIdx, Backend *backend) curr = next; } } - return backend->coalescAndPutList(fBlockList, /*forceCoalescQDrop=*/true); + return backend->coalescAndPutList(fBlockList, /*forceCoalescQDrop=*/true, + /*reportBlocksProcessed=*/false); } void Backend::Bin::removeBlock(FreeBlock *fBlock) { - MALLOC_ASSERT(fBlock->next||fBlock->prev||fBlock==head, + MALLOC_ASSERT(fBlock->next||fBlock->prev||fBlock== head.load(std::memory_order_relaxed), "Detected that a block is not in the bin."); - if (head == fBlock) - head = fBlock->next; + if (head.load(std::memory_order_relaxed) == fBlock) + head.store(fBlock->next, std::memory_order_relaxed); if (tail == fBlock) tail = fBlock->prev; if (fBlock->prev) @@ -435,13 +488,11 @@ void Backend::Bin::removeBlock(FreeBlock *fBlock) fBlock->next->prev = fBlock->prev; } -void Backend::IndexedBins::addBlock(int binIdx, FreeBlock *fBlock, size_t blockSz, bool 
addToTail) +void Backend::IndexedBins::addBlock(int binIdx, FreeBlock *fBlock, size_t /* blockSz */, bool addToTail) { Bin *b = &freeBins[binIdx]; - fBlock->myBin = binIdx; - fBlock->aligned = toAlignedBin(fBlock, blockSz); - fBlock->next = fBlock->prev = NULL; + fBlock->next = fBlock->prev = nullptr; { MallocMutex::scoped_lock scopedLock(b->tLock); if (addToTail) { @@ -449,11 +500,11 @@ void Backend::IndexedBins::addBlock(int binIdx, FreeBlock *fBlock, size_t blockS b->tail = fBlock; if (fBlock->prev) fBlock->prev->next = fBlock; - if (!b->head) - b->head = fBlock; + if (!b->head.load(std::memory_order_relaxed)) + b->head.store(fBlock, std::memory_order_relaxed); } else { - fBlock->next = b->head; - b->head = fBlock; + fBlock->next = b->head.load(std::memory_order_relaxed); + b->head.store(fBlock, std::memory_order_relaxed); if (fBlock->next) fBlock->next->prev = fBlock; if (!b->tail) @@ -465,13 +516,11 @@ void Backend::IndexedBins::addBlock(int binIdx, FreeBlock *fBlock, size_t blockS bool Backend::IndexedBins::tryAddBlock(int binIdx, FreeBlock *fBlock, bool addToTail) { - bool locked; + bool locked = false; Bin *b = &freeBins[binIdx]; - fBlock->myBin = binIdx; - fBlock->aligned = toAlignedBin(fBlock, fBlock->sizeTmp); if (addToTail) { - fBlock->next = NULL; + fBlock->next = nullptr; { MallocMutex::scoped_lock scopedLock(b->tLock, /*wait=*/false, &locked); if (!locked) @@ -480,17 +529,17 @@ bool Backend::IndexedBins::tryAddBlock(int binIdx, FreeBlock *fBlock, bool addTo b->tail = fBlock; if (fBlock->prev) fBlock->prev->next = fBlock; - if (!b->head) - b->head = fBlock; + if (!b->head.load(std::memory_order_relaxed)) + b->head.store(fBlock, std::memory_order_relaxed); } } else { - fBlock->prev = NULL; + fBlock->prev = nullptr; { MallocMutex::scoped_lock scopedLock(b->tLock, /*wait=*/false, &locked); if (!locked) return false; - fBlock->next = b->head; - b->head = fBlock; + fBlock->next = b->head.load(std::memory_order_relaxed); + b->head.store(fBlock, std::memory_order_relaxed); if (fBlock->next) fBlock->next->prev = fBlock; if (!b->tail) @@ -503,7 +552,7 @@ bool Backend::IndexedBins::tryAddBlock(int binIdx, FreeBlock *fBlock, bool addTo void Backend::IndexedBins::reset() { - for (int i=0; i<Backend::freeBinsNum; i++) + for (unsigned i=0; i<Backend::freeBinsNum; i++) freeBins[i].reset(); bitMask.reset(); } @@ -521,86 +570,93 @@ bool ExtMemoryPool::regionsAreReleaseable() const return !keepAllMemory && !delayRegsReleasing; } -FreeBlock *Backend::splitUnalignedBlock(FreeBlock *fBlock, int num, size_t size, - bool needAlignedBlock) +FreeBlock *Backend::splitBlock(FreeBlock *fBlock, int num, size_t size, bool blockIsAligned, bool needAlignedBlock) { - const size_t totalSize = num*size; - if (needAlignedBlock) { - size_t fBlockSz = fBlock->sizeTmp; - uintptr_t fBlockEnd = (uintptr_t)fBlock + fBlockSz; - FreeBlock *newB = alignUp(fBlock, slabSize); - FreeBlock *rightPart = (FreeBlock*)((uintptr_t)newB + totalSize); - - // Space to use is in the middle, - // ... return free right part + const size_t totalSize = num * size; + + // SPECIAL CASE, for unaligned block we have to cut the middle of a block + // and return remaining left and right part. Possible only in a fixed pool scenario. 
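+    // (Either way the contract is the same: carve totalSize bytes out of fBlock
+    // for the caller and hand any remainder back via coalescAndPut().)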
+    if (needAlignedBlock && !blockIsAligned) {
+        MALLOC_ASSERT(extMemPool->fixedPool,
+            "Aligned block request from unaligned bin possible only in fixed pool scenario.");
+
+        // Space to use is in the middle
+        FreeBlock *newBlock = alignUp(fBlock, slabSize);
+        FreeBlock *rightPart = (FreeBlock*)((uintptr_t)newBlock + totalSize);
+        uintptr_t fBlockEnd = (uintptr_t)fBlock + fBlock->sizeTmp;
+
+        // Return free right part
         if ((uintptr_t)rightPart != fBlockEnd) {
             rightPart->initHeader(); // to prevent coalescing rightPart with fBlock
-            coalescAndPut(rightPart, fBlockEnd - (uintptr_t)rightPart);
+            size_t rightSize = fBlockEnd - (uintptr_t)rightPart;
+            coalescAndPut(rightPart, rightSize, toAlignedBin(rightPart, rightSize));
         }
-        // ... and free left part
-        if (newB != fBlock) {
-            newB->initHeader(); // to prevent coalescing fBlock with newB
-            coalescAndPut(fBlock, (uintptr_t)newB - (uintptr_t)fBlock);
+        // And free left part
+        if (newBlock != fBlock) {
+            newBlock->initHeader(); // to prevent coalescing fBlock with newBlock
+            size_t leftSize = (uintptr_t)newBlock - (uintptr_t)fBlock;
+            coalescAndPut(fBlock, leftSize, toAlignedBin(fBlock, leftSize));
         }
-
-        fBlock = newB;
-        MALLOC_ASSERT(isAligned(fBlock, slabSize), ASSERT_TEXT);
-    } else {
-        if (size_t splitSz = fBlock->sizeTmp - totalSize) {
-            // split block and return free right part
-            FreeBlock *splitB = (FreeBlock*)((uintptr_t)fBlock + totalSize);
-            splitB->initHeader();
-            coalescAndPut(splitB, splitSz);
-        }
-    }
-    FreeBlock::markBlocks(fBlock, num, size);
-    return fBlock;
-}
-
-FreeBlock *Backend::splitAlignedBlock(FreeBlock *fBlock, int num, size_t size,
-                                      bool needAlignedBlock)
-{
-    if (fBlock->sizeTmp != num*size) { // i.e., need to split the block
-        FreeBlock *newAlgnd;
-        size_t newSz;
-
+        fBlock = newBlock;
+    } else if (size_t splitSize = fBlock->sizeTmp - totalSize) { // need to split the block
+        // GENERAL CASE, cut the left or right part of the block
+        FreeBlock *splitBlock = nullptr;
         if (needAlignedBlock) {
-            newAlgnd = fBlock;
-            fBlock = (FreeBlock*)((uintptr_t)newAlgnd + newAlgnd->sizeTmp -
-                                  num*size);
-            MALLOC_ASSERT(isAligned(fBlock, slabSize), "Invalid free block");
+            // For slab-aligned blocks cut the right side of the block
+            // and return it to the requester; the original block returns to the backend
+            splitBlock = fBlock;
+            fBlock = (FreeBlock*)((uintptr_t)splitBlock + splitSize);
             fBlock->initHeader();
-            newSz = newAlgnd->sizeTmp - num*size;
         } else {
-            newAlgnd = (FreeBlock*)((uintptr_t)fBlock + num*size);
-            newSz = fBlock->sizeTmp - num*size;
-            newAlgnd->initHeader();
+            // For large object blocks cut the original block and put the free right part to the backend
+            splitBlock = (FreeBlock*)((uintptr_t)fBlock + totalSize);
+            splitBlock->initHeader();
        }
-        coalescAndPut(newAlgnd, newSz);
+        // Mark the free block like its parent only when the requested type (needAlignedBlock)
+        // and the block returned from the bins/OS (blockIsAligned) are equal;
+        // otherwise recompute the alignment (hence the XOR)
+        bool markAligned = (blockIsAligned ^ needAlignedBlock) ? toAlignedBin(splitBlock, splitSize) : blockIsAligned;
+        coalescAndPut(splitBlock, splitSize, markAligned);
    }
-    MALLOC_ASSERT(!needAlignedBlock || isAligned(fBlock, slabSize),
-                  "Expect to get aligned block, if one was requested.");
+    MALLOC_ASSERT(!needAlignedBlock || isAligned(fBlock, slabSize), "Expect to get aligned block, if one was requested.");
     FreeBlock::markBlocks(fBlock, num, size);
     return fBlock;
 }
 
-inline size_t Backend::getMaxBinnedSize() const
+size_t Backend::getMaxBinnedSize() const
 {
-    return hugePages.wasObserved && !inUserPool()?
+ return hugePages.isEnabled && !inUserPool() ? maxBinned_HugePage : maxBinned_SmallPage; } -inline bool Backend::MaxRequestComparator::operator()(size_t oldMaxReq, - size_t requestSize) const +inline bool Backend::MaxRequestComparator::operator()(size_t oldMaxReq, size_t requestSize) const { return requestSize > oldMaxReq && requestSize < backend->getMaxBinnedSize(); } +// last chance to get memory +FreeBlock *Backend::releaseMemInCaches(intptr_t startModifiedCnt, + int *lockedBinsThreshold, int numOfLockedBins) +{ + // something released from caches + if (extMemPool->hardCachesCleanup(false)) + return (FreeBlock*)VALID_BLOCK_IN_BIN; + + if (bkndSync.waitTillBlockReleased(startModifiedCnt)) + return (FreeBlock*)VALID_BLOCK_IN_BIN; + + // OS can't give us more memory, but we have some in locked bins + if (*lockedBinsThreshold && numOfLockedBins) { + *lockedBinsThreshold = 0; + return (FreeBlock*)VALID_BLOCK_IN_BIN; + } + return nullptr; // nothing found, give up +} + FreeBlock *Backend::askMemFromOS(size_t blockSize, intptr_t startModifiedCnt, int *lockedBinsThreshold, int numOfLockedBins, - bool *splittableRet) + bool *splittableRet, bool needSlabRegion) { - FreeBlock *block = (FreeBlock*)VALID_BLOCK_IN_BIN; + FreeBlock *block; // The block sizes can be divided into 3 groups: // 1. "quite small": popular object size, we are in bootstarp or something // like; request several regions. @@ -613,19 +669,18 @@ FreeBlock *Backend::askMemFromOS(size_t blockSize, intptr_t startModifiedCnt, // leads to excessive address space consumption). If a region is "too // large", allocate only one, to prevent fragmentation. It supposedly // doesn't hurt performance, because the object requested by user is large. + // Bounds for the groups are: const size_t maxBinned = getMaxBinnedSize(); const size_t quiteSmall = maxBinned / 8; const size_t quiteLarge = maxBinned; if (blockSize >= quiteLarge) { - // Do not interact with other threads via semaphors, as for exact fit + // Do not interact with other threads via semaphores, as for exact fit // we can't share regions with them, memory requesting is individual. block = addNewRegion(blockSize, MEMREG_ONE_BLOCK, /*addToBin=*/false); - // last chance to get memory - if (!block && extMemPool->hardCachesCleanup()) - return (FreeBlock*)VALID_BLOCK_IN_BIN; - if (block) - *splittableRet = false; + if (!block) + return releaseMemInCaches(startModifiedCnt, lockedBinsThreshold, numOfLockedBins); + *splittableRet = false; } else { const size_t regSz_sizeBased = alignUp(4*maxRequestedSize, 1024*1024); // Another thread is modifying backend while we can't get the block. @@ -641,39 +696,28 @@ FreeBlock *Backend::askMemFromOS(size_t blockSize, intptr_t startModifiedCnt, return (FreeBlock*)VALID_BLOCK_IN_BIN; } - if ( blockSize < quiteSmall ) { + if (blockSize < quiteSmall) { // For this size of blocks, add NUM_OF_REG "advance" regions in bin, // and return one as a result. // TODO: add to bin first, because other threads can use them right away. // This must be done carefully, because blocks in bins can be released // in releaseCachesToLimit(). const unsigned NUM_OF_REG = 3; - block = addNewRegion(regSz_sizeBased, MEMREG_FLEXIBLE_SIZE, /*addToBin=*/false); + MemRegionType regType = needSlabRegion ? MEMREG_SLAB_BLOCKS : MEMREG_LARGE_BLOCKS; + block = addNewRegion(regSz_sizeBased, regType, /*addToBin=*/false); if (block) for (unsigned idx=0; idx<NUM_OF_REG; idx++) - if (! addNewRegion(regSz_sizeBased, MEMREG_FLEXIBLE_SIZE, /*addToBin=*/true)) + if (! 
addNewRegion(regSz_sizeBased, regType, /*addToBin=*/true)) break; } else { - block = addNewRegion(regSz_sizeBased, MEMREG_SEVERAL_BLOCKS, /*addToBin=*/false); + block = addNewRegion(regSz_sizeBased, MEMREG_LARGE_BLOCKS, /*addToBin=*/false); } memExtendingSema.signal(); // no regions found, try to clean cache - if (!block || block == (FreeBlock*)VALID_BLOCK_IN_BIN) { - // something released from caches - if (extMemPool->hardCachesCleanup() - // ..or can use blocks that are in processing now - || bkndSync.waitTillBlockReleased(startModifiedCnt)) - return (FreeBlock*)VALID_BLOCK_IN_BIN; - // OS can't give us more memory, but we have some in locked bins - if (*lockedBinsThreshold && numOfLockedBins) { - *lockedBinsThreshold = 0; - return (FreeBlock*)VALID_BLOCK_IN_BIN; - } - return NULL; // nothing found, give up - } - MALLOC_ASSERT(block, ASSERT_TEXT); - // Since a region can hold more than one block it can be splitted. + if (!block || block == (FreeBlock*)VALID_BLOCK_IN_BIN) + return releaseMemInCaches(startModifiedCnt, lockedBinsThreshold, numOfLockedBins); + // Since a region can hold more than one block it can be split. *splittableRet = true; } // after asking memory from OS, release caches if we above the memory limits @@ -684,49 +728,72 @@ FreeBlock *Backend::askMemFromOS(size_t blockSize, intptr_t startModifiedCnt, void Backend::releaseCachesToLimit() { - if (!memSoftLimit || totalMemSize <= memSoftLimit) + if (!memSoftLimit.load(std::memory_order_relaxed) + || totalMemSize.load(std::memory_order_relaxed) <= memSoftLimit.load(std::memory_order_relaxed)) { return; + } size_t locTotalMemSize, locMemSoftLimit; scanCoalescQ(/*forceCoalescQDrop=*/false); if (extMemPool->softCachesCleanup() && - (locTotalMemSize = FencedLoad((intptr_t&)totalMemSize)) <= - (locMemSoftLimit = FencedLoad((intptr_t&)memSoftLimit))) + (locTotalMemSize = totalMemSize.load(std::memory_order_acquire)) <= + (locMemSoftLimit = memSoftLimit.load(std::memory_order_acquire))) return; // clean global large-object cache, if this is not enough, clean local caches // do this in several tries, because backend fragmentation can prevent // region from releasing for (int cleanLocal = 0; cleanLocal<2; cleanLocal++) - while (cleanLocal? - extMemPool->allLocalCaches.cleanup(extMemPool, /*cleanOnlyUnused=*/true) - : extMemPool->loc.decreasingCleanup()) - if ((locTotalMemSize = FencedLoad((intptr_t&)totalMemSize)) <= - (locMemSoftLimit = FencedLoad((intptr_t&)memSoftLimit))) + while (cleanLocal ? + extMemPool->allLocalCaches.cleanup(/*cleanOnlyUnused=*/true) : + extMemPool->loc.decreasingCleanup()) + if ((locTotalMemSize = totalMemSize.load(std::memory_order_acquire)) <= + (locMemSoftLimit = memSoftLimit.load(std::memory_order_acquire))) return; // last chance to match memSoftLimit - extMemPool->hardCachesCleanup(); + extMemPool->hardCachesCleanup(true); } -FreeBlock *Backend::IndexedBins:: - findBlock(int nativeBin, BackendSync *sync, size_t size, - bool resSlabAligned, bool alignedBin, int *numOfLockedBins) +int Backend::IndexedBins::getMinNonemptyBin(unsigned startBin) const { - for (int i=getMinNonemptyBin(nativeBin); i<freeBinsNum; i=getMinNonemptyBin(i+1)) - if (FreeBlock *block = getFromBin(i, sync, size, resSlabAligned, alignedBin, - /*wait=*/false, numOfLockedBins)) + int p = bitMask.getMinTrue(startBin); + return p == -1 ? 
Backend::freeBinsNum : p; +} + +FreeBlock *Backend::IndexedBins::findBlock(int nativeBin, BackendSync *sync, size_t size, + bool needAlignedBlock, bool alignedBin, int *numOfLockedBins) +{ + for (int i=getMinNonemptyBin(nativeBin); i<(int)freeBinsNum; i=getMinNonemptyBin(i+1)) + if (FreeBlock *block = getFromBin(i, sync, size, needAlignedBlock, alignedBin, /*wait=*/false, numOfLockedBins)) return block; - return NULL; + return nullptr; +} + +void Backend::requestBootstrapMem() +{ + if (bootsrapMemDone == bootsrapMemStatus.load(std::memory_order_acquire)) + return; + MallocMutex::scoped_lock lock( bootsrapMemStatusMutex ); + if (bootsrapMemDone == bootsrapMemStatus) + return; + MALLOC_ASSERT(bootsrapMemNotDone == bootsrapMemStatus, ASSERT_TEXT); + bootsrapMemStatus = bootsrapMemInitializing; + // request some rather big region during bootstrap in advance + // ok to get nullptr here, as later we re-do a request with more modest size + addNewRegion(2*1024*1024, MEMREG_SLAB_BLOCKS, /*addToBin=*/true); + bootsrapMemStatus = bootsrapMemDone; } // try to allocate size Byte block in available bins // needAlignedRes is true if result must be slab-aligned FreeBlock *Backend::genericGetBlock(int num, size_t size, bool needAlignedBlock) { - FreeBlock *block = NULL; + FreeBlock *block = nullptr; const size_t totalReqSize = num*size; // no splitting after requesting new region, asks exact size const int nativeBin = sizeToBin(totalReqSize); + + requestBootstrapMem(); // If we found 2 or less locked bins, it's time to ask more memory from OS. // But nothing can be asked from fixed pool. And we prefer wait, not ask // for more memory, if block is quite large. @@ -740,56 +807,52 @@ FreeBlock *Backend::genericGetBlock(int num, size_t size, bool needAlignedBlock) for (;;) { const intptr_t startModifiedCnt = bkndSync.getNumOfMods(); int numOfLockedBins; - + intptr_t cleanCnt; do { + cleanCnt = backendCleanCnt.load(std::memory_order_acquire); numOfLockedBins = 0; - - // TODO: try different bin search order if (needAlignedBlock) { - block = freeAlignedBins.findBlock(nativeBin, &bkndSync, num*size, - /*needAlignedBlock=*/true, /*alignedBin=*/true, - &numOfLockedBins); - if (!block) - block = freeLargeBins.findBlock(nativeBin, &bkndSync, num*size, - /*needAlignedBlock=*/true, /*alignedBin=*/false, - &numOfLockedBins); + block = freeSlabAlignedBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock, + /*alignedBin=*/true, &numOfLockedBins); + if (!block && extMemPool->fixedPool) + block = freeLargeBlockBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock, + /*alignedBin=*/false, &numOfLockedBins); } else { - block = freeLargeBins.findBlock(nativeBin, &bkndSync, num*size, - /*needAlignedBlock=*/false, /*alignedBin=*/false, - &numOfLockedBins); - if (!block) - block = freeAlignedBins.findBlock(nativeBin, &bkndSync, num*size, - /*needAlignedBlock=*/false, /*alignedBin=*/true, - &numOfLockedBins); + block = freeLargeBlockBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock, + /*alignedBin=*/false, &numOfLockedBins); + if (!block && extMemPool->fixedPool) + block = freeSlabAlignedBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock, + /*alignedBin=*/true, &numOfLockedBins); } - } while (!block && numOfLockedBins>lockedBinsThreshold); + } while (!block && (numOfLockedBins>lockedBinsThreshold || cleanCnt % 2 == 1 || + cleanCnt != backendCleanCnt.load(std::memory_order_acquire))); if (block) break; - if (!(scanCoalescQ(/*forceCoalescQDrop=*/true) - | 
extMemPool->softCachesCleanup())) { + bool retScanCoalescQ = scanCoalescQ(/*forceCoalescQDrop=*/true); + bool retSoftCachesCleanup = extMemPool->softCachesCleanup(); + if (!(retScanCoalescQ || retSoftCachesCleanup)) { // bins are not updated, // only remaining possibility is to ask for more memory - block = - askMemFromOS(totalReqSize, startModifiedCnt, &lockedBinsThreshold, - numOfLockedBins, &splittable); + block = askMemFromOS(totalReqSize, startModifiedCnt, &lockedBinsThreshold, + numOfLockedBins, &splittable, needAlignedBlock); if (!block) - return NULL; + return nullptr; if (block != (FreeBlock*)VALID_BLOCK_IN_BIN) { // size can be increased in askMemFromOS, that's why >= MALLOC_ASSERT(block->sizeTmp >= size, ASSERT_TEXT); break; } // valid block somewhere in bins, let's find it - block = NULL; + block = nullptr; } } MALLOC_ASSERT(block, ASSERT_TEXT); - if (splittable) - block = toAlignedBin(block, block->sizeTmp)? - splitAlignedBlock(block, num, size, needAlignedBlock) : - splitUnalignedBlock(block, num, size, needAlignedBlock); + if (splittable) { + // At this point we have to be sure that slabAligned attribute describes the right block state + block = splitBlock(block, num, size, block->slabAligned, needAlignedBlock); + } // matched blockConsumed() from startUseBlock() bkndSync.blockReleased(); @@ -802,18 +865,28 @@ LargeMemoryBlock *Backend::getLargeBlock(size_t size) (LargeMemoryBlock*)genericGetBlock(1, size, /*needAlignedRes=*/false); if (lmb) { lmb->unalignedSize = size; - if (extMemPool->mustBeAddedToGlobalLargeBlockList()) + if (extMemPool->userPool()) extMemPool->lmbList.add(lmb); } return lmb; } +BlockI *Backend::getSlabBlock(int num) { + BlockI *b = (BlockI*)genericGetBlock(num, slabSize, /*slabAligned=*/true); + MALLOC_ASSERT(isAligned(b, slabSize), ASSERT_TEXT); + return b; +} + +void Backend::putSlabBlock(BlockI *block) { + genericPutBlock((FreeBlock *)block, slabSize, /*slabAligned=*/true); +} + void *Backend::getBackRefSpace(size_t size, bool *rawMemUsed) { // This block is released only at shutdown, so it can prevent // a entire region releasing when it's received from the backend, // so prefer getRawMemory using. 
-    if (void *ret = getRawMemory(size, /*hugePages=*/false)) {
+    if (void *ret = getRawMemory(size, REGULAR)) {
         *rawMemUsed = true;
         return ret;
     }
@@ -832,24 +905,24 @@ void Backend::putBackRefSpace(void *b, size_t size, bool rawMemUsed)
 void Backend::removeBlockFromBin(FreeBlock *fBlock)
 {
     if (fBlock->myBin != Backend::NO_BIN) {
-        if (fBlock->aligned)
-            freeAlignedBins.lockRemoveBlock(fBlock->myBin, fBlock);
+        if (fBlock->slabAligned)
+            freeSlabAlignedBins.lockRemoveBlock(fBlock->myBin, fBlock);
         else
-            freeLargeBins.lockRemoveBlock(fBlock->myBin, fBlock);
+            freeLargeBlockBins.lockRemoveBlock(fBlock->myBin, fBlock);
     }
 }
 
-void Backend::genericPutBlock(FreeBlock *fBlock, size_t blockSz)
+void Backend::genericPutBlock(FreeBlock *fBlock, size_t blockSz, bool slabAligned)
 {
     bkndSync.blockConsumed();
-    coalescAndPut(fBlock, blockSz);
+    coalescAndPut(fBlock, blockSz, slabAligned);
     bkndSync.blockReleased();
 }
 
 void AllLargeBlocksList::add(LargeMemoryBlock *lmb)
 {
     MallocMutex::scoped_lock scoped_cs(largeObjLock);
-    lmb->gPrev = NULL;
+    lmb->gPrev = nullptr;
     lmb->gNext = loHead;
     if (lmb->gNext)
         lmb->gNext->gPrev = lmb;
@@ -869,9 +942,9 @@ void AllLargeBlocksList::remove(LargeMemoryBlock *lmb)
 
 void Backend::putLargeBlock(LargeMemoryBlock *lmb)
 {
-    if (extMemPool->mustBeAddedToGlobalLargeBlockList())
+    if (extMemPool->userPool())
         extMemPool->lmbList.remove(lmb);
-    genericPutBlock((FreeBlock *)lmb, lmb->unalignedSize);
+    genericPutBlock((FreeBlock *)lmb, lmb->unalignedSize, false);
 }
 
 void Backend::returnLargeObject(LargeMemoryBlock *lmb)
@@ -881,17 +954,87 @@ void Backend::returnLargeObject(LargeMemoryBlock *lmb)
     STAT_increment(getThreadId(), ThreadCommonCounters, freeLargeObj);
 }
 
-void Backend::releaseRegion(MemRegion *memRegion)
+#if BACKEND_HAS_MREMAP
+void *Backend::remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment)
 {
-    {
-        MallocMutex::scoped_lock lock(regionListLock);
-        if (regionList == memRegion)
-            regionList = memRegion->next;
-        if (memRegion->next)
-            memRegion->next->prev = memRegion->prev;
-        if (memRegion->prev)
-            memRegion->prev->next = memRegion->next;
+    // no remap for user pools or for objects too small to live in bins
+    if (inUserPool() || min(oldSize, newSize)<maxBinned_SmallPage
+        // during remap, can't guarantee alignment more strict than current or
+        // more strict than page alignment
+        || !isAligned(ptr, alignment) || alignment>extMemPool->granularity)
+        return nullptr;
+    const LargeMemoryBlock* lmbOld = ((LargeObjectHdr *)ptr - 1)->memoryBlock;
+    const size_t oldUnalignedSize = lmbOld->unalignedSize;
+    FreeBlock *oldFBlock = (FreeBlock *)lmbOld;
+    FreeBlock *right = oldFBlock->rightNeig(oldUnalignedSize);
+    // in every region only one block can have LAST_REGION_BLOCK on its right,
+    // so no synchronization is needed
+    if (!right->isLastRegionBlock())
+        return nullptr;
+
+    MemRegion *oldRegion = static_cast<LastFreeBlock*>(right)->memRegion;
+    MALLOC_ASSERT( oldRegion < ptr, ASSERT_TEXT );
+    const size_t oldRegionSize = oldRegion->allocSz;
+    if (oldRegion->type != MEMREG_ONE_BLOCK)
+        return nullptr; // we are not the only block in the region
+    const size_t userOffset = (uintptr_t)ptr - (uintptr_t)oldRegion;
+    const size_t alignedSize = LargeObjectCache::alignToBin(newSize + userOffset);
+    const size_t requestSize =
+        alignUp(sizeof(MemRegion) + alignedSize + sizeof(LastFreeBlock), extMemPool->granularity);
+    if (requestSize < alignedSize) // did the size wrap around? 
+ return nullptr; + regionList.remove(oldRegion); + + // The deallocation should be registered in address range before mremap to + // prevent a race condition with allocation on another thread. + // (OS can reuse the memory and registerAlloc will be missed on another thread) + usedAddrRange.registerFree((uintptr_t)oldRegion, (uintptr_t)oldRegion + oldRegionSize); + + void *ret = mremap(oldRegion, oldRegion->allocSz, requestSize, MREMAP_MAYMOVE); + if (MAP_FAILED == ret) { // can't remap, revert and leave + regionList.add(oldRegion); + usedAddrRange.registerAlloc((uintptr_t)oldRegion, (uintptr_t)oldRegion + oldRegionSize); + return nullptr; } + MemRegion *region = (MemRegion*)ret; + MALLOC_ASSERT(region->type == MEMREG_ONE_BLOCK, ASSERT_TEXT); + region->allocSz = requestSize; + region->blockSz = alignedSize; + + FreeBlock *fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion), + largeObjectAlignment); + + regionList.add(region); + startUseBlock(region, fBlock, /*addToBin=*/false); + MALLOC_ASSERT(fBlock->sizeTmp == region->blockSz, ASSERT_TEXT); + // matched blockConsumed() in startUseBlock(). + // TODO: get rid of useless pair blockConsumed()/blockReleased() + bkndSync.blockReleased(); + + // object must start at same offset from region's start + void *object = (void*)((uintptr_t)region + userOffset); + MALLOC_ASSERT(isAligned(object, alignment), ASSERT_TEXT); + LargeObjectHdr *header = (LargeObjectHdr*)object - 1; + setBackRef(header->backRefIdx, header); + + LargeMemoryBlock *lmb = (LargeMemoryBlock*)fBlock; + lmb->unalignedSize = region->blockSz; + lmb->objectSize = newSize; + lmb->backRefIdx = header->backRefIdx; + header->memoryBlock = lmb; + MALLOC_ASSERT((uintptr_t)lmb + lmb->unalignedSize >= + (uintptr_t)object + lmb->objectSize, "An object must fit to the block."); + + usedAddrRange.registerAlloc((uintptr_t)region, (uintptr_t)region + requestSize); + totalMemSize.fetch_add(region->allocSz - oldRegionSize); + + return object; +} +#endif /* BACKEND_HAS_MREMAP */ + +void Backend::releaseRegion(MemRegion *memRegion) +{ + regionList.remove(memRegion); freeRawMem(memRegion, memRegion->allocSz); } @@ -900,7 +1043,7 @@ FreeBlock *Backend::doCoalesc(FreeBlock *fBlock, MemRegion **mRegion) { FreeBlock *resBlock = fBlock; size_t resSize = fBlock->sizeTmp; - MemRegion *memRegion = NULL; + MemRegion *memRegion = nullptr; fBlock->markCoalescing(resSize); resBlock->blockInBin = false; @@ -910,14 +1053,14 @@ FreeBlock *Backend::doCoalesc(FreeBlock *fBlock, MemRegion **mRegion) if (leftSz != GuardedSize::LOCKED) { if (leftSz == GuardedSize::COAL_BLOCK) { coalescQ.putBlock(fBlock); - return NULL; + return nullptr; } else { FreeBlock *left = fBlock->leftNeig(leftSz); size_t lSz = left->trySetMeUsed(GuardedSize::COAL_BLOCK); if (lSz <= GuardedSize::MAX_LOCKED_VAL) { fBlock->setLeftFree(leftSz); // rollback coalescQ.putBlock(fBlock); - return NULL; + return nullptr; } else { MALLOC_ASSERT(lSz == leftSz, "Invalid header"); left->blockInBin = true; @@ -941,7 +1084,7 @@ FreeBlock *Backend::doCoalesc(FreeBlock *fBlock, MemRegion **mRegion) removeBlockFromBin(resBlock); } coalescQ.putBlock(resBlock); - return NULL; + return nullptr; } else { size_t rSz = right->rightNeig(rightSz)-> trySetLeftUsed(GuardedSize::COAL_BLOCK); @@ -952,7 +1095,7 @@ FreeBlock *Backend::doCoalesc(FreeBlock *fBlock, MemRegion **mRegion) removeBlockFromBin(resBlock); } coalescQ.putBlock(resBlock); - return NULL; + return nullptr; } else { MALLOC_ASSERT(rSz == rightSz, "Invalid header"); removeBlockFromBin(right); @@ 
-977,19 +1120,22 @@ FreeBlock *Backend::doCoalesc(FreeBlock *fBlock, MemRegion **mRegion) MALLOC_ASSERT((uintptr_t)memRegion < (uintptr_t)resBlock, ASSERT_TEXT); *mRegion = memRegion; } else - *mRegion = NULL; + *mRegion = nullptr; resBlock->sizeTmp = resSize; return resBlock; } -bool Backend::coalescAndPutList(FreeBlock *list, bool forceCoalescQDrop) +bool Backend::coalescAndPutList(FreeBlock *list, bool forceCoalescQDrop, bool reportBlocksProcessed) { bool regionReleased = false; - FreeBlock *helper; - MemRegion *memRegion; - for (;list; list = helper) { + for (FreeBlock *helper; list; + list = helper, + // matches block enqueue in CoalRequestQ::putBlock() + reportBlocksProcessed? coalescQ.blockWasProcessed() : (void)0) { + MemRegion *memRegion; bool addToTail = false; + helper = list->nextToFree; FreeBlock *toRet = doCoalesc(list, &memRegion); if (!toRet) @@ -1009,12 +1155,12 @@ bool Backend::coalescAndPutList(FreeBlock *list, bool forceCoalescQDrop) } size_t currSz = toRet->sizeTmp; int bin = sizeToBin(currSz); - bool toAligned = toAlignedBin(toRet, currSz); + bool toAligned = extMemPool->fixedPool ? toAlignedBin(toRet, currSz) : toRet->slabAligned; bool needAddToBin = true; if (toRet->blockInBin) { - // is it stay in same bin? - if (toRet->myBin == bin && toRet->aligned == toAligned) + // Does it stay in same bin? + if (toRet->myBin == bin && toRet->slabAligned == toAligned) needAddToBin = false; else { toRet->blockInBin = false; @@ -1022,16 +1168,17 @@ bool Backend::coalescAndPutList(FreeBlock *list, bool forceCoalescQDrop) } } - // not stay in same bin, or bin-less, add it + // Does not stay in same bin, or bin-less; add it if (needAddToBin) { - toRet->prev = toRet->next = toRet->nextToFree = NULL; + toRet->prev = toRet->next = toRet->nextToFree = nullptr; toRet->myBin = NO_BIN; + toRet->slabAligned = toAligned; // If the block is too small to fit in any bin, keep it bin-less. // It's not a leak because the block later can be coalesced. if (currSz >= minBinnedSize) { toRet->sizeTmp = currSz; - IndexedBins *target = toAligned? &freeAlignedBins : &freeLargeBins; + IndexedBins *target = toRet->slabAligned ? &freeSlabAlignedBins : &freeLargeBlockBins; if (forceCoalescQDrop) { target->addBlock(bin, toRet, toRet->sizeTmp, addToTail); } else if (!target->tryAddBlock(bin, toRet, addToTail)) { @@ -1054,12 +1201,13 @@ bool Backend::coalescAndPutList(FreeBlock *list, bool forceCoalescQDrop) // Coalesce fBlock and add it back to a bin; // processing delayed coalescing requests. 
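+// slabAligned records the bin family the block belongs to; coalescAndPutList()
+// reads it back through fBlock->slabAligned to route the coalesced block into
+// freeSlabAlignedBins or freeLargeBlockBins.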
-void Backend::coalescAndPut(FreeBlock *fBlock, size_t blockSz)
+void Backend::coalescAndPut(FreeBlock *fBlock, size_t blockSz, bool slabAligned)
 {
     fBlock->sizeTmp = blockSz;
-    fBlock->nextToFree = NULL;
+    fBlock->nextToFree = nullptr;
+    fBlock->slabAligned = slabAligned;
 
-    coalescAndPutList(fBlock, /*forceCoalescQDrop=*/false);
+    coalescAndPutList(fBlock, /*forceCoalescQDrop=*/false, /*reportBlocksProcessed=*/false);
 }
 
 bool Backend::scanCoalescQ(bool forceCoalescQDrop)
@@ -1067,7 +1215,12 @@ bool Backend::scanCoalescQ(bool forceCoalescQDrop)
     FreeBlock *currCoalescList = coalescQ.getAll();
 
     if (currCoalescList)
-        coalescAndPutList(currCoalescList, forceCoalescQDrop);
+        // reportBlocksProcessed=true informs that the blocks leave coalescQ,
+        // matches blockConsumed() from CoalRequestQ::putBlock()
+        coalescAndPutList(currCoalescList, forceCoalescQDrop,
+                          /*reportBlocksProcessed=*/true);
+    // returns the status of coalescQ.getAll(), as an indication of possible changes in the backend
+    // TODO: coalescAndPutList() may report whether some new free blocks became available or not
     return currCoalescList;
 }
 
@@ -1078,30 +1231,32 @@ FreeBlock *Backend::findBlockInRegion(MemRegion *region, size_t exactBlockSize)
     uintptr_t fBlockEnd,
         lastFreeBlock = (uintptr_t)region + region->allocSz - sizeof(LastFreeBlock);
 
+    static_assert(sizeof(LastFreeBlock) % sizeof(uintptr_t) == 0,
+        "Atomic applied on LastFreeBlock, and we put it at the end of region, that"
+        " is uintptr_t-aligned, so no unaligned atomic operations are possible.");
     // right bound is slab-aligned, keep LastFreeBlock after it
-    if (region->type==MEMREG_FLEXIBLE_SIZE) {
-        fBlock = (FreeBlock *)((uintptr_t)region + sizeof(MemRegion));
+    if (region->type == MEMREG_SLAB_BLOCKS) {
+        fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion), sizeof(uintptr_t));
         fBlockEnd = alignDown(lastFreeBlock, slabSize);
     } else {
-        fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion),
-                                      largeObjectAlignment);
+        fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion), largeObjectAlignment);
         fBlockEnd = (uintptr_t)fBlock + exactBlockSize;
         MALLOC_ASSERT(fBlockEnd <= lastFreeBlock, ASSERT_TEXT);
     }
     if (fBlockEnd <= (uintptr_t)fBlock)
-        return NULL; // allocSz is too small
+        return nullptr; // allocSz is too small
     blockSz = fBlockEnd - (uintptr_t)fBlock;
     // TODO: extend getSlabBlock to support degradation, i.e. 
getting less blocks // then requested, and then relax this check // (now all or nothing is implemented, check according to this) if (blockSz < numOfSlabAllocOnMiss*slabSize) - return NULL; + return nullptr; region->blockSz = blockSz; return fBlock; } -// startUseBlock adds free block to a bin, the block can be used and +// startUseBlock may add the free block to a bin, the block can be used and // even released after this, so the region must be added to regionList already void Backend::startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin) { @@ -1110,6 +1265,8 @@ void Backend::startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin) fBlock->setMeFree(blockSz); LastFreeBlock *lastBl = static_cast<LastFreeBlock*>(fBlock->rightNeig(blockSz)); + // to not get unaligned atomics during LastFreeBlock access + MALLOC_ASSERT(isAligned(lastBl, sizeof(uintptr_t)), nullptr); lastBl->initHeader(); lastBl->setMeFree(GuardedSize::LAST_REGION_BLOCK); lastBl->setLeftFree(blockSz); @@ -1120,27 +1277,65 @@ void Backend::startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin) unsigned targetBin = sizeToBin(blockSz); // during adding advance regions, register bin for a largest block in region advRegBins.registerBin(targetBin); - if (region->type!=MEMREG_ONE_BLOCK && toAlignedBin(fBlock, blockSz)) { - freeAlignedBins.addBlock(targetBin, fBlock, blockSz, /*addToTail=*/false); + if (region->type == MEMREG_SLAB_BLOCKS) { + fBlock->slabAligned = true; + freeSlabAlignedBins.addBlock(targetBin, fBlock, blockSz, /*addToTail=*/false); } else { - freeLargeBins.addBlock(targetBin, fBlock, blockSz, /*addToTail=*/false); + fBlock->slabAligned = false; + freeLargeBlockBins.addBlock(targetBin, fBlock, blockSz, /*addToTail=*/false); } } else { // to match with blockReleased() in genericGetBlock bkndSync.blockConsumed(); + // Understand our alignment for correct splitBlock operation + fBlock->slabAligned = region->type == MEMREG_SLAB_BLOCKS ? true : false; fBlock->sizeTmp = fBlock->tryLockBlock(); + MALLOC_ASSERT(fBlock->sizeTmp >= FreeBlock::minBlockSize, "Locking must be successful"); } } +void MemRegionList::add(MemRegion *r) +{ + r->prev = nullptr; + MallocMutex::scoped_lock lock(regionListLock); + r->next = head; + head = r; + if (head->next) + head->next->prev = head; +} + +void MemRegionList::remove(MemRegion *r) +{ + MallocMutex::scoped_lock lock(regionListLock); + if (head == r) + head = head->next; + if (r->next) + r->next->prev = r->prev; + if (r->prev) + r->prev->next = r->next; +} + +#if __TBB_MALLOC_BACKEND_STAT +int MemRegionList::reportStat(FILE *f) +{ + int regNum = 0; + MallocMutex::scoped_lock lock(regionListLock); + for (MemRegion *curr = head; curr; curr = curr->next) { + fprintf(f, "%p: max block %lu B, ", curr, curr->blockSz); + regNum++; + } + return regNum; +} +#endif + FreeBlock *Backend::addNewRegion(size_t size, MemRegionType memRegType, bool addToBin) { - MALLOC_STATIC_ASSERT(sizeof(BlockMutexes) <= sizeof(BlockI), - "Header must be not overwritten in used blocks"); + static_assert(sizeof(BlockMutexes) <= sizeof(BlockI), "Header must be not overwritten in used blocks"); MALLOC_ASSERT(FreeBlock::minBlockSize > GuardedSize::MAX_SPEC_VAL, "Block length must not conflict with special values of GuardedSize"); - // If the region is not "flexible size" we should reserve some space for + // If the region is not "for slabs" we should reserve some space for // a region header, the worst case alignment and the last block mark. 
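+    // i.e., for MEMREG_LARGE_BLOCKS and MEMREG_ONE_BLOCK a request of size S asks the OS for
+    // S + sizeof(MemRegion) + largeObjectAlignment + FreeBlock::minBlockSize + sizeof(LastFreeBlock)
+    // bytes; allocRawMem() may adjust this further (rawSize is passed by reference).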
-    const size_t requestSize = memRegType == MEMREG_FLEXIBLE_SIZE ? size :
+    const size_t requestSize = memRegType == MEMREG_SLAB_BLOCKS ? size :
         size + sizeof(MemRegion) + largeObjectAlignment
         + FreeBlock::minBlockSize + sizeof(LastFreeBlock);
@@ -1148,12 +1343,12 @@ FreeBlock *Backend::addNewRegion(size_t size, MemRegionType memRegType, bool add
     MemRegion *region = (MemRegion*)allocRawMem(rawSize);
     if (!region) {
         MALLOC_ASSERT(rawSize==requestSize, "getRawMem has not allocated memory but changed the allocated size.");
-        return NULL;
+        return nullptr;
     }
     if (rawSize < sizeof(MemRegion)) {
         if (!extMemPool->fixedPool)
             freeRawMem(region, rawSize);
-        return NULL;
+        return nullptr;
     }
 
     region->type = memRegType;
@@ -1162,26 +1357,20 @@ FreeBlock *Backend::addNewRegion(size_t size, MemRegionType memRegType, bool add
     if (!fBlock) {
         if (!extMemPool->fixedPool)
             freeRawMem(region, rawSize);
-        return NULL;
-    }
-    // adding to global list of all regions
-    {
-        region->prev = NULL;
-        MallocMutex::scoped_lock lock(regionListLock);
-        region->next = regionList;
-        regionList = region;
-        if (regionList->next)
-            regionList->next->prev = regionList;
+        return nullptr;
     }
+    regionList.add(region);
 
     startUseBlock(region, fBlock, addToBin);
     bkndSync.binsModified();
     return addToBin? (FreeBlock*)VALID_BLOCK_IN_BIN : fBlock;
 }
 
-bool Backend::bootstrap(ExtMemoryPool *extMemoryPool)
+void Backend::init(ExtMemoryPool *extMemoryPool)
 {
     extMemPool = extMemoryPool;
-    return addNewRegion(2*1024*1024, MEMREG_FLEXIBLE_SIZE, /*addToBin=*/true);
+    usedAddrRange.init();
+    coalescQ.init(&bkndSync);
+    bkndSync.init(this);
 }
 
 void Backend::reset()
@@ -1190,11 +1379,11 @@ void Backend::reset()
     // no active threads are allowed in backend while reset() called
     verify();
 
-    freeLargeBins.reset();
-    freeAlignedBins.reset();
+    freeLargeBlockBins.reset();
+    freeSlabAlignedBins.reset();
     advRegBins.reset();
 
-    for (MemRegion *curr = regionList; curr; curr = curr->next) {
+    for (MemRegion *curr = regionList.head; curr; curr = curr->next) {
         FreeBlock *fBlock = findBlockInRegion(curr, curr->blockSz);
         MALLOC_ASSERT(fBlock, "A memory region unexpectedly got smaller");
         startUseBlock(curr, fBlock, /*addToBin=*/true);
@@ -1203,47 +1392,47 @@ void Backend::reset()
 
 bool Backend::destroy()
 {
+    bool noError = true;
     // no active threads are allowed in backend while destroy() called
     verify();
-
-    freeLargeBins.reset();
-    freeAlignedBins.reset();
-
-    while (regionList) {
-        MemRegion *helper = regionList->next;
-        if (inUserPool())
-            (*extMemPool->rawFree)(extMemPool->poolId, regionList,
-                                   regionList->allocSz);
-        else {
-            freeRawMemory(regionList, regionList->allocSz);
-        }
-        regionList = helper;
+    if (!inUserPool()) {
+        freeLargeBlockBins.reset();
+        freeSlabAlignedBins.reset();
     }
-    return true;
+    while (regionList.head) {
+        MemRegion *helper = regionList.head->next;
+        noError &= freeRawMem(regionList.head, regionList.head->allocSz);
+        regionList.head = helper;
+    }
+    return noError;
 }
 
 bool Backend::clean()
 {
+    scanCoalescQ(/*forceCoalescQDrop=*/false);
+    // Backend::clean() is always called under synchronization, so only one thread
+    // can enter this method at a time.
+    // backendCleanCnt % 2 == 1 means that a clean operation is in progress
+    backendCleanCnt.fetch_add(1, std::memory_order_acq_rel);
     bool res = false;
-    // We can have several blocks, occupaing whole region,
+    // We can have several blocks occupying a whole region,
     // because such regions are added in advance (see askMemFromOS() and reset()),
    // and never used. Release them all. 
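+    // A concurrent genericGetBlock() that finds nothing re-scans the bins while
+    // backendCleanCnt is odd (see its do-while condition), so it does not treat
+    // the bins as finally empty while regions are being released here.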
for (int i = advRegBins.getMinUsedBin(0); i != -1; i = advRegBins.getMinUsedBin(i+1)) { - if (i == freeAlignedBins.getMinNonemptyBin(i)) - res |= freeAlignedBins.tryReleaseRegions(i, this); - if (i == freeLargeBins.getMinNonemptyBin(i)) - res |= freeLargeBins.tryReleaseRegions(i, this); + if (i == freeSlabAlignedBins.getMinNonemptyBin(i)) + res |= freeSlabAlignedBins.tryReleaseRegions(i, this); + if (i == freeLargeBlockBins.getMinNonemptyBin(i)) + res |= freeLargeBlockBins.tryReleaseRegions(i, this); } - - scanCoalescQ(/*forceCoalescQDrop=*/false); - + backendCleanCnt.fetch_add(1, std::memory_order_acq_rel); return res; } void Backend::IndexedBins::verify() { - for (int i=0; i<freeBinsNum; i++) { - for (FreeBlock *fb = freeBins[i].head; fb; fb=fb->next) { +#if MALLOC_DEBUG + for (int i=0; i<(int)freeBinsNum; i++) { + for (FreeBlock *fb = freeBins[i].head.load(std::memory_order_relaxed); fb; fb=fb->next) { uintptr_t mySz = fb->myL.value; MALLOC_ASSERT(mySz>GuardedSize::MAX_SPEC_VAL, ASSERT_TEXT); FreeBlock *right = (FreeBlock*)((uintptr_t)fb + mySz); @@ -1253,6 +1442,7 @@ void Backend::IndexedBins::verify() MALLOC_ASSERT(fb->leftL.value<=GuardedSize::MAX_SPEC_VAL, ASSERT_TEXT); } } +#endif } // For correct operation, it must be called when no other threads @@ -1261,10 +1451,10 @@ void Backend::verify() { #if MALLOC_DEBUG scanCoalescQ(/*forceCoalescQDrop=*/false); - - freeLargeBins.verify(); - freeAlignedBins.verify(); #endif // MALLOC_DEBUG + + freeLargeBlockBins.verify(); + freeSlabAlignedBins.verify(); } #if __TBB_MALLOC_BACKEND_STAT @@ -1279,37 +1469,43 @@ size_t Backend::Bin::countFreeBlocks() return cnt; } +size_t Backend::Bin::reportFreeBlocks(FILE *f) +{ + size_t totalSz = 0; + MallocMutex::scoped_lock lock(tLock); + for (FreeBlock *fb = head; fb; fb = fb->next) { + size_t sz = fb->tryLockBlock(); + fb->setMeFree(sz); + fb->rightNeig(sz)->setLeftFree(sz); + fprintf(f, " [%p;%p]", fb, (void*)((uintptr_t)fb+sz)); + totalSz += sz; + } + return totalSz; +} + void Backend::IndexedBins::reportStat(FILE *f) { size_t totalSize = 0; for (int i=0; i<Backend::freeBinsNum; i++) if (size_t cnt = freeBins[i].countFreeBlocks()) { - totalSize += cnt*Backend::binToSize(i); - fprintf(f, "%d:%lu ", i, cnt); + totalSize += freeBins[i].reportFreeBlocks(f); + fprintf(f, " %d:%lu, ", i, cnt); } fprintf(f, "\ttotal size %lu KB", totalSize/1024); } void Backend::reportStat(FILE *f) { - int regNum = 0; - scanCoalescQ(/*forceCoalescQDrop=*/false); fprintf(f, "\n regions:\n"); - { - MallocMutex::scoped_lock lock(regionListLock); - for (MemRegion *curr = regionList; curr; curr = curr->next) { - fprintf(f, "%p: max block %lu B, ", curr, curr->blockSz); - regNum++; - } - } - fprintf(f, "\n%d regions, %lu KB in all regions\n free bins:\nlarge bins ", + int regNum = regionList.reportStat(f); + fprintf(f, "\n%d regions, %lu KB in all regions\n free bins:\nlarge bins: ", regNum, totalMemSize/1024); - freeLargeBins.reportStat(f); - fprintf(f, "\naligned bins "); - freeAlignedBins.reportStat(f); + freeLargeBlockBins.reportStat(f); + fprintf(f, "\naligned bins: "); + freeSlabAlignedBins.reportStat(f); fprintf(f, "\n"); } #endif // __TBB_MALLOC_BACKEND_STAT diff --git a/src/tbb/src/tbbmalloc/backend.h b/src/tbb/src/tbbmalloc/backend.h new file mode 100644 index 000000000..cbf62c0b0 --- /dev/null +++ b/src/tbb/src/tbbmalloc/backend.h @@ -0,0 +1,392 @@ +/* + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the 
License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_tbbmalloc_internal_H
+    #error tbbmalloc_internal.h must be included at this point
+#endif
+
+#ifndef __TBB_backend_H
+#define __TBB_backend_H
+
+// Included from namespace rml::internal
+
+// global state of blocks currently being processed
+class BackendSync {
+    // Class instances should reside in zero-initialized memory!
+    // The number of blocks currently removed from a bin and not returned back
+    std::atomic<intptr_t> inFlyBlocks;        // to another
+    std::atomic<intptr_t> binsModifications;  // incremented on every bin modification
+    Backend *backend;
+public:
+    void init(Backend *b) { backend = b; }
+    void blockConsumed() { inFlyBlocks++; }
+    void binsModified() { binsModifications++; }
+    void blockReleased() {
+#if __TBB_MALLOC_BACKEND_STAT
+        MALLOC_ITT_SYNC_RELEASING(&inFlyBlocks);
+#endif
+        binsModifications++;
+        intptr_t prev = inFlyBlocks.fetch_sub(1);
+        MALLOC_ASSERT(prev > 0, ASSERT_TEXT);
+        suppress_unused_warning(prev);
+    }
+    intptr_t getNumOfMods() const { return binsModifications.load(std::memory_order_acquire); }
+    // return true if the block search needs to be re-done
+    inline bool waitTillBlockReleased(intptr_t startModifiedCnt);
+};
+
+class CoalRequestQ { // queue of free blocks whose coalescing was delayed
+private:
+    std::atomic<FreeBlock*> blocksToFree;
+    BackendSync *bkndSync;
+    // counts blocks that are in blocksToFree and blocks that have left
+    // blocksToFree but are still being actively coalesced
+    std::atomic<intptr_t> inFlyBlocks;
+public:
+    void init(BackendSync *bSync) { bkndSync = bSync; }
+    FreeBlock *getAll(); // return current list of blocks and make queue empty
+    void putBlock(FreeBlock *fBlock);
+    inline void blockWasProcessed();
+    intptr_t blocksInFly() const { return inFlyBlocks.load(std::memory_order_acquire); }
+};
+
+class MemExtendingSema {
+    std::atomic<intptr_t> active;
+public:
+    bool wait() {
+        bool rescanBins = false;
+        // up to 3 threads can add more memory from OS simultaneously,
+        // the rest of the threads have to wait
+        intptr_t prevCnt = active.load(std::memory_order_acquire);
+        for (;;) {
+            if (prevCnt < 3) {
+                if (active.compare_exchange_strong(prevCnt, prevCnt + 1)) {
+                    break;
+                }
+            } else {
+                SpinWaitWhileEq(active, prevCnt);
+                rescanBins = true;
+                break;
+            }
+        }
+        return rescanBins;
+    }
+    void signal() { active.fetch_sub(1); }
+};
+
+enum MemRegionType {
+    // The region holds only slabs
+    MEMREG_SLAB_BLOCKS = 0,
+    // The region can hold several large object blocks
+    MEMREG_LARGE_BLOCKS,
+    // The region holds only one block with a requested size
+    MEMREG_ONE_BLOCK
+};
+
+class MemRegionList {
+    MallocMutex regionListLock;
+public:
+    MemRegion *head;
+    void add(MemRegion *r);
+    void remove(MemRegion *r);
+    int reportStat(FILE *f);
+};
+
+class Backend {
+private:
+/* Blocks in the range [minBinnedSize; getMaxBinnedSize()] are kept in bins;
+   one region can contain several blocks. Larger blocks are allocated directly
+   and one region always contains one block.
+*/
+    enum {
+        minBinnedSize = 8*1024UL,
+        /* If huge pages are available, maxBinned_HugePage is used.
+           If not, maxBinned_SmallPage is the threshold. 
+           TODO: use pool's granularity for upper bound setting.*/
+        maxBinned_SmallPage = 1024*1024UL,
+        // TODO: support other page sizes
+        maxBinned_HugePage = 4*1024*1024UL
+    };
+    enum {
+        VALID_BLOCK_IN_BIN = 1 // valid block added to bin, not returned as result
+    };
+public:
+    // Backend bins step is the same as CacheStep for large object cache
+    static const size_t freeBinsStep = LargeObjectCache::LargeBSProps::CacheStep;
+    static const unsigned freeBinsNum = (maxBinned_HugePage-minBinnedSize)/freeBinsStep + 1;
+
+    // if a previous access missed the per-thread slab pool,
+    // allocate numOfSlabAllocOnMiss blocks in advance
+    static const int numOfSlabAllocOnMiss = 2;
+
+    enum {
+        NO_BIN = -1,
+        // special bin for blocks >= maxBinned_HugePage; blocks go to this bin
+        // when a pool is created with the keepAllMemory policy
+        // TODO: currently this bin is scanned using "1st fit", as it accumulates
+        // blocks of different sizes; "best fit" is preferred in terms of fragmentation
+        HUGE_BIN = freeBinsNum-1
+    };
+
+    // Bin keeps a doubly-linked list of free blocks. It must be doubly-linked
+    // because during coalescing a block is removed from the middle of the list.
+    struct Bin {
+        std::atomic<FreeBlock*> head;
+        FreeBlock*              tail;
+        MallocMutex             tLock;
+
+        void removeBlock(FreeBlock *fBlock);
+        void reset() {
+            head.store(nullptr, std::memory_order_relaxed);
+            tail = nullptr;
+        }
+        bool empty() const { return !head.load(std::memory_order_relaxed); }
+
+        size_t countFreeBlocks();
+        size_t reportFreeBlocks(FILE *f);
+        void reportStat(FILE *f);
+    };
+
+    typedef BitMaskMin<Backend::freeBinsNum> BitMaskBins;
+
+    // array of bins supplemented with a bitmask for fast finding of non-empty bins
+    class IndexedBins {
+        BitMaskBins bitMask;
+        Bin         freeBins[Backend::freeBinsNum];
+        FreeBlock *getFromBin(int binIdx, BackendSync *sync, size_t size,
+                bool needAlignedBlock, bool alignedBin, bool wait, int *resLocked);
+    public:
+        FreeBlock *findBlock(int nativeBin, BackendSync *sync, size_t size,
+                bool needAlignedBlock, bool alignedBin, int *numOfLockedBins);
+        bool tryReleaseRegions(int binIdx, Backend *backend);
+        void lockRemoveBlock(int binIdx, FreeBlock *fBlock);
+        void addBlock(int binIdx, FreeBlock *fBlock, size_t blockSz, bool addToTail);
+        bool tryAddBlock(int binIdx, FreeBlock *fBlock, bool addToTail);
+        int getMinNonemptyBin(unsigned startBin) const;
+        void verify();
+        void reset();
+        void reportStat(FILE *f);
+    };
+
+private:
+    class AdvRegionsBins {
+        BitMaskBins bins;
+    public:
+        void registerBin(int regBin) { bins.set(regBin, 1); }
+        int getMinUsedBin(int start) const { return bins.getMinTrue(start); }
+        void reset() { bins.reset(); }
+    };
+    // auxiliary comparator for atomically maintaining the maximal request size
+    class MaxRequestComparator {
+        const Backend *backend;
+    public:
+        MaxRequestComparator(const Backend *be) : backend(be) {}
+        inline bool operator()(size_t oldMaxReq, size_t requestSize) const;
+    };
+
+#if CHECK_ALLOCATION_RANGE
+    // Keep the min and max of all addresses requested from the OS;
+    // used for checking memory possibly allocated by replaced allocators
+    // and for debugging purposes. Valid only for the default memory pool. 
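+    // ptrCanBeValid() in the public section below forwards to inRange(); with
+    // CHECK_ALLOCATION_RANGE disabled, the stub variant makes the check a constant true.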
+    class UsedAddressRange {
+        static const uintptr_t ADDRESS_UPPER_BOUND = UINTPTR_MAX;
+
+        std::atomic<uintptr_t> leftBound,
+                               rightBound;
+        MallocMutex mutex;
+    public:
+        // rightBound is zero-initialized
+        void init() { leftBound.store(ADDRESS_UPPER_BOUND, std::memory_order_relaxed); }
+        void registerAlloc(uintptr_t left, uintptr_t right);
+        void registerFree(uintptr_t left, uintptr_t right);
+        // as only the left and right bounds are kept, we can return true
+        // for a pointer not allocated by us if more than a single region
+        // was requested from the OS
+        bool inRange(void *ptr) const {
+            const uintptr_t p = (uintptr_t)ptr;
+            return leftBound.load(std::memory_order_relaxed)<=p &&
+                   p<=rightBound.load(std::memory_order_relaxed);
+        }
+    };
+#else
+    class UsedAddressRange {
+    public:
+        void init() { }
+        void registerAlloc(uintptr_t, uintptr_t) {}
+        void registerFree(uintptr_t, uintptr_t) {}
+        bool inRange(void *) const { return true; }
+    };
+#endif
+
+    ExtMemoryPool *extMemPool;
+    // used to release every region when the pool is destroyed
+    MemRegionList regionList;
+
+    CoalRequestQ coalescQ; // queue of coalescing requests
+    BackendSync bkndSync;
+    // semaphore protecting adding more memory from OS
+    MemExtendingSema memExtendingSema;
+    std::atomic<size_t> totalMemSize;
+    std::atomic<size_t> memSoftLimit;
+    UsedAddressRange usedAddrRange;
+    // to keep the 1st allocation larger than requested, keep the bootstrapping status
+    enum {
+        bootsrapMemNotDone = 0,
+        bootsrapMemInitializing,
+        bootsrapMemDone
+    };
+    std::atomic<intptr_t> bootsrapMemStatus;
+    MallocMutex bootsrapMemStatusMutex;
+
+    // Using the maximal observed requested size allows decreasing
+    // memory consumption for small requests and decreasing fragmentation
+    // for workloads where small and large allocation requests are mixed. 
+ // TODO: decrease, not only increase it + std::atomic<size_t> maxRequestedSize; + + // register bins related to advance regions + AdvRegionsBins advRegBins; + // Storage for split FreeBlocks + IndexedBins freeLargeBlockBins, + freeSlabAlignedBins; + + std::atomic<intptr_t> backendCleanCnt; + // Our friends + friend class BackendSync; + + /******************************** Backend methods ******************************/ + + /*--------------------------- Coalescing functions ----------------------------*/ + void coalescAndPut(FreeBlock *fBlock, size_t blockSz, bool slabAligned); + bool coalescAndPutList(FreeBlock *head, bool forceCoalescQDrop, bool reportBlocksProcessed); + + // Main coalescing operation + FreeBlock *doCoalesc(FreeBlock *fBlock, MemRegion **memRegion); + + // Queue for conflicted blocks during coalescing + bool scanCoalescQ(bool forceCoalescQDrop); + intptr_t blocksInCoalescing() const { return coalescQ.blocksInFly(); } + + /*--------------------- FreeBlock backend accessors ---------------------------*/ + FreeBlock *genericGetBlock(int num, size_t size, bool slabAligned); + void genericPutBlock(FreeBlock *fBlock, size_t blockSz, bool slabAligned); + + // Split the block and return remaining parts to backend if possible + FreeBlock *splitBlock(FreeBlock *fBlock, int num, size_t size, bool isAligned, bool needAlignedBlock); + + void removeBlockFromBin(FreeBlock *fBlock); + + // TODO: combine with returnLargeObject + void putLargeBlock(LargeMemoryBlock *lmb); + + /*------------------- Starting point for OS allocation ------------------------*/ + void requestBootstrapMem(); + FreeBlock *askMemFromOS(size_t totalReqSize, intptr_t startModifiedCnt, + int *lockedBinsThreshold, int numOfLockedBins, + bool *splittable, bool needSlabRegion); + + /*---------------------- Memory regions allocation ----------------------------*/ + FreeBlock *addNewRegion(size_t size, MemRegionType type, bool addToBin); + void releaseRegion(MemRegion *region); + + // TODO: combine in one initMemoryRegion function + FreeBlock *findBlockInRegion(MemRegion *region, size_t exactBlockSize); + void startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin); + + /*------------------------- Raw memory accessors ------------------------------*/ + void *allocRawMem(size_t &size); + bool freeRawMem(void *object, size_t size); + + /*------------------------------ Cleanup functions ----------------------------*/ + // Clean all memory from all caches (extMemPool hard cleanup) + FreeBlock *releaseMemInCaches(intptr_t startModifiedCnt, int *lockedBinsThreshold, int numOfLockedBins); + // Soft heap limit (regular cleanup, then maybe hard cleanup) + void releaseCachesToLimit(); + + /*---------------------------------- Utility ----------------------------------*/ + // TODO: move inside IndexedBins class + static int sizeToBin(size_t size) { + if (size >= maxBinned_HugePage) + return HUGE_BIN; + else if (size < minBinnedSize) + return NO_BIN; + + int bin = (size - minBinnedSize)/freeBinsStep; + + MALLOC_ASSERT(bin < HUGE_BIN, "Invalid size."); + return bin; + } + static bool toAlignedBin(FreeBlock *block, size_t size) { + return isAligned((char*)block + size, slabSize) && size >= slabSize; + } + +public: + /*--------------------- Init, reset, destroy, verify -------------------------*/ + void init(ExtMemoryPool *extMemoryPool); + bool destroy(); + + void verify(); + void reset(); + bool clean(); // clean on caches cleanup + + /*------------------------- Slab block request --------------------------------*/ + BlockI 
*getSlabBlock(int num); + void putSlabBlock(BlockI *block); + + /*-------------------------- Large object request -----------------------------*/ + LargeMemoryBlock *getLargeBlock(size_t size); + // TODO: make consistent with getLargeBlock + void returnLargeObject(LargeMemoryBlock *lmb); + + /*-------------------------- Backreference memory request ----------------------*/ + void *getBackRefSpace(size_t size, bool *rawMemUsed); + void putBackRefSpace(void *b, size_t size, bool rawMemUsed); + + /*----------------------------- Remap object ----------------------------------*/ + void *remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment); + + /*---------------------------- Validation -------------------------------------*/ + bool inUserPool() const; + bool ptrCanBeValid(void *ptr) const { return usedAddrRange.inRange(ptr); } + + /*-------------------------- Configuration API --------------------------------*/ + // Soft heap limit + void setRecommendedMaxSize(size_t softLimit) { + memSoftLimit = softLimit; + releaseCachesToLimit(); + } + + /*------------------------------- Info ----------------------------------------*/ + size_t getMaxBinnedSize() const; + + /*-------------------------- Testing, statistics ------------------------------*/ +#if __TBB_MALLOC_WHITEBOX_TEST + size_t getTotalMemSize() const { return totalMemSize.load(std::memory_order_relaxed); } +#endif +#if __TBB_MALLOC_BACKEND_STAT + void reportStat(FILE *f); +private: + static size_t binToSize(int bin) { + MALLOC_ASSERT(bin <= HUGE_BIN, "Invalid bin."); + + return bin*freeBinsStep + minBinnedSize; + } +#endif +}; + +#endif // __TBB_backend_H diff --git a/src/tbb/src/tbbmalloc/backref.cpp b/src/tbb/src/tbbmalloc/backref.cpp index 611c1e7b0..b0ea83069 100644 --- a/src/tbb/src/tbbmalloc/backref.cpp +++ b/src/tbb/src/tbbmalloc/backref.cpp @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
 */
 
 #include "tbbmalloc_internal.h"
@@ -35,48 +31,52 @@ struct BackRefBlock : public BlockI {
     FreeObject *freeList;
     // list of all blocks that were allocated from raw mem (i.e., not from backend)
     BackRefBlock *nextRawMemBlock;
-    int allocatedCount; // the number of objects allocated
-    int myNum; // the index in the master
+    std::atomic<int> allocatedCount; // the number of objects allocated
+    BackRefIdx::main_t myNum; // the index in the main table
     MallocMutex blockMutex;
     // true if this block has been added to the listForUse chain,
-    // modifications protected by masterMutex
-    bool addedToForUse;
+    // modifications protected by mainMutex
+    std::atomic<bool> addedToForUse;
 
-    BackRefBlock(const BackRefBlock *blockToUse, int num) :
-        nextForUse(NULL), bumpPtr((FreeObject*)((uintptr_t)blockToUse + slabSize - sizeof(void*))),
-        freeList(NULL), nextRawMemBlock(NULL), allocatedCount(0), myNum(num),
+    BackRefBlock(const BackRefBlock *blockToUse, intptr_t num) :
+        nextForUse(nullptr), bumpPtr((FreeObject*)((uintptr_t)blockToUse + slabSize - sizeof(void*))),
+        freeList(nullptr), nextRawMemBlock(nullptr), allocatedCount(0), myNum(num),
        addedToForUse(false) {
-        memset(&blockMutex, 0, sizeof(MallocMutex));
-        // index in BackRefMaster must fit to uint16_t
-        MALLOC_ASSERT(!(myNum >> 16), ASSERT_TEXT);
+        memset(static_cast<void*>(&blockMutex), 0, sizeof(MallocMutex));
+
+        MALLOC_ASSERT(!(num >> CHAR_BIT*sizeof(BackRefIdx::main_t)),
+                      "index in BackRefMain must fit to BackRefIdx::main");
    }
     // clean all but header
-    void zeroSet() { memset(this+1, 0, BackRefBlock::bytes-sizeof(BackRefBlock)); }
+    void zeroSet() { memset(static_cast<void*>(this+1), 0, BackRefBlock::bytes-sizeof(BackRefBlock)); }
     static const int bytes = slabSize;
 };
 
 // max number of backreference pointers in slab block
 static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*);
 
-struct BackRefMaster {
-/* A slab block can hold up to ~2K back pointers to slab blocks or large objects,
- * so it can address at least 32MB. The array of 64KB holds 8K pointers
- * to such blocks, addressing ~256 GB.
+struct BackRefMain {
+/* On 64-bit systems a slab block can hold up to ~2K back pointers to slab blocks
+ * or large objects, so it can address at least 32MB. The main array of 256KB
+ * holds 32K pointers to such blocks, addressing ~1 TB.
+ * On 32-bit systems there are ~4K back pointers in a slab block, so ~64MB can be addressed.
+ * The main array of 8KB holds 2K pointers to leaves, so ~128 GB can be addressed.
 */
-    static const size_t bytes = 64*1024;
+    static const size_t bytes = sizeof(uintptr_t)>4? 
256*1024 : 8*1024; static const int dataSz; -/* space is reserved for master table and 4 leaves +/* space is reserved for main table and 4 leaves taking into account VirtualAlloc allocation granularity */ static const int leaves = 4; - static const size_t masterSize = BackRefMaster::bytes+leaves*BackRefBlock::bytes; - // take into account VirtualAlloc 64KB granularity + static const size_t mainSize = BackRefMain::bytes+leaves*BackRefBlock::bytes; + // The size of memory request for a few more leaf blocks; + // selected to match VirtualAlloc granularity static const size_t blockSpaceSize = 64*1024; Backend *backend; - BackRefBlock *active; // if defined, use it for allocations - BackRefBlock *listForUse; // the chain of data blocks with free items + std::atomic<BackRefBlock*> active; // if defined, use it for allocations + std::atomic<BackRefBlock*> listForUse; // the chain of data blocks with free items BackRefBlock *allRawMemBlocks; - intptr_t lastUsed; // index of the last used block + std::atomic <intptr_t> lastUsed; // index of the last used block bool rawMemUsed; MallocMutex requestNewSpaceMutex; BackRefBlock *backRefBl[1]; // the real size of the array is dataSz @@ -87,148 +87,175 @@ struct BackRefMaster { bool requestNewSpace(); }; -const int BackRefMaster::dataSz - = 1+(BackRefMaster::bytes-sizeof(BackRefMaster))/sizeof(BackRefBlock*); +const int BackRefMain::dataSz + = 1+(BackRefMain::bytes-sizeof(BackRefMain))/sizeof(BackRefBlock*); -static MallocMutex masterMutex; -static BackRefMaster *backRefMaster; +static MallocMutex mainMutex; +static std::atomic<BackRefMain*> backRefMain; -bool initBackRefMaster(Backend *backend) +bool initBackRefMain(Backend *backend) { bool rawMemUsed; - BackRefMaster *master = - (BackRefMaster*)backend->getBackRefSpace(BackRefMaster::masterSize, + BackRefMain *main = + (BackRefMain*)backend->getBackRefSpace(BackRefMain::mainSize, &rawMemUsed); - if (! master) + if (! main) return false; - master->backend = backend; - master->listForUse = master->allRawMemBlocks = NULL; - master->rawMemUsed = rawMemUsed; - master->lastUsed = -1; - memset(&master->requestNewSpaceMutex, 0, sizeof(MallocMutex)); - for (int i=0; i<BackRefMaster::leaves; i++) { - BackRefBlock *bl = (BackRefBlock*)((uintptr_t)master + BackRefMaster::bytes + i*BackRefBlock::bytes); + main->backend = backend; + main->listForUse.store(nullptr, std::memory_order_relaxed); + main->allRawMemBlocks = nullptr; + main->rawMemUsed = rawMemUsed; + main->lastUsed = -1; + memset(static_cast<void*>(&main->requestNewSpaceMutex), 0, sizeof(MallocMutex)); + for (int i=0; i<BackRefMain::leaves; i++) { + BackRefBlock *bl = (BackRefBlock*)((uintptr_t)main + BackRefMain::bytes + i*BackRefBlock::bytes); bl->zeroSet(); - master->initEmptyBackRefBlock(bl); + main->initEmptyBackRefBlock(bl); if (i) - master->addToForUseList(bl); + main->addToForUseList(bl); else // active leaf is not needed in listForUse - master->active = bl; + main->active.store(bl, std::memory_order_relaxed); } - // backRefMaster is read in getBackRef, so publish it in consistent state - FencedStore((intptr_t&)backRefMaster, (intptr_t)master); + // backRefMain is read in getBackRef, so publish it in consistent state + backRefMain.store(main, std::memory_order_release); return true; } -void destroyBackRefMaster(Backend *backend) +#if __TBB_SOURCE_DIRECTLY_INCLUDED +void destroyBackRefMain(Backend *backend) { - if (backRefMaster) { // Is initBackRefMaster() called? 
-        for (BackRefBlock *curr=backRefMaster->allRawMemBlocks; curr; ) {
+    if (backRefMain.load(std::memory_order_acquire)) { // Is initBackRefMain() called?
+        for (BackRefBlock *curr = backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks; curr; ) {
             BackRefBlock *next = curr->nextRawMemBlock;
             // allRawMemBlocks list is only for raw mem blocks
-            backend->putBackRefSpace(curr, BackRefMaster::blockSpaceSize,
+            backend->putBackRefSpace(curr, BackRefMain::blockSpaceSize,
                                      /*rawMemUsed=*/true);
             curr = next;
         }
-        backend->putBackRefSpace(backRefMaster, BackRefMaster::masterSize,
-                                 backRefMaster->rawMemUsed);
+        backend->putBackRefSpace(backRefMain.load(std::memory_order_relaxed), BackRefMain::mainSize,
+                                 backRefMain.load(std::memory_order_relaxed)->rawMemUsed);
     }
 }
+#endif
 
-void BackRefMaster::addToForUseList(BackRefBlock *bl)
+void BackRefMain::addToForUseList(BackRefBlock *bl)
 {
-    bl->nextForUse = listForUse;
-    listForUse = bl;
-    bl->addedToForUse = true;
+    bl->nextForUse = listForUse.load(std::memory_order_relaxed);
+    listForUse.store(bl, std::memory_order_relaxed);
+    bl->addedToForUse.store(true, std::memory_order_relaxed);
 }
 
-void BackRefMaster::initEmptyBackRefBlock(BackRefBlock *newBl)
+void BackRefMain::initEmptyBackRefBlock(BackRefBlock *newBl)
 {
     intptr_t nextLU = lastUsed+1;
     new (newBl) BackRefBlock(newBl, nextLU);
+    MALLOC_ASSERT(nextLU < dataSz, nullptr);
     backRefBl[nextLU] = newBl;
     // lastUsed is read in getBackRef, and access to backRefBl[lastUsed]
     // is possible only after checking backref against current lastUsed
-    FencedStore(lastUsed, nextLU);
+    lastUsed.store(nextLU, std::memory_order_release);
 }
 
-bool BackRefMaster::requestNewSpace()
+bool BackRefMain::requestNewSpace()
 {
-    bool rawMemUsed;
-    MALLOC_STATIC_ASSERT(!(blockSpaceSize % BackRefBlock::bytes),
+    bool isRawMemUsed;
+    static_assert(!(blockSpaceSize % BackRefBlock::bytes),
                   "Must request space for whole number of blocks.");
+    if (BackRefMain::dataSz <= lastUsed + 1) // no space in main
+        return false;
+
+    // only one thread at a time may add blocks
     MallocMutex::scoped_lock newSpaceLock(requestNewSpaceMutex);
-    if (listForUse) // double check that only one block is available
+    if (listForUse.load(std::memory_order_relaxed)) // double check that only one block is available
         return true;
-    BackRefBlock *newBl =
-        (BackRefBlock*)backend->getBackRefSpace(blockSpaceSize, &rawMemUsed);
+    BackRefBlock *newBl = (BackRefBlock*)backend->getBackRefSpace(blockSpaceSize, &isRawMemUsed);
     if (!newBl) return false;
 
-    // touch a page for the 1st time without taking masterMutex ...
+    // touch a page for the 1st time without taking mainMutex ...
     for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize;
-         bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes))
+         bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes)) {
         bl->zeroSet();
+    }
+
+    MallocMutex::scoped_lock lock(mainMutex); // ... and share under lock
+
+    const size_t numOfUnusedIdxs = BackRefMain::dataSz - lastUsed - 1;
+    if (numOfUnusedIdxs <= 0) { // no space in main under lock, roll back
+        backend->putBackRefSpace(newBl, blockSpaceSize, isRawMemUsed);
+        return false;
+    }
+    // It's possible that only part of newBl is used, due to lack of indices in main.
+    // This is OK, as such underutilization is possible only once for the backreferences table.
+    int blocksToUse = min(numOfUnusedIdxs, blockSpaceSize / BackRefBlock::bytes);
 
-    MallocMutex::scoped_lock lock(masterMutex); // ... 
and share under lock // use the first block in the batch to maintain the list of "raw" memory // to be released at shutdown - if (rawMemUsed) { - newBl->nextRawMemBlock = backRefMaster->allRawMemBlocks; - backRefMaster->allRawMemBlocks = newBl; + if (isRawMemUsed) { + newBl->nextRawMemBlock = backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks; + backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks = newBl; } - for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize; - bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes)) { + for (BackRefBlock *bl = newBl; blocksToUse>0; bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes), blocksToUse--) { initEmptyBackRefBlock(bl); - if (active->allocatedCount == BR_MAX_CNT) - active = bl; // active leaf is not needed in listForUse - else + if (active.load(std::memory_order_relaxed)->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT) { + active.store(bl, std::memory_order_release); // active leaf is not needed in listForUse + } else { addToForUseList(bl); + } } return true; } -BackRefBlock *BackRefMaster::findFreeBlock() +BackRefBlock *BackRefMain::findFreeBlock() { - if (active->allocatedCount < BR_MAX_CNT) - return active; - - if (listForUse) { // use released list - MallocMutex::scoped_lock lock(masterMutex); - - if (active->allocatedCount == BR_MAX_CNT && listForUse) { - active = listForUse; - listForUse = listForUse->nextForUse; - MALLOC_ASSERT(active->addedToForUse, ASSERT_TEXT); - active->addedToForUse = false; + BackRefBlock* active_block = active.load(std::memory_order_acquire); + MALLOC_ASSERT(active_block, ASSERT_TEXT); + + if (active_block->allocatedCount.load(std::memory_order_relaxed) < BR_MAX_CNT) + return active_block; + + if (listForUse.load(std::memory_order_relaxed)) { // use released list + MallocMutex::scoped_lock lock(mainMutex); + + if (active_block->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT) { + active_block = listForUse.load(std::memory_order_relaxed); + if (active_block) { + active.store(active_block, std::memory_order_release); + listForUse.store(active_block->nextForUse, std::memory_order_relaxed); + MALLOC_ASSERT(active_block->addedToForUse.load(std::memory_order_relaxed), ASSERT_TEXT); + active_block->addedToForUse.store(false, std::memory_order_relaxed); + } } - } else if (lastUsed-1 < backRefMaster->dataSz) { // allocate new data node - if (!requestNewSpace()) return NULL; - } else // no free space in BackRefMaster, give up - return NULL; - return active; + } else // allocate new data node + if (!requestNewSpace()) + return nullptr; + return active.load(std::memory_order_acquire); // reread because of requestNewSpace } void *getBackRef(BackRefIdx backRefIdx) { - // !backRefMaster means no initialization done, so it can't be valid memory + // !backRefMain means no initialization done, so it can't be valid memory // see addEmptyBackRefBlock for fences around lastUsed - if (!FencedLoad((intptr_t&)backRefMaster) - || backRefIdx.getMaster() > FencedLoad(backRefMaster->lastUsed) + if (!(backRefMain.load(std::memory_order_acquire)) + || backRefIdx.getMain() > (backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_acquire)) || backRefIdx.getOffset() >= BR_MAX_CNT) - return NULL; - return *(void**)((uintptr_t)backRefMaster->backRefBl[backRefIdx.getMaster()] - + sizeof(BackRefBlock)+backRefIdx.getOffset()*sizeof(void*)); + { + return nullptr; + } + std::atomic<void*>& backRefEntry = *(std::atomic<void*>*)( + 
(uintptr_t)backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()] + + sizeof(BackRefBlock) + backRefIdx.getOffset() * sizeof(std::atomic<void*>) + ); + return backRefEntry.load(std::memory_order_relaxed); } void setBackRef(BackRefIdx backRefIdx, void *newPtr) { - MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster->lastUsed && backRefIdx.getOffset()<BR_MAX_CNT, - ASSERT_TEXT); - *(void**)((uintptr_t)backRefMaster->backRefBl[backRefIdx.getMaster()] - + sizeof(BackRefBlock) + backRefIdx.getOffset()*sizeof(void*)) = newPtr; + MALLOC_ASSERT(backRefIdx.getMain()<=backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed) + && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT); + ((std::atomic<void*>*)((uintptr_t)backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()] + + sizeof(BackRefBlock) + backRefIdx.getOffset() * sizeof(void*)))->store(newPtr, std::memory_order_relaxed); } BackRefIdx BackRefIdx::newBackRef(bool largeObj) @@ -239,11 +266,11 @@ BackRefIdx BackRefIdx::newBackRef(bool largeObj) bool lastBlockFirstUsed = false; do { - MALLOC_ASSERT(backRefMaster, ASSERT_TEXT); - blockToUse = backRefMaster->findFreeBlock(); + MALLOC_ASSERT(backRefMain.load(std::memory_order_relaxed), ASSERT_TEXT); + blockToUse = backRefMain.load(std::memory_order_relaxed)->findFreeBlock(); if (!blockToUse) return BackRefIdx(); - toUse = NULL; + toUse = nullptr; { // the block is locked to find a reference MallocMutex::scoped_lock lock(blockToUse->blockMutex); @@ -254,30 +281,32 @@ BackRefIdx BackRefIdx::newBackRef(bool largeObj) ((uintptr_t)blockToUse->freeList>=(uintptr_t)blockToUse && (uintptr_t)blockToUse->freeList < (uintptr_t)blockToUse + slabSize), ASSERT_TEXT); - } else if (blockToUse->allocatedCount < BR_MAX_CNT) { + } else if (blockToUse->allocatedCount.load(std::memory_order_relaxed) < BR_MAX_CNT) { toUse = (void**)blockToUse->bumpPtr; blockToUse->bumpPtr = (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*)); - if (blockToUse->allocatedCount == BR_MAX_CNT-1) { + if (blockToUse->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT-1) { MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr < (uintptr_t)blockToUse+sizeof(BackRefBlock), ASSERT_TEXT); - blockToUse->bumpPtr = NULL; + blockToUse->bumpPtr = nullptr; } } if (toUse) { - if (!blockToUse->allocatedCount && !backRefMaster->listForUse) + if (!blockToUse->allocatedCount.load(std::memory_order_relaxed) && + !backRefMain.load(std::memory_order_relaxed)->listForUse.load(std::memory_order_relaxed)) { lastBlockFirstUsed = true; - blockToUse->allocatedCount++; + } + blockToUse->allocatedCount.store(blockToUse->allocatedCount.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed); } } // end of lock scope } while (!toUse); // The first thread that uses the last block requests new space in advance; // possible failures are ignored. if (lastBlockFirstUsed) - backRefMaster->requestNewSpace(); + backRefMain.load(std::memory_order_relaxed)->requestNewSpace(); - res.master = blockToUse->myNum; + res.main = blockToUse->myNum; uintptr_t offset = ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*); // Is offset too big? 
@@ -291,30 +320,34 @@ BackRefIdx BackRefIdx::newBackRef(bool largeObj) void removeBackRef(BackRefIdx backRefIdx) { MALLOC_ASSERT(!backRefIdx.isInvalid(), ASSERT_TEXT); - MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster->lastUsed + MALLOC_ASSERT(backRefIdx.getMain()<=backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed) && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT); - BackRefBlock *currBlock = backRefMaster->backRefBl[backRefIdx.getMaster()]; - FreeObject *freeObj = (FreeObject*)((uintptr_t)currBlock + sizeof(BackRefBlock) - + backRefIdx.getOffset()*sizeof(void*)); - MALLOC_ASSERT(((uintptr_t)freeObj>(uintptr_t)currBlock && - (uintptr_t)freeObj<(uintptr_t)currBlock + slabSize), ASSERT_TEXT); + BackRefBlock *currBlock = backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()]; + std::atomic<void*>& backRefEntry = *(std::atomic<void*>*)((uintptr_t)currBlock + sizeof(BackRefBlock) + + backRefIdx.getOffset()*sizeof(std::atomic<void*>)); + MALLOC_ASSERT(((uintptr_t)&backRefEntry >(uintptr_t)currBlock && + (uintptr_t)&backRefEntry <(uintptr_t)currBlock + slabSize), ASSERT_TEXT); { MallocMutex::scoped_lock lock(currBlock->blockMutex); - freeObj->next = currBlock->freeList; - MALLOC_ASSERT(!freeObj->next || - ((uintptr_t)freeObj->next > (uintptr_t)currBlock - && (uintptr_t)freeObj->next < - (uintptr_t)currBlock + slabSize), ASSERT_TEXT); - currBlock->freeList = freeObj; - currBlock->allocatedCount--; + backRefEntry.store(currBlock->freeList, std::memory_order_relaxed); +#if MALLOC_DEBUG + uintptr_t backRefEntryValue = (uintptr_t)backRefEntry.load(std::memory_order_relaxed); + MALLOC_ASSERT(!backRefEntryValue || + (backRefEntryValue > (uintptr_t)currBlock + && backRefEntryValue < (uintptr_t)currBlock + slabSize), ASSERT_TEXT); +#endif + currBlock->freeList = (FreeObject*)&backRefEntry; + currBlock->allocatedCount.store(currBlock->allocatedCount.load(std::memory_order_relaxed)-1, std::memory_order_relaxed); } // TODO: do we need double-check here? - if (!currBlock->addedToForUse && currBlock!=backRefMaster->active) { - MallocMutex::scoped_lock lock(masterMutex); + if (!currBlock->addedToForUse.load(std::memory_order_relaxed) && + currBlock!=backRefMain.load(std::memory_order_relaxed)->active.load(std::memory_order_relaxed)) { + MallocMutex::scoped_lock lock(mainMutex); - if (!currBlock->addedToForUse && currBlock!=backRefMaster->active) - backRefMaster->addToForUseList(currBlock); + if (!currBlock->addedToForUse.load(std::memory_order_relaxed) && + currBlock!=backRefMain.load(std::memory_order_relaxed)->active.load(std::memory_order_relaxed)) + backRefMain.load(std::memory_order_relaxed)->addToForUseList(currBlock); } } diff --git a/src/tbb/src/tbbmalloc/def/lin32-tbbmalloc.def b/src/tbb/src/tbbmalloc/def/lin32-tbbmalloc.def new file mode 100644 index 000000000..e096de62f --- /dev/null +++ b/src/tbb/src/tbbmalloc/def/lin32-tbbmalloc.def @@ -0,0 +1,73 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +{ +global: + +scalable_calloc; +scalable_free; +scalable_malloc; +scalable_realloc; +scalable_posix_memalign; +scalable_aligned_malloc; +scalable_aligned_realloc; +scalable_aligned_free; +scalable_msize; +scalable_allocation_mode; +scalable_allocation_command; +__TBB_malloc_safer_aligned_msize; +__TBB_malloc_safer_aligned_realloc; +__TBB_malloc_safer_free; +__TBB_malloc_safer_msize; +__TBB_malloc_safer_realloc; + +/* memory pool stuff */ +_ZN3rml10pool_resetEPNS_10MemoryPoolE; +_ZN3rml11pool_createEiPKNS_13MemPoolPolicyE; +_ZN3rml14pool_create_v1EiPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; +_ZN3rml11pool_mallocEPNS_10MemoryPoolEj; +_ZN3rml12pool_destroyEPNS_10MemoryPoolE; +_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; +_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvj; +_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvjj; +_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEjj; +_ZN3rml13pool_identifyEPv; +_ZN3rml10pool_msizeEPNS_10MemoryPoolEPv; + +local: + +/* TBB symbols */ +*3rml*; +*3tbb*; +*__TBB*; +__itt_*; +ITT_DoOneTimeInitialization; +TBB_runtime_interface_version; + +/* Intel Compiler (libirc) symbols */ +__intel_*; +_intel_*; +get_memcpy_largest_cachelinesize; +get_memcpy_largest_cache_size; +get_mem_ops_method; +init_mem_ops_method; +irc__get_msg; +irc__print; +override_mem_ops_method; +set_memcpy_largest_cachelinesize; +set_memcpy_largest_cache_size; + +}; diff --git a/src/tbb/src/tbbmalloc/def/lin64-tbbmalloc.def b/src/tbb/src/tbbmalloc/def/lin64-tbbmalloc.def new file mode 100644 index 000000000..96abb2bff --- /dev/null +++ b/src/tbb/src/tbbmalloc/def/lin64-tbbmalloc.def @@ -0,0 +1,73 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +{ +global: + +scalable_calloc; +scalable_free; +scalable_malloc; +scalable_realloc; +scalable_posix_memalign; +scalable_aligned_malloc; +scalable_aligned_realloc; +scalable_aligned_free; +scalable_msize; +scalable_allocation_mode; +scalable_allocation_command; +__TBB_malloc_safer_aligned_msize; +__TBB_malloc_safer_aligned_realloc; +__TBB_malloc_safer_free; +__TBB_malloc_safer_msize; +__TBB_malloc_safer_realloc; + +/* memory pool stuff */ +_ZN3rml11pool_createElPKNS_13MemPoolPolicyE; +_ZN3rml14pool_create_v1ElPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; +_ZN3rml10pool_resetEPNS_10MemoryPoolE; +_ZN3rml11pool_mallocEPNS_10MemoryPoolEm; +_ZN3rml12pool_destroyEPNS_10MemoryPoolE; +_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; +_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvm; +_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvmm; +_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEmm; +_ZN3rml13pool_identifyEPv; +_ZN3rml10pool_msizeEPNS_10MemoryPoolEPv; + +local: + +/* TBB symbols */ +*3rml*; +*3tbb*; +*__TBB*; +__itt_*; +ITT_DoOneTimeInitialization; +TBB_runtime_interface_version; + +/* Intel Compiler (libirc) symbols */ +__intel_*; +_intel_*; +get_memcpy_largest_cachelinesize; +get_memcpy_largest_cache_size; +get_mem_ops_method; +init_mem_ops_method; +irc__get_msg; +irc__print; +override_mem_ops_method; +set_memcpy_largest_cachelinesize; +set_memcpy_largest_cache_size; + +}; diff --git a/src/tbb/src/tbbmalloc/def/mac64-tbbmalloc.def b/src/tbb/src/tbbmalloc/def/mac64-tbbmalloc.def new file mode 100644 index 000000000..71253e0aa --- /dev/null +++ b/src/tbb/src/tbbmalloc/def/mac64-tbbmalloc.def @@ -0,0 +1,44 @@ +# Copyright (c) 2005-2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
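Before the macOS list below, a note on the two Linux export files above: the `pool_*` entries differ only in their trailing type codes because of Itanium C++ ABI name mangling. `size_t` is `unsigned int` (code `j`) on 32-bit Linux and `unsigned long` (code `m`) on 64-bit Linux, so the same declaration produces two different exported names:

```cpp
#include <cstddef>

namespace rml {

class MemoryPool;

// mangles to _ZN3rml11pool_mallocEPNS_10MemoryPoolEj on lin32 (size_t -> j)
// and to     _ZN3rml11pool_mallocEPNS_10MemoryPoolEm on lin64 (size_t -> m)
// declaration only, shown for its mangled name
void* pool_malloc(MemoryPool* pool, std::size_t bytes);

}

int main() { return 0; }
```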
+ +_scalable_calloc +_scalable_free +_scalable_malloc +_scalable_realloc +_scalable_posix_memalign +_scalable_aligned_malloc +_scalable_aligned_realloc +_scalable_aligned_free +_scalable_msize +_scalable_allocation_mode +_scalable_allocation_command +___TBB_malloc_safer_aligned_msize +___TBB_malloc_safer_aligned_realloc +___TBB_malloc_safer_free +___TBB_malloc_safer_msize +___TBB_malloc_safer_realloc +___TBB_malloc_free_definite_size +/* memory pool stuff */ +__ZN3rml11pool_createElPKNS_13MemPoolPolicyE +__ZN3rml14pool_create_v1ElPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE +__ZN3rml10pool_resetEPNS_10MemoryPoolE +__ZN3rml12pool_destroyEPNS_10MemoryPoolE +__ZN3rml11pool_mallocEPNS_10MemoryPoolEm +__ZN3rml9pool_freeEPNS_10MemoryPoolEPv +__ZN3rml12pool_reallocEPNS_10MemoryPoolEPvm +__ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvmm +__ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEmm +__ZN3rml13pool_identifyEPv +__ZN3rml10pool_msizeEPNS_10MemoryPoolEPv + diff --git a/src/tbb/src/tbbmalloc/def/win32-tbbmalloc.def b/src/tbb/src/tbbmalloc/def/win32-tbbmalloc.def new file mode 100644 index 000000000..bfad79f4e --- /dev/null +++ b/src/tbb/src/tbbmalloc/def/win32-tbbmalloc.def @@ -0,0 +1,47 @@ +; Copyright (c) 2005-2021 Intel Corporation +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. + +EXPORTS + +; frontend.cpp +scalable_calloc +scalable_free +scalable_malloc +scalable_realloc +scalable_posix_memalign +scalable_aligned_malloc +scalable_aligned_realloc +scalable_aligned_free +scalable_msize +scalable_allocation_mode +scalable_allocation_command +__TBB_malloc_safer_free +__TBB_malloc_safer_realloc +__TBB_malloc_safer_msize +__TBB_malloc_safer_aligned_msize +__TBB_malloc_safer_aligned_realloc + +; memory pool stuff +?pool_create@rml@@YAPAVMemoryPool@1@HPBUMemPoolPolicy@1@@Z +?pool_create_v1@rml@@YA?AW4MemPoolError@1@HPBUMemPoolPolicy@1@PAPAVMemoryPool@1@@Z +?pool_destroy@rml@@YA_NPAVMemoryPool@1@@Z +?pool_malloc@rml@@YAPAXPAVMemoryPool@1@I@Z +?pool_free@rml@@YA_NPAVMemoryPool@1@PAX@Z +?pool_reset@rml@@YA_NPAVMemoryPool@1@@Z +?pool_realloc@rml@@YAPAXPAVMemoryPool@1@PAXI@Z +?pool_aligned_realloc@rml@@YAPAXPAVMemoryPool@1@PAXII@Z +?pool_aligned_malloc@rml@@YAPAXPAVMemoryPool@1@II@Z +?pool_identify@rml@@YAPAVMemoryPool@1@PAX@Z +?pool_msize@rml@@YAIPAVMemoryPool@1@PAX@Z + diff --git a/src/tbb/src/tbbmalloc/def/win64-tbbmalloc.def b/src/tbb/src/tbbmalloc/def/win64-tbbmalloc.def new file mode 100644 index 000000000..ba6ae2d2e --- /dev/null +++ b/src/tbb/src/tbbmalloc/def/win64-tbbmalloc.def @@ -0,0 +1,47 @@ +; Copyright (c) 2005-2021 Intel Corporation +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+; See the License for the specific language governing permissions and +; limitations under the License. + +EXPORTS + +; frontend.cpp +scalable_calloc +scalable_free +scalable_malloc +scalable_realloc +scalable_posix_memalign +scalable_aligned_malloc +scalable_aligned_realloc +scalable_aligned_free +scalable_msize +scalable_allocation_mode +scalable_allocation_command +__TBB_malloc_safer_free +__TBB_malloc_safer_realloc +__TBB_malloc_safer_msize +__TBB_malloc_safer_aligned_msize +__TBB_malloc_safer_aligned_realloc + +; memory pool stuff +?pool_create@rml@@YAPEAVMemoryPool@1@_JPEBUMemPoolPolicy@1@@Z +?pool_create_v1@rml@@YA?AW4MemPoolError@1@_JPEBUMemPoolPolicy@1@PEAPEAVMemoryPool@1@@Z +?pool_destroy@rml@@YA_NPEAVMemoryPool@1@@Z +?pool_malloc@rml@@YAPEAXPEAVMemoryPool@1@_K@Z +?pool_free@rml@@YA_NPEAVMemoryPool@1@PEAX@Z +?pool_reset@rml@@YA_NPEAVMemoryPool@1@@Z +?pool_realloc@rml@@YAPEAXPEAVMemoryPool@1@PEAX_K@Z +?pool_aligned_realloc@rml@@YAPEAXPEAVMemoryPool@1@PEAX_K2@Z +?pool_aligned_malloc@rml@@YAPEAXPEAVMemoryPool@1@_K1@Z +?pool_identify@rml@@YAPEAVMemoryPool@1@PEAX@Z +?pool_msize@rml@@YA_KPEAVMemoryPool@1@PEAX@Z + diff --git a/src/tbb/src/tbbmalloc/frontend.cpp b/src/tbb/src/tbbmalloc/frontend.cpp index deb95c980..c9aaf4633 100644 --- a/src/tbb/src/tbbmalloc/frontend.cpp +++ b/src/tbb/src/tbbmalloc/frontend.cpp @@ -1,63 +1,56 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ #include "tbbmalloc_internal.h" #include <errno.h> #include <new> /* for placement new */ #include <string.h> /* for memset */ -//! 
Define the main synchronization method -#define FINE_GRAIN_LOCKS +#include "oneapi/tbb/version.h" +#include "../tbb/environment.h" +#include "../tbb/itt_notify.h" // for __TBB_load_ittnotify() #if USE_PTHREAD #define TlsSetValue_func pthread_setspecific #define TlsGetValue_func pthread_getspecific #define GetMyTID() pthread_self() #include <sched.h> - inline void do_yield() {sched_yield();} extern "C" { static void mallocThreadShutdownNotification(void*); } #if __sun || __SUNPRO_CC #define __asm__ asm #endif - + #include <unistd.h> // sysconf(_SC_PAGESIZE) #elif USE_WINTHREAD #define GetMyTID() GetCurrentThreadId() #if __TBB_WIN8UI_SUPPORT -#include<thread> + #include<thread> #define TlsSetValue_func FlsSetValue #define TlsGetValue_func FlsGetValue - #define TlsAlloc() FlsAlloc(NULL) + #define TlsAlloc() FlsAlloc(nullptr) + #define TLS_ALLOC_FAILURE FLS_OUT_OF_INDEXES #define TlsFree FlsFree - inline void do_yield() {std::this_thread::yield();} #else #define TlsSetValue_func TlsSetValue #define TlsGetValue_func TlsGetValue - inline void do_yield() {SwitchToThread();} + #define TLS_ALLOC_FAILURE TLS_OUT_OF_INDEXES #endif #else #error Must define USE_PTHREAD or USE_WINTHREAD - #endif - #define FREELIST_NONBLOCKING 1 namespace rml { @@ -71,24 +64,22 @@ class MemoryPool; inline bool isMallocInitialized(); -bool RecursiveMallocCallProtector::noRecursion() { - MALLOC_ASSERT(isMallocInitialized(), - "Recursion status can be checked only when initialization was done."); - return !mallocRecursionDetected; -} - #endif // MALLOC_CHECK_RECURSION +/** Support for handling the special UNUSABLE pointer state **/ +const intptr_t UNUSABLE = 0x1; +inline bool isSolidPtr( void* ptr ) { + return (UNUSABLE|(intptr_t)ptr)!=UNUSABLE; +} +inline bool isNotForUse( void* ptr ) { + return (intptr_t)ptr==UNUSABLE; +} + /* * Block::objectSize value used to mark blocks allocated by startupAlloc */ const uint16_t startupAllocObjSizeMark = ~(uint16_t)0; -/* - * This number of bins in the TLS that leads to blocks that we can allocate in. - */ -const uint32_t numBlockBinLimit = 31; - /* * The following constant is used to define the size of struct Block, the block header. * The intent is to have the size of a Block multiple of the cache line size, this allows us to @@ -117,122 +108,129 @@ class BootStrapBlocks { #if USE_INTERNAL_TID class ThreadId { static tls_key_t Tid_key; - static intptr_t ThreadIdCount; + std::atomic<intptr_t> ThreadCount; unsigned int id; - static ThreadId get() { - ThreadId result; - result.id = reinterpret_cast<intptr_t>(TlsGetValue_func(Tid_key)); - if( !result.id ) { + static unsigned int tlsNumber() { + unsigned int result = reinterpret_cast<intptr_t>(TlsGetValue_func(Tid_key)); + if( !result ) { RecursiveMallocCallProtector scoped; // Thread-local value is zero -> first call from this thread, // need to initialize with next ID value (IDs start from 1) - result.id = AtomicIncrement(ThreadIdCount); // returned new value! - TlsSetValue_func( Tid_key, reinterpret_cast<void*>(result.id) ); + result = ++ThreadCount; // returned new value! 
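As an aside to the `tlsNumber()` hunk in progress here: the scheme hands each thread a small integer ID lazily, on its first request, from a shared counter, and caches it in thread-local storage. A pthread-only toy under that assumption (the real code also covers the Windows TLS API and recursion protection):

```cpp
#include <atomic>
#include <cstdint>
#include <pthread.h>

static pthread_key_t tidKey;
static std::atomic<std::intptr_t> threadCount{0};

// First call from a thread draws a fresh ID from the shared counter and
// caches it in TLS; later calls are a single TLS read. IDs start at 1 so
// an empty slot (0) means "not assigned yet".
static unsigned tlsNumber() {
    auto id = reinterpret_cast<std::intptr_t>(pthread_getspecific(tidKey));
    if (!id) {
        id = ++threadCount;   // atomic increment returns the new value
        pthread_setspecific(tidKey, reinterpret_cast<void*>(id));
    }
    return static_cast<unsigned>(id);
}

int main() {
    pthread_key_create(&tidKey, nullptr);
    return tlsNumber() == 1 ? 0 : 1;   // the first caller gets ID 1
}
```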
+ TlsSetValue_func( Tid_key, reinterpret_cast<void*>(result) ); } return result; } public: - static void init() { + static bool init() { #if USE_WINTHREAD Tid_key = TlsAlloc(); + if (Tid_key == TLS_ALLOC_FAILURE) + return false; #else - int status = pthread_key_create( &Tid_key, NULL ); + int status = pthread_key_create( &Tid_key, nullptr ); if ( status ) { - fprintf (stderr, "The memory manager cannot create tls key during initialization; exiting \n"); - exit(1); + fprintf (stderr, "The memory manager cannot create tls key during initialization\n"); + return false; } #endif /* USE_WINTHREAD */ + return true; } +#if __TBB_SOURCE_DIRECTLY_INCLUDED static void destroy() { if( Tid_key ) { #if USE_WINTHREAD - TlsFree( Tid_key ); + BOOL status = !(TlsFree( Tid_key )); // fail is zero #else int status = pthread_key_delete( Tid_key ); - if ( status ) { - fprintf (stderr, "The memory manager cannot delete tls key; exiting \n"); - exit(1); - } #endif /* USE_WINTHREAD */ + if ( status ) + fprintf (stderr, "The memory manager cannot delete tls key\n"); Tid_key = 0; } } +#endif - bool isCurrentThreadId() const { return id == ThreadId::get().id; } - void saveCurrentThreadId() { id = ThreadId::get().id; } + ThreadId() : id(ThreadId::tlsNumber()) {} + bool isCurrentThreadId() const { return id == ThreadId::tlsNumber(); } +#if COLLECT_STATISTICS || MALLOC_TRACE + friend unsigned int getThreadId() { return ThreadId::tlsNumber(); } +#endif #if COLLECT_STATISTICS - static unsigned getMaxThreadId() { return ThreadIdCount; } + static unsigned getMaxThreadId() { return ThreadCount.load(std::memory_order_relaxed); } - friend unsigned int getThreadId(); friend int STAT_increment(ThreadId tid, int bin, int ctr); #endif }; tls_key_t ThreadId::Tid_key; -intptr_t ThreadId::ThreadIdCount; +intptr_t ThreadId::ThreadCount; #if COLLECT_STATISTICS -unsigned int getThreadId() { return ThreadId::get().id; } +int STAT_increment(ThreadId tid, int bin, int ctr) +{ + return ::STAT_increment(tid.id, bin, ctr); +} #endif #else // USE_INTERNAL_TID class ThreadId { #if USE_PTHREAD - pthread_t tid; + std::atomic<pthread_t> tid; #else - DWORD tid; + std::atomic<DWORD> tid; #endif public: ThreadId() : tid(GetMyTID()) {} - void saveCurrentThreadId() { tid = GetMyTID(); } + ThreadId(ThreadId &other) = delete; + ~ThreadId() = default; + #if USE_PTHREAD - bool isCurrentThreadId() const { return pthread_equal(pthread_self(), tid); } + bool isCurrentThreadId() const { return pthread_equal(pthread_self(), tid.load(std::memory_order_relaxed)); } #else - bool isCurrentThreadId() const { return GetCurrentThreadId() == tid; } + bool isCurrentThreadId() const { return GetCurrentThreadId() == tid.load(std::memory_order_relaxed); } #endif - static void init() {} + ThreadId& operator=(const ThreadId& other) { + tid.store(other.tid.load(std::memory_order_relaxed), std::memory_order_relaxed); + return *this; + } + static bool init() { return true; } +#if __TBB_SOURCE_DIRECTLY_INCLUDED static void destroy() {} +#endif }; #endif // USE_INTERNAL_TID -#if COLLECT_STATISTICS -int STAT_increment(ThreadId tid, int bin, int ctr) -{ - return ::STAT_increment(tid.id, bin, ctr); -} -#endif - /*********** Code to provide thread ID and a thread-local void pointer **********/ -TLSKey::TLSKey() +bool TLSKey::init() { #if USE_WINTHREAD TLS_pointer_key = TlsAlloc(); + if (TLS_pointer_key == TLS_ALLOC_FAILURE) + return false; #else int status = pthread_key_create( &TLS_pointer_key, mallocThreadShutdownNotification ); - if ( status ) { - fprintf (stderr, "The memory 
manager cannot create tls key during initialization; exiting \n"); - exit(1); - } + if ( status ) + return false; #endif /* USE_WINTHREAD */ + return true; } -TLSKey::~TLSKey() +bool TLSKey::destroy() { #if USE_WINTHREAD - TlsFree(TLS_pointer_key); + BOOL status1 = !(TlsFree(TLS_pointer_key)); // fail is zero #else int status1 = pthread_key_delete(TLS_pointer_key); - if ( status1 ) { - fprintf (stderr, "The memory manager cannot delete tls key during; exiting \n"); - exit(1); - } #endif /* USE_WINTHREAD */ + MALLOC_ASSERT(!status1, "The memory manager cannot delete tls key."); + return status1==0; } inline TLSData* TLSKey::getThreadMallocTLS() const @@ -259,43 +257,13 @@ inline void TLSKey::setThreadMallocTLS( TLSData * newvalue ) { */ class Bin; class StartupBlock; -class TLSData; - -class LifoList { -public: - inline LifoList(); - inline void push(Block *block); - inline Block *pop(); - -private: - Block *top; -#ifdef FINE_GRAIN_LOCKS - MallocMutex lock; -#endif /* FINE_GRAIN_LOCKS */ -}; - -/* - * When a block that is not completely free is returned for reuse by other threads - * this is where the block goes. - * - * LifoList assumes zero initialization; so below its constructors are omitted, - * to avoid linking with C++ libraries on Linux. - */ - -class OrphanedBlocks { - LifoList bins[numBlockBinLimit]; -public: - Block *get(TLSData *tls, unsigned int size); - void put(Bin *bin, Block *block); - void reset(); -}; class MemoryPool { // if no explicit grainsize, expect to see malloc in user's pAlloc // and set reasonable low granularity static const size_t defaultGranularity = estimatedCacheLineSize; - MemoryPool(); // deny + MemoryPool() = delete; // deny public: static MallocMutex memPoolListLock; @@ -304,17 +272,17 @@ class MemoryPool { MemoryPool *next, *prev; ExtMemoryPool extMemPool; - OrphanedBlocks orphanedBlocks; BootStrapBlocks bootStrapBlocks; - bool init(intptr_t poolId, const MemPoolPolicy* memPoolPolicy); static void initDefaultPool(); - void reset(); - void destroy(); - void processThreadShutdown(TLSData *tlsData); + + bool init(intptr_t poolId, const MemPoolPolicy* memPoolPolicy); + bool reset(); + bool destroy(); + void onThreadShutdown(TLSData *tlsData); inline TLSData *getTLS(bool create); - void clearTLS() { extMemPool.tlsPointerKey.setThreadMallocTLS(NULL); } + void clearTLS() { extMemPool.tlsPointerKey.setThreadMallocTLS(nullptr); } Block *getEmptyBlock(size_t size); void returnEmptyBlock(Block *block, bool poolTheBlock); @@ -324,150 +292,195 @@ class MemoryPool { void putToLLOCache(TLSData *tls, void *object); }; -static char defaultMemPool_space[sizeof(MemoryPool)]; -static MemoryPool *defaultMemPool = (MemoryPool *)defaultMemPool_space; +static intptr_t defaultMemPool_space[sizeof(MemoryPool)/sizeof(intptr_t) + + (sizeof(MemoryPool)%sizeof(intptr_t)? 1 : 0)]; +static MemoryPool *defaultMemPool = (MemoryPool*)defaultMemPool_space; const size_t MemoryPool::defaultGranularity; // zero-initialized MallocMutex MemoryPool::memPoolListLock; // TODO: move huge page status to default pool, because that's its states HugePagesStatus hugePages; -static bool usedBySrcIncluded; - -// Slab block is 16KB-aligned. To prevent false sharing, separate locally-accessed -// fields and fields commonly accessed by not owner threads. 
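A recurring change in the hunks above: initialization paths that used to `fprintf` and `exit(1)` now return `false`, so a failure to create a TLS key no longer terminates the host process. A toy illustration of the pattern, under the assumption that the caller checks the result (as the reworked `doInitialization()` in this file now does):

```cpp
#include <cstdio>
#include <pthread.h>

static pthread_key_t key;

// Initialization reports failure instead of terminating the process;
// the caller decides how to react.
static bool initTLS() {
    return pthread_key_create(&key, nullptr) == 0;
}

int main() {
    if (!initTLS()) {
        std::fprintf(stderr, "allocator TLS init failed\n");
        return 1;
    }
    return 0;
}
```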
-class GlobalBlockFields : public BlockI { -protected: - FreeObject *publicFreeList; - Block *nextPrivatizable; -}; +static bool usedBySrcIncluded = false; +// Padding helpers template<size_t padd> -struct Padding { +struct PaddingImpl { size_t __padding[padd]; }; template<> -struct Padding<0> { -}; +struct PaddingImpl<0> {}; + +template<int N> +struct Padding : PaddingImpl<N/sizeof(size_t)> {}; -class LocalBlockFields : public GlobalBlockFields { +// Slab block is 16KB-aligned. To prevent false sharing, separate locally-accessed +// fields and fields commonly accessed by not owner threads. +class GlobalBlockFields : public BlockI { protected: - Padding<(blockHeaderAlignment - - sizeof(GlobalBlockFields))/sizeof(size_t)> pad_local; + std::atomic<FreeObject*> publicFreeList; + std::atomic<Block*> nextPrivatizable; + MemoryPool *poolPtr; +}; +class LocalBlockFields : public GlobalBlockFields, Padding<blockHeaderAlignment - sizeof(GlobalBlockFields)> { +protected: Block *next; Block *previous; /* Use double linked list to speed up removal */ FreeObject *bumpPtr; /* Bump pointer moves from the end to the beginning of a block */ FreeObject *freeList; /* Pointer to local data for the owner thread. Used for fast finding tls when releasing object from a block that current thread owned. - NULL for orphaned blocks. */ - TLSData *tlsPtr; - ThreadId ownerTid; + nullptr for orphaned blocks. */ + std::atomic<TLSData*> tlsPtr; + ThreadId ownerTid; /* the ID of the thread that owns or last owned the block */ BackRefIdx backRefIdx; uint16_t allocatedCount; /* Number of objects allocated (obviously by the owning thread) */ uint16_t objectSize; bool isFull; - friend void *BootStrapBlocks::allocate(MemoryPool *memPool, size_t size); friend class FreeBlockPool; friend class StartupBlock; friend class LifoList; - friend Block *MemoryPool::getEmptyBlock(size_t size); + friend void *BootStrapBlocks::allocate(MemoryPool *, size_t); + friend bool OrphanedBlocks::cleanup(Backend*); + friend Block *MemoryPool::getEmptyBlock(size_t); }; -class Block : public LocalBlockFields { - Padding<(2*blockHeaderAlignment - - sizeof(LocalBlockFields))/sizeof(size_t)> pad_public; +// Use inheritance to guarantee that a user data start on next cache line. +// Can't use member for it, because when LocalBlockFields already on cache line, +// we must have no additional memory consumption for all compilers. 
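A compact model of the padding idiom introduced above: pad the shared header out to an alignment boundary via inheritance, so fields written by other threads (such as `publicFreeList`) sit on a different cache line than owner-local fields. The `alignas`, the 64-byte boundary, and the field names are illustrative; in the real allocator the slab's own 16KB alignment plays that role:

```cpp
#include <cstddef>

template <std::size_t N> struct PaddingImpl { std::size_t pad[N]; };
template <>              struct PaddingImpl<0> {};
template <int N>         struct Padding : PaddingImpl<N / sizeof(std::size_t)> {};

constexpr std::size_t kHeaderAlignment = 64;   // assumed cache-line-sized boundary

struct Shared {                // fields other threads write
    void* publicFreeList;
};

// Inheriting the padding (rather than adding a member) costs nothing when
// Shared already fills the boundary, because PaddingImpl<0> is empty.
struct alignas(kHeaderAlignment) Local
    : Shared, Padding<kHeaderAlignment - sizeof(Shared)> {
    void* freeList;            // owner-thread-only field, next cache line
};

static_assert(sizeof(Local) > kHeaderAlignment,
              "owner-local fields start past the shared cache line");

int main() {
    Local l{};
    return l.freeList == nullptr ? 0 : 1;
}
```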
+class Block : public LocalBlockFields, + Padding<2*blockHeaderAlignment - sizeof(LocalBlockFields)> { public: - bool empty() const { return allocatedCount==0 && publicFreeList==NULL; } + bool empty() const { + if (allocatedCount > 0) return false; + MALLOC_ASSERT(!isSolidPtr(publicFreeList.load(std::memory_order_relaxed)), ASSERT_TEXT); + return true; + } inline FreeObject* allocate(); inline FreeObject *allocateFromFreeList(); - inline bool emptyEnoughToUse(); + + inline bool adjustFullness(); + void adjustPositionInBin(Bin* bin = nullptr); +#if MALLOC_DEBUG bool freeListNonNull() { return freeList; } +#endif void freePublicObject(FreeObject *objectToFree); - inline void freeOwnObject(MemoryPool *memPool, TLSData *tls, void *object); - void makeEmpty(); - void privatizePublicFreeList(); + inline void freeOwnObject(void *object); + void reset(); + void privatizePublicFreeList( bool reset = true ); void restoreBumpPtr(); void privatizeOrphaned(TLSData *tls, unsigned index); - void shareOrphaned(const Bin *bin, unsigned index); + bool readyToShare(); + void shareOrphaned(intptr_t binTag, unsigned index); unsigned int getSize() const { MALLOC_ASSERT(isStartupAllocObject() || objectSize<minLargeObjectSize, "Invalid object size"); return isStartupAllocObject()? 0 : objectSize; } const BackRefIdx *getBackRefIdx() const { return &backRefIdx; } - inline TLSData *ownBlock() const; + inline bool isOwnedByCurrentThread() const; bool isStartupAllocObject() const { return objectSize == startupAllocObjSizeMark; } - inline FreeObject *findObjectToFree(void *object) const; - bool checkFreePrecond(void *object) const { - if (allocatedCount>0) { - if (startupAllocObjSizeMark == objectSize) // startup block - return object<=bumpPtr; - else - return allocatedCount <= (slabSize-sizeof(Block))/objectSize - && (!bumpPtr || object>bumpPtr); + inline FreeObject *findObjectToFree(const void *object) const; + void checkFreePrecond(const void *object) const { +#if MALLOC_DEBUG + const char *msg = "Possible double free or heap corruption."; + // small objects are always at least sizeof(size_t) Byte aligned, + // try to check this before this dereference as for invalid objects + // this may be unreadable + MALLOC_ASSERT(isAligned(object, sizeof(size_t)), "Try to free invalid small object"); +#if !__TBB_USE_THREAD_SANITIZER + // releasing to free slab + MALLOC_ASSERT(allocatedCount>0, msg); +#endif + // must not point to slab's header + MALLOC_ASSERT((uintptr_t)object - (uintptr_t)this >= sizeof(Block), msg); + if (startupAllocObjSizeMark == objectSize) // startup block + MALLOC_ASSERT(object<=bumpPtr, msg); + else { + // non-startup objects are 8 Byte aligned + MALLOC_ASSERT(isAligned(object, 8), "Try to free invalid small object"); + FreeObject *toFree = findObjectToFree(object); +#if !__TBB_USE_THREAD_SANITIZER + MALLOC_ASSERT(allocatedCount <= (slabSize-sizeof(Block))/objectSize + && (!bumpPtr || object>bumpPtr), msg); + // check against head of freeList, as this is mostly + // expected after double free + MALLOC_ASSERT(toFree != freeList, msg); +#endif + // check against head of publicFreeList, to detect double free + // involving foreign thread + MALLOC_ASSERT(toFree != publicFreeList.load(std::memory_order_relaxed), msg); } - return false; +#else + suppress_unused_warning(object); +#endif } - const BackRefIdx *getBackRef() const { return &backRefIdx; } void initEmptyBlock(TLSData *tls, size_t size); size_t findObjectSize(void *object) const; + MemoryPool *getMemPool() const { return poolPtr; } // do not use on 
the hot path! protected: void cleanBlockHeader(); private: - static const float emptyEnoughRatio; /* "Reactivate" a block if this share of its objects is free. */ + static const float emptyEnoughRatio; /* Threshold on free space needed to "reactivate" a block */ inline FreeObject *allocateFromBumpPtr(); inline FreeObject *findAllocatedObject(const void *address) const; +#if MALLOC_DEBUG inline bool isProperlyPlaced(const void *object) const; - inline bool isOrphaned() const { return !tlsPtr; } +#endif inline void markOwned(TLSData *tls) { - ownerTid.saveCurrentThreadId(); - tlsPtr = tls; + MALLOC_ASSERT(!tlsPtr.load(std::memory_order_relaxed), ASSERT_TEXT); + ownerTid = ThreadId(); /* save the ID of the current thread */ + tlsPtr.store(tls, std::memory_order_relaxed); } inline void markOrphaned() { - MALLOC_ASSERT(tlsPtr, ASSERT_TEXT); - tlsPtr = NULL; + MALLOC_ASSERT(tlsPtr.load(std::memory_order_relaxed), ASSERT_TEXT); + tlsPtr.store(nullptr, std::memory_order_relaxed); } friend class Bin; friend class TLSData; - friend void MemoryPool::destroy(); + friend bool MemoryPool::destroy(); }; const float Block::emptyEnoughRatio = 1.0 / 4.0; -MALLOC_STATIC_ASSERT(sizeof(Block) <= 2*estimatedCacheLineSize, +static_assert(sizeof(Block) <= 2*estimatedCacheLineSize, "The class Block does not fit into 2 cache lines on this platform. " "Defining USE_INTERNAL_TID may help to fix it."); class Bin { - Block *activeBlk; - Block *mailbox; +private: +public: + Block *activeBlk; + std::atomic<Block*> mailbox; MallocMutex mailLock; public: inline Block* getActiveBlock() const { return activeBlk; } - void resetActiveBlock() { activeBlk = 0; } - bool activeBlockUnused() const { return activeBlk && !activeBlk->allocatedCount; } + void resetActiveBlock() { activeBlk = nullptr; } inline void setActiveBlock(Block *block); inline Block* setPreviousBlockActive(); - Block* getPublicFreeListBlock(); - void moveBlockToBinFront(Block *block); - void processLessUsedBlock(MemoryPool *memPool, Block *block); - - void outofTLSBin (Block* block); - void verifyTLSBin (size_t size) const; + Block* getPrivatizedFreeListBlock(); + void moveBlockToFront(Block *block); + bool cleanPublicFreeLists(); + void processEmptyBlock(Block *block, bool poolTheBlock); + void addPublicFreeListBlock(Block* block); + + void outofTLSBin(Block* block); + void verifyTLSBin(size_t size) const; void pushTLSBin(Block* block); +#if MALLOC_DEBUG void verifyInitState() const { - MALLOC_ASSERT( activeBlk == 0, ASSERT_TEXT ); - MALLOC_ASSERT( mailbox == 0, ASSERT_TEXT ); + MALLOC_ASSERT( !activeBlk, ASSERT_TEXT ); + MALLOC_ASSERT( !mailbox.load(std::memory_order_relaxed), ASSERT_TEXT ); } +#endif friend void Block::freePublicObject (FreeObject *objectToFree); }; @@ -518,31 +531,21 @@ const uint32_t numBlockBins = minFittingIndex+numFittingBins; */ const uint32_t minLargeObjectSize = fittingSize5 + 1; -/* - * Default granularity of memory pools - */ - -#if USE_WINTHREAD -const size_t scalableMallocPoolGranularity = 64*1024; // for VirtualAlloc use -#else -const size_t scalableMallocPoolGranularity = 4*1024; // page size, for mmap use -#endif - /* * Per-thread pool of slab blocks. Idea behind it is to not share with other * threads memory that are likely in local cache(s) of our CPU. 
*/ class FreeBlockPool { - Block *head; +private: + std::atomic<Block*> head; int size; Backend *backend; - bool lastAccessMiss; public: static const int POOL_HIGH_MARK = 32; static const int POOL_LOW_MARK = 8; class ResOfGet { - ResOfGet(); + ResOfGet() = delete; public: Block* block; bool lastAccMiss; @@ -558,10 +561,11 @@ class FreeBlockPool { template<int LOW_MARK, int HIGH_MARK> class LocalLOCImpl { +private: static const size_t MAX_TOTAL_SIZE = 4*1024*1024; // TODO: can single-linked list be faster here? - LargeMemoryBlock *head, - *tail; // need it when do releasing on overflow + LargeMemoryBlock *tail; // need it when do releasing on overflow + std::atomic<LargeMemoryBlock*> head; size_t totalSize; int numOfBlocks; public: @@ -569,7 +573,7 @@ class LocalLOCImpl { LargeMemoryBlock *get(size_t size); bool externalCleanup(ExtMemoryPool *extMemPool); #if __TBB_MALLOC_WHITEBOX_TEST - LocalLOCImpl() : head(NULL), tail(NULL), totalSize(0), numOfBlocks(0) {} + LocalLOCImpl() : tail(nullptr), head(nullptr), totalSize(0), numOfBlocks(0) {} static size_t getMaxSize() { return MAX_TOTAL_SIZE; } static const int LOC_HIGH_MARK = HIGH_MARK; #else @@ -580,33 +584,32 @@ class LocalLOCImpl { typedef LocalLOCImpl<8,32> LocalLOC; // set production code parameters class TLSData : public TLSRemote { -#if USE_PTHREAD MemoryPool *memPool; -#endif public: Bin bin[numBlockBinLimit]; FreeBlockPool freeSlabBlocks; LocalLOC lloc; unsigned currCacheIdx; private: - bool unused; + std::atomic<bool> unused; public: -#if USE_PTHREAD - TLSData(MemoryPool *mPool, Backend *bknd) : memPool(mPool), freeSlabBlocks(bknd) {} + TLSData(MemoryPool *mPool, Backend *bknd) : memPool(mPool), freeSlabBlocks(bknd), currCacheIdx(0) {} MemoryPool *getMemPool() const { return memPool; } -#else - TLSData(MemoryPool * /*memPool*/, Backend *bknd) : freeSlabBlocks(bknd) {} -#endif Bin* getAllocationBin(size_t size); - void release(MemoryPool *mPool); - bool externalCleanup(ExtMemoryPool *mPool, bool cleanOnlyUnused) { - if (!unused && cleanOnlyUnused) return false; + void release(); + bool externalCleanup(bool cleanOnlyUnused, bool cleanBins) { + if (!unused.load(std::memory_order_relaxed) && cleanOnlyUnused) return false; + // Heavy operation in terms of synchronization complexity, + // should be called only for the current thread + bool released = cleanBins ? 
cleanupBlockBins() : false; // both cleanups to be called, and the order is not important - return lloc.externalCleanup(mPool) | freeSlabBlocks.externalCleanup(); + bool lloc_cleaned = lloc.externalCleanup(&memPool->extMemPool); + bool free_slab_blocks_cleaned = freeSlabBlocks.externalCleanup(); + return released || lloc_cleaned || free_slab_blocks_cleaned; } - bool cleanUnusedActiveBlocks(Backend *backend, bool userPool); - void markUsed() { unused = false; } // called by owner when TLS touched - void markUnused() { unused = true; } // can be called by not owner thread + bool cleanupBlockBins(); + void markUsed() { unused.store(false, std::memory_order_relaxed); } // called by owner when TLS touched + void markUnused() { unused.store(true, std::memory_order_relaxed); } // can be called by not owner thread }; TLSData *TLSKey::createTLS(MemoryPool *memPool, Backend *backend) @@ -614,7 +617,7 @@ TLSData *TLSKey::createTLS(MemoryPool *memPool, Backend *backend) MALLOC_ASSERT( sizeof(TLSData) >= sizeof(Bin) * numBlockBins + sizeof(FreeBlockPool), ASSERT_TEXT ); TLSData* tls = (TLSData*) memPool->bootStrapBlocks.allocate(memPool, sizeof(TLSData)); if ( !tls ) - return NULL; + return nullptr; new(tls) TLSData(memPool, backend); /* the block contains zeroes after bootStrapMalloc, so bins are initialized */ #if MALLOC_DEBUG @@ -626,38 +629,38 @@ TLSData *TLSKey::createTLS(MemoryPool *memPool, Backend *backend) return tls; } -bool TLSData::cleanUnusedActiveBlocks(Backend *backend, bool userPool) +bool TLSData::cleanupBlockBins() { bool released = false; - // active blocks can be not used, so return them to backend - for (uint32_t i=0; i<numBlockBinLimit; i++) - if (bin[i].activeBlockUnused()) { - Block *block = bin[i].getActiveBlock(); + for (uint32_t i = 0; i < numBlockBinLimit; i++) { + released |= bin[i].cleanPublicFreeLists(); + // After cleaning public free lists, only the active block might be empty. + // Do not use processEmptyBlock because it will just restore bumpPtr. 
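Note why `externalCleanup()` above evaluates each cleanup into its own named variable before combining them: with a bare short-circuiting `||`, the second cleanup would be skipped whenever the first reported success (the old code used the non-short-circuiting `|` for the same reason). A toy demonstration with stand-in functions:

```cpp
#include <cstdio>

static bool cleanupA() { std::puts("A ran"); return true;  }
static bool cleanupB() { std::puts("B ran"); return false; }

int main() {
    bool skipped = cleanupA() || cleanupB();   // "B ran" never prints

    bool a = cleanupA();
    bool b = cleanupB();                       // both cleanups always run
    bool combined = a || b;

    return (skipped == combined) ? 0 : 1;
}
```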
+ Block *block = bin[i].getActiveBlock(); + if (block && block->empty()) { bin[i].outofTLSBin(block); - // slab blocks in user's pools do not have valid backRefIdx - if (!userPool) - removeBackRef(*(block->getBackRefIdx())); - backend->putSlabBlock(block); - + memPool->returnEmptyBlock(block, /*poolTheBlock=*/false); released = true; } + } return released; } bool ExtMemoryPool::releaseAllLocalCaches() { - bool released = allLocalCaches.cleanup(this, /*cleanOnlyUnused=*/false); + // Iterate all registered TLS data and clean LLOC and Slab pools + bool released = allLocalCaches.cleanup(/*cleanOnlyUnused=*/false); + // Bins privatization is done only for the current thread if (TLSData *tlsData = tlsPointerKey.getThreadMallocTLS()) - // released only for current thread for now - released |= tlsData->cleanUnusedActiveBlocks(&backend, userPool()); + released |= tlsData->cleanupBlockBins(); return released; } void AllLocalCaches::registerThread(TLSRemote *tls) { - tls->prev = NULL; + tls->prev = nullptr; MallocMutex::scoped_lock lock(listLock); MALLOC_ASSERT(head!=tls, ASSERT_TEXT); tls->next = head; @@ -670,6 +673,7 @@ void AllLocalCaches::registerThread(TLSRemote *tls) void AllLocalCaches::unregisterThread(TLSRemote *tls) { MallocMutex::scoped_lock lock(listLock); + MALLOC_ASSERT(head, "Can't unregister thread: no threads are registered."); if (head == tls) head = tls->next; if (tls->next) @@ -679,22 +683,20 @@ void AllLocalCaches::unregisterThread(TLSRemote *tls) MALLOC_ASSERT(!tls->next || tls->next->next!=tls->next, ASSERT_TEXT); } -bool AllLocalCaches::cleanup(ExtMemoryPool *extPool, bool cleanOnlyUnused) +bool AllLocalCaches::cleanup(bool cleanOnlyUnused) { - bool total = false; + bool released = false; { MallocMutex::scoped_lock lock(listLock); - for (TLSRemote *curr=head; curr; curr=curr->next) - total |= static_cast<TLSData*>(curr)-> - externalCleanup(extPool, cleanOnlyUnused); + released |= static_cast<TLSData*>(curr)->externalCleanup(cleanOnlyUnused, /*cleanBins=*/false); } - return total; + return released; } void AllLocalCaches::markUnused() { - bool locked; + bool locked = false; MallocMutex::scoped_lock lock(listLock, /*block=*/false, &locked); if (!locked) // not wait for marking if someone doing something with it return; @@ -705,8 +707,8 @@ void AllLocalCaches::markUnused() #if MALLOC_CHECK_RECURSION MallocMutex RecursiveMallocCallProtector::rmc_mutex; -pthread_t RecursiveMallocCallProtector::owner_thread; -void *RecursiveMallocCallProtector::autoObjPtr; +std::atomic<pthread_t> RecursiveMallocCallProtector::owner_thread; +std::atomic<void*> RecursiveMallocCallProtector::autoObjPtr; bool RecursiveMallocCallProtector::mallocRecursionDetected; #if __FreeBSD__ bool RecursiveMallocCallProtector::canUsePthread; @@ -716,6 +718,21 @@ bool RecursiveMallocCallProtector::canUsePthread; /*********** End code to provide thread ID and a TLS pointer **********/ +// Parameter for isLargeObject, keeps our expectations on memory origin. +// Assertions must use unknownMem to reliably report object invalidity. +enum MemoryOrigin { + ourMem, // allocated by TBB allocator + unknownMem // can be allocated by system allocator or TBB allocator +}; + +template<MemoryOrigin> +#if __TBB_USE_THREAD_SANITIZER +// We have a real race when accessing the large object header for +// non large objects (e.g. small or foreign objects). 
+// Therefore, we need to hide this access from the thread sanitizer +__attribute__((no_sanitize("thread"))) +#endif +bool isLargeObject(void *object); static void *internalMalloc(size_t size); static void internalFree(void *object); static void *internalPoolMalloc(MemoryPool* mPool, size_t size); @@ -733,7 +750,7 @@ static bool internalPoolFree(MemoryPool *mPool, void *object, size_t size); #define ALWAYSINLINE(decl) decl #endif -static NOINLINE( void doInitialization() ); +static NOINLINE( bool doInitialization() ); ALWAYSINLINE( bool isMallocInitialized() ); #undef ALWAYSINLINE @@ -745,7 +762,7 @@ ALWAYSINLINE( bool isMallocInitialized() ); /* * Given a number return the highest non-zero bit in it. It is intended to work with 32-bit values only. - * Moreover, on IPF, for sake of simplicity and performance, it is narrowed to only serve for 64 to 1023. + * Moreover, on some platforms, for sake of simplicity and performance, it is narrowed to only serve for 64 to 1023. * This is enough for current algorithm of distribution of sizes among bins. * __TBB_Log2 is not used here to minimize dependencies on TBB specific sources. */ @@ -759,7 +776,7 @@ static inline unsigned int highestBitPos(unsigned int n) unsigned int pos; #if __ARCH_x86_32||__ARCH_x86_64 -# if __linux__||__APPLE__||__FreeBSD__||__NetBSD__||__sun||__MINGW32__ +# if __unix__||__APPLE__||__MINGW32__ __asm__ ("bsr %1,%0" : "=r"(pos) : "r"(n)); # elif (_WIN32 && (!_WIN64 || __INTEL_COMPILER)) __asm @@ -786,16 +803,29 @@ static inline unsigned int highestBitPos(unsigned int n) return pos; } +unsigned int getSmallObjectIndex(unsigned int size) +{ + unsigned int result = (size-1)>>3; + constexpr bool is_64bit = (8 == sizeof(void*)); + if (is_64bit) { + // For 64-bit malloc, 16 byte alignment is needed except for bin 0. + if (result) result |= 1; // 0,1,3,5,7; bins 2,4,6 are not aligned to 16 bytes + } + return result; +} + /* * Depending on indexRequest, for a given size return either the index into the bin * for objects of this size, or the actual size of objects in this bin. + * TODO: Change return type to unsigned short. */ template<bool indexRequest> static unsigned int getIndexOrObjectSize (unsigned int size) { - if (size <= maxSmallObjectSize) { // selection from 4/8/16/24/32/40/48/56/64 - /* Index 0 holds up to 8 bytes, Index 1 16 and so forth */ - return indexRequest ? (size - 1) >> 3 : alignUp(size,8); + if (size <= maxSmallObjectSize) { // selection from 8/16/24/32/40/48/56/64 + unsigned int index = getSmallObjectIndex( size ); + /* Bin 0 is for 8 bytes, bin 1 is for 16, and so forth */ + return indexRequest ? index : (index+1)<<3; } else if (size <= maxSegregatedObjectSize ) { // 80/96/112/128 / 160/192/224/256 / 320/384/448/512 / 640/768/896/1024 unsigned int order = highestBitPos(size-1); // which group of bin sizes? 
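The new `getSmallObjectIndex()` above encodes the 64-bit alignment rule in the bin mapping: every bin except bin 0 is forced to an odd index, so only sizes that are multiples of 16 (plus the 8-byte bin) get their own bin. Extracted for illustration:

```cpp
#include <cstdio>

// Same mapping as getSmallObjectIndex() above: size -> small-object bin.
static unsigned getSmallObjectIndex(unsigned size) {
    unsigned result = (size - 1) >> 3;
    constexpr bool is64bit = (sizeof(void*) == 8);
    if (is64bit && result)
        result |= 1;           // only indices 0,1,3,5,7 are used
    return result;
}

int main() {
    const unsigned sizes[] = {8, 16, 24, 32, 40, 64};
    for (unsigned size : sizes)
        std::printf("size %2u -> bin %u\n", size, getSmallObjectIndex(size));
    // On a 64-bit build: 8->0, 16->1, 24->3, 32->3, 40->5, 64->7.
    // Sharing bins 3/5/7 keeps every bin's object size a multiple of 16
    // (24 is rounded up to 32, and so on), preserving 16-byte alignment.
    return 0;
}
```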
@@ -833,12 +863,12 @@ static unsigned int getIndexOrObjectSize (unsigned int size) static unsigned int getIndex (unsigned int size) { - return getIndexOrObjectSize</*indexRequest*/true>(size); + return getIndexOrObjectSize</*indexRequest=*/true>(size); } static unsigned int getObjectSize (unsigned int size) { - return getIndexOrObjectSize</*indexRequest*/false>(size); + return getIndexOrObjectSize</*indexRequest=*/false>(size); } @@ -857,19 +887,18 @@ void *BootStrapBlocks::allocate(MemoryPool *memPool, size_t size) } else { if (!bootStrapBlock) { bootStrapBlock = memPool->getEmptyBlock(size); - if (!bootStrapBlock) return NULL; + if (!bootStrapBlock) return nullptr; } result = bootStrapBlock->bumpPtr; bootStrapBlock->bumpPtr = (FreeObject *)((uintptr_t)bootStrapBlock->bumpPtr - bootStrapBlock->objectSize); if ((uintptr_t)bootStrapBlock->bumpPtr < (uintptr_t)bootStrapBlock+sizeof(Block)) { - bootStrapBlock->bumpPtr = NULL; + bootStrapBlock->bumpPtr = nullptr; bootStrapBlock->next = bootStrapBlockUsed; bootStrapBlockUsed = bootStrapBlock; - bootStrapBlock = NULL; + bootStrapBlock = nullptr; } } } // Unlock with release - memset (result, 0, size); return (void*)result; } @@ -886,66 +915,63 @@ void BootStrapBlocks::free(void* ptr) void BootStrapBlocks::reset() { - bootStrapBlock = bootStrapBlockUsed = NULL; - bootStrapObjectList = NULL; + bootStrapBlock = bootStrapBlockUsed = nullptr; + bootStrapObjectList = nullptr; } #if !(FREELIST_NONBLOCKING) static MallocMutex publicFreeListLock; // lock for changes of publicFreeList #endif -const uintptr_t UNUSABLE = 0x1; -inline bool isSolidPtr( void* ptr ) -{ - return (UNUSABLE|(uintptr_t)ptr)!=UNUSABLE; -} -inline bool isNotForUse( void* ptr ) -{ - return (uintptr_t)ptr==UNUSABLE; -} - /********* End rough utility code **************/ -#ifdef FINE_GRAIN_LOCKS /* LifoList assumes zero initialization so a vector of it can be created * by just allocating some space with no call to constructor. * On Linux, it seems to be necessary to avoid linking with C++ libraries. * * By usage convention there is no race on the initialization. 
*/ -LifoList::LifoList( ) : top(NULL) +LifoList::LifoList( ) : top(nullptr) { // MallocMutex assumes zero initialization - memset(&lock, 0, sizeof(MallocMutex)); + memset(static_cast<void*>(&lock), 0, sizeof(MallocMutex)); } void LifoList::push(Block *block) { MallocMutex::scoped_lock scoped_cs(lock); - block->next = top; - top = block; + block->next = top.load(std::memory_order_relaxed); + top.store(block, std::memory_order_relaxed); } Block *LifoList::pop() { - Block *block=NULL; - if (!top) goto done; - { + Block* block = nullptr; + if (top.load(std::memory_order_relaxed)) { MallocMutex::scoped_lock scoped_cs(lock); - if (!top) goto done; - block = top; - top = block->next; + block = top.load(std::memory_order_relaxed); + if (block) { + top.store(block->next, std::memory_order_relaxed); + } } -done: return block; } -#endif /* FINE_GRAIN_LOCKS */ +Block *LifoList::grab() +{ + Block *block = nullptr; + if (top.load(std::memory_order_relaxed)) { + MallocMutex::scoped_lock scoped_cs(lock); + block = top.load(std::memory_order_relaxed); + top.store(nullptr, std::memory_order_relaxed); + } + return block; +} /********* Thread and block related code *************/ template<bool poolDestroy> void AllLargeBlocksList::releaseAll(Backend *backend) { LargeMemoryBlock *next, *lmb = loHead; - loHead = NULL; + loHead = nullptr; for (; lmb; lmb = next) { next = lmb->gNext; @@ -956,7 +982,7 @@ template<bool poolDestroy> void AllLargeBlocksList::releaseAll(Backend *backend) } else { // clean g(Next|Prev) to prevent removing lmb // from AllLargeBlocksList inside returnLargeObject - lmb->gNext = lmb->gPrev = NULL; + lmb->gNext = lmb->gPrev = nullptr; backend->returnLargeObject(lmb); } } @@ -965,11 +991,8 @@ template<bool poolDestroy> void AllLargeBlocksList::releaseAll(Backend *backend) TLSData* MemoryPool::getTLS(bool create) { TLSData* tls = extMemPool.tlsPointerKey.getThreadMallocTLS(); - if( create && !tls ) { + if (create && !tls) tls = extMemPool.tlsPointerKey.createTLS(this, &extMemPool.backend); - MALLOC_ASSERT( tls, ASSERT_TEXT ); - } - if (tls) tls->markUsed(); return tls; } @@ -984,10 +1007,10 @@ inline Bin* TLSData::getAllocationBin(size_t size) /* Return an empty uninitialized block in a non-blocking fashion. */ Block *MemoryPool::getEmptyBlock(size_t size) { - TLSData* tls = extMemPool.tlsPointerKey.getThreadMallocTLS(); + TLSData* tls = getTLS(/*create=*/false); // try to use per-thread cache, if TLS available FreeBlockPool::ResOfGet resOfGet = tls? 
- tls->freeSlabBlocks.getBlock() : FreeBlockPool::ResOfGet(NULL, false); + tls->freeSlabBlocks.getBlock() : FreeBlockPool::ResOfGet(nullptr, false); Block *result = resOfGet.block; if (!result) { // not found in local cache, asks backend for slabs @@ -995,7 +1018,7 @@ Block *MemoryPool::getEmptyBlock(size_t size) BackRefIdx backRefIdx[Backend::numOfSlabAllocOnMiss]; result = static_cast<Block*>(extMemPool.backend.getSlabBlock(num)); - if (!result) return NULL; + if (!result) return nullptr; if (!extMemPool.userPool()) for (int i=0; i<num; i++) { @@ -1007,7 +1030,7 @@ Block *MemoryPool::getEmptyBlock(size_t size) Block *b = result; for (int j=0; j<num; b=(Block*)((uintptr_t)b+slabSize), j++) extMemPool.backend.putSlabBlock(b); - return NULL; + return nullptr; } } // resources were allocated, register blocks @@ -1020,7 +1043,8 @@ Block *MemoryPool::getEmptyBlock(size_t size) setBackRef(backRefIdx[i], b); b->backRefIdx = backRefIdx[i]; } - b->tlsPtr = tls; + b->tlsPtr.store(tls, std::memory_order_relaxed); + b->poolPtr = this; // all but first one go to per-thread pool if (i > 0) { MALLOC_ASSERT(tls, ASSERT_TEXT); @@ -1030,17 +1054,16 @@ Block *MemoryPool::getEmptyBlock(size_t size) } MALLOC_ASSERT(result, ASSERT_TEXT); result->initEmptyBlock(tls, size); - STAT_increment(result->owner, getIndex(result->objectSize), allocBlockNew); + STAT_increment(getThreadId(), getIndex(result->objectSize), allocBlockNew); return result; } void MemoryPool::returnEmptyBlock(Block *block, bool poolTheBlock) { - block->makeEmpty(); + block->reset(); if (poolTheBlock) { - extMemPool.tlsPointerKey.getThreadMallocTLS()->freeSlabBlocks.returnBlock(block); - } - else { + getTLS(/*create=*/false)->freeSlabBlocks.returnBlock(block); + } else { // slab blocks in user's pools do not have valid backRefIdx if (!extMemPool.userPool()) removeBackRef(*(block->getBackRefIdx())); @@ -1059,13 +1082,15 @@ bool ExtMemoryPool::init(intptr_t poolId, rawAllocType rawAlloc, this->keepAllMemory = keepAllMemory; this->fixedPool = fixedPool; this->delayRegsReleasing = false; - initTLS(); + if (!initTLS()) + return false; loc.init(this); - // allocate initial region for user's objects placement - return backend.bootstrap(this); + backend.init(this); + MALLOC_ASSERT(isPoolValid(), nullptr); + return true; } -void ExtMemoryPool::initTLS() { new (&tlsPointerKey) TLSKey(); } +bool ExtMemoryPool::initTLS() { return tlsPointerKey.init(); } bool MemoryPool::init(intptr_t poolId, const MemPoolPolicy *policy) { @@ -1084,7 +1109,7 @@ bool MemoryPool::init(intptr_t poolId, const MemPoolPolicy *policy) return true; } -void MemoryPool::reset() +bool MemoryPool::reset() { MALLOC_ASSERT(extMemPool.userPool(), "No reset for the system pool."); // memory is not releasing during pool reset @@ -1092,16 +1117,24 @@ void MemoryPool::reset() extMemPool.delayRegionsReleasing(true); bootStrapBlocks.reset(); - orphanedBlocks.reset(); extMemPool.lmbList.releaseAll</*poolDestroy=*/false>(&extMemPool.backend); - extMemPool.reset(); + if (!extMemPool.reset()) + return false; - extMemPool.initTLS(); + if (!extMemPool.initTLS()) + return false; extMemPool.delayRegionsReleasing(false); + return true; } -void MemoryPool::destroy() +bool MemoryPool::destroy() { +#if __TBB_MALLOC_LOCACHE_STAT + extMemPool.loc.reportStat(stdout); +#endif +#if __TBB_MALLOC_BACKEND_STAT + extMemPool.backend.reportStat(stdout); +#endif { MallocMutex::scoped_lock lock(memPoolListLock); // remove itself from global pool list @@ -1110,20 +1143,29 @@ void MemoryPool::destroy() if (next) 
next->prev = prev; } - bootStrapBlocks.reset(); - orphanedBlocks.reset(); // slab blocks in non-default pool do not have backreferences, // only large objects do if (extMemPool.userPool()) extMemPool.lmbList.releaseAll</*poolDestroy=*/true>(&extMemPool.backend); - extMemPool.destroy(); + else { + // only one non-userPool() is supported now + MALLOC_ASSERT(this==defaultMemPool, nullptr); + // There and below in extMemPool.destroy(), do not restore initial state + // for user pool, because it's just about to be released. But for system + // pool restoring, we do not want to do zeroing of it on subsequent reload. + bootStrapBlocks.reset(); + extMemPool.orphanedBlocks.reset(); + } + return extMemPool.destroy(); } -void MemoryPool::processThreadShutdown(TLSData *tlsData) +void MemoryPool::onThreadShutdown(TLSData *tlsData) { - tlsData->release(this); - bootStrapBlocks.free(tlsData); - clearTLS(); + if (tlsData) { // might be called for "empty" TLS + tlsData->release(); + bootStrapBlocks.free(tlsData); + clearTLS(); + } } #if MALLOC_DEBUG @@ -1133,12 +1175,12 @@ void Bin::verifyTLSBin (size_t size) const uint32_t objSize = getObjectSize(size); if (activeBlk) { - MALLOC_ASSERT( activeBlk->ownerTid.isCurrentThreadId(), ASSERT_TEXT ); + MALLOC_ASSERT( activeBlk->isOwnedByCurrentThread(), ASSERT_TEXT ); MALLOC_ASSERT( activeBlk->objectSize == objSize, ASSERT_TEXT ); #if MALLOC_DEBUG>1 for (Block* temp = activeBlk->next; temp; temp=temp->next) { MALLOC_ASSERT( temp!=activeBlk, ASSERT_TEXT ); - MALLOC_ASSERT( temp->owner.own(), ASSERT_TEXT ); + MALLOC_ASSERT( temp->isOwnedByCurrentThread(), ASSERT_TEXT ); MALLOC_ASSERT( temp->objectSize == objSize, ASSERT_TEXT ); MALLOC_ASSERT( temp->previous->next == temp, ASSERT_TEXT ); if (temp->next) { @@ -1147,7 +1189,7 @@ void Bin::verifyTLSBin (size_t size) const } for (Block* temp = activeBlk->previous; temp; temp=temp->previous) { MALLOC_ASSERT( temp!=activeBlk, ASSERT_TEXT ); - MALLOC_ASSERT( temp->owner.own(), ASSERT_TEXT ); + MALLOC_ASSERT( temp->isOwnedByCurrentThread(), ASSERT_TEXT ); MALLOC_ASSERT( temp->objectSize == objSize, ASSERT_TEXT ); MALLOC_ASSERT( temp->next->previous == temp, ASSERT_TEXT ); if (temp->previous) { @@ -1170,10 +1212,10 @@ void Bin::pushTLSBin(Block* block) because the function is applied to partially filled blocks as well */ unsigned int size = block->objectSize; - MALLOC_ASSERT( block->ownerTid.isCurrentThreadId(), ASSERT_TEXT ); + MALLOC_ASSERT( block->isOwnedByCurrentThread(), ASSERT_TEXT ); MALLOC_ASSERT( block->objectSize != 0, ASSERT_TEXT ); - MALLOC_ASSERT( block->next == NULL, ASSERT_TEXT ); - MALLOC_ASSERT( block->previous == NULL, ASSERT_TEXT ); + MALLOC_ASSERT( block->next == nullptr, ASSERT_TEXT ); + MALLOC_ASSERT( block->previous == nullptr, ASSERT_TEXT ); MALLOC_ASSERT( this, ASSERT_TEXT ); verifyTLSBin(size); @@ -1198,7 +1240,7 @@ void Bin::outofTLSBin(Block* block) { unsigned int size = block->objectSize; - MALLOC_ASSERT( block->ownerTid.isCurrentThreadId(), ASSERT_TEXT ); + MALLOC_ASSERT( block->isOwnedByCurrentThread(), ASSERT_TEXT ); MALLOC_ASSERT( block->objectSize != 0, ASSERT_TEXT ); MALLOC_ASSERT( this, ASSERT_TEXT ); @@ -1207,7 +1249,7 @@ void Bin::outofTLSBin(Block* block) if (block == activeBlk) { activeBlk = block->previous? 
block->previous : block->next; } - /* Delink the block */ + /* Unlink the block */ if (block->previous) { MALLOC_ASSERT( block->previous->next == block, ASSERT_TEXT ); block->previous->next = block->next; @@ -1216,116 +1258,157 @@ void Bin::outofTLSBin(Block* block) MALLOC_ASSERT( block->next->previous == block, ASSERT_TEXT ); block->next->previous = block->previous; } - block->next = NULL; - block->previous = NULL; + block->next = nullptr; + block->previous = nullptr; verifyTLSBin(size); } -Block* Bin::getPublicFreeListBlock() +Block* Bin::getPrivatizedFreeListBlock() { Block* block; MALLOC_ASSERT( this, ASSERT_TEXT ); // if this method is called, active block usage must be unsuccessful - MALLOC_ASSERT( !activeBlk && !mailbox || activeBlk && activeBlk->isFull, ASSERT_TEXT ); + MALLOC_ASSERT( (!activeBlk && !mailbox.load(std::memory_order_relaxed)) || (activeBlk && activeBlk->isFull), ASSERT_TEXT ); // the counter should be changed STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList); - if (!FencedLoad((intptr_t&)mailbox)) // hotpath is empty mailbox - return NULL; + if (!mailbox.load(std::memory_order_acquire)) // hotpath is empty mailbox + return nullptr; else { // mailbox is not empty, take lock and inspect it MallocMutex::scoped_lock scoped_cs(mailLock); - block = mailbox; + block = mailbox.load(std::memory_order_relaxed); if( block ) { - MALLOC_ASSERT( block->ownBlock(), ASSERT_TEXT ); - MALLOC_ASSERT( !isNotForUse(block->nextPrivatizable), ASSERT_TEXT ); - mailbox = block->nextPrivatizable; - block->nextPrivatizable = (Block*) this; + MALLOC_ASSERT( block->isOwnedByCurrentThread(), ASSERT_TEXT ); + MALLOC_ASSERT( !isNotForUse(block->nextPrivatizable.load(std::memory_order_relaxed)), ASSERT_TEXT ); + mailbox.store(block->nextPrivatizable.load(std::memory_order_relaxed), std::memory_order_relaxed); + block->nextPrivatizable.store((Block*)this, std::memory_order_relaxed); } } if( block ) { - MALLOC_ASSERT( isSolidPtr(block->publicFreeList), ASSERT_TEXT ); + MALLOC_ASSERT( isSolidPtr(block->publicFreeList.load(std::memory_order_relaxed)), ASSERT_TEXT ); block->privatizePublicFreeList(); + block->adjustPositionInBin(this); } return block; } -bool Block::emptyEnoughToUse() +void Bin::addPublicFreeListBlock(Block* block) +{ + MallocMutex::scoped_lock scoped_cs(mailLock); + block->nextPrivatizable.store(mailbox.load(std::memory_order_relaxed), std::memory_order_relaxed); + mailbox.store(block, std::memory_order_relaxed); +} + +// Process publicly freed objects in all blocks and return empty blocks +// to the backend in order to reduce overall footprint. 
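Before the cleanup routine that follows, a compact model of the mailbox handoff implemented by `addPublicFreeListBlock()` and `getPrivatizedFreeListBlock()` above: other threads prepend blocks to `mailbox` under the lock, and the owner pops one block and re-tags its `nextPrivatizable` with the bin pointer. `std::mutex` stands in for `MallocMutex`, and the real code additionally keeps `mailbox` atomic so the empty-mailbox hot path can skip the lock entirely:

```cpp
#include <mutex>

// Illustrative block: nextPrivatizable links blocks queued in a mailbox.
struct Block { Block* nextPrivatizable = nullptr; };

struct Bin {
    Block* mailbox = nullptr;
    std::mutex mailLock;

    // foreign thread: queue a block that received publicly freed objects
    void add(Block* b) {
        std::lock_guard<std::mutex> lock(mailLock);
        b->nextPrivatizable = mailbox;
        mailbox = b;
    }

    // owner thread: take one queued block and point nextPrivatizable
    // back at the bin, marking the block as owned again
    Block* take() {
        std::lock_guard<std::mutex> lock(mailLock);
        Block* b = mailbox;
        if (b) {
            mailbox = b->nextPrivatizable;
            b->nextPrivatizable = reinterpret_cast<Block*>(this);
        }
        return b;
    }
};

int main() {
    Bin bin;
    Block b;
    bin.add(&b);                       // a foreign thread would do this
    return bin.take() == &b ? 0 : 1;   // the owner privatizes it
}
```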
+bool Bin::cleanPublicFreeLists() { - const float threshold = (slabSize - sizeof(Block)) * (1-emptyEnoughRatio); + Block* block; + if (!mailbox.load(std::memory_order_acquire)) + return false; + else { + // Grab all the blocks in the mailbox + MallocMutex::scoped_lock scoped_cs(mailLock); + block = mailbox.load(std::memory_order_relaxed); + mailbox.store(nullptr, std::memory_order_relaxed); + } + bool released = false; + while (block) { + MALLOC_ASSERT( block->isOwnedByCurrentThread(), ASSERT_TEXT ); + Block* tmp = block->nextPrivatizable.load(std::memory_order_relaxed); + block->nextPrivatizable.store((Block*)this, std::memory_order_relaxed); + block->privatizePublicFreeList(); + if (block->empty()) { + processEmptyBlock(block, /*poolTheBlock=*/false); + released = true; + } else + block->adjustPositionInBin(this); + block = tmp; + } + return released; +} +bool Block::adjustFullness() +{ if (bumpPtr) { /* If we are still using a bump ptr for this block it is empty enough to use. */ - STAT_increment(owner, getIndex(objectSize), examineEmptyEnough); + STAT_increment(getThreadId(), getIndex(objectSize), examineEmptyEnough); isFull = false; - return 1; - } - - /* allocatedCount shows how many objects in the block are in use; however it still counts - blocks freed by other threads; so prior call to privatizePublicFreeList() is recommended */ - isFull = (allocatedCount*objectSize > threshold)? true: false; + } else { + const float threshold = (slabSize - sizeof(Block)) * (1 - emptyEnoughRatio); + /* allocatedCount shows how many objects in the block are in use; however it still counts + * blocks freed by other threads; so prior call to privatizePublicFreeList() is recommended */ + isFull = (allocatedCount*objectSize > threshold) ? true : false; #if COLLECT_STATISTICS - if (isFull) - STAT_increment(owner, getIndex(objectSize), examineNotEmpty); - else - STAT_increment(owner, getIndex(objectSize), examineEmptyEnough); + if (isFull) + STAT_increment(getThreadId(), getIndex(objectSize), examineNotEmpty); + else + STAT_increment(getThreadId(), getIndex(objectSize), examineEmptyEnough); #endif - return !isFull; + } + return isFull; +} + +// This method resides in class Block, and not in class Bin, in order to avoid +// calling getAllocationBin on a reasonably hot path in Block::freeOwnObject +void Block::adjustPositionInBin(Bin* bin/*=nullptr*/) +{ + // If the block were full, but became empty enough to use, + // move it to the front of the list + if (isFull && !adjustFullness()) { + if (!bin) + bin = tlsPtr.load(std::memory_order_relaxed)->getAllocationBin(objectSize); + bin->moveBlockToFront(this); + } } /* Restore the bump pointer for an empty block that is planned to use */ void Block::restoreBumpPtr() { MALLOC_ASSERT( allocatedCount == 0, ASSERT_TEXT ); - MALLOC_ASSERT( publicFreeList == NULL, ASSERT_TEXT ); - STAT_increment(owner, getIndex(objectSize), freeRestoreBumpPtr); + MALLOC_ASSERT( !isSolidPtr(publicFreeList.load(std::memory_order_relaxed)), ASSERT_TEXT ); + STAT_increment(getThreadId(), getIndex(objectSize), freeRestoreBumpPtr); bumpPtr = (FreeObject *)((uintptr_t)this + slabSize - objectSize); - freeList = NULL; - isFull = 0; + freeList = nullptr; + isFull = false; } -void Block::freeOwnObject(MemoryPool *memPool, TLSData *tls, void *object) +void Block::freeOwnObject(void *object) { + tlsPtr.load(std::memory_order_relaxed)->markUsed(); allocatedCount--; MALLOC_ASSERT( allocatedCount < (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT ); #if COLLECT_STATISTICS - if 
(tls->getAllocationBin(objectSize)->getActiveBlock() != this) - STAT_increment(owner, getIndex(objectSize), freeToInactiveBlock); + // Note that getAllocationBin is not called on the hottest path with statistics off. + if (tlsPtr.load(std::memory_order_relaxed)->getAllocationBin(objectSize)->getActiveBlock() != this) + STAT_increment(getThreadId(), getIndex(objectSize), freeToInactiveBlock); else - STAT_increment(owner, getIndex(objectSize), freeToActiveBlock); + STAT_increment(getThreadId(), getIndex(objectSize), freeToActiveBlock); #endif - if (allocatedCount==0 && publicFreeList==NULL) { - // The bump pointer is about to be restored for the block, - // no need to find objectToFree here (this is costly). - - // if the last object of a slab is freed, the slab cannot be marked full + if (empty()) { + // If the last object of a slab is freed, the slab cannot be marked full MALLOC_ASSERT(!isFull, ASSERT_TEXT); - tls->getAllocationBin(objectSize)->processLessUsedBlock(memPool, this); - } else { + tlsPtr.load(std::memory_order_relaxed)->getAllocationBin(objectSize)->processEmptyBlock(this, /*poolTheBlock=*/true); + } else { // hot path FreeObject *objectToFree = findObjectToFree(object); objectToFree->next = freeList; freeList = objectToFree; - - if (isFull) { - if (emptyEnoughToUse()) - tls->getAllocationBin(objectSize)->moveBlockToBinFront(this); - } + adjustPositionInBin(); } } void Block::freePublicObject (FreeObject *objectToFree) { - FreeObject *localPublicFreeList; + FreeObject* localPublicFreeList{}; MALLOC_ITT_SYNC_RELEASING(&publicFreeList); #if FREELIST_NONBLOCKING - FreeObject *temp = publicFreeList; + // TBB_REVAMP_TODO: make it non atomic in non-blocking scenario + localPublicFreeList = publicFreeList.load(std::memory_order_relaxed); do { - localPublicFreeList = objectToFree->next = temp; - temp = (FreeObject*)AtomicCompareExchange( - (intptr_t&)publicFreeList, - (intptr_t)objectToFree, (intptr_t)localPublicFreeList ); + objectToFree->next = localPublicFreeList; // no backoff necessary because trying to make change, not waiting for a change - } while( temp != localPublicFreeList ); + } while( !publicFreeList.compare_exchange_strong(localPublicFreeList, objectToFree) ); #else STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList); { @@ -1335,144 +1418,160 @@ void Block::freePublicObject (FreeObject *objectToFree) } #endif - if( localPublicFreeList==NULL ) { + if( localPublicFreeList==nullptr ) { // if the block is abandoned, its nextPrivatizable pointer should be UNUSABLE // otherwise, it should point to the bin the block belongs to. 
// reading nextPrivatizable is thread-safe below, because: - // 1) the executing thread atomically got localPublicFreeList==NULL and changed it to non-NULL; - // 2) only owning thread can change it back to NULL, + // 1) the executing thread atomically got publicFreeList==nullptr and changed it to non-nullptr; + // 2) only owning thread can change it back to nullptr, // 3) but it can not be done until the block is put to the mailbox // So the executing thread is now the only one that can change nextPrivatizable - if( !isNotForUse(nextPrivatizable) ) { - MALLOC_ASSERT( nextPrivatizable!=NULL, ASSERT_TEXT ); - Bin* theBin = (Bin*) nextPrivatizable; - MallocMutex::scoped_lock scoped_cs(theBin->mailLock); - nextPrivatizable = theBin->mailbox; - theBin->mailbox = this; + Block* next = nextPrivatizable.load(std::memory_order_acquire); + if( !isNotForUse(next) ) { + MALLOC_ASSERT( next!=nullptr, ASSERT_TEXT ); + Bin* theBin = (Bin*) next; +#if MALLOC_DEBUG && TBB_REVAMP_TODO + // FIXME: The thread that returns the block is not the block's owner. + // The below assertion compares 'theBin' against the caller's local bin, thus, it always fails. + // Need to find a way to get the correct remote bin for comparison. + { // check that nextPrivatizable points to the bin the block belongs to + uint32_t index = getIndex( objectSize ); + TLSData* tls = getThreadMallocTLS(); + MALLOC_ASSERT( theBin==tls->bin+index, ASSERT_TEXT ); + } +#endif // MALLOC_DEBUG + theBin->addPublicFreeListBlock(this); } } - STAT_increment(ThreadId::get(), ThreadCommonCounters, freeToOtherThread); - STAT_increment(owner, getIndex(objectSize), freeByOtherThread); + STAT_increment(getThreadId(), ThreadCommonCounters, freeToOtherThread); + STAT_increment(ownerTid.load(std::memory_order_relaxed), getIndex(objectSize), freeByOtherThread); } -void Block::privatizePublicFreeList() +// Make objects freed by other threads available for use again +void Block::privatizePublicFreeList( bool reset ) { - FreeObject *temp, *localPublicFreeList; + FreeObject *localPublicFreeList; + // If reset is false, publicFreeList should not be zeroed but set to UNUSABLE + // to properly synchronize with other threads freeing objects to this slab. + const intptr_t endMarker = reset ? 0 : UNUSABLE; - MALLOC_ASSERT( ownerTid.isCurrentThreadId(), ASSERT_TEXT ); + // Only the owner thread may reset the pointer to nullptr + MALLOC_ASSERT( isOwnedByCurrentThread() || !reset, ASSERT_TEXT ); #if FREELIST_NONBLOCKING - temp = publicFreeList; - do { - localPublicFreeList = temp; - temp = (FreeObject*)AtomicCompareExchange( - (intptr_t&)publicFreeList, - 0, (intptr_t)localPublicFreeList); - // no backoff necessary because trying to make change, not waiting for a change - } while( temp != localPublicFreeList ); + localPublicFreeList = publicFreeList.exchange((FreeObject*)endMarker); #else - STAT_increment(owner, ThreadCommonCounters, lockPublicFreeList); + STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList); { MallocMutex::scoped_lock scoped_cs(publicFreeListLock); localPublicFreeList = publicFreeList; - publicFreeList = NULL; + publicFreeList = endMarker; } - temp = localPublicFreeList; #endif MALLOC_ITT_SYNC_ACQUIRED(&publicFreeList); + MALLOC_ASSERT( !(reset && isNotForUse(publicFreeList)), ASSERT_TEXT ); - MALLOC_ASSERT( localPublicFreeList && localPublicFreeList==temp, ASSERT_TEXT ); // there should be something in publicFreeList! 
- if( !isNotForUse(temp) ) { // return/getPartialBlock could set it to UNUSABLE + // publicFreeList must have been UNUSABLE or valid, but not nullptr + MALLOC_ASSERT( localPublicFreeList!=nullptr, ASSERT_TEXT ); + if( isSolidPtr(localPublicFreeList) ) { MALLOC_ASSERT( allocatedCount <= (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT ); /* other threads did not change the counter freeing our blocks */ allocatedCount--; - while( isSolidPtr(temp->next) ){ // the list will end with either NULL or UNUSABLE + FreeObject *temp = localPublicFreeList; + while( isSolidPtr(temp->next) ){ // the list will end with either nullptr or UNUSABLE temp = temp->next; allocatedCount--; + MALLOC_ASSERT( allocatedCount < (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT ); } - MALLOC_ASSERT( allocatedCount < (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT ); /* merge with local freeList */ temp->next = freeList; freeList = localPublicFreeList; - STAT_increment(owner, getIndex(objectSize), allocPrivatized); + STAT_increment(getThreadId(), getIndex(objectSize), allocPrivatized); } } void Block::privatizeOrphaned(TLSData *tls, unsigned index) { Bin* bin = tls->bin + index; - STAT_increment(owner, index, allocBlockPublic); - next = NULL; - previous = NULL; - MALLOC_ASSERT( publicFreeList!=NULL, ASSERT_TEXT ); + STAT_increment(getThreadId(), index, allocBlockPublic); + next = nullptr; + previous = nullptr; + MALLOC_ASSERT( publicFreeList.load(std::memory_order_relaxed) != nullptr, ASSERT_TEXT ); /* There is not a race here since no other thread owns this block */ - MALLOC_ASSERT(isOrphaned(), ASSERT_TEXT); markOwned(tls); // It is safe to change nextPrivatizable, as publicFreeList is not null - MALLOC_ASSERT( isNotForUse(nextPrivatizable), ASSERT_TEXT ); - nextPrivatizable = (Block*)bin; + MALLOC_ASSERT( isNotForUse(nextPrivatizable.load(std::memory_order_relaxed)), ASSERT_TEXT ); + nextPrivatizable.store((Block*)bin, std::memory_order_relaxed); // the next call is required to change publicFreeList to 0 privatizePublicFreeList(); - if( allocatedCount ) { - emptyEnoughToUse(); // check its fullness and set result->isFull - } else { + if( empty() ) { restoreBumpPtr(); + } else { + adjustFullness(); // check the block fullness and set isFull } - MALLOC_ASSERT( !isNotForUse(publicFreeList), ASSERT_TEXT ); + MALLOC_ASSERT( !isNotForUse(publicFreeList.load(std::memory_order_relaxed)), ASSERT_TEXT ); } -void Block::shareOrphaned(const Bin *bin, unsigned index) + +bool Block::readyToShare() { - MALLOC_ASSERT( bin, ASSERT_TEXT ); - STAT_increment(owner, index, freeBlockPublic); - markOrphaned(); - // need to set publicFreeList to non-zero, so other threads - // will not change nextPrivatizable and it can be zeroed. 
- if ((intptr_t)nextPrivatizable==(intptr_t)bin) { - void* oldval; + FreeObject* oldVal = nullptr; #if FREELIST_NONBLOCKING - oldval = (void*)AtomicCompareExchange((intptr_t&)publicFreeList, (intptr_t)UNUSABLE, 0); + publicFreeList.compare_exchange_strong(oldVal, (FreeObject*)UNUSABLE); #else - STAT_increment(owner, ThreadCommonCounters, lockPublicFreeList); - { - MallocMutex::scoped_lock scoped_cs(publicFreeListLock); - if ( (oldval=publicFreeList)==NULL ) - (uintptr_t&)(publicFreeList) = UNUSABLE; - } + STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList); + { + MallocMutex::scoped_lock scoped_cs(publicFreeListLock); + if ( (oldVal=publicFreeList)==nullptr ) + publicFreeList = reinterpret_cast<FreeObject *>(UNUSABLE); + } #endif - if ( oldval!=NULL ) { + return oldVal==nullptr; +} + +void Block::shareOrphaned(intptr_t binTag, unsigned index) +{ + MALLOC_ASSERT( binTag, ASSERT_TEXT ); + // unreferenced formal parameter warning + tbb::detail::suppress_unused_warning(index); + STAT_increment(getThreadId(), index, freeBlockPublic); + markOrphaned(); + if ((intptr_t)nextPrivatizable.load(std::memory_order_relaxed) == binTag) { + // First check passed: the block is not in mailbox yet. + // Need to set publicFreeList to non-zero, so other threads + // will not change nextPrivatizable and it can be zeroed. + if ( !readyToShare() ) { // another thread freed an object; we need to wait until it finishes. - // I believe there is no need for exponential backoff, as the wait here is not for a lock; + // There is no need for exponential backoff, as the wait here is not for a lock; // but need to yield, so the thread we wait has a chance to run. + // TODO: add a pause to also be friendly to hyperthreads int count = 256; - while( (intptr_t)const_cast<Block* volatile &>(nextPrivatizable)==(intptr_t)bin ) { + while ((intptr_t)nextPrivatizable.load(std::memory_order_relaxed) == binTag) { if (--count==0) { do_yield(); count = 256; } } } - } else { - MALLOC_ASSERT( isSolidPtr(publicFreeList), ASSERT_TEXT ); } - MALLOC_ASSERT( publicFreeList!=NULL, ASSERT_TEXT ); + MALLOC_ASSERT( publicFreeList.load(std::memory_order_relaxed) !=nullptr, ASSERT_TEXT ); // now it is safe to change our data - previous = NULL; + previous = nullptr; // it is caller responsibility to ensure that the list of blocks // formed by nextPrivatizable pointers is kept consistent if required. // if only called from thread shutdown code, it does not matter. 
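+// (Sketch of the sentinel idiom used on the following lines, with hypothetical
+// names: UNUSABLE is assumed to be a deliberately non-pointer value, so it can
+// never collide with a real, aligned block address:
+//     const intptr_t kUnusable = 1;                     // never a valid pointer
+//     bool isNotForUse(void* p) { return (intptr_t)p == kUnusable; }
+//     bool isSolidPtr (void* p) { return p && !isNotForUse(p); }
+// Storing it into nextPrivatizable or publicFreeList tells other threads
+// "do not touch this field".)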
-    (uintptr_t&)(nextPrivatizable) = UNUSABLE;
+    nextPrivatizable.store((Block*)UNUSABLE, std::memory_order_relaxed);
 }

 void Block::cleanBlockHeader()
 {
-    next = NULL;
-    previous = NULL;
-    freeList = NULL;
+    next = nullptr;
+    previous = nullptr;
+    freeList = nullptr;
     allocatedCount = 0;
-    isFull = 0;
-    tlsPtr = NULL;
+    isFull = false;
+    tlsPtr.store(nullptr, std::memory_order_relaxed);

-    publicFreeList = NULL;
+    publicFreeList.store(nullptr, std::memory_order_relaxed);
 }

 void Block::initEmptyBlock(TLSData *tls, size_t size)
@@ -1483,16 +1582,17 @@ void Block::initEmptyBlock(TLSData *tls, size_t size)
     unsigned int objSz = getObjectSize(size);

     cleanBlockHeader();
+    MALLOC_ASSERT(objSz <= USHRT_MAX, "objSz must not exceed 2^16-1");
     objectSize = objSz;
     markOwned(tls);
     // bump pointer should be prepared for first allocation - thus move it down to objectSize
     bumpPtr = (FreeObject *)((uintptr_t)this + slabSize - objectSize);

     // each block should have the address where the head of the list of "privatizable" blocks is kept
-    // the only exception is a block for boot strap which is initialized when TLS is yet NULL
-    nextPrivatizable = tls? (Block*)(tls->bin + index) : NULL;
-    TRACEF(( "[ScalableMalloc trace] Empty block %p is initialized, owner is %d, objectSize is %d, bumpPtr is %p\n",
-             this, owner, objectSize, bumpPtr ));
+    // the only exception is a bootstrap block, which is initialized while TLS is still nullptr
+    nextPrivatizable.store( tls? (Block*)(tls->bin + index) : nullptr, std::memory_order_relaxed);
+    TRACEF(( "[ScalableMalloc trace] Empty block %p is initialized, owner is %ld, objectSize is %d, bumpPtr is %p\n",
+             this, tlsPtr.load(std::memory_order_relaxed) ? getThreadId() : -1, objectSize, bumpPtr ));
 }

 Block *OrphanedBlocks::get(TLSData *tls, unsigned int size)
@@ -1507,10 +1607,10 @@ Block *OrphanedBlocks::get(TLSData *tls, unsigned int size)
     return block;
 }

-void OrphanedBlocks::put(Bin* bin, Block *block)
+void OrphanedBlocks::put(intptr_t binTag, Block *block)
 {
     unsigned int index = getIndex(block->getSize());
-    block->shareOrphaned(bin, index);
+    block->shareOrphaned(binTag, index);
     MALLOC_ITT_SYNC_RELEASING(bins+index);
     bins[index].push(block);
 }
@@ -1521,29 +1621,56 @@ void OrphanedBlocks::reset()
         new (bins+i) LifoList();
 }

+bool OrphanedBlocks::cleanup(Backend* backend)
+{
+    bool released = false;
+    for (uint32_t i=0; i<numBlockBinLimit; i++) {
+        Block* block = bins[i].grab();
+        MALLOC_ITT_SYNC_ACQUIRED(bins+i);
+        while (block) {
+            Block* next = block->next;
+            block->privatizePublicFreeList( /*reset=*/false ); // do not set publicFreeList to nullptr
+            if (block->empty()) {
+                block->reset();
+                // slab blocks in user's pools do not have valid backRefIdx
+                if (!backend->inUserPool())
+                    removeBackRef(*(block->getBackRefIdx()));
+                backend->putSlabBlock(block);
+                released = true;
+            } else {
+                MALLOC_ITT_SYNC_RELEASING(bins+i);
+                bins[i].push(block);
+            }
+            block = next;
+        }
+    }
+    return released;
+}
+
 FreeBlockPool::ResOfGet FreeBlockPool::getBlock()
 {
-    Block *b = (Block*)AtomicFetchStore(&head, 0);
+    Block *b = head.exchange(nullptr);
+    bool lastAccessMiss;
     if (b) {
         size--;
         Block *newHead = b->next;
         lastAccessMiss = false;
-        FencedStore((intptr_t&)head, (intptr_t)newHead);
-    } else
+        head.store(newHead, std::memory_order_release);
+    } else {
         lastAccessMiss = true;
-
+    }
     return ResOfGet(b, lastAccessMiss);
 }

 void FreeBlockPool::returnBlock(Block *block)
 {
     MALLOC_ASSERT( size <= POOL_HIGH_MARK, ASSERT_TEXT );
-    Block
*localHead = head.exchange(nullptr); - if (!localHead) + if (!localHead) { size = 0; // head was stolen by externalClean, correct size accordingly - else if (size == POOL_HIGH_MARK) { + } else if (size == POOL_HIGH_MARK) { // release cold blocks and add hot one, // so keep POOL_LOW_MARK-1 blocks and add new block to head Block *headToFree = localHead, *helper; @@ -1551,7 +1678,7 @@ void FreeBlockPool::returnBlock(Block *block) headToFree = headToFree->next; Block *last = headToFree; headToFree = headToFree->next; - last->next = NULL; + last->next = nullptr; size = POOL_LOW_MARK-1; for (Block *currBl = headToFree; currBl; currBl = helper) { helper = currBl->next; @@ -1563,37 +1690,37 @@ void FreeBlockPool::returnBlock(Block *block) } size++; block->next = localHead; - FencedStore((intptr_t&)head, (intptr_t)block); + head.store(block, std::memory_order_release); } bool FreeBlockPool::externalCleanup() { Block *helper; - bool nonEmpty = false; + bool released = false; - for (Block *currBl=(Block*)AtomicFetchStore(&head, 0); currBl; currBl=helper) { + for (Block *currBl=head.exchange(nullptr); currBl; currBl=helper) { helper = currBl->next; // slab blocks in user's pools do not have valid backRefIdx if (!backend->inUserPool()) removeBackRef(currBl->backRefIdx); backend->putSlabBlock(currBl); - nonEmpty = true; + released = true; } - return nonEmpty; + return released; } -/* We have a block give it back to the malloc block manager */ -void Block::makeEmpty() +/* Prepare the block for returning to FreeBlockPool */ +void Block::reset() { // it is caller's responsibility to ensure no data is lost before calling this MALLOC_ASSERT( allocatedCount==0, ASSERT_TEXT ); - MALLOC_ASSERT( publicFreeList==NULL, ASSERT_TEXT ); + MALLOC_ASSERT( !isSolidPtr(publicFreeList.load(std::memory_order_relaxed)), ASSERT_TEXT ); if (!isStartupAllocObject()) - STAT_increment(owner, getIndex(objectSize), freeBlockBack); + STAT_increment(getThreadId(), getIndex(objectSize), freeBlockBack); cleanBlockHeader(); - nextPrivatizable = NULL; + nextPrivatizable.store(nullptr, std::memory_order_relaxed); objectSize = 0; // for an empty block, bump pointer should point right after the end of the block @@ -1603,7 +1730,7 @@ void Block::makeEmpty() inline void Bin::setActiveBlock (Block *block) { // MALLOC_ASSERT( bin, ASSERT_TEXT ); - MALLOC_ASSERT( block->ownBlock(), ASSERT_TEXT ); + MALLOC_ASSERT( block->isOwnedByCurrentThread(), ASSERT_TEXT ); // it is the caller responsibility to keep bin consistence (i.e. 
ensure this block is in the bin list)
     activeBlk = block;
 }

@@ -1613,19 +1740,17 @@ inline Block* Bin::setPreviousBlockActive()
     MALLOC_ASSERT( activeBlk, ASSERT_TEXT );
     Block* temp = activeBlk->previous;
     if( temp ) {
-        MALLOC_ASSERT( temp->isFull == 0, ASSERT_TEXT );
+        MALLOC_ASSERT( !(temp->isFull), ASSERT_TEXT );
         activeBlk = temp;
     }
     return temp;
 }

-inline TLSData *Block::ownBlock() const {
-    if (!tlsPtr || !ownerTid.isCurrentThreadId()) return NULL;
-    tlsPtr->markUsed();
-    return tlsPtr;
+inline bool Block::isOwnedByCurrentThread() const {
+    return tlsPtr.load(std::memory_order_relaxed) && ownerTid.isCurrentThreadId();
 }

-FreeObject *Block::findObjectToFree(void *object) const
+FreeObject *Block::findObjectToFree(const void *object) const
 {
     FreeObject *objectToFree;

     // Due to aligned allocations, a pointer passed to scalable_free
@@ -1650,23 +1775,25 @@ FreeObject *Block::findObjectToFree(void *object) const
     return objectToFree;
 }

-void TLSData::release(MemoryPool *mPool)
+void TLSData::release()
 {
-    mPool->extMemPool.allLocalCaches.unregisterThread(this);
-    externalCleanup(&mPool->extMemPool, /*cleanOnlyUnused=*/false);
+    memPool->extMemPool.allLocalCaches.unregisterThread(this);
+    externalCleanup(/*cleanOnlyUnused=*/false, /*cleanBins=*/false);

     for (unsigned index = 0; index < numBlockBins; index++) {
         Block *activeBlk = bin[index].getActiveBlock();
         if (!activeBlk)
             continue;
         Block *threadlessBlock = activeBlk->previous;
+        bool syncOnMailbox = false;
         while (threadlessBlock) {
             Block *threadBlock = threadlessBlock->previous;
             if (threadlessBlock->empty()) {
                 /* we destroy the thread, so do not use its block pool */
-                mPool->returnEmptyBlock(threadlessBlock, /*poolTheBlock=*/false);
+                memPool->returnEmptyBlock(threadlessBlock, /*poolTheBlock=*/false);
             } else {
-                mPool->orphanedBlocks.put(bin+index, threadlessBlock);
+                memPool->extMemPool.orphanedBlocks.put(intptr_t(bin+index), threadlessBlock);
+                syncOnMailbox = true;
             }
             threadlessBlock = threadBlock;
         }
@@ -1675,13 +1802,21 @@ void TLSData::release(MemoryPool *mPool)
             Block *threadBlock = threadlessBlock->next;
             if (threadlessBlock->empty()) {
                 /* we destroy the thread, so do not use its block pool */
-                mPool->returnEmptyBlock(threadlessBlock, /*poolTheBlock=*/false);
+                memPool->returnEmptyBlock(threadlessBlock, /*poolTheBlock=*/false);
             } else {
-                mPool->orphanedBlocks.put(bin+index, threadlessBlock);
+                memPool->extMemPool.orphanedBlocks.put(intptr_t(bin+index), threadlessBlock);
+                syncOnMailbox = true;
             }
             threadlessBlock = threadBlock;
         }
         bin[index].resetActiveBlock();
+
+        if (syncOnMailbox) {
+            // Although we synchronized on nextPrivatizable inside a block, we still need to
+            // synchronize on the bin lifetime because the thread releasing an object into the public
+            // free list is touching the bin (mailbox and mailLock)
+            MallocMutex::scoped_lock scoped_cs(bin[index].mailLock);
+        }
     }
 }

@@ -1697,6 +1832,7 @@ void TLSData::release(MemoryPool *mPool)
 * allocations are performed by moving bump pointer and increasing of object counter,
 * releasing is done via counter of objects allocated in the block
 * or moving bump pointer if releasing object is on a bound.
+ * TODO: make the bump pointer grow in the same backward direction as all the others.
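+ * A rough sketch of such bump allocation (hypothetical names, not this code):
+ *     char*  bump;                   // next free byte in the block
+ *     size_t remaining;              // bytes still available
+ *     void*  allocate(size_t n) {
+ *         n = alignUp(n, sizeof(size_t));            // natural alignment
+ *         if (n > remaining) return nullptr;         // caller grabs a new block
+ *         void* p = bump; bump += n; remaining -= n;
+ *         return p;
+ *     }
+ * Freeing normally just decrements the per-block object counter (or moves the
+ * bump pointer back when the freed object borders it); once the counter reaches
+ * zero, the whole block can be recycled at once.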
*/ class StartupBlock : public Block { @@ -1716,11 +1852,11 @@ static StartupBlock *firstStartupBlock; StartupBlock *StartupBlock::getBlock() { BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/false); - if (backRefIdx.isInvalid()) return NULL; + if (backRefIdx.isInvalid()) return nullptr; StartupBlock *block = static_cast<StartupBlock*>( defaultMemPool->extMemPool.backend.getSlabBlock(1)); - if (!block) return NULL; + if (!block) return nullptr; block->cleanBlockHeader(); setBackRef(backRefIdx, block); @@ -1734,43 +1870,31 @@ StartupBlock *StartupBlock::getBlock() FreeObject *StartupBlock::allocate(size_t size) { FreeObject *result; - StartupBlock *newBlock = NULL; - bool newBlockUnused = false; + StartupBlock *newBlock = nullptr; /* Objects must be aligned on their natural bounds, and objects bigger than word on word's bound. */ size = alignUp(size, sizeof(size_t)); // We need size of an object to implement msize. size_t reqSize = size + sizeof(size_t); - // speculatively allocates newBlock to try avoid allocation while holding lock - /* TODO: The function is called when malloc nested call is detected, - so simultaneous usage from different threads seems unlikely. - If pre-allocation is found useless, the code might be simplified. */ - if (!firstStartupBlock || firstStartupBlock->availableSize() < reqSize) { - newBlock = StartupBlock::getBlock(); - if (!newBlock) return NULL; - } { MallocMutex::scoped_lock scoped_cs(startupMallocLock); // Re-check whether we need a new block (conditions might have changed) if (!firstStartupBlock || firstStartupBlock->availableSize() < reqSize) { if (!newBlock) { newBlock = StartupBlock::getBlock(); - if (!newBlock) return NULL; + if (!newBlock) return nullptr; } newBlock->next = (Block*)firstStartupBlock; if (firstStartupBlock) firstStartupBlock->previous = (Block*)newBlock; firstStartupBlock = newBlock; - } else - newBlockUnused = true; + } result = firstStartupBlock->bumpPtr; firstStartupBlock->allocatedCount++; firstStartupBlock->bumpPtr = (FreeObject *)((uintptr_t)firstStartupBlock->bumpPtr + reqSize); } - if (newBlock && newBlockUnused) - defaultMemPool->returnEmptyBlock(newBlock, /*poolTheBlock=*/false); // keep object size at the negative offset *((size_t*)result) = size; @@ -1779,7 +1903,7 @@ FreeObject *StartupBlock::allocate(size_t size) void StartupBlock::free(void *ptr) { - Block* blockToRelease = NULL; + Block* blockToRelease = nullptr; { MallocMutex::scoped_lock scoped_cs(startupMallocLock); @@ -1806,7 +1930,7 @@ void StartupBlock::free(void *ptr) } } if (blockToRelease) { - blockToRelease->previous = blockToRelease->next = NULL; + blockToRelease->previous = blockToRelease->next = nullptr; defaultMemPool->returnEmptyBlock(blockToRelease, /*poolTheBlock=*/false); } } @@ -1824,63 +1948,14 @@ void StartupBlock::free(void *ptr) * In theory, we only need values 0 and 2. But value 1 is nonetheless * useful for detecting errors in the double-check pattern. */ -static intptr_t mallocInitialized; // implicitly initialized to 0 +static std::atomic<intptr_t> mallocInitialized{0}; // implicitly initialized to 0 static MallocMutex initMutex; -#include "../tbb/tbb_version.h" - /** The leading "\0" is here so that applying "strings" to the binary delivers a clean result. 
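+ * For instance (illustrative values only):
+ *     static char V[] = "\0" "MYLIB: tbbmalloc 2021.x";
+ *     fputs(V + 1, stderr);    // skip the leading NUL when printing
+ * lets 'strings' report "MYLIB: tbbmalloc 2021.x" as one clean record, while
+ * the code can still print it by skipping the first byte.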
*/ static char VersionString[] = "\0" TBBMALLOC_VERSION_STRINGS; -#if _XBOX || __TBB_WIN8UI_SUPPORT -bool GetBoolEnvironmentVariable(const char *) { return false; } -#else -bool GetBoolEnvironmentVariable(const char *name) -{ - if( const char* s = getenv(name) ) - return strcmp(s,"0") != 0; - return false; -} -#endif - -void AllocControlledMode::initReadEnv(const char *envName, intptr_t defaultVal) -{ - if (!setDone) { -#if !_XBOX && !__TBB_WIN8UI_SUPPORT - const char *envVal = getenv(envName); - if (envVal && !strcmp(envVal, "1")) - val = 1; - else -#endif - val = defaultVal; - setDone = true; - } -} - -void MemoryPool::initDefaultPool() -{ - long long unsigned hugePageSize = 0; -#if __linux__ - if (FILE *f = fopen("/proc/meminfo", "r")) { - const int READ_BUF_SIZE = 100; - char buf[READ_BUF_SIZE]; - MALLOC_ASSERT(sizeof(hugePageSize) >= 8, - "At least 64 bits required for keeping page size/numbers."); - - while (fgets(buf, READ_BUF_SIZE, f)) { - if (1 == sscanf(buf, "Hugepagesize: %llu kB", &hugePageSize)) { - hugePageSize *= 1024; - break; - } - } - fclose(f); - } -#endif - hugePages.init(hugePageSize); -} - -#if USE_PTHREAD && (__TBB_SOURCE_DIRECTLY_INCLUDED || __TBB_USE_DLOPEN_REENTRANCY_WORKAROUND) +#if USE_PTHREAD && __TBB_SOURCE_DIRECTLY_INCLUDED /* Decrease race interval between dynamic library unloading and pthread key destructor. Protect only Pthreads with supported unloading. */ @@ -1888,27 +1963,28 @@ class ShutdownSync { /* flag is the number of threads in pthread key dtor body (i.e., between threadDtorStart() and threadDtorDone()) or the signal to skip dtor, if flag < 0 */ - intptr_t flag; + std::atomic<intptr_t> flag; static const intptr_t skipDtor = INTPTR_MIN/2; public: - void init() { flag = 0; } -/* Suppose that 2*abs(skipDtor) or more threads never call threadExitStart() - simultaneously, so flag is never becomes negative because of that. */ + void init() { flag.store(0, std::memory_order_release); } +/* Suppose that 2*abs(skipDtor) or more threads never call threadDtorStart() + simultaneously, so flag never becomes negative because of that. */ bool threadDtorStart() { - if (flag < 0) + if (flag.load(std::memory_order_acquire) < 0) return false; - if (AtomicIncrement(flag) <= 0) { // note that new value returned - AtomicAdd(flag, -1); // flag is spoiled by us, restore it + if (++flag <= 0) { // note that new value returned + flag.fetch_sub(1); // flag is spoiled by us, restore it return false; } return true; } void threadDtorDone() { - AtomicAdd(flag, -1); + flag.fetch_sub(1); } void processExit() { - if (AtomicAdd(flag, skipDtor) != 0) + if (flag.fetch_add(skipDtor) != 0) { SpinWaitUntilEq(flag, skipDtor); + } } }; @@ -1922,40 +1998,58 @@ class ShutdownSync { void processExit() { } }; -#endif // USE_PTHREAD && (__TBB_SOURCE_DIRECTLY_INCLUDED || __TBB_USE_DLOPEN_REENTRANCY_WORKAROUND) +#endif // USE_PTHREAD && __TBB_SOURCE_DIRECTLY_INCLUDED static ShutdownSync shutdownSync; inline bool isMallocInitialized() { // Load must have acquire fence; otherwise thread taking "initialized" path // might perform textually later loads *before* mallocInitialized becomes 2. - return 2 == FencedLoad(mallocInitialized); + return 2 == mallocInitialized.load(std::memory_order_acquire); +} + +/* Caller is responsible for ensuring this routine is called exactly once. 
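+   (For comparison, a sketch of the standard-library idiom that gives the same
+   guarantee when multiple callers are possible:
+       static std::once_flag initOnce;            // hypothetical name
+       std::call_once(initOnce, MallocInitializeITT);
+   here the single call site provides that guarantee itself, so no flag is kept.)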
 */
+extern "C" void MallocInitializeITT() {
+#if __TBB_USE_ITT_NOTIFY
+    if (!usedBySrcIncluded)
+        tbb::detail::r1::__TBB_load_ittnotify();
+#endif
 }

-bool isMallocInitializedExt() {
-    return isMallocInitialized();
+void MemoryPool::initDefaultPool() {
+    hugePages.init();
 }

 /*
  * Allocator initialization routine;
  * it is called lazily on the very first scalable_malloc call.
  */
-static void initMemoryManager()
+static bool initMemoryManager()
 {
     TRACEF(( "[ScalableMalloc trace] sizeof(Block) is %d (expected 128); sizeof(uintptr_t) is %d\n",
              sizeof(Block), sizeof(uintptr_t) ));
     MALLOC_ASSERT( 2*blockHeaderAlignment == sizeof(Block), ASSERT_TEXT );
     MALLOC_ASSERT( sizeof(FreeObject) == sizeof(void*), ASSERT_TEXT );
+    MALLOC_ASSERT( isAligned(defaultMemPool, sizeof(intptr_t)),
+                   "Memory pool must be void*-aligned for atomic to work over aligned arguments.");
+#if USE_WINTHREAD
+    const size_t granularity = 64*1024; // granularity of VirtualAlloc
+#else
+    // POSIX.1-2001-compliant way to get page size
+    const size_t granularity = sysconf(_SC_PAGESIZE);
+#endif
+    if (!defaultMemPool) {
+        // Do not rely on static constructors; do the assignment here in case
+        // the library's static section has not been initialized at this call yet.
+        defaultMemPool = (MemoryPool*)defaultMemPool_space;
+    }
     bool initOk = defaultMemPool->
-        extMemPool.init(0, NULL, NULL, scalableMallocPoolGranularity,
+        extMemPool.init(0, nullptr, nullptr, granularity,
                         /*keepAllMemory=*/false, /*fixedPool=*/false);
-// TODO: add error handling, and on error do something better than exit(1)
-    if (!initOk || !initBackRefMaster(&defaultMemPool->extMemPool.backend)) {
-        fprintf (stderr, "The memory manager cannot access sufficient memory to initialize; exiting \n");
-        exit(1);
-    }
-    ThreadId::init();      // Create keys for thread id
+// TODO: make extMemPool.init() not allocate memory
+    if (!initOk || !initBackRefMain(&defaultMemPool->extMemPool.backend) || !ThreadId::init())
+        return false;
     MemoryPool::initDefaultPool();
     // init() is required iff initMemoryManager() is called
     // after mallocProcessShutdownNotification()
@@ -1963,37 +2057,46 @@ static void initMemoryManager()
 #if COLLECT_STATISTICS
     initStatisticsCollection();
 #endif
+    return true;
+}
+
+static bool GetBoolEnvironmentVariable(const char* name) {
+    return tbb::detail::r1::GetBoolEnvironmentVariable(name);
 }

 //! Ensures that initMemoryManager() is called once and only once.
 /** Does not return until initMemoryManager() has been completed by a thread.
     There is no need to call this routine if mallocInitialized==2.
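+    Reduced to its core, the double-checked pattern implemented below looks
+    like this (sketch with hypothetical names):
+        std::atomic<int> state{0};   // 0 = uninitialized, 1 = in progress, 2 = done
+        std::mutex initLock;
+        bool ensureInitialized() {
+            if (state.load(std::memory_order_acquire) == 2) return true;  // fast path
+            std::lock_guard<std::mutex> guard(initLock);
+            if (state.load(std::memory_order_relaxed) != 2) {
+                state.store(1, std::memory_order_relaxed);
+                if (!realInit()) { state.store(0, std::memory_order_relaxed); return false; }
+                state.store(2, std::memory_order_release);  // publish all side effects
+            }
+            return true;
+        }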
*/ -static void doInitialization() +static bool doInitialization() { MallocMutex::scoped_lock lock( initMutex ); - if (mallocInitialized!=2) { - MALLOC_ASSERT( mallocInitialized==0, ASSERT_TEXT ); - mallocInitialized = 1; + if (mallocInitialized.load(std::memory_order_relaxed)!=2) { + MALLOC_ASSERT( mallocInitialized.load(std::memory_order_relaxed)==0, ASSERT_TEXT ); + mallocInitialized.store(1, std::memory_order_relaxed); RecursiveMallocCallProtector scoped; - initMemoryManager(); + if (!initMemoryManager()) { + mallocInitialized.store(0, std::memory_order_relaxed); // restore and out + return false; + } #ifdef MALLOC_EXTRA_INITIALIZATION MALLOC_EXTRA_INITIALIZATION; #endif #if MALLOC_CHECK_RECURSION RecursiveMallocCallProtector::detectNaiveOverload(); #endif - MALLOC_ASSERT( mallocInitialized==1, ASSERT_TEXT ); + MALLOC_ASSERT( mallocInitialized.load(std::memory_order_relaxed)==1, ASSERT_TEXT ); // Store must have release fence, otherwise mallocInitialized==2 // might become remotely visible before side effects of // initMemoryManager() become remotely visible. - FencedStore( mallocInitialized, 2 ); + mallocInitialized.store(2, std::memory_order_release); if( GetBoolEnvironmentVariable("TBB_VERSION") ) { fputs(VersionString+1,stderr); hugePages.printStatus(); } } /* It can't be 0 or I would have initialized it */ - MALLOC_ASSERT( mallocInitialized==2, ASSERT_TEXT ); + MALLOC_ASSERT( mallocInitialized.load(std::memory_order_relaxed)==2, ASSERT_TEXT ); + return true; } /********* End library initialization *************/ @@ -2005,7 +2108,7 @@ FreeObject *Block::allocateFromFreeList() { FreeObject *result; - if (!freeList) return NULL; + if (!freeList) return nullptr; result = freeList; MALLOC_ASSERT( result, ASSERT_TEXT ); @@ -2013,7 +2116,7 @@ FreeObject *Block::allocateFromFreeList() freeList = result->next; MALLOC_ASSERT( allocatedCount < (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT ); allocatedCount++; - STAT_increment(owner, getIndex(objectSize), allocFreeListUsed); + STAT_increment(getThreadId(), getIndex(objectSize), allocFreeListUsed); return result; } @@ -2024,18 +2127,18 @@ FreeObject *Block::allocateFromBumpPtr() if (result) { bumpPtr = (FreeObject *) ((uintptr_t) bumpPtr - objectSize); if ( (uintptr_t)bumpPtr < (uintptr_t)this+sizeof(Block) ) { - bumpPtr = NULL; + bumpPtr = nullptr; } MALLOC_ASSERT( allocatedCount < (slabSize-sizeof(Block))/objectSize, ASSERT_TEXT ); allocatedCount++; - STAT_increment(owner, getIndex(objectSize), allocBumpPtrUsed); + STAT_increment(getThreadId(), getIndex(objectSize), allocBumpPtrUsed); } return result; } inline FreeObject* Block::allocate() { - MALLOC_ASSERT( ownBlock(), ASSERT_TEXT ); + MALLOC_ASSERT( isOwnedByCurrentThread(), ASSERT_TEXT ); /* for better cache locality, first looking in the free list. */ if ( FreeObject *result = allocateFromFreeList() ) { @@ -2050,8 +2153,8 @@ inline FreeObject* Block::allocate() MALLOC_ASSERT( !bumpPtr, ASSERT_TEXT ); /* the block is considered full. 
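+    // (freeList is an intrusive list: each free object stores the pointer to
+    // the next free object in its own first word -- note the assertion above
+    // that sizeof(FreeObject) == sizeof(void*) -- so popping the head only
+    // needs the assignment on the next line.)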
*/ - isFull = 1; - return NULL; + isFull = true; + return nullptr; } size_t Block::findObjectSize(void *object) const @@ -2071,7 +2174,7 @@ size_t Block::findObjectSize(void *object) const return size; } -void Bin::moveBlockToBinFront(Block *block) +void Bin::moveBlockToFront(Block *block) { /* move the block to the front of the bin */ if (block == activeBlk) return; @@ -2079,12 +2182,12 @@ void Bin::moveBlockToBinFront(Block *block) pushTLSBin(block); } -void Bin::processLessUsedBlock(MemoryPool *memPool, Block *block) +void Bin::processEmptyBlock(Block *block, bool poolTheBlock) { if (block != activeBlk) { - /* We are not actively using this block; return it to the general block pool */ + /* We are not using this block; return it to the pool */ outofTLSBin(block); - memPool->returnEmptyBlock(block, /*poolTheBlock=*/true); + block->getMemPool()->returnEmptyBlock(block, poolTheBlock); } else { /* all objects are free - let's restore the bump pointer */ block->restoreBumpPtr(); @@ -2098,9 +2201,9 @@ bool LocalLOCImpl<LOW_MARK, HIGH_MARK>::put(LargeMemoryBlock *object, ExtMemoryP // not spoil cache with too large object, that can cause its total cleanup if (size > MAX_TOTAL_SIZE) return false; - LargeMemoryBlock *localHead = (LargeMemoryBlock*)AtomicFetchStore(&head, 0); + LargeMemoryBlock *localHead = head.exchange(nullptr); - object->prev = NULL; + object->prev = nullptr; object->next = localHead; if (localHead) localHead->prev = object; @@ -2122,27 +2225,28 @@ bool LocalLOCImpl<LOW_MARK, HIGH_MARK>::put(LargeMemoryBlock *object, ExtMemoryP tail = tail->prev; } LargeMemoryBlock *headToRelease = tail->next; - tail->next = NULL; + tail->next = nullptr; extMemPool->freeLargeObjectList(headToRelease); } - FencedStore((intptr_t&)head, (intptr_t)localHead); + head.store(localHead, std::memory_order_release); return true; } template<int LOW_MARK, int HIGH_MARK> LargeMemoryBlock *LocalLOCImpl<LOW_MARK, HIGH_MARK>::get(size_t size) { - LargeMemoryBlock *localHead, *res=NULL; + LargeMemoryBlock *localHead, *res = nullptr; if (size > MAX_TOTAL_SIZE) - return NULL; + return nullptr; - if (!head || !(localHead = (LargeMemoryBlock*)AtomicFetchStore(&head, 0))) { + // TBB_REVAMP_TODO: review this line + if (!head.load(std::memory_order_acquire) || (localHead = head.exchange(nullptr)) == nullptr) { // do not restore totalSize, numOfBlocks and tail at this point, // as they are used only in put(), where they must be restored - return NULL; + return nullptr; } for (LargeMemoryBlock *curr = localHead; curr; curr=curr->next) { @@ -2161,14 +2265,15 @@ LargeMemoryBlock *LocalLOCImpl<LOW_MARK, HIGH_MARK>::get(size_t size) break; } } - FencedStore((intptr_t&)head, (intptr_t)localHead); + + head.store(localHead, std::memory_order_release); return res; } template<int LOW_MARK, int HIGH_MARK> bool LocalLOCImpl<LOW_MARK, HIGH_MARK>::externalCleanup(ExtMemoryPool *extMemPool) { - if (LargeMemoryBlock *localHead = (LargeMemoryBlock*)AtomicFetchStore(&head, 0)) { + if (LargeMemoryBlock *localHead = head.exchange(nullptr)) { extMemPool->freeLargeObjectList(localHead); return true; } @@ -2177,17 +2282,20 @@ bool LocalLOCImpl<LOW_MARK, HIGH_MARK>::externalCleanup(ExtMemoryPool *extMemPoo void *MemoryPool::getFromLLOCache(TLSData* tls, size_t size, size_t alignment) { - LargeMemoryBlock *lmb = NULL; + LargeMemoryBlock *lmb = nullptr; size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr); size_t allocationSize = LargeObjectCache::alignToBin(size+headersSize+alignment); if (allocationSize < size) // 
allocationSize is wrapped around after alignToBin - return NULL; + return nullptr; + MALLOC_ASSERT(allocationSize >= alignment, "Overflow must be checked before."); - if (tls) + if (tls) { + tls->markUsed(); lmb = tls->lloc.get(allocationSize); + } if (!lmb) - lmb = extMemPool.mallocLargeObject(allocationSize); + lmb = extMemPool.mallocLargeObject(this, allocationSize); if (lmb) { // doing shuffle we suppose that alignment offset guarantees @@ -2224,12 +2332,12 @@ void *MemoryPool::getFromLLOCache(TLSData* tls, size_t size, size_t alignment) lmb->objectSize = size; - MALLOC_ASSERT( isLargeObject(alignedArea), ASSERT_TEXT ); + MALLOC_ASSERT( isLargeObject<unknownMem>(alignedArea), ASSERT_TEXT ); MALLOC_ASSERT( isAligned(alignedArea, alignment), ASSERT_TEXT ); return alignedArea; } - return NULL; + return nullptr; } void MemoryPool::putToLLOCache(TLSData *tls, void *object) @@ -2238,8 +2346,12 @@ void MemoryPool::putToLLOCache(TLSData *tls, void *object) // overwrite backRefIdx to simplify double free detection header->backRefIdx = BackRefIdx(); - if (!tls || !tls->lloc.put(header->memoryBlock, &extMemPool)) - extMemPool.freeLargeObject(header->memoryBlock); + if (tls) { + tls->markUsed(); + if (tls->lloc.put(header->memoryBlock, &extMemPool)) + return; + } + extMemPool.freeLargeObject(header->memoryBlock); } /* @@ -2258,7 +2370,9 @@ static void *allocateAligned(MemoryPool *memPool, size_t size, size_t alignment) { MALLOC_ASSERT( isPowerOfTwo(alignment), ASSERT_TEXT ); - if (!isMallocInitialized()) doInitialization(); + if (!isMallocInitialized()) + if (!doInitialization()) + return nullptr; void *result; if (size<=maxSegregatedObjectSize && alignment<=maxSegregatedObjectSize) @@ -2268,15 +2382,12 @@ static void *allocateAligned(MemoryPool *memPool, size_t size, size_t alignment) result = internalPoolMalloc(memPool, size); else if (size+alignment < minLargeObjectSize) { void *unaligned = internalPoolMalloc(memPool, size+alignment); - if (!unaligned) return NULL; + if (!unaligned) return nullptr; result = alignUp(unaligned, alignment); } else goto LargeObjAlloc; } else { LargeObjAlloc: - /* This can be the first allocation call. */ - if (!isMallocInitialized()) - doInitialization(); TLSData *tls = memPool->getTLS(/*create=*/true); // take into account only alignment that are higher then natural result = @@ -2289,44 +2400,66 @@ static void *allocateAligned(MemoryPool *memPool, size_t size, size_t alignment) } static void *reallocAligned(MemoryPool *memPool, void *ptr, - size_t size, size_t alignment = 0) + size_t newSize, size_t alignment = 0) { void *result; size_t copySize; - if (isLargeObject(ptr)) { + if (isLargeObject<ourMem>(ptr)) { LargeMemoryBlock* lmb = ((LargeObjectHdr *)ptr - 1)->memoryBlock; copySize = lmb->unalignedSize-((uintptr_t)ptr-(uintptr_t)lmb); - if (size <= copySize && (0==alignment || isAligned(ptr, alignment))) { - lmb->objectSize = size; - return ptr; - } else { - copySize = lmb->objectSize; - result = alignment ? allocateAligned(memPool, size, alignment) : - internalPoolMalloc(memPool, size); + + // Apply different strategies if size decreases + if (newSize <= copySize && (0 == alignment || isAligned(ptr, alignment))) { + + // For huge objects (that do not fit in backend cache), keep the same space unless + // the new size is at least twice smaller + bool isMemoryBlockHuge = copySize > memPool->extMemPool.backend.getMaxBinnedSize(); + size_t threshold = isMemoryBlockHuge ? 
copySize / 2 : 0; + if (newSize > threshold) { + lmb->objectSize = newSize; + return ptr; + } + // TODO: For large objects suitable for the backend cache, + // split out the excessive part and put it to the backend. } + // Reallocate for real + copySize = lmb->objectSize; +#if BACKEND_HAS_MREMAP + if (void *r = memPool->extMemPool.remap(ptr, copySize, newSize, + alignment < largeObjectAlignment ? largeObjectAlignment : alignment)) + return r; +#endif + result = alignment ? allocateAligned(memPool, newSize, alignment) : + internalPoolMalloc(memPool, newSize); + } else { Block* block = (Block *)alignDown(ptr, slabSize); copySize = block->findObjectSize(ptr); - if (size <= copySize && (0==alignment || isAligned(ptr, alignment))) { + + // TODO: Move object to another bin if size decreases and the current bin is "empty enough". + // Currently, in case of size decreasing, old pointer is returned + if (newSize <= copySize && (0==alignment || isAligned(ptr, alignment))) { return ptr; } else { - result = alignment ? allocateAligned(memPool, size, alignment) : - internalPoolMalloc(memPool, size); + result = alignment ? allocateAligned(memPool, newSize, alignment) : + internalPoolMalloc(memPool, newSize); } } if (result) { - memcpy(result, ptr, copySize<size? copySize: size); + memcpy(result, ptr, copySize < newSize ? copySize : newSize); internalPoolFree(memPool, ptr, 0); } return result; } +#if MALLOC_DEBUG /* A predicate checks if an object is properly placed inside its block */ inline bool Block::isProperlyPlaced(const void *object) const { return 0 == ((uintptr_t)this + slabSize - (uintptr_t)object) % objectSize; } +#endif /* Finds the real object inside the block */ FreeObject *Block::findAllocatedObject(const void *address) const @@ -2350,7 +2483,7 @@ static inline BackRefIdx safer_dereference (const BackRefIdx *ptr) #if _MSC_VER __try { #endif - id = *ptr; + id = dereference(ptr); #if _MSC_VER } __except( GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) { @@ -2360,14 +2493,18 @@ static inline BackRefIdx safer_dereference (const BackRefIdx *ptr) return id; } +template<MemoryOrigin memOrigin> bool isLargeObject(void *object) { if (!isAligned(object, largeObjectAlignment)) return false; LargeObjectHdr *header = (LargeObjectHdr*)object - 1; - BackRefIdx idx = safer_dereference(&header->backRefIdx); + BackRefIdx idx = (memOrigin == unknownMem) ? 
+ safer_dereference(&header->backRefIdx) : dereference(&header->backRefIdx); return idx.isLargeObject() + // in valid LargeObjectHdr memoryBlock is not nullptr + && header->memoryBlock // in valid LargeObjectHdr memoryBlock points somewhere before header // TODO: more strict check && (uintptr_t)header->memoryBlock < (uintptr_t)header @@ -2376,24 +2513,27 @@ bool isLargeObject(void *object) static inline bool isSmallObject (void *ptr) { - void* expected = alignDown(ptr, slabSize); - const BackRefIdx* idx = ((Block*)expected)->getBackRef(); + Block* expectedBlock = (Block*)alignDown(ptr, slabSize); + const BackRefIdx* idx = expectedBlock->getBackRefIdx(); - return expected == getBackRef(safer_dereference(idx)); + bool isSmall = expectedBlock == getBackRef(safer_dereference(idx)); + if (isSmall) + expectedBlock->checkFreePrecond(ptr); + return isSmall; } /**** Check if an object was allocated by scalable_malloc ****/ static inline bool isRecognized (void* ptr) { - return isLargeObject(ptr) || isSmallObject(ptr); + return defaultMemPool->extMemPool.backend.ptrCanBeValid(ptr) && + (isLargeObject<unknownMem>(ptr) || isSmallObject(ptr)); } -static inline void freeSmallObject(MemoryPool *memPool, void *object) +static inline void freeSmallObject(void *object) { /* mask low bits to get the block */ Block *block = (Block *)alignDown(object, slabSize); - MALLOC_ASSERT( block->checkFreePrecond(object), - "Possible double free or heap corruption." ); + block->checkFreePrecond(object); #if MALLOC_CHECK_RECURSION if (block->isStartupAllocObject()) { @@ -2401,9 +2541,9 @@ static inline void freeSmallObject(MemoryPool *memPool, void *object) return; } #endif - if (TLSData *tls = block->ownBlock()) - block->freeOwnObject(memPool, tls, object); - else { /* Slower path to add to the shared list, the allocatedCount is updated by the owner thread in malloc. */ + if (block->isOwnedByCurrentThread()) { + block->freeOwnObject(object); + } else { /* Slower path to add to the shared list, the allocatedCount is updated by the owner thread in malloc. */ FreeObject *objectToFree = block->findObjectToFree(object); block->freePublicObject(objectToFree); } @@ -2414,23 +2554,25 @@ static void *internalPoolMalloc(MemoryPool* memPool, size_t size) Bin* bin; Block * mallocBlock; - if (!memPool) return NULL; + if (!memPool) return nullptr; if (!size) size = sizeof(size_t); TLSData *tls = memPool->getTLS(/*create=*/true); - /* - * Use Large Object Allocation - */ + + /* Allocate a large object */ if (size >= minLargeObjectSize) return memPool->getFromLLOCache(tls, size, largeObjectAlignment); + if (!tls) return nullptr; + + tls->markUsed(); /* * Get an element in thread-local array corresponding to the given size; * It keeps ptr to the active block for allocations of this size */ bin = tls->getAllocationBin(size); - if ( !bin ) return NULL; + if ( !bin ) return nullptr; /* Get a block to try to allocate in. 
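+       The fallback order in the code below is: (1) the bin's active block and
+       the blocks linked after it, (2) a block whose publicly freed objects are
+       reclaimed via getPrivatizedFreeListBlock(), (3) an orphaned block left
+       behind by a terminated thread, and only then (4) a fresh block, after
+       which the allocation is retried.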
*/ for( mallocBlock = bin->getActiveBlock(); mallocBlock; @@ -2443,11 +2585,8 @@ static void *internalPoolMalloc(MemoryPool* memPool, size_t size) /* * else privatize publicly freed objects in some block and allocate from it */ - mallocBlock = bin->getPublicFreeListBlock(); + mallocBlock = bin->getPrivatizedFreeListBlock(); if (mallocBlock) { - if (mallocBlock->emptyEnoughToUse()) { - bin->moveBlockToBinFront(mallocBlock); - } MALLOC_ASSERT( mallocBlock->freeListNonNull(), ASSERT_TEXT ); if ( FreeObject *result = mallocBlock->allocateFromFreeList() ) return result; @@ -2459,13 +2598,13 @@ static void *internalPoolMalloc(MemoryPool* memPool, size_t size) /* * no suitable own blocks, try to get a partial block that some other thread has discarded. */ - mallocBlock = memPool->orphanedBlocks.get(tls, size); + mallocBlock = memPool->extMemPool.orphanedBlocks.get(tls, size); while (mallocBlock) { bin->pushTLSBin(mallocBlock); bin->setActiveBlock(mallocBlock); // TODO: move under the below condition? if( FreeObject *result = mallocBlock->allocate() ) return result; - mallocBlock = memPool->orphanedBlocks.get(tls, size); + mallocBlock = memPool->extMemPool.orphanedBlocks.get(tls, size); } /* @@ -2482,13 +2621,16 @@ static void *internalPoolMalloc(MemoryPool* memPool, size_t size) return internalPoolMalloc(memPool, size); } /* - * else nothing works so return NULL + * else nothing works so return nullptr */ - TRACEF(( "[ScalableMalloc trace] No memory found, returning NULL.\n" )); - return NULL; + TRACEF(( "[ScalableMalloc trace] No memory found, returning nullptr.\n" )); + return nullptr; } // When size==0 (i.e. unknown), detect here whether the object is large. +// For size is known and < minLargeObjectSize, we still need to check +// if the actual object is large, because large objects might be used +// for aligned small allocations. static bool internalPoolFree(MemoryPool *memPool, void *object, size_t size) { if (!memPool || !object) return false; @@ -2499,10 +2641,10 @@ static bool internalPoolFree(MemoryPool *memPool, void *object, size_t size) MALLOC_ASSERT(memPool->extMemPool.userPool() || isRecognized(object), "Invalid pointer during object releasing is detected."); - if (size >= minLargeObjectSize || (!size && isLargeObject(object))) + if (size >= minLargeObjectSize || isLargeObject<ourMem>(object)) memPool->putToLLOCache(memPool->getTLS(/*create=*/false), object); else - freeSmallObject(memPool, object); + freeSmallObject(object); return true; } @@ -2514,12 +2656,12 @@ static void *internalMalloc(size_t size) if (RecursiveMallocCallProtector::sameThreadActive()) return size<minLargeObjectSize? 
StartupBlock::allocate(size) :
         // nested allocation, so skip tls
-        (FreeObject*)defaultMemPool->getFromLLOCache(NULL, size, slabSize);
+        (FreeObject*)defaultMemPool->getFromLLOCache(nullptr, size, slabSize);
 #endif

     if (!isMallocInitialized())
-        doInitialization();
-
+        if (!doInitialization())
+            return nullptr;
     return internalPoolMalloc(defaultMemPool, size);
 }

@@ -2530,18 +2672,15 @@ static void internalFree(void *object)

 static size_t internalMsize(void* ptr)
 {
-    if (ptr) {
-        MALLOC_ASSERT(isRecognized(ptr), "Invalid pointer in scalable_msize detected.");
-        if (isLargeObject(ptr)) {
-            LargeMemoryBlock* lmb = ((LargeObjectHdr*)ptr - 1)->memoryBlock;
-            return lmb->objectSize;
-        } else
-            return ((Block*)alignDown(ptr, slabSize))->findObjectSize(ptr);
+    MALLOC_ASSERT(ptr, "Invalid pointer passed to internalMsize");
+    if (isLargeObject<ourMem>(ptr)) {
+        // TODO: return the maximum memory size that can be written to this object
+        LargeMemoryBlock* lmb = ((LargeObjectHdr*)ptr - 1)->memoryBlock;
+        return lmb->objectSize;
+    } else {
+        Block *block = (Block*)alignDown(ptr, slabSize);
+        return block->findObjectSize(ptr);
     }
-    errno = EINVAL;
-    // Unlike _msize, return 0 in case of parameter error.
-    // Returning size_t(-1) looks more like the way to troubles.
-    return 0;
 }

 } // namespace internal

@@ -2550,7 +2689,7 @@ using namespace rml::internal;

 // legacy entry point saved for compatibility with binaries compiled
 // with pre-6003 versions of TBB
-rml::MemoryPool *pool_create(intptr_t pool_id, const MemPoolPolicy *policy)
+TBBMALLOC_EXPORT rml::MemoryPool *pool_create(intptr_t pool_id, const MemPoolPolicy *policy)
 {
     rml::MemoryPool *pool;
     MemPoolPolicy pol(policy->pAlloc, policy->pFree, policy->granularity);

@@ -2565,29 +2704,31 @@ rml::MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy,
     if ( !policy->pAlloc || policy->version<MemPoolPolicy::TBBMALLOC_POOL_VERSION
          // empty pFree allowed only for fixed pools
          || !(policy->fixedPool || policy->pFree)) {
-        *pool = NULL;
+        *pool = nullptr;
         return INVALID_POLICY;
     }
     if ( policy->version>MemPoolPolicy::TBBMALLOC_POOL_VERSION // future versions are not supported
          // new flags can be added in place of reserved, but default
          // behaviour must be supported by this version
          || policy->reserved ) {
-        *pool = NULL;
+        *pool = nullptr;
         return UNSUPPORTED_POLICY;
     }
     if (!isMallocInitialized())
-        doInitialization();
-
+        if (!doInitialization()) {
+            *pool = nullptr;
+            return NO_MEMORY;
+        }
     rml::internal::MemoryPool *memPool =
         (rml::internal::MemoryPool*)internalMalloc((sizeof(rml::internal::MemoryPool)));
     if (!memPool) {
-        *pool = NULL;
+        *pool = nullptr;
         return NO_MEMORY;
     }
-    memset(memPool, 0, sizeof(rml::internal::MemoryPool));
+    memset(static_cast<void*>(memPool), 0, sizeof(rml::internal::MemoryPool));
     if (!memPool->init(pool_id, policy)) {
         internalFree(memPool);
-        *pool = NULL;
+        *pool = nullptr;
         return NO_MEMORY;
     }

@@ -2598,18 +2739,17 @@ rml::MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy,

 bool pool_destroy(rml::MemoryPool* memPool)
 {
     if (!memPool) return false;
-    ((rml::internal::MemoryPool*)memPool)->destroy();
+    bool ret = ((rml::internal::MemoryPool*)memPool)->destroy();
     internalFree(memPool);

-    return true;
+    return ret;
 }

 bool pool_reset(rml::MemoryPool* memPool)
 {
     if (!memPool) return false;

-    ((rml::internal::MemoryPool*)memPool)->reset();
-    return true;
+    return ((rml::internal::MemoryPool*)memPool)->reset();
 }

 void *pool_malloc(rml::MemoryPool* mPool, size_t size)
@@ -2623,7 +2763,7 @@ void
*pool_realloc(rml::MemoryPool* mPool, void *object, size_t size) return internalPoolMalloc((rml::internal::MemoryPool*)mPool, size); if (!size) { internalPoolFree((rml::internal::MemoryPool*)mPool, object, 0); - return NULL; + return nullptr; } return reallocAligned((rml::internal::MemoryPool*)mPool, object, size, 0); } @@ -2631,7 +2771,7 @@ void *pool_realloc(rml::MemoryPool* mPool, void *object, size_t size) void *pool_aligned_malloc(rml::MemoryPool* mPool, size_t size, size_t alignment) { if (!isPowerOfTwo(alignment) || 0==size) - return NULL; + return nullptr; return allocateAligned((rml::internal::MemoryPool*)mPool, size, alignment); } @@ -2639,7 +2779,7 @@ void *pool_aligned_malloc(rml::MemoryPool* mPool, size_t size, size_t alignment) void *pool_aligned_realloc(rml::MemoryPool* memPool, void *ptr, size_t size, size_t alignment) { if (!isPowerOfTwo(alignment)) - return NULL; + return nullptr; rml::internal::MemoryPool *mPool = (rml::internal::MemoryPool*)memPool; void *tmp; @@ -2647,7 +2787,7 @@ void *pool_aligned_realloc(rml::MemoryPool* memPool, void *ptr, size_t size, siz tmp = allocateAligned(mPool, size, alignment); else if (!size) { internalPoolFree(mPool, ptr, 0); - return NULL; + return nullptr; } else tmp = reallocAligned(mPool, ptr, size, alignment); @@ -2659,6 +2799,37 @@ bool pool_free(rml::MemoryPool *mPool, void *object) return internalPoolFree((rml::internal::MemoryPool*)mPool, object, 0); } +rml::MemoryPool *pool_identify(void *object) +{ + rml::internal::MemoryPool *pool; + if (isLargeObject<ourMem>(object)) { + LargeObjectHdr *header = (LargeObjectHdr*)object - 1; + pool = header->memoryBlock->pool; + } else { + Block *block = (Block*)alignDown(object, slabSize); + pool = block->getMemPool(); + } + // do not return defaultMemPool, as it can't be used in pool_free() etc + __TBB_ASSERT_RELEASE(pool!=defaultMemPool, + "rml::pool_identify() can't be used for scalable_malloc() etc results."); + return (rml::MemoryPool*)pool; +} + +size_t pool_msize(rml::MemoryPool *mPool, void* object) +{ + if (object) { + // No assert for object recognition, cause objects allocated from non-default + // memory pool do not participate in range checking and do not have valid backreferences for + // small objects. Instead, check that an object belong to the certain memory pool. + MALLOC_ASSERT_EX(mPool == pool_identify(object), "Object does not belong to the specified pool"); + return internalMsize(object); + } + errno = EINVAL; + // Unlike _msize, return 0 in case of parameter error. + // Returning size_t(-1) looks more like the way to troubles. + return 0; +} + } // namespace rml using namespace rml::internal; @@ -2669,76 +2840,91 @@ static unsigned int threadGoingDownCount = 0; /* * When a thread is shutting down this routine should be called to remove all the thread ids - * from the malloc blocks and replace them with a NULL thread id. + * from the malloc blocks and replace them with a nullptr thread id. * * For pthreads, the function is set as a callback in pthread_key_create for TLS bin. - * For non-NULL keys it will be automatically called at thread exit with the key value - * as the argument. + * It will be automatically called at thread exit with the key value as the argument, + * unless that value is nullptr. + * For Windows, it is called from DllMain( DLL_THREAD_DETACH ). + * + * However neither of the above is called for the main process thread, so the routine + * also needs to be called during the process shutdown. * - * for Windows, it should be called directly e.g. 
from DllMain
from DllMain */ -void mallocThreadShutdownNotification(void* arg) +// TODO: Consider making this function part of class MemoryPool. +void doThreadShutdownNotification(TLSData* tls, bool main_thread) { - // Check whether TLS has been initialized - if (!isMallocInitialized()) return; - TRACEF(( "[ScalableMalloc trace] Thread id %d blocks return start %d\n", getThreadId(), threadGoingDownCount++ )); -#if USE_WINTHREAD - suppress_unused_warning(arg); - MallocMutex::scoped_lock lock(MemoryPool::memPoolListLock); - // The routine is called once per thread, need to walk through all pools on Windows - for (MemoryPool *memPool = defaultMemPool; memPool; memPool = memPool->next) - if (TLSData *tls = memPool->getTLS(/*create=*/false)) - memPool->processThreadShutdown(tls); -#else - if (!shutdownSync.threadDtorStart()) return; - // The routine is called for each memPool, gets memPool from TLSData. - TLSData *tls = (TLSData*)arg; - tls->getMemPool()->processThreadShutdown(tls); - shutdownSync.threadDtorDone(); + +#if USE_PTHREAD + if (tls) { + if (!shutdownSync.threadDtorStart()) return; + tls->getMemPool()->onThreadShutdown(tls); + shutdownSync.threadDtorDone(); + } else #endif + { + suppress_unused_warning(tls); // not used on Windows + // The default pool is safe to use at this point: + // on Linux, only the main thread can go here before destroying defaultMemPool; + // on Windows, shutdown is synchronized via loader lock and isMallocInitialized(). + // See also __TBB_mallocProcessShutdownNotification() + defaultMemPool->onThreadShutdown(defaultMemPool->getTLS(/*create=*/false)); + // Take lock to walk through other pools; but waiting might be dangerous at this point + // (e.g. on Windows the main thread might deadlock) + bool locked = false; + MallocMutex::scoped_lock lock(MemoryPool::memPoolListLock, /*wait=*/!main_thread, &locked); + if (locked) { // the list is safe to process + for (MemoryPool *memPool = defaultMemPool->next; memPool; memPool = memPool->next) + memPool->onThreadShutdown(memPool->getTLS(/*create=*/false)); + } + } TRACEF(( "[ScalableMalloc trace] Thread id %d blocks return end\n", getThreadId() )); } -#if USE_WINTHREAD +#if USE_PTHREAD +void mallocThreadShutdownNotification(void* arg) +{ + // The routine is called for each pool (as TLS dtor) on each thread, except for the main thread + if (!isMallocInitialized()) return; + doThreadShutdownNotification((TLSData*)arg, false); +} +#else extern "C" void __TBB_mallocThreadShutdownNotification() { - mallocThreadShutdownNotification(NULL); + // The routine is called once per thread on Windows + if (!isMallocInitialized()) return; + doThreadShutdownNotification(nullptr, false); } #endif -extern "C" void __TBB_mallocProcessShutdownNotification() +extern "C" void __TBB_mallocProcessShutdownNotification(bool windows_process_dying) { if (!isMallocInitialized()) return; -#if __TBB_MALLOC_LOCACHE_STAT + // Don't clean allocator internals if the entire process is exiting + if (!windows_process_dying) { + doThreadShutdownNotification(nullptr, /*main_thread=*/true); + } +#if __TBB_MALLOC_LOCACHE_STAT printf("cache hit ratio %f, size hit %f\n", 1.*cacheHits/mallocCalls, 1.*memHitKB/memAllocKB); defaultMemPool->extMemPool.loc.reportStat(stdout); #endif + shutdownSync.processExit(); #if __TBB_SOURCE_DIRECTLY_INCLUDED /* Pthread keys must be deleted as soon as possible to not call key dtor on thread termination when then the tbbmalloc code can be already unloaded. 
*/ defaultMemPool->destroy(); - destroyBackRefMaster(&defaultMemPool->extMemPool.backend); + destroyBackRefMain(&defaultMemPool->extMemPool.backend); ThreadId::destroy(); // Delete key for thread id hugePages.reset(); // new total malloc initialization is possible after this point - FencedStore(mallocInitialized, 0); -#elif __TBB_USE_DLOPEN_REENTRANCY_WORKAROUND -/* In most cases we prevent unloading tbbmalloc, and don't clean up memory - on process shutdown. When impossible to prevent, library unload results - in shutdown notification, and it makes sense to release unused memory - at that point (we can't release all memory because it's possible that - it will be accessed after this point). - TODO: better support systems where we can't prevent unloading by removing - pthread destructors and releasing caches. - */ - defaultMemPool->extMemPool.hardCachesCleanup(); + mallocInitialized.store(0, std::memory_order_release); #endif // __TBB_SOURCE_DIRECTLY_INCLUDED #if COLLECT_STATISTICS @@ -2746,8 +2932,10 @@ extern "C" void __TBB_mallocProcessShutdownNotification() for( int i=1; i<=nThreads && i<MAX_THREADS; ++i ) STAT_print(i); #endif - if (!usedBySrcIncluded) + if (!usedBySrcIncluded) { MALLOC_ITT_FINI_ITTLIB(); + MALLOC_ITT_RELEASE_RESOURCES(); + } } extern "C" void * scalable_malloc(size_t size) @@ -2757,12 +2945,14 @@ extern "C" void * scalable_malloc(size_t size) return ptr; } -extern "C" void scalable_free (void *object) { +extern "C" void scalable_free(void *object) +{ internalFree(object); } #if MALLOC_ZONE_OVERLOAD_ENABLED -extern "C" void __TBB_malloc_free_definite_size(void *object, size_t size) { +extern "C" void __TBB_malloc_free_definite_size(void *object, size_t size) +{ internalPoolFree(defaultMemPool, object, size); } #endif @@ -2771,20 +2961,26 @@ extern "C" void __TBB_malloc_free_definite_size(void *object, size_t size) { * A variant that provides additional memory safety, by checking whether the given address * was obtained with this allocator, and if not redirecting to the provided alternative call. */ -extern "C" void __TBB_malloc_safer_free(void *object, void (*original_free)(void*)) +extern "C" TBBMALLOC_EXPORT void __TBB_malloc_safer_free(void *object, void (*original_free)(void*)) { if (!object) return; - // must check 1st for large object, because small object check touches 4 pages on left, - // and it can be inaccessible - if (isLargeObject(object)) { - TLSData *tls = defaultMemPool->getTLS(/*create=*/false); - - defaultMemPool->putToLLOCache(tls, object); - } else if (isSmallObject(object)) { - freeSmallObject(defaultMemPool, object); - } else if (original_free) + // tbbmalloc can allocate object only when tbbmalloc has been initialized + if (mallocInitialized.load(std::memory_order_acquire) && defaultMemPool->extMemPool.backend.ptrCanBeValid(object)) { + if (isLargeObject<unknownMem>(object)) { + // must check 1st for large object, because small object check touches 4 pages on left, + // and it can be inaccessible + TLSData *tls = defaultMemPool->getTLS(/*create=*/false); + + defaultMemPool->putToLLOCache(tls, object); + return; + } else if (isSmallObject(object)) { + freeSmallObject(object); + return; + } + } + if (original_free) original_free(object); } @@ -2797,7 +2993,7 @@ extern "C" void __TBB_malloc_safer_free(void *object, void (*original_free)(void * "realloc changes the size of the object pointed to by p to size. The contents will * be unchanged up to the minimum of the old and the new sizes. If the new size is larger, * the new space is uninitialized. 
realloc returns a pointer to the new space, or - * NULL if the request cannot be satisfied, in which case *p is unchanged." + * nullptr if the request cannot be satisfied, in which case *p is unchanged." * */ extern "C" void* scalable_realloc(void* ptr, size_t size) @@ -2808,7 +3004,7 @@ extern "C" void* scalable_realloc(void* ptr, size_t size) tmp = internalMalloc(size); else if (!size) { internalFree(ptr); - return NULL; + return nullptr; } else tmp = reallocAligned(defaultMemPool, ptr, size, 0); @@ -2820,16 +3016,16 @@ extern "C" void* scalable_realloc(void* ptr, size_t size) * A variant that provides additional memory safety, by checking whether the given address * was obtained with this allocator, and if not redirecting to the provided alternative call. */ -extern "C" void* __TBB_malloc_safer_realloc(void* ptr, size_t sz, void* original_realloc) +extern "C" TBBMALLOC_EXPORT void* __TBB_malloc_safer_realloc(void* ptr, size_t sz, void* original_realloc) { void *tmp; // TODO: fix warnings about uninitialized use of tmp if (!ptr) { tmp = internalMalloc(sz); - } else if (isRecognized(ptr)) { + } else if (mallocInitialized.load(std::memory_order_acquire) && isRecognized(ptr)) { if (!sz) { internalFree(ptr); - return NULL; + return nullptr; } else { tmp = reallocAligned(defaultMemPool, ptr, sz, 0); } @@ -2837,17 +3033,17 @@ extern "C" void* __TBB_malloc_safer_realloc(void* ptr, size_t sz, void* original #if USE_WINTHREAD else if (original_realloc && sz) { orig_ptrs *original_ptrs = static_cast<orig_ptrs*>(original_realloc); - if ( original_ptrs->orig_msize ){ - size_t oldSize = original_ptrs->orig_msize(ptr); + if ( original_ptrs->msize ){ + size_t oldSize = original_ptrs->msize(ptr); tmp = internalMalloc(sz); if (tmp) { memcpy(tmp, ptr, sz<oldSize? sz : oldSize); - if ( original_ptrs->orig_free ){ - original_ptrs->orig_free( ptr ); + if ( original_ptrs->free ){ + original_ptrs->free( ptr ); } } } else - tmp = NULL; + tmp = nullptr; } #else else if (original_realloc) { @@ -2857,7 +3053,7 @@ extern "C" void* __TBB_malloc_safer_realloc(void* ptr, size_t sz, void* original tmp = original_realloc_ptr(ptr,sz); } #endif - else tmp = NULL; + else tmp = nullptr; if (!tmp) errno = ENOMEM; return tmp; @@ -2870,14 +3066,23 @@ extern "C" void* __TBB_malloc_safer_realloc(void* ptr, size_t sz, void* original /* * From K&R * calloc returns a pointer to space for an array of nobj objects, - * each of size size, or NULL if the request cannot be satisfied. + * each of size size, or nullptr if the request cannot be satisfied. * The space is initialized to zero bytes. 
* */ extern "C" void * scalable_calloc(size_t nobj, size_t size) { - size_t arraySize = nobj * size; + // it's square root of maximal size_t value + const size_t mult_not_overflow = size_t(1) << (sizeof(size_t)*CHAR_BIT/2); + const size_t arraySize = nobj * size; + + // check for overflow during multiplication: + if (nobj>=mult_not_overflow || size>=mult_not_overflow) // 1) heuristic check + if (nobj && arraySize / nobj != size) { // 2) exact check + errno = ENOMEM; + return nullptr; + } void* result = internalMalloc(arraySize); if (result) memset(result, 0, arraySize); @@ -2892,7 +3097,7 @@ extern "C" void * scalable_calloc(size_t nobj, size_t size) extern "C" int scalable_posix_memalign(void **memptr, size_t alignment, size_t size) { - if ( !isPowerOfTwoMultiple(alignment, sizeof(void*)) ) + if ( !isPowerOfTwoAtLeast(alignment, sizeof(void*)) ) return EINVAL; void *result = allocateAligned(defaultMemPool, size, alignment); if (!result) @@ -2905,7 +3110,7 @@ extern "C" void * scalable_aligned_malloc(size_t size, size_t alignment) { if (!isPowerOfTwo(alignment) || 0==size) { errno = EINVAL; - return NULL; + return nullptr; } void *tmp = allocateAligned(defaultMemPool, size, alignment); if (!tmp) errno = ENOMEM; @@ -2916,7 +3121,7 @@ extern "C" void * scalable_aligned_realloc(void *ptr, size_t size, size_t alignm { if (!isPowerOfTwo(alignment)) { errno = EINVAL; - return NULL; + return nullptr; } void *tmp; @@ -2924,7 +3129,7 @@ extern "C" void * scalable_aligned_realloc(void *ptr, size_t size, size_t alignm tmp = allocateAligned(defaultMemPool, size, alignment); else if (!size) { scalable_free(ptr); - return NULL; + return nullptr; } else tmp = reallocAligned(defaultMemPool, ptr, size, alignment); @@ -2932,46 +3137,47 @@ extern "C" void * scalable_aligned_realloc(void *ptr, size_t size, size_t alignm return tmp; } -extern "C" void * __TBB_malloc_safer_aligned_realloc(void *ptr, size_t size, size_t alignment, void* orig_function) +extern "C" TBBMALLOC_EXPORT void * __TBB_malloc_safer_aligned_realloc(void *ptr, size_t size, size_t alignment, void* orig_function) { /* corner cases left out of reallocAligned to not deal with errno there */ if (!isPowerOfTwo(alignment)) { errno = EINVAL; - return NULL; + return nullptr; } - void *tmp = NULL; + void *tmp = nullptr; if (!ptr) { tmp = allocateAligned(defaultMemPool, size, alignment); - } else if (isRecognized(ptr)) { + } else if (mallocInitialized.load(std::memory_order_acquire) && isRecognized(ptr)) { if (!size) { internalFree(ptr); - return NULL; + return nullptr; } else { tmp = reallocAligned(defaultMemPool, ptr, size, alignment); } } #if USE_WINTHREAD else { - orig_ptrs *original_ptrs = static_cast<orig_ptrs*>(orig_function); + orig_aligned_ptrs *original_ptrs = static_cast<orig_aligned_ptrs*>(orig_function); if (size) { // Without orig_msize, we can't do anything with this. // Just keeping old pointer. - if ( original_ptrs->orig_msize ){ - size_t oldSize = original_ptrs->orig_msize(ptr); + if ( original_ptrs->aligned_msize ){ + // set alignment and offset to have possibly correct oldSize + size_t oldSize = original_ptrs->aligned_msize(ptr, sizeof(void*), 0); tmp = allocateAligned(defaultMemPool, size, alignment); if (tmp) { memcpy(tmp, ptr, size<oldSize? 
size : oldSize); - if ( original_ptrs->orig_free ){ - original_ptrs->orig_free( ptr ); + if ( original_ptrs->aligned_free ){ + original_ptrs->aligned_free( ptr ); } } } } else { - if ( original_ptrs->orig_free ){ - original_ptrs->orig_free( ptr ); + if ( original_ptrs->aligned_free ){ + original_ptrs->aligned_free( ptr ); } - return NULL; + return nullptr; } } #else @@ -2997,23 +3203,30 @@ extern "C" void scalable_aligned_free(void *ptr) */ extern "C" size_t scalable_msize(void* ptr) { - return internalMsize(ptr); + if (ptr) { + MALLOC_ASSERT(isRecognized(ptr), "Invalid pointer in scalable_msize detected."); + return internalMsize(ptr); + } + errno = EINVAL; + // Unlike _msize, return 0 in case of parameter error. + // Returning size_t(-1) looks more like the way to troubles. + return 0; } /* * A variant that provides additional memory safety, by checking whether the given address * was obtained with this allocator, and if not redirecting to the provided alternative call. */ -extern "C" size_t __TBB_malloc_safer_msize(void *object, size_t (*original_msize)(void*)) +extern "C" TBBMALLOC_EXPORT size_t __TBB_malloc_safer_msize(void *object, size_t (*original_msize)(void*)) { if (object) { // Check if the memory was allocated by scalable_malloc - if (isRecognized(object)) + if (mallocInitialized.load(std::memory_order_acquire) && isRecognized(object)) return internalMsize(object); else if (original_msize) return original_msize(object); } - // object is NULL or unknown, or foreign and no original_msize + // object is nullptr or unknown, or foreign and no original_msize #if USE_WINTHREAD errno = EINVAL; // errno expected to be set only on this platform #endif @@ -3023,16 +3236,16 @@ extern "C" size_t __TBB_malloc_safer_msize(void *object, size_t (*original_msize /* * The same as above but for _aligned_msize case */ -extern "C" size_t __TBB_malloc_safer_aligned_msize(void *object, size_t alignment, size_t offset, size_t (*orig_aligned_msize)(void*,size_t,size_t)) +extern "C" TBBMALLOC_EXPORT size_t __TBB_malloc_safer_aligned_msize(void *object, size_t alignment, size_t offset, size_t (*orig_aligned_msize)(void*,size_t,size_t)) { if (object) { // Check if the memory was allocated by scalable_malloc - if (isRecognized(object)) + if (mallocInitialized.load(std::memory_order_acquire) && isRecognized(object)) return internalMsize(object); else if (orig_aligned_msize) return orig_aligned_msize(object,alignment,offset); } - // object is NULL or unknown + // object is nullptr or unknown errno = EINVAL; return 0; } @@ -3045,7 +3258,7 @@ extern "C" int scalable_allocation_mode(int param, intptr_t value) defaultMemPool->extMemPool.backend.setRecommendedMaxSize((size_t)value); return TBBMALLOC_OK; } else if (param == USE_HUGE_PAGES) { -#if __linux__ +#if __unix__ switch (value) { case 0: case 1: @@ -3068,6 +3281,9 @@ extern "C" int scalable_allocation_mode(int param, intptr_t value) return TBBMALLOC_INVALID_PARAM; } #endif + } else if (param == TBBMALLOC_SET_HUGE_SIZE_THRESHOLD) { + defaultMemPool->extMemPool.loc.setHugeSizeThreshold((size_t)value); + return TBBMALLOC_OK; } return TBBMALLOC_INVALID_PARAM; } @@ -3076,16 +3292,18 @@ extern "C" int scalable_allocation_command(int cmd, void *param) { if (param) return TBBMALLOC_INVALID_PARAM; + + bool released = false; switch(cmd) { case TBBMALLOC_CLEAN_THREAD_BUFFERS: if (TLSData *tls = defaultMemPool->getTLS(/*create=*/false)) - return tls->externalCleanup(&defaultMemPool->extMemPool, - /*cleanOnlyUnused=*/false)? 
- TBBMALLOC_OK : TBBMALLOC_NO_EFFECT; - return TBBMALLOC_NO_EFFECT; + released = tls->externalCleanup(/*cleanOnlyUnused*/false, /*cleanBins=*/true); + break; case TBBMALLOC_CLEAN_ALL_BUFFERS: - return defaultMemPool->extMemPool.hardCachesCleanup()? - TBBMALLOC_OK : TBBMALLOC_NO_EFFECT; + released = defaultMemPool->extMemPool.hardCachesCleanup(true); + break; + default: + return TBBMALLOC_INVALID_PARAM; } - return TBBMALLOC_INVALID_PARAM; + return released ? TBBMALLOC_OK : TBBMALLOC_NO_EFFECT; } diff --git a/src/tbb/src/tbbmalloc/large_objects.cpp b/src/tbb/src/tbbmalloc/large_objects.cpp index ac0bf0935..43b17d058 100644 --- a/src/tbb/src/tbbmalloc/large_objects.cpp +++ b/src/tbb/src/tbbmalloc/large_objects.cpp @@ -1,50 +1,149 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #include "tbbmalloc_internal.h" +#include "../src/tbb/environment.h" -/********* Allocation of large objects ************/ +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Suppress warning: unary minus operator applied to unsigned type, result still unsigned + // TBB_REVAMP_TODO: review this warning + // #pragma warning(push) + // #pragma warning(disable:4146) +#endif +/******************************* Allocation of large objects *********************************************/ namespace rml { namespace internal { +/* ---------------------------- Large Object cache init section ---------------------------------------- */ -// ---------------- Cache Bin Aggregator Operation Helpers ---------------- // -// The list of possible operations. 
-enum CacheBinOperationType { - CBOP_INVALID = 0, - CBOP_GET, - CBOP_PUT_LIST, - CBOP_CLEAN_TO_THRESHOLD, - CBOP_CLEAN_ALL, - CBOP_DECR_USED_SIZE -}; +void LargeObjectCache::init(ExtMemoryPool *memPool) +{ + extMemPool = memPool; + // scalable_allocation_mode can be called before allocator initialization, respect this manual request + if (hugeSizeThreshold == 0) { + // Huge size threshold initialization if environment variable was set + long requestedThreshold = tbb::detail::r1::GetIntegralEnvironmentVariable("TBB_MALLOC_SET_HUGE_SIZE_THRESHOLD"); + // Read valid env or initialize by default with max possible values + if (requestedThreshold != -1) { + setHugeSizeThreshold(requestedThreshold); + } else { + setHugeSizeThreshold(maxHugeSize); + } + } +} + +/* ----------------------------- Huge size threshold settings ----------------------------------------- */ + +void LargeObjectCache::setHugeSizeThreshold(size_t value) +{ + // Valid in the huge cache range: [MaxLargeSize, MaxHugeSize]. + if (value <= maxHugeSize) { + hugeSizeThreshold = value >= maxLargeSize ? alignToBin(value) : maxLargeSize; + + // Calculate local indexes for the global threshold size (for fast search inside a regular cleanup) + largeCache.hugeSizeThresholdIdx = LargeCacheType::numBins; + hugeCache.hugeSizeThresholdIdx = HugeCacheType::sizeToIdx(hugeSizeThreshold); + } +} + +bool LargeObjectCache::sizeInCacheRange(size_t size) +{ + return size < maxHugeSize && (size <= defaultMaxHugeSize || size >= hugeSizeThreshold); +} + +/* ----------------------------------------------------------------------------------------------------- */ -// The operation status list. CBST_NOWAIT can be specified for non-blocking operations. -enum CacheBinOperationStatus { - CBST_WAIT = 0, - CBST_NOWAIT, - CBST_DONE +/* The functor called by the aggregator for the operation list */ +template<typename Props> +class CacheBinFunctor { + typename LargeObjectCacheImpl<Props>::CacheBin *const bin; + ExtMemoryPool *const extMemPool; + typename LargeObjectCacheImpl<Props>::BinBitMask *const bitMask; + const int idx; + + LargeMemoryBlock *toRelease; + bool needCleanup; + uintptr_t currTime; + + /* Do preprocessing under the operation list. */ + /* All the OP_PUT_LIST operations are merged in the one operation. + All OP_GET operations are merged with the OP_PUT_LIST operations but + it demands the update of the moving average value in the bin. + Only the last OP_CLEAN_TO_THRESHOLD operation has sense. + The OP_CLEAN_ALL operation also should be performed only once. + Moreover it cancels the OP_CLEAN_TO_THRESHOLD operation. */ + class OperationPreprocessor { + // TODO: remove the dependency on CacheBin. + typename LargeObjectCacheImpl<Props>::CacheBin *const bin; + + /* Contains the relative time in the operation list. + It counts in the reverse order since the aggregator also + provides operations in the reverse order. */ + uintptr_t lclTime; + + /* opGet contains only OP_GET operations which cannot be merge with OP_PUT operations + opClean contains all OP_CLEAN_TO_THRESHOLD and OP_CLEAN_ALL operations. */ + CacheBinOperation *opGet, *opClean; + /* The time of the last OP_CLEAN_TO_THRESHOLD operations */ + uintptr_t cleanTime; + + /* lastGetOpTime - the time of the last OP_GET operation. + lastGet - the same meaning as CacheBin::lastGet */ + uintptr_t lastGetOpTime, lastGet; + + /* The total sum of all usedSize changes requested with CBOP_UPDATE_USED_SIZE operations. */ + size_t updateUsedSize; + + /* The list of blocks for the OP_PUT_LIST operation. 
*/ + LargeMemoryBlock *head, *tail; + int putListNum; + + /* if the OP_CLEAN_ALL is requested. */ + bool isCleanAll; + + inline void commitOperation(CacheBinOperation *op) const; + inline void addOpToOpList(CacheBinOperation *op, CacheBinOperation **opList) const; + bool getFromPutList(CacheBinOperation* opGet, uintptr_t currTime); + void addToPutList( LargeMemoryBlock *head, LargeMemoryBlock *tail, int num ); + + public: + OperationPreprocessor(typename LargeObjectCacheImpl<Props>::CacheBin *bin) : + bin(bin), lclTime(0), opGet(nullptr), opClean(nullptr), cleanTime(0), + lastGetOpTime(0), lastGet(0), updateUsedSize(0), head(nullptr), tail(nullptr), putListNum(0), isCleanAll(false) {} + void operator()(CacheBinOperation* opList); + uintptr_t getTimeRange() const { return -lclTime; } + + friend class CacheBinFunctor; + }; + +public: + CacheBinFunctor(typename LargeObjectCacheImpl<Props>::CacheBin *bin, ExtMemoryPool *extMemPool, + typename LargeObjectCacheImpl<Props>::BinBitMask *bitMask, int idx) : + bin(bin), extMemPool(extMemPool), bitMask(bitMask), idx(idx), toRelease(nullptr), needCleanup(false), currTime(0) {} + void operator()(CacheBinOperation* opList); + + bool isCleanupNeeded() const { return needCleanup; } + LargeMemoryBlock *getToRelease() const { return toRelease; } + uintptr_t getCurrTime() const { return currTime; } }; +/* ---------------- Cache Bin Aggregator Operation Helpers ---------------- */ + // The list of structures which describe the operation data struct OpGet { static const CacheBinOperationType type = CBOP_GET; @@ -69,8 +168,8 @@ struct OpCleanAll { LargeMemoryBlock **res; }; -struct OpDecrUsedSize { - static const CacheBinOperationType type = CBOP_DECR_USED_SIZE; +struct OpUpdateUsedSize { + static const CacheBinOperationType type = CBOP_UPDATE_USED_SIZE; size_t size; }; @@ -80,7 +179,7 @@ union CacheBinOperationData { OpPutList opPutList; OpCleanToThreshold opCleanToThreshold; OpCleanAll opCleanAll; - OpDecrUsedSize opDecrUsedSize; + OpUpdateUsedSize opUpdateUsedSize; }; // Forward declarations @@ -113,35 +212,41 @@ template <typename OpTypeData> OpTypeData& opCast(CacheBinOperation &op) { return *reinterpret_cast<OpTypeData*>(&op.data); } -// ------------------------------------------------------------------------ // + +/* ------------------------------------------------------------------------ */ #if __TBB_MALLOC_LOCACHE_STAT -intptr_t mallocCalls, cacheHits; -intptr_t memAllocKB, memHitKB; +//intptr_t mallocCalls, cacheHits; +std::atomic<intptr_t> mallocCalls, cacheHits; +//intptr_t memAllocKB, memHitKB; +std::atomic<intptr_t> memAllocKB, memHitKB; #endif +#if MALLOC_DEBUG inline bool lessThanWithOverflow(intptr_t a, intptr_t b) { - return (a < b && (b - a < UINTPTR_MAX/2)) || - (a > b && (a - b > UINTPTR_MAX/2)); + return (a < b && (b - a < static_cast<intptr_t>(UINTPTR_MAX/2))) || + (a > b && (a - b > static_cast<intptr_t>(UINTPTR_MAX/2))); } +#endif /* ----------------------------------- Operation processing methods ------------------------------------ */ -template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFunctor:: - OperationPreprocessor::commitOperation(CacheBinOperation *op) const +template<typename Props> void CacheBinFunctor<Props>:: + OperationPreprocessor::commitOperation(CacheBinOperation *op) const { - FencedStore( (intptr_t&)(op->status), CBST_DONE ); + // FencedStore( (intptr_t&)(op->status), CBST_DONE ); + op->status.store(CBST_DONE, std::memory_order_release); } -template<typename Props> void 
LargeObjectCacheImpl<Props>::CacheBin::CacheBinFunctor:: +template<typename Props> void CacheBinFunctor<Props>:: OperationPreprocessor::addOpToOpList(CacheBinOperation *op, CacheBinOperation **opList) const { op->next = *opList; *opList = op; } -template<typename Props> bool LargeObjectCacheImpl<Props>::CacheBin::CacheBinFunctor:: +template<typename Props> bool CacheBinFunctor<Props>:: OperationPreprocessor::getFromPutList(CacheBinOperation *opGet, uintptr_t currTime) { if ( head ) { @@ -160,7 +265,7 @@ template<typename Props> bool LargeObjectCacheImpl<Props>::CacheBin::CacheBinFun return false; } -template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFunctor:: +template<typename Props> void CacheBinFunctor<Props>:: OperationPreprocessor::addToPutList(LargeMemoryBlock *h, LargeMemoryBlock *t, int num) { if ( head ) { @@ -176,7 +281,7 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFun } } -template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFunctor:: +template<typename Props> void CacheBinFunctor<Props>:: OperationPreprocessor::operator()(CacheBinOperation* opList) { for ( CacheBinOperation *op = opList, *opNext; op; op = opNext ) { @@ -200,7 +305,7 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFun case CBOP_PUT_LIST: { LargeMemoryBlock *head = opCast<OpPutList>(*op).head; - LargeMemoryBlock *curr = head, *prev = NULL; + LargeMemoryBlock *curr = head, *prev = nullptr; int num = 0; do { @@ -218,7 +323,7 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFun num += 1; STAT_increment(getThreadId(), ThreadCommonCounters, cacheLargeObj); - } while (( curr = curr->next )); + } while ((curr = curr->next) != nullptr); LargeMemoryBlock *tail = prev; addToPutList(head, tail, num); @@ -232,8 +337,8 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFun } break; - case CBOP_DECR_USED_SIZE: - decrUsedSize += opCast<OpDecrUsedSize>(*op).size; + case CBOP_UPDATE_USED_SIZE: + updateUsedSize += opCast<OpUpdateUsedSize>(*op).size; commitOperation( op ); break; @@ -259,8 +364,7 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin::CacheBinFun MALLOC_ASSERT( !( opGet && head ), "Not all put/get pairs are processed!" ); } -template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin:: - CacheBinFunctor::operator()(CacheBinOperation* opList) +template<typename Props> void CacheBinFunctor<Props>::operator()(CacheBinOperation* opList) { MALLOC_ASSERT( opList, "Empty operation list is passed into operation handler." 
); @@ -304,7 +408,7 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin:: if ( prep.lastGetOpTime ) bin->setLastGet( prep.lastGetOpTime + endTime ); } else if ( LargeMemoryBlock *curr = prep.head ) { - curr->prev = NULL; + curr->prev = nullptr; while ( curr ) { // Update local times to global times curr->age += endTime; @@ -313,7 +417,7 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin:: #if __TBB_MALLOC_WHITEBOX_TEST tbbmalloc_whitebox::locPutProcessed+=prep.putListNum; #endif - toRelease = bin->putList(prep.head, prep.tail, bitMask, idx, prep.putListNum); + toRelease = bin->putList(prep.head, prep.tail, bitMask, idx, prep.putListNum, extMemPool->loc.hugeSizeThreshold); } needCleanup = extMemPool->loc.isCleanupNeededOnRange(timeRange, startTime); currTime = endTime - 1; @@ -328,35 +432,37 @@ template<typename Props> void LargeObjectCacheImpl<Props>::CacheBin:: CacheBinOperation *opNext = opClean->next; prep.commitOperation( opClean ); - while (( opClean = opNext )) { + while ((opClean = opNext) != nullptr) { opNext = opClean->next; prep.commitOperation(opClean); } } - if ( size_t decrUsedSize = prep.decrUsedSize ) - bin->updateUsedSize(-decrUsedSize, bitMask, idx); + if ( size_t size = prep.updateUsedSize ) + bin->updateUsedSize(size, bitMask, idx); } /* ----------------------------------------------------------------------------------------------------- */ /* --------------------------- Methods for creating and executing operations --------------------------- */ template<typename Props> void LargeObjectCacheImpl<Props>:: CacheBin::ExecuteOperation(CacheBinOperation *op, ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx, bool longLifeTime) { - CacheBinFunctor func( this, extMemPool, bitMask, idx ); + CacheBinFunctor<Props> func( this, extMemPool, bitMask, idx ); aggregator.execute( op, func, longLifeTime ); - if ( LargeMemoryBlock *toRelease = func.getToRelease() ) + if ( LargeMemoryBlock *toRelease = func.getToRelease()) { extMemPool->backend.returnLargeObject(toRelease); + } - if ( func.isCleanupNeeded() ) + if ( func.isCleanupNeeded() ) { extMemPool->loc.doCleanup( func.getCurrTime(), /*doThreshDecr=*/false); + } } template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>:: CacheBin::get(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx) { - LargeMemoryBlock *lmb=NULL; - OpGet data = {&lmb, size}; + LargeMemoryBlock *lmb=nullptr; + OpGet data = {&lmb, size, static_cast<uintptr_t>(0)}; CacheBinOperation op(data); ExecuteOperation( &op, extMemPool, bitMask, idx ); return lmb; @@ -375,11 +481,12 @@ template<typename Props> void LargeObjectCacheImpl<Props>:: template<typename Props> bool LargeObjectCacheImpl<Props>:: CacheBin::cleanToThreshold(ExtMemoryPool *extMemPool, BinBitMask *bitMask, uintptr_t currTime, int idx) { - LargeMemoryBlock *toRelease = NULL; + LargeMemoryBlock *toRelease = nullptr; /* oldest may be more recent then age, that's why cast to signed type was used. age overflow is also processed correctly. 
*/ - if (last && (intptr_t)(currTime - oldest) > ageThreshold) { + if (last.load(std::memory_order_relaxed) && + (intptr_t)(currTime - oldest.load(std::memory_order_relaxed)) > ageThreshold.load(std::memory_order_relaxed)) { OpCleanToThreshold data = {&toRelease, currTime}; CacheBinOperation op(data); ExecuteOperation( &op, extMemPool, bitMask, idx ); @@ -398,9 +505,9 @@ template<typename Props> bool LargeObjectCacheImpl<Props>:: template<typename Props> bool LargeObjectCacheImpl<Props>:: CacheBin::releaseAllToBackend(ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx) { - LargeMemoryBlock *toRelease = NULL; + LargeMemoryBlock *toRelease = nullptr; - if (last) { + if (last.load(std::memory_order_relaxed)) { OpCleanAll data = {&toRelease}; CacheBinOperation op(data); ExecuteOperation(&op, extMemPool, bitMask, idx); @@ -419,21 +526,26 @@ template<typename Props> bool LargeObjectCacheImpl<Props>:: } template<typename Props> void LargeObjectCacheImpl<Props>:: - CacheBin::decrUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx) { - OpDecrUsedSize data = {size}; + CacheBin::updateUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx) +{ + OpUpdateUsedSize data = {size}; CacheBinOperation op(data); ExecuteOperation( &op, extMemPool, bitMask, idx ); } -/* ----------------------------------------------------------------------------------------------------- */ + /* ------------------------------ Unsafe methods used with the aggregator ------------------------------ */ + template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>:: - CacheBin::putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, int idx, int num) + CacheBin::putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, int idx, int num, size_t hugeSizeThreshold) { size_t size = head->unalignedSize; - usedSize -= num*size; - MALLOC_ASSERT( !last || (last->age != 0 && last->age != -1U), ASSERT_TEXT ); - LargeMemoryBlock *toRelease = NULL; - if (!lastCleanedAge) { + usedSize.store(usedSize.load(std::memory_order_relaxed) - num * size, std::memory_order_relaxed); + MALLOC_ASSERT( !last.load(std::memory_order_relaxed) || + (last.load(std::memory_order_relaxed)->age != 0 && last.load(std::memory_order_relaxed)->age != -1U), ASSERT_TEXT ); + MALLOC_ASSERT( (tail==head && num==1) || (tail!=head && num>1), ASSERT_TEXT ); + MALLOC_ASSERT( tail, ASSERT_TEXT ); + LargeMemoryBlock *toRelease = nullptr; + if (size < hugeSizeThreshold && !lastCleanedAge) { // 1st object of such size was released. // Not cache it, and remember when this occurs // to take into account during cache miss. 
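
The hunk above is where the new huge-size threshold reaches the cache internals: putList() now receives hugeSizeThreshold and applies the first-object sieve only to blocks below it, so blocks at or above the threshold are cached unconditionally and, as regularCleanup() shows further down, are skipped by regular cleanup. A minimal client-side sketch of driving this knob through the public allocator API; the header path, the 128 MB value, and the test program itself are illustrative assumptions, not part of this patch:

```cpp
// Sketch only: exercises TBBMALLOC_SET_HUGE_SIZE_THRESHOLD, which this patch
// wires into LargeObjectCache. Assumes oneTBB's public scalable_allocator.h.
#include <tbb/scalable_allocator.h>

int main() {
    // Ask the allocator to keep blocks of at least 128 MB (arbitrary example
    // value) cached across free/malloc cycles; regular cleanup will not age
    // these bins out.
    scalable_allocation_mode(TBBMALLOC_SET_HUGE_SIZE_THRESHOLD, 128 * 1024 * 1024);

    void* big = scalable_malloc(256 * 1024 * 1024);
    scalable_free(big);  // eligible for caching rather than immediate release

    // Cached huge blocks are only returned on an explicit cleanup request.
    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, nullptr);
    return 0;
}
```

The same threshold can be set before the allocator initializes through the TBB_MALLOC_SET_HUGE_SIZE_THRESHOLD environment variable, which LargeObjectCache::init() reads in the hunk further up.
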
@@ -441,9 +553,9 @@ template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>:: toRelease = tail; tail = tail->prev; if (tail) - tail->next = NULL; + tail->next = nullptr; else - head = NULL; + head = nullptr; num--; } if (num) { @@ -452,17 +564,17 @@ template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>:: if (first) first->prev = tail; first = head; - if (!last) { - MALLOC_ASSERT(0 == oldest, ASSERT_TEXT); - oldest = tail->age; - last = tail; + if (!last.load(std::memory_order_relaxed)) { + MALLOC_ASSERT(0 == oldest.load(std::memory_order_relaxed), ASSERT_TEXT); + oldest.store(tail->age, std::memory_order_relaxed); + last.store(tail, std::memory_order_relaxed); } - cachedSize += num*size; + cachedSize.store(cachedSize.load(std::memory_order_relaxed) + num * size, std::memory_order_relaxed); } // No used object, and nothing in the bin, mark the bin as empty - if (!usedSize && !first) + if (!usedSize.load(std::memory_order_relaxed) && !first) bitMask->set(idx, false); return toRelease; @@ -475,17 +587,16 @@ template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>:: if (result) { first = result->next; if (first) - first->prev = NULL; + first->prev = nullptr; else { - last = NULL; - oldest = 0; + last.store(nullptr, std::memory_order_relaxed); + oldest.store(0, std::memory_order_relaxed); } } return result; } -// forget the history for the bin if it was unused for long time template<typename Props> void LargeObjectCacheImpl<Props>:: CacheBin::forgetOutdatedState(uintptr_t currTime) { @@ -498,14 +609,15 @@ template<typename Props> void LargeObjectCacheImpl<Props>:: const uintptr_t sinceLastGet = currTime - lastGet; bool doCleanup = false; - if (ageThreshold) - doCleanup = sinceLastGet > Props::LongWaitFactor*ageThreshold; + intptr_t threshold = ageThreshold.load(std::memory_order_relaxed); + if (threshold) + doCleanup = sinceLastGet > static_cast<uintptr_t>(Props::LongWaitFactor * threshold); else if (lastCleanedAge) - doCleanup = sinceLastGet > Props::LongWaitFactor*(lastCleanedAge - lastGet); + doCleanup = sinceLastGet > static_cast<uintptr_t>(Props::LongWaitFactor * (lastCleanedAge - lastGet)); if (doCleanup) { lastCleanedAge = 0; - ageThreshold = 0; + ageThreshold.store(0, std::memory_order_relaxed); } } @@ -515,7 +627,9 @@ template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>:: { /* oldest may be more recent then age, that's why cast to signed type was used. age overflow is also processed correctly. 
 */
-    if ( !last || (intptr_t)(currTime - last->age) < ageThreshold ) return NULL;
+    if ( !last.load(std::memory_order_relaxed) ||
+         (intptr_t)(currTime - last.load(std::memory_order_relaxed)->age) < ageThreshold.load(std::memory_order_relaxed) )
+        return nullptr;
 
 #if MALLOC_DEBUG
     uintptr_t nextAge = 0;
@@ -523,24 +637,25 @@
     do {
 #if MALLOC_DEBUG
         // check that list ordered
-        MALLOC_ASSERT(!nextAge || lessThanWithOverflow(nextAge, last->age),
+        MALLOC_ASSERT(!nextAge || lessThanWithOverflow(nextAge, last.load(std::memory_order_relaxed)->age),
                       ASSERT_TEXT);
-        nextAge = last->age;
+        nextAge = last.load(std::memory_order_relaxed)->age;
 #endif
-        cachedSize -= last->unalignedSize;
-        last = last->prev;
-    } while (last && (intptr_t)(currTime - last->age) > ageThreshold);
-
-    LargeMemoryBlock *toRelease = NULL;
-    if (last) {
-        toRelease = last->next;
-        oldest = last->age;
-        last->next = NULL;
+        cachedSize.store(cachedSize.load(std::memory_order_relaxed) - last.load(std::memory_order_relaxed)->unalignedSize, std::memory_order_relaxed);
+        last.store(last.load(std::memory_order_relaxed)->prev, std::memory_order_relaxed);
+    } while (last.load(std::memory_order_relaxed) &&
+             (intptr_t)(currTime - last.load(std::memory_order_relaxed)->age) > ageThreshold.load(std::memory_order_relaxed));
+
+    LargeMemoryBlock *toRelease = nullptr;
+    if (last.load(std::memory_order_relaxed)) {
+        toRelease = last.load(std::memory_order_relaxed)->next;
+        oldest.store(last.load(std::memory_order_relaxed)->age, std::memory_order_relaxed);
+        last.load(std::memory_order_relaxed)->next = nullptr;
     } else {
         toRelease = first;
-        first = NULL;
-        oldest = 0;
-        if (!usedSize)
+        first = nullptr;
+        oldest.store(0, std::memory_order_relaxed);
+        if (!usedSize.load(std::memory_order_relaxed))
             bitMask->set(idx, false);
     }
     MALLOC_ASSERT( toRelease, ASSERT_TEXT );
@@ -552,20 +667,22 @@ template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>::
     CacheBin::cleanAll(BinBitMask *bitMask, int idx)
 {
-    if (!last) return NULL;
+    if (!last.load(std::memory_order_relaxed)) return nullptr;
 
     LargeMemoryBlock *toRelease = first;
-    last = NULL;
-    first = NULL;
-    oldest = 0;
-    cachedSize = 0;
-    if (!usedSize)
+    last.store(nullptr, std::memory_order_relaxed);
+    first = nullptr;
+    oldest.store(0, std::memory_order_relaxed);
+    cachedSize.store(0, std::memory_order_relaxed);
+    if (!usedSize.load(std::memory_order_relaxed))
         bitMask->set(idx, false);
 
     return toRelease;
 }
 
+/* ----------------------------------------------------------------------------------------------------- */
+#if __TBB_MALLOC_BACKEND_STAT
 template<typename Props> size_t LargeObjectCacheImpl<Props>::
     CacheBin::reportStat(int num, FILE *f)
 {
@@ -573,49 +690,57 @@ template<typename Props> size_t LargeObjectCacheImpl<Props>::
     if (first)
         printf("%d(%lu): total %lu KB thr %ld lastCln %lu oldest %lu\n",
                num, num*Props::CacheStep+Props::MinSize,
-               cachedSize/1024, ageThreshold, lastCleanedAge, oldest);
+               cachedSize.load(std::memory_order_relaxed)/1024, ageThreshold.load(std::memory_order_relaxed), lastCleanedAge, oldest.load(std::memory_order_relaxed));
 #else
     suppress_unused_warning(num);
     suppress_unused_warning(f);
 #endif
-    return cachedSize;
+    return cachedSize.load(std::memory_order_relaxed);
 }
+#endif
 
-// release from cache blocks that are older than ageThreshold
+// Release objects from cache blocks that are older than ageThreshold
template<typename Props> bool LargeObjectCacheImpl<Props>::regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currTime, bool doThreshDecr) { bool released = false; BinsSummary binsSummary; - for (int i = bitMask.getMaxTrue(numBins-1); i >= 0; - i = bitMask.getMaxTrue(i-1)) { + // Threshold settings is below this cache or starts from zero index + if (hugeSizeThresholdIdx == 0) return false; + + // Starting searching for bin that is less than huge size threshold (can be cleaned-up) + int startSearchIdx = hugeSizeThresholdIdx - 1; + + for (int i = bitMask.getMaxTrue(startSearchIdx); i >= 0; i = bitMask.getMaxTrue(i-1)) { bin[i].updateBinsSummary(&binsSummary); - if (!doThreshDecr && tooLargeLOC>2 && binsSummary.isLOCTooLarge()) { + if (!doThreshDecr && tooLargeLOC.load(std::memory_order_relaxed) > 2 && binsSummary.isLOCTooLarge()) { // if LOC is too large for quite long time, decrease the threshold // based on bin hit statistics. // For this, redo cleanup from the beginning. // Note: on this iteration total usedSz can be not too large // in comparison to total cachedSz, as we calculated it only // partially. We are ok with it. - i = bitMask.getMaxTrue(numBins-1)+1; + i = bitMask.getMaxTrue(startSearchIdx)+1; doThreshDecr = true; binsSummary.reset(); continue; } if (doThreshDecr) bin[i].decreaseThreshold(); - if (bin[i].cleanToThreshold(extMemPool, &bitMask, currTime, i)) + + if (bin[i].cleanToThreshold(extMemPool, &bitMask, currTime, i)) { released = true; + } } - // We want to find if LOC was too large for some time continuously, // so OK with races between incrementing and zeroing, but incrementing // must be atomic. - if (binsSummary.isLOCTooLarge()) - AtomicIncrement(tooLargeLOC); - else - tooLargeLOC = 0; + if (binsSummary.isLOCTooLarge()) { + tooLargeLOC++; + } else { + tooLargeLOC.store(0, std::memory_order_relaxed); + } return released; } @@ -623,11 +748,20 @@ template<typename Props> bool LargeObjectCacheImpl<Props>::cleanAll(ExtMemoryPool *extMemPool) { bool released = false; - for (int i = numBins-1; i >= 0; i--) + for (int i = numBins-1; i >= 0; i--) { released |= bin[i].releaseAllToBackend(extMemPool, &bitMask, i); + } return released; } +template<typename Props> +void LargeObjectCacheImpl<Props>::reset() { + tooLargeLOC.store(0, std::memory_order_relaxed); + for (int i = numBins-1; i >= 0; i--) + bin[i].init(); + bitMask.reset(); +} + #if __TBB_MALLOC_WHITEBOX_TEST template<typename Props> size_t LargeObjectCacheImpl<Props>::getLOCSize() const @@ -670,30 +804,39 @@ bool LargeObjectCache::doCleanup(uintptr_t currTime, bool doThreshDecr) { if (!doThreshDecr) extMemPool->allLocalCaches.markUnused(); - return largeCache.regularCleanup(extMemPool, currTime, doThreshDecr) - | hugeCache.regularCleanup(extMemPool, currTime, doThreshDecr); + + bool large_cache_cleaned = largeCache.regularCleanup(extMemPool, currTime, doThreshDecr); + bool huge_cache_cleaned = hugeCache.regularCleanup(extMemPool, currTime, doThreshDecr); + return large_cache_cleaned || huge_cache_cleaned; } bool LargeObjectCache::decreasingCleanup() { - return doCleanup(FencedLoad((intptr_t&)cacheCurrTime), /*doThreshDecr=*/true); + return doCleanup(cacheCurrTime.load(std::memory_order_acquire), /*doThreshDecr=*/true); } bool LargeObjectCache::regularCleanup() { - return doCleanup(FencedLoad((intptr_t&)cacheCurrTime), /*doThreshDecr=*/false); + return doCleanup(cacheCurrTime.load(std::memory_order_acquire), /*doThreshDecr=*/false); } bool LargeObjectCache::cleanAll() { - return largeCache.cleanAll(extMemPool) | 
hugeCache.cleanAll(extMemPool); + bool large_cache_cleaned = largeCache.cleanAll(extMemPool); + bool huge_cache_cleaned = hugeCache.cleanAll(extMemPool); + return large_cache_cleaned || huge_cache_cleaned; +} + +void LargeObjectCache::reset() +{ + largeCache.reset(); + hugeCache.reset(); } template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>::get(ExtMemoryPool *extMemoryPool, size_t size) { - MALLOC_ASSERT( size%Props::CacheStep==0, ASSERT_TEXT ); - int idx = sizeToIdx(size); + int idx = Props::sizeToIdx(size); LargeMemoryBlock *lmb = bin[idx].get(extMemoryPool, size, &bitMask, idx); @@ -705,11 +848,11 @@ LargeMemoryBlock *LargeObjectCacheImpl<Props>::get(ExtMemoryPool *extMemoryPool, } template<typename Props> -void LargeObjectCacheImpl<Props>::rollbackCacheState(ExtMemoryPool *extMemPool, size_t size) +void LargeObjectCacheImpl<Props>::updateCacheState(ExtMemoryPool *extMemPool, DecreaseOrIncrease op, size_t size) { - int idx = sizeToIdx(size); - MALLOC_ASSERT(idx<numBins, ASSERT_TEXT); - bin[idx].decrUsedSize(extMemPool, size, &bitMask, idx); + int idx = Props::sizeToIdx(size); + MALLOC_ASSERT(idx < static_cast<int>(numBins), ASSERT_TEXT); + bin[idx].updateUsedSize(extMemPool, op==decrease? -size : size, &bitMask, idx); } #if __TBB_MALLOC_LOCACHE_STAT @@ -726,33 +869,50 @@ void LargeObjectCache::reportStat(FILE *f) { largeCache.reportStat(f); hugeCache.reportStat(f); + fprintf(f, "cache time %lu\n", cacheCurrTime.load(std::memory_order_relaxed)); } #endif template<typename Props> void LargeObjectCacheImpl<Props>::putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *toCache) { - int toBinIdx = sizeToIdx(toCache->unalignedSize); + int toBinIdx = Props::sizeToIdx(toCache->unalignedSize); MALLOC_ITT_SYNC_RELEASING(bin+toBinIdx); bin[toBinIdx].putList(extMemPool, toCache, &bitMask, toBinIdx); } -void LargeObjectCache::rollbackCacheState(size_t size) +void LargeObjectCache::updateCacheState(DecreaseOrIncrease op, size_t size) { if (size < maxLargeSize) - largeCache.rollbackCacheState(extMemPool, size); + largeCache.updateCacheState(extMemPool, op, size); else if (size < maxHugeSize) - hugeCache.rollbackCacheState(extMemPool, size); + hugeCache.updateCacheState(extMemPool, op, size); } -// return artifical bin index, it's used only during sorting and never saved + +uintptr_t LargeObjectCache::getCurrTimeRange(uintptr_t range) +{ + return (cacheCurrTime.fetch_add(range) + 1); +} + +void LargeObjectCache::registerRealloc(size_t oldSize, size_t newSize) +{ + updateCacheState(decrease, oldSize); + updateCacheState(increase, alignToBin(newSize)); +} + +size_t LargeObjectCache::alignToBin(size_t size) { + return size < maxLargeSize ? LargeCacheType::alignToBin(size) : HugeCacheType::alignToBin(size); +} + +// Used for internal purpose int LargeObjectCache::sizeToIdx(size_t size) { - MALLOC_ASSERT(size < maxHugeSize, ASSERT_TEXT); - return size < maxLargeSize? + MALLOC_ASSERT(size <= maxHugeSize, ASSERT_TEXT); + return size < maxLargeSize ? 
LargeCacheType::sizeToIdx(size) : - LargeCacheType::getNumBins()+HugeCacheType::sizeToIdx(size); + LargeCacheType::numBins + HugeCacheType::sizeToIdx(size); } void LargeObjectCache::putList(LargeMemoryBlock *list) @@ -762,7 +922,7 @@ void LargeObjectCache::putList(LargeMemoryBlock *list) for (LargeMemoryBlock *curr = list; curr; curr = toProcess) { LargeMemoryBlock *tail = curr; toProcess = curr->next; - if (curr->unalignedSize >= maxHugeSize) { + if (!sizeInCacheRange(curr->unalignedSize)) { extMemPool->backend.returnLargeObject(curr); continue; } @@ -785,7 +945,7 @@ void LargeObjectCache::putList(LargeMemoryBlock *list) } } } - tail->next = NULL; + tail->next = nullptr; if (curr->unalignedSize < maxLargeSize) largeCache.putList(extMemPool, curr); else @@ -795,54 +955,54 @@ void LargeObjectCache::putList(LargeMemoryBlock *list) void LargeObjectCache::put(LargeMemoryBlock *largeBlock) { - if (largeBlock->unalignedSize < maxHugeSize) { - largeBlock->next = NULL; - if (largeBlock->unalignedSize<maxLargeSize) + size_t blockSize = largeBlock->unalignedSize; + if (sizeInCacheRange(blockSize)) { + largeBlock->next = nullptr; + if (blockSize < maxLargeSize) largeCache.putList(extMemPool, largeBlock); else hugeCache.putList(extMemPool, largeBlock); - } else + } else { extMemPool->backend.returnLargeObject(largeBlock); + } } LargeMemoryBlock *LargeObjectCache::get(size_t size) { - MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT ); - MALLOC_ASSERT( size>=minLargeSize, ASSERT_TEXT ); - - if ( size < maxHugeSize) { - return size < maxLargeSize? + MALLOC_ASSERT( size >= minLargeSize, ASSERT_TEXT ); + if (sizeInCacheRange(size)) { + return size < maxLargeSize ? largeCache.get(extMemPool, size) : hugeCache.get(extMemPool, size); } - return NULL; + return nullptr; } - -LargeMemoryBlock *ExtMemoryPool::mallocLargeObject(size_t allocationSize) +LargeMemoryBlock *ExtMemoryPool::mallocLargeObject(MemoryPool *pool, size_t allocationSize) { #if __TBB_MALLOC_LOCACHE_STAT - AtomicIncrement(mallocCalls); - AtomicAdd(memAllocKB, allocationSize/1024); + mallocCalls++; + memAllocKB.fetch_add(allocationSize/1024); #endif LargeMemoryBlock* lmb = loc.get(allocationSize); if (!lmb) { BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true); if (backRefIdx.isInvalid()) - return NULL; + return nullptr; // unalignedSize is set in getLargeBlock lmb = backend.getLargeBlock(allocationSize); if (!lmb) { removeBackRef(backRefIdx); - loc.rollbackCacheState(allocationSize); - return NULL; + loc.updateCacheState(decrease, allocationSize); + return nullptr; } lmb->backRefIdx = backRefIdx; + lmb->pool = pool; STAT_increment(getThreadId(), ThreadCommonCounters, allocNewLargeObj); } else { #if __TBB_MALLOC_LOCACHE_STAT - AtomicIncrement(cacheHits); - AtomicAdd(memHitKB, allocationSize/1024); + cacheHits++; + memHitKB.fetch_add(allocationSize/1024); #endif } return lmb; @@ -860,22 +1020,54 @@ void ExtMemoryPool::freeLargeObjectList(LargeMemoryBlock *head) bool ExtMemoryPool::softCachesCleanup() { - return loc.regularCleanup(); + bool ret = false; + if (!softCachesCleanupInProgress.exchange(1, std::memory_order_acq_rel)) { + ret = loc.regularCleanup(); + softCachesCleanupInProgress.store(0, std::memory_order_release); + } + return ret; } -bool ExtMemoryPool::hardCachesCleanup() +bool ExtMemoryPool::hardCachesCleanup(bool wait) { + if (hardCachesCleanupInProgress.exchange(1, std::memory_order_acq_rel)) { + if (!wait) + return false; + + AtomicBackoff backoff; + while (hardCachesCleanupInProgress.exchange(1, 
std::memory_order_acq_rel)) + backoff.pause(); + } + // thread-local caches must be cleaned before LOC, // because object from thread-local cache can be released to LOC bool ret = releaseAllLocalCaches(); + ret |= orphanedBlocks.cleanup(&backend); ret |= loc.cleanAll(); ret |= backend.clean(); + + hardCachesCleanupInProgress.store(0, std::memory_order_release); return ret; } +#if BACKEND_HAS_MREMAP +void *ExtMemoryPool::remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment) +{ + const size_t oldUnalignedSize = ((LargeObjectHdr*)ptr - 1)->memoryBlock->unalignedSize; + void *o = backend.remap(ptr, oldSize, newSize, alignment); + if (o) { + LargeMemoryBlock *lmb = ((LargeObjectHdr*)o - 1)->memoryBlock; + loc.registerRealloc(oldUnalignedSize, lmb->unalignedSize); + } + return o; +} +#endif /* BACKEND_HAS_MREMAP */ /*********** End allocation of large objects **********/ } // namespace internal } // namespace rml +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // #pragma warning(pop) +#endif diff --git a/src/tbb/src/tbbmalloc/large_objects.h b/src/tbb/src/tbbmalloc/large_objects.h new file mode 100644 index 000000000..58d7c81a7 --- /dev/null +++ b/src/tbb/src/tbbmalloc/large_objects.h @@ -0,0 +1,380 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbbmalloc_internal_H + #error tbbmalloc_internal.h must be included at this point +#endif + +#ifndef __TBB_large_objects_H +#define __TBB_large_objects_H + +//! The list of possible Cache Bin Aggregator operations. +/* Declared here to avoid Solaris Studio* 12.2 "multiple definitions" error */ +enum CacheBinOperationType { + CBOP_INVALID = 0, + CBOP_GET, + CBOP_PUT_LIST, + CBOP_CLEAN_TO_THRESHOLD, + CBOP_CLEAN_ALL, + CBOP_UPDATE_USED_SIZE +}; + +// The Cache Bin Aggregator operation status list. +// CBST_NOWAIT can be specified for non-blocking operations. +enum CacheBinOperationStatus { + CBST_WAIT = 0, + CBST_NOWAIT, + CBST_DONE +}; + +/* + * Bins that grow with arithmetic step + */ +template<size_t MIN_SIZE, size_t MAX_SIZE> +struct LargeBinStructureProps { +public: + static const size_t MinSize = MIN_SIZE, MaxSize = MAX_SIZE; + static const size_t CacheStep = 8 * 1024; + static const unsigned NumBins = (MaxSize - MinSize) / CacheStep; + + static size_t alignToBin(size_t size) { + return alignUp(size, CacheStep); + } + + static int sizeToIdx(size_t size) { + MALLOC_ASSERT(MinSize <= size && size < MaxSize, ASSERT_TEXT); + MALLOC_ASSERT(size % CacheStep == 0, ASSERT_TEXT); + return (size - MinSize) / CacheStep; + } +}; + +/* + * Bins that grow with special geometric progression. 
+ */ +template<size_t MIN_SIZE, size_t MAX_SIZE> +struct HugeBinStructureProps { + +private: + // Sizes grow with the following formula: Size = MinSize * (2 ^ (Index / StepFactor)) + // There are StepFactor bins (8 be default) between each power of 2 bin + static const int MaxSizeExp = Log2<MAX_SIZE>::value; + static const int MinSizeExp = Log2<MIN_SIZE>::value; + static const int StepFactor = 8; + static const int StepFactorExp = Log2<StepFactor>::value; + +public: + static const size_t MinSize = MIN_SIZE, MaxSize = MAX_SIZE; + static const unsigned NumBins = (MaxSizeExp - MinSizeExp) * StepFactor; + + static size_t alignToBin(size_t size) { + MALLOC_ASSERT(size >= StepFactor, "Size must not be less than the StepFactor"); + + int sizeExp = (int)BitScanRev(size); + MALLOC_ASSERT(sizeExp >= 0, "BitScanRev() cannot return -1, as size >= stepfactor > 0"); + MALLOC_ASSERT(sizeExp >= StepFactorExp, "sizeExp >= StepFactorExp, because size >= stepFactor"); + int minorStepExp = sizeExp - StepFactorExp; + + return alignUp(size, 1ULL << minorStepExp); + } + + // Sizes between the power of 2 values are approximated to StepFactor. + static int sizeToIdx(size_t size) { + MALLOC_ASSERT(MinSize <= size && size <= MaxSize, ASSERT_TEXT); + + int sizeExp = (int)BitScanRev(size); // same as __TBB_Log2 + MALLOC_ASSERT(sizeExp >= 0, "BitScanRev() cannot return -1, as size >= stepfactor > 0"); + MALLOC_ASSERT(sizeExp >= StepFactorExp, "sizeExp >= StepFactorExp, because size >= stepFactor"); + int minorStepExp = sizeExp - StepFactorExp; + + size_t majorStepSize = 1ULL << sizeExp; + int minorIdx = (size - majorStepSize) >> minorStepExp; + MALLOC_ASSERT(size == majorStepSize + ((size_t)minorIdx << minorStepExp), + "Size is not aligned on the bin"); + return StepFactor * (sizeExp - MinSizeExp) + minorIdx; + } +}; + +/* + * Cache properties accessor + * + * TooLargeFactor -- when cache size treated "too large" in comparison to user data size + * OnMissFactor -- If cache miss occurred and cache was cleaned, + * set ageThreshold to OnMissFactor * the difference + * between current time and last time cache was cleaned. + * LongWaitFactor -- to detect rarely-used bins and forget about their usage history + */ +template<typename StructureProps, int TOO_LARGE, int ON_MISS, int LONG_WAIT> +struct LargeObjectCacheProps : public StructureProps { + static const int TooLargeFactor = TOO_LARGE, OnMissFactor = ON_MISS, LongWaitFactor = LONG_WAIT; +}; + +template<typename Props> +class LargeObjectCacheImpl { +private: + + // Current sizes of used and cached objects. It's calculated while we are + // traversing bins, and used for isLOCTooLarge() check at the same time. + class BinsSummary { + size_t usedSz; + size_t cachedSz; + public: + BinsSummary() : usedSz(0), cachedSz(0) {} + // "too large" criteria + bool isLOCTooLarge() const { return cachedSz > Props::TooLargeFactor * usedSz; } + void update(size_t usedSize, size_t cachedSize) { + usedSz += usedSize; + cachedSz += cachedSize; + } + void reset() { usedSz = cachedSz = 0; } + }; + +public: + // The number of bins to cache large/huge objects. + static const uint32_t numBins = Props::NumBins; + + typedef BitMaskMax<numBins> BinBitMask; + + // 2-linked list of same-size cached blocks ordered by age (oldest on top) + // TODO: are we really want the list to be 2-linked? This allows us + // reduce memory consumption and do less operations under lock. + // TODO: try to switch to 32-bit logical time to save space in CacheBin + // and move bins to different cache lines. 
+ class CacheBin { + private: + LargeMemoryBlock* first; + std::atomic<LargeMemoryBlock*> last; + /* age of an oldest block in the list; equal to last->age, if last defined, + used for quick checking it without acquiring the lock. */ + std::atomic<uintptr_t> oldest; + /* currAge when something was excluded out of list because of the age, + not because of cache hit */ + uintptr_t lastCleanedAge; + /* Current threshold value for the blocks of a particular size. + Set on cache miss. */ + std::atomic<intptr_t> ageThreshold; + + /* total size of all objects corresponding to the bin and allocated by user */ + std::atomic<size_t> usedSize; + /* total size of all objects cached in the bin */ + std::atomic<size_t> cachedSize; + /* mean time of presence of block in the bin before successful reuse */ + std::atomic<intptr_t> meanHitRange; + /* time of last get called for the bin */ + uintptr_t lastGet; + + typename MallocAggregator<CacheBinOperation>::type aggregator; + + void ExecuteOperation(CacheBinOperation *op, ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx, bool longLifeTime = true); + + /* should be placed in zero-initialized memory, ctor not needed. */ + CacheBin(); + + public: + void init() { + memset(static_cast<void*>(this), 0, sizeof(CacheBin)); + } + + /* ---------- Cache accessors ---------- */ + void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *head, BinBitMask *bitMask, int idx); + LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx); + + /* ---------- Cleanup functions -------- */ + bool cleanToThreshold(ExtMemoryPool *extMemPool, BinBitMask *bitMask, uintptr_t currTime, int idx); + bool releaseAllToBackend(ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx); + /* ------------------------------------- */ + + void updateUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx); + void decreaseThreshold() { + intptr_t threshold = ageThreshold.load(std::memory_order_relaxed); + if (threshold) + ageThreshold.store((threshold + meanHitRange.load(std::memory_order_relaxed)) / 2, std::memory_order_relaxed); + } + void updateBinsSummary(BinsSummary *binsSummary) const { + binsSummary->update(usedSize.load(std::memory_order_relaxed), cachedSize.load(std::memory_order_relaxed)); + } + size_t getSize() const { return cachedSize.load(std::memory_order_relaxed); } + size_t getUsedSize() const { return usedSize.load(std::memory_order_relaxed); } + size_t reportStat(int num, FILE *f); + + /* --------- Unsafe methods used with the aggregator ------- */ + void forgetOutdatedState(uintptr_t currTime); + LargeMemoryBlock *putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, + int idx, int num, size_t hugeObjectThreshold); + LargeMemoryBlock *get(); + LargeMemoryBlock *cleanToThreshold(uintptr_t currTime, BinBitMask *bitMask, int idx); + LargeMemoryBlock *cleanAll(BinBitMask *bitMask, int idx); + void updateUsedSize(size_t size, BinBitMask *bitMask, int idx) { + if (!usedSize.load(std::memory_order_relaxed)) bitMask->set(idx, true); + usedSize.store(usedSize.load(std::memory_order_relaxed) + size, std::memory_order_relaxed); + if (!usedSize.load(std::memory_order_relaxed) && !first) bitMask->set(idx, false); + } + void updateMeanHitRange( intptr_t hitRange ) { + hitRange = hitRange >= 0 ? hitRange : 0; + intptr_t mean = meanHitRange.load(std::memory_order_relaxed); + mean = mean ? 
(mean + hitRange) / 2 : hitRange;
+            meanHitRange.store(mean, std::memory_order_relaxed);
+        }
+        void updateAgeThreshold( uintptr_t currTime ) {
+            if (lastCleanedAge)
+                ageThreshold.store(Props::OnMissFactor * (currTime - lastCleanedAge), std::memory_order_relaxed);
+        }
+        void updateCachedSize(size_t size) {
+            cachedSize.store(cachedSize.load(std::memory_order_relaxed) + size, std::memory_order_relaxed);
+        }
+        void setLastGet( uintptr_t newLastGet ) {
+            lastGet = newLastGet;
+        }
+        /* -------------------------------------------------------- */
+    };
+
+    // Index into the huge bins, used to speed up the regular-cleanup search
+    // when the "huge size threshold" setting is defined
+    intptr_t hugeSizeThresholdIdx;
+
+private:
+    // How many times the LOC was "too large"
+    std::atomic<intptr_t> tooLargeLOC;
+    // for quickly finding used bins and bins with non-zero usedSize;
+    // indexed from the end, as the largest bins are needed first
+    BinBitMask bitMask;
+    // bins with lists of recently freed large blocks cached for reuse
+    CacheBin bin[numBins];
+
+public:
+    /* ------------ CacheBin structure dependent stuff ------------ */
+    static size_t alignToBin(size_t size) {
+        return Props::alignToBin(size);
+    }
+    static int sizeToIdx(size_t size) {
+        return Props::sizeToIdx(size);
+    }
+
+    /* --------- Main cache functions (put, get object) ------------ */
+    void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *largeBlock);
+    LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size);
+
+    /* ------------------------ Cleanup ---------------------------- */
+    bool regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currAge, bool doThreshDecr);
+    bool cleanAll(ExtMemoryPool *extMemPool);
+
+    /* -------------------------- Other ---------------------------- */
+    void updateCacheState(ExtMemoryPool *extMemPool, DecreaseOrIncrease op, size_t size);
+
+    void reset();
+    void reportStat(FILE *f);
+#if __TBB_MALLOC_WHITEBOX_TEST
+    size_t getLOCSize() const;
+    size_t getUsedSize() const;
+#endif
+};
+
+class LargeObjectCache {
+private:
+    // Large bins [minLargeSize, maxLargeSize)
+    // Huge bins  [maxLargeSize, maxHugeSize)
+    static const size_t minLargeSize = 8 * 1024,
+                        maxLargeSize = 8 * 1024 * 1024,
+                        // Cache memory up to 1 TB (or 2 GB on 32-bit architectures), but sieve objects above the special threshold
+                        maxHugeSize = tbb::detail::select_size_t_constant<2147483648U, 1099511627776ULL>::value;
+
+public:
+    // Upper bound on the cached object size; objects larger than this sieve through the cache.
+    // By default 64 MB; the previous value was 129 MB (needed by some Intel(R) Math Kernel Library (Intel(R) MKL) benchmarks)
+    static const size_t defaultMaxHugeSize = 64UL * 1024UL * 1024UL;
+    // Above this size a large object is treated as huge and does not participate in regular cleanup.
+    // Can be changed during program execution.
+    size_t hugeSizeThreshold;
+
+private:
+    // Large objects cache properties
+    typedef LargeBinStructureProps<minLargeSize, maxLargeSize> LargeBSProps;
+    typedef LargeObjectCacheProps<LargeBSProps, 2, 2, 16> LargeCacheTypeProps;
+
+    // Huge objects cache properties
+    typedef HugeBinStructureProps<maxLargeSize, maxHugeSize> HugeBSProps;
+    typedef LargeObjectCacheProps<HugeBSProps, 1, 1, 4> HugeCacheTypeProps;
+
+    // Cache implementation type with properties
+    typedef LargeObjectCacheImpl< LargeCacheTypeProps > LargeCacheType;
+    typedef LargeObjectCacheImpl< HugeCacheTypeProps > HugeCacheType;
+
+    // The beginning of largeCache is used more actively and is smaller than hugeCache,
+    // so hugeCache is placed first to prevent false sharing
+    // with LargeObjectCache's predecessor
+    HugeCacheType hugeCache;
+    LargeCacheType largeCache;
+
+    /* Logical time, incremented on each put/get operation.
+       Kept separately for each pool to prevent starvation between pools.
+       Overflow is OK, as we only need the difference between
+       its current value and some recent value.
+
+       Both malloc and free should increment the logical time; otherwise
+       multiple cached blocks would have the same age,
+       and the accuracy of the predictors would suffer.
+    */
+    std::atomic<uintptr_t> cacheCurrTime;
+
+    // Memory pool that owns this LargeObjectCache.
+    // A strict 1:1 relation, never changed.
+    ExtMemoryPool *extMemPool;
+
+    // Returns an artificial bin index;
+    // it is used only during sorting and never saved.
+    static int sizeToIdx(size_t size);
+
+    // Our friends
+    friend class Backend;
+
+public:
+    void init(ExtMemoryPool *memPool);
+
+    // Item accessors
+    void put(LargeMemoryBlock *largeBlock);
+    void putList(LargeMemoryBlock *head);
+    LargeMemoryBlock *get(size_t size);
+
+    void updateCacheState(DecreaseOrIncrease op, size_t size);
+    bool isCleanupNeededOnRange(uintptr_t range, uintptr_t currTime);
+
+    // Cleanup operations
+    bool doCleanup(uintptr_t currTime, bool doThreshDecr);
+    bool decreasingCleanup();
+    bool regularCleanup();
+    bool cleanAll();
+    void reset();
+
+    void reportStat(FILE *f);
+#if __TBB_MALLOC_WHITEBOX_TEST
+    size_t getLOCSize() const;
+    size_t getUsedSize() const;
+#endif
+
+    // The cache deals with exact-fit sizes, so each size must be aligned
+    // to its bin when an object is put into the cache.
+    static size_t alignToBin(size_t size);
+
+    void setHugeSizeThreshold(size_t value);
+
+    // Check whether we should cache or sieve this size
+    bool sizeInCacheRange(size_t size);
+
+    uintptr_t getCurrTimeRange(uintptr_t range);
+    void registerRealloc(size_t oldSize, size_t newSize);
+};
+
+#endif // __TBB_large_objects_H
+
diff --git a/src/tbb/src/tbbmalloc/lin32-proxy-export.def b/src/tbb/src/tbbmalloc/lin32-proxy-export.def
deleted file mode 100644
index 2ccdba8e9..000000000
--- a/src/tbb/src/tbbmalloc/lin32-proxy-export.def
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -{ -global: -calloc; -free; -malloc; -realloc; -posix_memalign; -memalign; -valloc; -pvalloc; -mallinfo; -mallopt; -malloc_usable_size; -__libc_malloc; -__libc_realloc; -__libc_calloc; -__libc_free; -__libc_memalign; -__libc_pvalloc; -__libc_valloc; -__TBB_malloc_proxy; -_ZdaPv; /* next ones are new/delete */ -_ZdaPvRKSt9nothrow_t; -_ZdlPv; -_ZdlPvRKSt9nothrow_t; -_Znaj; -_ZnajRKSt9nothrow_t; -_Znwj; -_ZnwjRKSt9nothrow_t; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; - -}; diff --git a/src/tbb/src/tbbmalloc/lin32-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/lin32-tbbmalloc-export.def deleted file mode 100644 index 0eeb5ddfc..000000000 --- a/src/tbb/src/tbbmalloc/lin32-tbbmalloc-export.def +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: - -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -scalable_msize; -scalable_allocation_mode; -scalable_allocation_command; -__TBB_malloc_safer_aligned_msize; -__TBB_malloc_safer_aligned_realloc; -__TBB_malloc_safer_free; -__TBB_malloc_safer_msize; -__TBB_malloc_safer_realloc; - -/* memory pool stuff */ -_ZN3rml10pool_resetEPNS_10MemoryPoolE; -_ZN3rml11pool_createEiPKNS_13MemPoolPolicyE; -_ZN3rml14pool_create_v1EiPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; -_ZN3rml11pool_mallocEPNS_10MemoryPoolEj; -_ZN3rml12pool_destroyEPNS_10MemoryPoolE; -_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; -_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvj; -_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvjj; -_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEjj; - -local: - -/* TBB symbols */ -*3rml*; -*3tbb*; -*__TBB*; -__itt_*; -ITT_DoOneTimeInitialization; -TBB_runtime_interface_version; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_memcpy_largest_cachelinesize; -get_memcpy_largest_cache_size; -get_mem_ops_method; -init_mem_ops_method; -irc__get_msg; -irc__print; -override_mem_ops_method; -set_memcpy_largest_cachelinesize; -set_memcpy_largest_cache_size; - -}; diff --git a/src/tbb/src/tbbmalloc/lin64-proxy-export.def b/src/tbb/src/tbbmalloc/lin64-proxy-export.def deleted file mode 100644 index 8cc8a0ce3..000000000 --- a/src/tbb/src/tbbmalloc/lin64-proxy-export.def +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: -calloc; -free; -malloc; -realloc; -posix_memalign; -memalign; -valloc; -pvalloc; -mallinfo; -mallopt; -malloc_usable_size; -__libc_malloc; -__libc_realloc; -__libc_calloc; -__libc_free; -__libc_memalign; -__libc_pvalloc; -__libc_valloc; -__TBB_malloc_proxy; -_ZdaPv; /* next ones are new/delete */ -_ZdaPvRKSt9nothrow_t; -_ZdlPv; -_ZdlPvRKSt9nothrow_t; -_Znam; -_ZnamRKSt9nothrow_t; -_Znwm; -_ZnwmRKSt9nothrow_t; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; - -}; diff --git a/src/tbb/src/tbbmalloc/lin64-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/lin64-tbbmalloc-export.def deleted file mode 100644 index 7d523d498..000000000 --- a/src/tbb/src/tbbmalloc/lin64-tbbmalloc-export.def +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: - -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -scalable_msize; -scalable_allocation_mode; -scalable_allocation_command; -__TBB_malloc_safer_aligned_msize; -__TBB_malloc_safer_aligned_realloc; -__TBB_malloc_safer_free; -__TBB_malloc_safer_msize; -__TBB_malloc_safer_realloc; - -/* memory pool stuff */ -_ZN3rml11pool_createElPKNS_13MemPoolPolicyE; -_ZN3rml14pool_create_v1ElPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; -_ZN3rml10pool_resetEPNS_10MemoryPoolE; -_ZN3rml11pool_mallocEPNS_10MemoryPoolEm; -_ZN3rml12pool_destroyEPNS_10MemoryPoolE; -_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; -_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvm; -_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvmm; -_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEmm; - -local: - -/* TBB symbols */ -*3rml*; -*3tbb*; -*__TBB*; -__itt_*; -ITT_DoOneTimeInitialization; -TBB_runtime_interface_version; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_memcpy_largest_cachelinesize; -get_memcpy_largest_cache_size; -get_mem_ops_method; -init_mem_ops_method; -irc__get_msg; -irc__print; -override_mem_ops_method; -set_memcpy_largest_cachelinesize; -set_memcpy_largest_cache_size; - -}; diff --git a/src/tbb/src/tbbmalloc/lin64ipf-proxy-export.def b/src/tbb/src/tbbmalloc/lin64ipf-proxy-export.def deleted file mode 100644 index 8cc8a0ce3..000000000 --- a/src/tbb/src/tbbmalloc/lin64ipf-proxy-export.def +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: -calloc; -free; -malloc; -realloc; -posix_memalign; -memalign; -valloc; -pvalloc; -mallinfo; -mallopt; -malloc_usable_size; -__libc_malloc; -__libc_realloc; -__libc_calloc; -__libc_free; -__libc_memalign; -__libc_pvalloc; -__libc_valloc; -__TBB_malloc_proxy; -_ZdaPv; /* next ones are new/delete */ -_ZdaPvRKSt9nothrow_t; -_ZdlPv; -_ZdlPvRKSt9nothrow_t; -_Znam; -_ZnamRKSt9nothrow_t; -_Znwm; -_ZnwmRKSt9nothrow_t; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; - -}; diff --git a/src/tbb/src/tbbmalloc/lin64ipf-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/lin64ipf-tbbmalloc-export.def deleted file mode 100644 index 7d523d498..000000000 --- a/src/tbb/src/tbbmalloc/lin64ipf-tbbmalloc-export.def +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -{ -global: - -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -scalable_msize; -scalable_allocation_mode; -scalable_allocation_command; -__TBB_malloc_safer_aligned_msize; -__TBB_malloc_safer_aligned_realloc; -__TBB_malloc_safer_free; -__TBB_malloc_safer_msize; -__TBB_malloc_safer_realloc; - -/* memory pool stuff */ -_ZN3rml11pool_createElPKNS_13MemPoolPolicyE; -_ZN3rml14pool_create_v1ElPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; -_ZN3rml10pool_resetEPNS_10MemoryPoolE; -_ZN3rml11pool_mallocEPNS_10MemoryPoolEm; -_ZN3rml12pool_destroyEPNS_10MemoryPoolE; -_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; -_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvm; -_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvmm; -_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEmm; - -local: - -/* TBB symbols */ -*3rml*; -*3tbb*; -*__TBB*; -__itt_*; -ITT_DoOneTimeInitialization; -TBB_runtime_interface_version; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_memcpy_largest_cachelinesize; -get_memcpy_largest_cache_size; -get_mem_ops_method; -init_mem_ops_method; -irc__get_msg; -irc__print; -override_mem_ops_method; -set_memcpy_largest_cachelinesize; -set_memcpy_largest_cache_size; - -}; diff --git a/src/tbb/src/tbbmalloc/mac32-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/mac32-tbbmalloc-export.def deleted file mode 100644 index bd20aee87..000000000 --- a/src/tbb/src/tbbmalloc/mac32-tbbmalloc-export.def +++ /dev/null @@ -1,47 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -_scalable_calloc -_scalable_free -_scalable_malloc -_scalable_realloc -_scalable_posix_memalign -_scalable_aligned_malloc -_scalable_aligned_realloc -_scalable_aligned_free -_scalable_msize -_scalable_allocation_mode -_scalable_allocation_command -___TBB_malloc_safer_aligned_msize -___TBB_malloc_safer_aligned_realloc -___TBB_malloc_safer_free -___TBB_malloc_safer_msize -___TBB_malloc_safer_realloc -___TBB_malloc_free_definite_size -/* memory pool stuff */ -__ZN3rml11pool_createElPKNS_13MemPoolPolicyE -__ZN3rml14pool_create_v1ElPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE -__ZN3rml10pool_resetEPNS_10MemoryPoolE -__ZN3rml12pool_destroyEPNS_10MemoryPoolE -__ZN3rml11pool_mallocEPNS_10MemoryPoolEm -__ZN3rml9pool_freeEPNS_10MemoryPoolEPv -__ZN3rml12pool_reallocEPNS_10MemoryPoolEPvm -__ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvmm -__ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEmm diff --git a/src/tbb/src/tbbmalloc/mac64-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/mac64-tbbmalloc-export.def deleted file mode 100644 index bd20aee87..000000000 --- a/src/tbb/src/tbbmalloc/mac64-tbbmalloc-export.def +++ /dev/null @@ -1,47 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -_scalable_calloc -_scalable_free -_scalable_malloc -_scalable_realloc -_scalable_posix_memalign -_scalable_aligned_malloc -_scalable_aligned_realloc -_scalable_aligned_free -_scalable_msize -_scalable_allocation_mode -_scalable_allocation_command -___TBB_malloc_safer_aligned_msize -___TBB_malloc_safer_aligned_realloc -___TBB_malloc_safer_free -___TBB_malloc_safer_msize -___TBB_malloc_safer_realloc -___TBB_malloc_free_definite_size -/* memory pool stuff */ -__ZN3rml11pool_createElPKNS_13MemPoolPolicyE -__ZN3rml14pool_create_v1ElPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE -__ZN3rml10pool_resetEPNS_10MemoryPoolE -__ZN3rml12pool_destroyEPNS_10MemoryPoolE -__ZN3rml11pool_mallocEPNS_10MemoryPoolEm -__ZN3rml9pool_freeEPNS_10MemoryPoolEPv -__ZN3rml12pool_reallocEPNS_10MemoryPoolEPvm -__ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvmm -__ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEmm diff --git a/src/tbb/src/tbbmalloc/proxy.cpp b/src/tbb/src/tbbmalloc/proxy.cpp deleted file mode 100644 index 9ffc1f709..000000000 --- a/src/tbb/src/tbbmalloc/proxy.cpp +++ /dev/null @@ -1,591 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "proxy.h" -#include "tbb/tbb_config.h" - -#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX) - #if TBB_USE_EXCEPTIONS - #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. 
- #elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 0 - #endif -#elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 1 -#endif - -#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED - -#ifndef __THROW -#define __THROW -#endif - -/*** service functions and variables ***/ - -#include <string.h> // for memset -#include <unistd.h> // for sysconf - -static long memoryPageSize; - -static inline void initPageSize() -{ - memoryPageSize = sysconf(_SC_PAGESIZE); -} - -#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED -#include "Customize.h" // FencedStore -#include <dlfcn.h> -#include <malloc.h> // mallinfo - -/* __TBB_malloc_proxy used as a weak symbol by libtbbmalloc for: - 1) detection that the proxy library is loaded - 2) check that dlsym("malloc") found something different from our replacement malloc -*/ -extern "C" void *__TBB_malloc_proxy(size_t) __attribute__ ((alias ("malloc"))); - -#elif MALLOC_ZONE_OVERLOAD_ENABLED - -#include "proxy_overload_osx.h" - -#endif // MALLOC_ZONE_OVERLOAD_ENABLED - -// Original (i.e., replaced) functions, -// they are never changed for MALLOC_ZONE_OVERLOAD_ENABLED. -static void *orig_free, - *orig_realloc, - *orig_msize; - -#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED -#define ZONE_ARG -#define PREFIX(name) name - -static void *orig_libc_free, - *orig_libc_realloc; - -// We already tried to find ptr to original functions. -static intptr_t origFuncSearched; - -inline void InitOrigPointers() -{ - // race is OK here, as different threads found same functions - if (!origFuncSearched) { - orig_free = dlsym(RTLD_NEXT, "free"); - orig_realloc = dlsym(RTLD_NEXT, "realloc"); - orig_msize = dlsym(RTLD_NEXT, "malloc_usable_size"); - orig_libc_free = dlsym(RTLD_NEXT, "__libc_free"); - orig_libc_realloc = dlsym(RTLD_NEXT, "__libc_realloc"); - - FencedStore(origFuncSearched, 1); - } -} - -/*** replacements for malloc and the family ***/ -extern "C" { -#elif MALLOC_ZONE_OVERLOAD_ENABLED - -// each impl_* function has such 1st argument, it's unused -#define ZONE_ARG struct _malloc_zone_t *, -#define PREFIX(name) impl_##name -// not interested in original functions for zone overload -inline void InitOrigPointers() {} - -#endif // MALLOC_UNIXLIKE_OVERLOAD_ENABLED and MALLOC_ZONE_OVERLOAD_ENABLED - -void *PREFIX(malloc)(ZONE_ARG size_t size) __THROW -{ - return scalable_malloc(size); -} - -void *PREFIX(calloc)(ZONE_ARG size_t num, size_t size) __THROW -{ - return scalable_calloc(num, size); -} - -void PREFIX(free)(ZONE_ARG void *object) __THROW -{ - InitOrigPointers(); - __TBB_malloc_safer_free(object, (void (*)(void*))orig_free); -} - -void *PREFIX(realloc)(ZONE_ARG void* ptr, size_t sz) __THROW -{ - InitOrigPointers(); - return __TBB_malloc_safer_realloc(ptr, sz, orig_realloc); -} - -/* The older *NIX interface for aligned allocations; - it's formally substituted by posix_memalign and deprecated, - so we do not expect it to cause cyclic dependency with C RTL. */ -void *PREFIX(memalign)(ZONE_ARG size_t alignment, size_t size) __THROW -{ - return scalable_aligned_malloc(size, alignment); -} - -/* valloc allocates memory aligned on a page boundary */ -void *PREFIX(valloc)(ZONE_ARG size_t size) __THROW -{ - if (! 
memoryPageSize) initPageSize(); - - return scalable_aligned_malloc(size, memoryPageSize); -} - -// match prototype from system headers -#if __ANDROID__ || MALLOC_ZONE_OVERLOAD_ENABLED -size_t PREFIX(malloc_usable_size)(ZONE_ARG const void *ptr) __THROW -#else -size_t malloc_usable_size(void *ptr) __THROW -#endif -{ - InitOrigPointers(); - return __TBB_malloc_safer_msize(const_cast<void*>(ptr), (size_t (*)(void*))orig_msize); -} - -#undef ZONE_ARG -#undef PREFIX - -#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED - -int posix_memalign(void **memptr, size_t alignment, size_t size) __THROW -{ - return scalable_posix_memalign(memptr, alignment, size); -} - -/* pvalloc allocates smallest set of complete pages which can hold - the requested number of bytes. Result is aligned on page boundary. */ -void *pvalloc(size_t size) __THROW -{ - if (! memoryPageSize) initPageSize(); - // align size up to the page size, - // pvalloc(0) returns 1 page, see man libmpatrol - size = size? ((size-1) | (memoryPageSize-1)) + 1 : memoryPageSize; - - return scalable_aligned_malloc(size, memoryPageSize); -} - -int mallopt(int /*param*/, int /*value*/) __THROW -{ - return 1; -} - -struct mallinfo mallinfo() __THROW -{ - struct mallinfo m; - memset(&m, 0, sizeof(struct mallinfo)); - - return m; -} - -#if __ANDROID__ -// Android doesn't have malloc_usable_size, provide it to be compatible -// with Linux, in addition overload dlmalloc_usable_size() that presented -// under Android. -size_t dlmalloc_usable_size(const void *ptr) __attribute__ ((alias ("malloc_usable_size"))); -#else // __ANDROID__ -// Those non-standard functions are exported by GLIBC, and might be used -// in conjunction with standard malloc/free, so we must ovberload them. -// Bionic doesn't have them. Not removing from the linker scripts, -// as absent entry points are ignored by the linker. 
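An editorial aside before the `__libc_*` aliases in the removed block below: the old proxy relied on the GCC/Clang `alias` attribute on ELF platforms to export glibc's internal entry points as aliases of its replacement allocator, so callers that bypass malloc() and call __libc_malloc() directly still reach the replacement. A minimal self-contained sketch of the pattern (not the library's code; `my_malloc` is a hypothetical stand-in for `scalable_malloc`):

```cpp
// Sketch of alias-based symbol interposition (GCC/Clang, ELF only).
// Build as a shared library and preload it with LD_PRELOAD.
#include <cstddef>
#include <cstdlib>

extern "C" void* my_malloc(size_t n) {
    // Stand-in for scalable_malloc(); delegate to calloc() so the
    // sketch stays self-contained.
    return calloc(1, n);
}

// The alias makes both names resolve to the same function definition,
// so direct __libc_malloc() callers reach the replacement as well.
extern "C" void* __libc_malloc(size_t n)
    __attribute__((alias("my_malloc")));
```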
-void *__libc_malloc(size_t size) __attribute__ ((alias ("malloc"))); -void *__libc_calloc(size_t num, size_t size) __attribute__ ((alias ("calloc"))); -void *__libc_memalign(size_t alignment, size_t size) __attribute__ ((alias ("memalign"))); -void *__libc_pvalloc(size_t size) __attribute__ ((alias ("pvalloc"))); -void *__libc_valloc(size_t size) __attribute__ ((alias ("valloc"))); - -// call original __libc_* to support naive replacement of free via __libc_free etc -void __libc_free(void *ptr) -{ - InitOrigPointers(); - __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_libc_free); -} - -void *__libc_realloc(void *ptr, size_t size) -{ - InitOrigPointers(); - return __TBB_malloc_safer_realloc(ptr, size, orig_libc_realloc); -} -#endif // !__ANDROID__ - -} /* extern "C" */ - -/*** replacements for global operators new and delete ***/ - -#include <new> - -void * operator new(size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); -#if TBB_USE_EXCEPTIONS - if (NULL == res) - throw std::bad_alloc(); -#endif /* TBB_USE_EXCEPTIONS */ - return res; -} -void* operator new[](size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); -#if TBB_USE_EXCEPTIONS - if (NULL == res) - throw std::bad_alloc(); -#endif /* TBB_USE_EXCEPTIONS */ - return res; -} -void operator delete(void* ptr) throw() { - InitOrigPointers(); - __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); -} -void operator delete[](void* ptr) throw() { - InitOrigPointers(); - __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); -} -void* operator new(size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void* operator new[](std::size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void operator delete(void* ptr, const std::nothrow_t&) throw() { - InitOrigPointers(); - __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); -} -void operator delete[](void* ptr, const std::nothrow_t&) throw() { - InitOrigPointers(); - __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); -} - -#endif /* MALLOC_UNIXLIKE_OVERLOAD_ENABLED */ -#endif /* MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED */ - - -#ifdef _WIN32 -#include <windows.h> - -#if !__TBB_WIN8UI_SUPPORT - -#include <stdio.h> -#include "tbb_function_replacement.h" -#include "shared_utils.h" - -void __TBB_malloc_safer_delete( void *ptr) -{ - __TBB_malloc_safer_free( ptr, NULL ); -} - -void* safer_aligned_malloc( size_t size, size_t alignment ) -{ - // workaround for "is power of 2 pow N" bug that accepts zeros - return scalable_aligned_malloc( size, alignment>sizeof(size_t*)?alignment:sizeof(size_t*) ); -} - -// we do not support _expand(); -void* safer_expand( void *, size_t ) -{ - return NULL; -} - -#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(CRTLIB) \ -void (*orig_free_##CRTLIB)(void*); \ -void __TBB_malloc_safer_free_##CRTLIB(void *ptr) \ -{ \ - __TBB_malloc_safer_free( ptr, orig_free_##CRTLIB ); \ -} \ - \ -size_t (*orig_msize_##CRTLIB)(void*); \ -size_t __TBB_malloc_safer_msize_##CRTLIB(void *ptr) \ -{ \ - return __TBB_malloc_safer_msize( ptr, orig_msize_##CRTLIB ); \ -} \ - \ -size_t (*orig_aligned_msize_##CRTLIB)(void*, size_t, size_t); \ -size_t __TBB_malloc_safer_aligned_msize_##CRTLIB( void *ptr, size_t alignment, size_t offset) \ -{ \ - return __TBB_malloc_safer_aligned_msize( ptr, alignment, offset, orig_aligned_msize_##CRTLIB ); \ -} \ - \ -void* __TBB_malloc_safer_realloc_##CRTLIB( void *ptr, size_t size ) \ -{ \ - orig_ptrs func_ptrs = {orig_free_##CRTLIB, 
orig_msize_##CRTLIB}; \ - return __TBB_malloc_safer_realloc( ptr, size, &func_ptrs ); \ -} \ - \ -void* __TBB_malloc_safer_aligned_realloc_##CRTLIB( void *ptr, size_t size, size_t aligment ) \ -{ \ - orig_ptrs func_ptrs = {orig_free_##CRTLIB, orig_msize_##CRTLIB}; \ - return __TBB_malloc_safer_aligned_realloc( ptr, size, aligment, &func_ptrs ); \ -} - -// limit is 30 bytes/60 symbols per line -const char* known_bytecodes[] = { -#if _WIN64 - "4883EC284885C974", //release free() win64 - "4883EC384885C975", //release msize() win64 - "4885C974375348", //release free() 8.0.50727.42 win64 - "48894C24084883EC28BA", //debug prologue for win64 - "4C8BC1488B0DA6E4040033", //win64 SDK - "4883EC284885C975", //release msize() 10.0.21003.1 win64 - "48895C2408574883EC20", //release _aligned_msize() win64 - "4C894424184889542410", //debug _aligned_msize() win64 -#else - "558BEC6A018B", //debug free() & _msize() 8.0.50727.4053 win32 - "6A1868********E8", //release free() 8.0.50727.4053 win32 - "6A1C68********E8", //release _msize() 8.0.50727.4053 win32 - "558BEC837D08000F", //release _msize() 11.0.51106.1 win32 - "8BFF558BEC6A", //debug free() & _msize() 9.0.21022.8 win32 - "8BFF558BEC83", //debug free() & _msize() 10.0.21003.1 win32 - "8BFF558BEC8B4508", //release _aligned_msize() 10.0 win32 - "8BFF558BEC8B4510", //debug _aligned_msize() 10.0 win32 - "558BEC8B451050", //debug _aligned_msize() 11.0 win32 -#endif - NULL - }; - -#if _WIN64 -#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER)\ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_msize",(FUNCPTR)__TBB_malloc_safer_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_msize",(FUNCPTR)__TBB_malloc_safer_msize_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "realloc", (FUNCPTR)__TBB_malloc_safer_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "realloc", (FUNCPTR)__TBB_malloc_safer_realloc_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_realloc",(FUNCPTR)__TBB_malloc_safer_aligned_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_realloc",(FUNCPTR)__TBB_malloc_safer_aligned_realloc_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_msize",(FUNCPTR)__TBB_malloc_safer_aligned_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_aligned_msize_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_msize",(FUNCPTR)__TBB_malloc_safer_aligned_msize_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_aligned_msize_ ## CRT_VER ); -#else -#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER)\ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", 
"free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_msize",(FUNCPTR)__TBB_malloc_safer_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_msize",(FUNCPTR)__TBB_malloc_safer_msize_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "realloc", (FUNCPTR)__TBB_malloc_safer_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "realloc", (FUNCPTR)__TBB_malloc_safer_realloc_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_free", (FUNCPTR)__TBB_malloc_safer_free_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_realloc",(FUNCPTR)__TBB_malloc_safer_aligned_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_realloc",(FUNCPTR)__TBB_malloc_safer_aligned_realloc_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_msize",(FUNCPTR)__TBB_malloc_safer_aligned_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_aligned_msize_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_msize",(FUNCPTR)__TBB_malloc_safer_aligned_msize_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_aligned_msize_ ## CRT_VER ); -#endif - -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr110d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr110); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr120d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr120); - - -/*** replacements for global operators new and delete ***/ - -#include <new> - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( push ) -#pragma warning( disable : 4290 ) -#endif - -void * operator_new(size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); - if (NULL == res) throw std::bad_alloc(); - return res; -} -void* operator_new_arr(size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); - if (NULL == res) throw std::bad_alloc(); - return res; -} -void operator_delete(void* ptr) throw() { - __TBB_malloc_safer_delete(ptr); -} -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif - -void operator_delete_arr(void* ptr) throw() { - __TBB_malloc_safer_delete(ptr); -} -void* operator_new_t(size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void* operator_new_arr_t(std::size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void operator_delete_t(void* ptr, const std::nothrow_t&) throw() { - __TBB_malloc_safer_delete(ptr); -} -void operator_delete_arr_t(void* ptr, const std::nothrow_t&) throw() { - __TBB_malloc_safer_delete(ptr); -} - -const char* modules_to_replace[] = { - "msvcr80d.dll", - 
"msvcr80.dll", - "msvcr90d.dll", - "msvcr90.dll", - "msvcr100d.dll", - "msvcr100.dll", - "msvcr110d.dll", - "msvcr110.dll", - "msvcr120d.dll", - "msvcr120.dll", - "msvcr70d.dll", - "msvcr70.dll", - "msvcr71d.dll", - "msvcr71.dll", - }; - -/* -We need to replace following functions: -malloc -calloc -_aligned_malloc -_expand (by dummy implementation) -??2@YAPAXI@Z operator new (ia32) -??_U@YAPAXI@Z void * operator new[] (size_t size) (ia32) -??3@YAXPAX@Z operator delete (ia32) -??_V@YAXPAX@Z operator delete[] (ia32) -??2@YAPEAX_K@Z void * operator new(unsigned __int64) (intel64) -??_V@YAXPEAX@Z void * operator new[](unsigned __int64) (intel64) -??3@YAXPEAX@Z operator delete (intel64) -??_V@YAXPEAX@Z operator delete[] (intel64) -??2@YAPAXIABUnothrow_t@std@@@Z void * operator new (size_t sz, const std::nothrow_t&) throw() (optional) -??_U@YAPAXIABUnothrow_t@std@@@Z void * operator new[] (size_t sz, const std::nothrow_t&) throw() (optional) - -and these functions have runtime-specific replacement: -realloc -free -_msize -_aligned_realloc -_aligned_free -*/ - -typedef struct FRData_t { - //char *_module; - const char *_func; - FUNCPTR _fptr; - FRR_ON_ERROR _on_error; -} FRDATA; - -FRDATA routines_to_replace[] = { - { "malloc", (FUNCPTR)scalable_malloc, FRR_FAIL }, - { "calloc", (FUNCPTR)scalable_calloc, FRR_FAIL }, - { "_aligned_malloc", (FUNCPTR)safer_aligned_malloc, FRR_FAIL }, - { "_expand", (FUNCPTR)safer_expand, FRR_IGNORE }, -#if _WIN64 - { "??2@YAPEAX_K@Z", (FUNCPTR)operator_new, FRR_FAIL }, - { "??_U@YAPEAX_K@Z", (FUNCPTR)operator_new_arr, FRR_FAIL }, - { "??3@YAXPEAX@Z", (FUNCPTR)operator_delete, FRR_FAIL }, - { "??_V@YAXPEAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL }, -#else - { "??2@YAPAXI@Z", (FUNCPTR)operator_new, FRR_FAIL }, - { "??_U@YAPAXI@Z", (FUNCPTR)operator_new_arr, FRR_FAIL }, - { "??3@YAXPAX@Z", (FUNCPTR)operator_delete, FRR_FAIL }, - { "??_V@YAXPAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL }, -#endif - { "??2@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_t, FRR_IGNORE }, - { "??_U@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_arr_t, FRR_IGNORE } -}; - -#ifndef UNICODE -void ReplaceFunctionWithStore( const char*dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc ) -#else -void ReplaceFunctionWithStore( const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc ) -#endif -{ - FRR_TYPE type = ReplaceFunction( dllName, funcName, newFunc, opcodes, origFunc ); - if (type == FRR_NODLL) return; - if ( type != FRR_OK ) - { - fprintf(stderr, "Failed to replace function %s in module %s\n", - funcName, dllName); - exit(1); - } -} - -void doMallocReplacement() -{ - // Replace functions and keep backup of original code (separate for each runtime) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr70) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr71) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr80) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr90) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr100) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr110) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr120) - - // Replace functions without storing original code - for ( size_t j=0; j < arrayLength(modules_to_replace); j++ ) - for (size_t i = 0; i < arrayLength(routines_to_replace); i++) - { -#if !_WIN64 - // in Microsoft* Visual Studio* 2012 and 2013 32-bit operator delete consists of 2 bytes only: short jump to free(ptr); - // replacement should be skipped for this particular case. 
- if ( ((strcmp(modules_to_replace[j], "msvcr110.dll") == 0) || (strcmp(modules_to_replace[j], "msvcr120.dll") == 0)) && (strcmp(routines_to_replace[i]._func, "??3@YAXPAX@Z") == 0)) continue; - // in Microsoft* Visual Studio* 2013 32-bit operator delete[] consists of 2 bytes only: short jump to free(ptr); - // replacement should be skipped for this particular case. - if ((strcmp(modules_to_replace[j], "msvcr120.dll") == 0) && (strcmp(routines_to_replace[i]._func, "??_V@YAXPAX@Z") == 0)) continue; -#endif - FRR_TYPE type = ReplaceFunction( modules_to_replace[j], routines_to_replace[i]._func, routines_to_replace[i]._fptr, NULL, NULL ); - if (type == FRR_NODLL) break; - if (type != FRR_OK && routines_to_replace[i]._on_error==FRR_FAIL) - { - fprintf(stderr, "Failed to replace function %s in module %s\n", - routines_to_replace[i]._func, modules_to_replace[j]); - exit(1); - } - } -} - -#endif // !__TBB_WIN8UI_SUPPORT - -extern "C" BOOL WINAPI DllMain( HINSTANCE hInst, DWORD callReason, LPVOID reserved ) -{ - - if ( callReason==DLL_PROCESS_ATTACH && reserved && hInst ) { -#if !__TBB_WIN8UI_SUPPORT -#if TBBMALLOC_USE_TBB_FOR_ALLOCATOR_ENV_CONTROLLED - char pinEnvVariable[50]; - if( GetEnvironmentVariable("TBBMALLOC_USE_TBB_FOR_ALLOCATOR", pinEnvVariable, 50)) - { - doMallocReplacement(); - } -#else - doMallocReplacement(); -#endif -#endif // !__TBB_WIN8UI_SUPPORT - } - - return TRUE; -} - -// Just to make the linker happy and link the DLL to the application -extern "C" __declspec(dllexport) void __TBB_malloc_proxy() -{ - -} - -#endif //_WIN32 diff --git a/src/tbb/src/tbbmalloc/proxy.h b/src/tbb/src/tbbmalloc/proxy.h deleted file mode 100644 index 5b6e9dbce..000000000 --- a/src/tbb/src/tbbmalloc/proxy.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef _TBB_malloc_proxy_H_ -#define _TBB_malloc_proxy_H_ - -#define MALLOC_UNIXLIKE_OVERLOAD_ENABLED __linux__ -#define MALLOC_ZONE_OVERLOAD_ENABLED __APPLE__ - -// MALLOC_UNIXLIKE_OVERLOAD_ENABLED depends on MALLOC_CHECK_RECURSION stuff -// TODO: limit MALLOC_CHECK_RECURSION to *_OVERLOAD_ENABLED only -#if __linux__ || __APPLE__ || __sun || __FreeBSD__ || MALLOC_UNIXLIKE_OVERLOAD_ENABLED -#define MALLOC_CHECK_RECURSION 1 -#endif - -#include <stddef.h> - -extern "C" { - void * scalable_malloc(size_t size); - void * scalable_calloc(size_t nobj, size_t size); - void scalable_free(void *ptr); - void * scalable_realloc(void* ptr, size_t size); - void * scalable_aligned_malloc(size_t size, size_t alignment); - void * scalable_aligned_realloc(void* ptr, size_t size, size_t alignment); - int scalable_posix_memalign(void **memptr, size_t alignment, size_t size); - size_t scalable_msize(void *ptr); - void __TBB_malloc_safer_free( void *ptr, void (*original_free)(void*)); - void * __TBB_malloc_safer_realloc( void *ptr, size_t, void* ); - void * __TBB_malloc_safer_aligned_realloc( void *ptr, size_t, size_t, void* ); - size_t __TBB_malloc_safer_msize( void *ptr, size_t (*orig_msize_crt80d)(void*)); - size_t __TBB_malloc_safer_aligned_msize( void *ptr, size_t, size_t, size_t (*orig_msize_crt80d)(void*,size_t,size_t)); - -#if MALLOC_ZONE_OVERLOAD_ENABLED - void __TBB_malloc_free_definite_size(void *object, size_t size); -#endif -} // extern "C" - -// Struct with original free() and _msize() pointers -struct orig_ptrs { - void (*orig_free) (void*); - size_t (*orig_msize)(void*); -}; - -#endif /* _TBB_malloc_proxy_H_ */ diff --git a/src/tbb/src/tbbmalloc/shared_utils.h b/src/tbb/src/tbbmalloc/shared_utils.h index cc6d4ba1a..7ece1891f 100644 --- a/src/tbb/src/tbbmalloc/shared_utils.h +++ b/src/tbb/src/tbbmalloc/shared_utils.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
 */
 
 #ifndef __TBB_shared_utils_H
@@ -55,9 +51,96 @@ static inline T alignUpGeneric(T arg, uintptr_t alignment) {
     return arg;
 }
 
-template<typename T, size_t N> // generic function to find length of array
-inline size_t arrayLength(const T(&)[N]) {
-    return N;
+/*
+ * Compile time Log2 calculation
+ */
+template <size_t NUM>
+struct Log2 { static const int value = 1 + Log2<(NUM >> 1)>::value; };
+template <>
+struct Log2<1> { static const int value = 0; };
+
+#if defined(min)
+#undef min
+#endif
+
+template<typename T>
+T min ( const T& val1, const T& val2 ) {
+    return val1 < val2 ? val1 : val2;
 }
+
+/*
+ * Functions to parse file contents (system files, for example)
+ */
+
+#include <stdio.h>
+
+#if defined(_MSC_VER) && (_MSC_VER<1900) && !defined(__INTEL_COMPILER)
+    // Suppress overzealous compiler warnings that the default ctor and assignment
+    // operator cannot be generated and that the class can never be instantiated.
+    // #pragma warning(push)
+    // #pragma warning(disable:4510 4512 4610)
+#endif
+
+#if __SUNPRO_CC
+    // Suppress overzealous compiler warnings that a class with a reference member
+    // lacks a user-defined constructor, which can lead to errors
+    #pragma error_messages (off, refmemnoconstr)
+#endif
+
+// TODO: add a constructor to remove the warning suppressions
+struct parseFileItem {
+    const char* format;
+    long long& value;
+};
+
+#if defined(_MSC_VER) && (_MSC_VER<1900) && !defined(__INTEL_COMPILER)
+    // #pragma warning(pop)
+#endif
+
+#if __SUNPRO_CC
+    #pragma error_messages (on, refmemnoconstr)
+#endif
+
+template <int BUF_LINE_SIZE, int N>
+void parseFile(const char* file, const parseFileItem (&items)[N]) {
+    // Try to find each item on every line
+    int found[N] = { 0 };
+    // Once all items are found, stop reading the file
+    int numFound = 0;
+    // Line storage
+    char buf[BUF_LINE_SIZE];
+
+    if (FILE *f = fopen(file, "r")) {
+        while (numFound < N && fgets(buf, BUF_LINE_SIZE, f)) {
+            for (int i = 0; i < N; ++i) {
+                if (!found[i] && 1 == sscanf(buf, items[i].format, &items[i].value)) {
+                    ++numFound;
+                    found[i] = 1;
+                }
+            }
+        }
+        fclose(f);
+    }
+}
+
+namespace rml {
+namespace internal {
+
+/*
+ * Best estimate of cache line size, for the purpose of avoiding false sharing.
+ * Too high causes memory overhead, too low causes false-sharing overhead.
+ * Because, e.g., 32-bit code might run on a 64-bit system with a larger cache line size,
+ * it would probably be better to probe at runtime where possible and/or allow for an environment variable override,
+ * but currently this is still used for compile-time layout of class Block, so the change is not entirely trivial.
+ */
+#if __powerpc64__ || __ppc64__ || __bgp__
+const uint32_t estimatedCacheLineSize = 128;
+#else
+const uint32_t estimatedCacheLineSize = 64;
+#endif
+
+} // namespace internal
+} // namespace rml
+
 #endif /* __TBB_shared_utils_H */
+
diff --git a/src/tbb/src/tbbmalloc/tbbmalloc.cpp b/src/tbb/src/tbbmalloc/tbbmalloc.cpp
index c0e3cfb87..b72e03a74 100644
--- a/src/tbb/src/tbbmalloc/tbbmalloc.cpp
+++ b/src/tbb/src/tbbmalloc/tbbmalloc.cpp
@@ -1,69 +1,58 @@
 /*
-    Copyright 2005-2014 Intel Corporation.
All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2023 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #include "TypeDefinitions.h" // Customize.h and proxy.h get included #include "tbbmalloc_internal_api.h" -#include "../tbb/itt_notify.h" // for __TBB_load_ittnotify() - -#include "../tbb/tbb_assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here. +#include "../tbb/assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here. +#include "oneapi/tbb/version.h" +#include "oneapi/tbb/scalable_allocator.h" #undef UNICODE #if USE_PTHREAD #include <dlfcn.h> // dlopen #elif USE_WINTHREAD -#include "tbb/machine/windows_api.h" +#include <windows.h> #endif namespace rml { namespace internal { -/** Caller is responsible for ensuring this routine is called exactly once. */ -extern "C" void MallocInitializeITT() { -#if DO_ITT_NOTIFY - tbb::internal::__TBB_load_ittnotify(); -#endif -} - #if TBB_USE_DEBUG #define DEBUG_SUFFIX "_debug" #else #define DEBUG_SUFFIX #endif /* TBB_USE_DEBUG */ -// MALLOCLIB_NAME is the name of the TBB memory allocator library. +// MALLOCLIB_NAME is the name of the oneTBB memory allocator library. 
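Before the MALLOCLIB_NAME definitions that follow, a note on the pinning idiom in this tbbmalloc.cpp hunk: on Windows, init_tbbmalloc() pins the DLL via GetModuleHandleEx with GET_MODULE_HANDLE_EX_FLAG_PIN; on POSIX, a static RegisterProcessShutdownNotification object dlopen()s the library's own soname so its reference count never drops to zero and the library cannot be unloaded while thread shutdown hooks may still run. A condensed sketch of the POSIX side, assuming the .so.2 soname used in the hunk; error handling is omitted for brevity:

```cpp
// Sketch of the POSIX self-pinning idiom: a static object dlopen()s the
// allocator's own soname at load time; the extra reference is
// deliberately never released, so the library is never unloaded.
#include <dlfcn.h>

#define MALLOCLIB_NAME "libtbbmalloc.so.2"  // matches the hunk below

namespace {
struct PinSelf {
    PinSelf() {
        dlopen(MALLOCLIB_NAME, RTLD_NOW);  // pins; handle intentionally leaked
    }
};
PinSelf pin;  // constructed when the shared object is loaded
}
```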
#if _WIN32||_WIN64 #define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll" #elif __APPLE__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".dylib" -#elif __FreeBSD__ || __NetBSD__ || __sun || _AIX || __ANDROID__ +#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".2.dylib" +#elif __FreeBSD__ || __NetBSD__ || __OpenBSD__ || __sun || _AIX || __ANDROID__ #define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so" -#elif __linux__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION) +#elif __unix__ +#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX __TBB_STRING(.so.2) #else #error Unknown OS #endif void init_tbbmalloc() { -#if DO_ITT_NOTIFY +#if __TBB_USE_ITT_NOTIFY MallocInitializeITT(); #endif @@ -78,36 +67,43 @@ void init_tbbmalloc() { |GET_MODULE_HANDLE_EX_FLAG_PIN, (LPCTSTR)&scalable_malloc, &lib); MALLOC_ASSERT(lib && ret, "Allocator can't find itself."); + tbb::detail::suppress_unused_warning(ret); SetErrorMode (prev_mode); #endif /* USE_PTHREAD && !__TBB_SOURCE_DIRECTLY_INCLUDED */ } #if !__TBB_SOURCE_DIRECTLY_INCLUDED #if USE_WINTHREAD -extern "C" BOOL WINAPI DllMain( HINSTANCE /*hInst*/, DWORD callReason, LPVOID ) +extern "C" BOOL WINAPI DllMain( HINSTANCE /*hInst*/, DWORD callReason, LPVOID lpvReserved) { - if (callReason==DLL_THREAD_DETACH) { __TBB_mallocThreadShutdownNotification(); } else if (callReason==DLL_PROCESS_DETACH) { - __TBB_mallocProcessShutdownNotification(); + __TBB_mallocProcessShutdownNotification(lpvReserved != nullptr); } return TRUE; } #else /* !USE_WINTHREAD */ struct RegisterProcessShutdownNotification { // Work around non-reentrancy in dlopen() on Android -#if !__TBB_USE_DLOPEN_REENTRANCY_WORKAROUND RegisterProcessShutdownNotification() { // prevents unloading, POSIX case + + // We need better support for the library pinning + // when dlopen can't find TBBmalloc library. + // for example: void *ret = dlopen(MALLOCLIB_NAME, RTLD_NOW); + // MALLOC_ASSERT(ret, "Allocator can't load itself."); dlopen(MALLOCLIB_NAME, RTLD_NOW); } -#endif /* !__ANDROID__ */ + + RegisterProcessShutdownNotification(RegisterProcessShutdownNotification&) = delete; + RegisterProcessShutdownNotification& operator=(const RegisterProcessShutdownNotification&) = delete; + ~RegisterProcessShutdownNotification() { - __TBB_mallocProcessShutdownNotification(); + __TBB_mallocProcessShutdownNotification(false); } }; @@ -117,15 +113,3 @@ static RegisterProcessShutdownNotification reg; } } // namespaces -#if __TBB_ipf -/* It was found that on IA-64 architecture inlining of __TBB_machine_lockbyte leads - to serious performance regression with ICC. So keep it out-of-line. - - This code is copy-pasted from tbb_misc.cpp. - */ -extern "C" intptr_t __TBB_machine_lockbyte( volatile unsigned char& flag ) { - tbb::internal::atomic_backoff backoff; - while( !__TBB_TryLockByte(flag) ) backoff.pause(); - return 0; -} -#endif diff --git a/src/tbb/src/tbbmalloc/tbbmalloc.rc b/src/tbb/src/tbbmalloc/tbbmalloc.rc index 3125b1892..2821addab 100644 --- a/src/tbb/src/tbbmalloc/tbbmalloc.rc +++ b/src/tbb/src/tbbmalloc/tbbmalloc.rc @@ -1,71 +1,41 @@ -// Copyright 2005-2014 Intel Corporation. All Rights Reserved. +// Copyright (c) 2005-2024 Intel Corporation // -// This file is part of Threading Building Blocks. Threading Building Blocks is free software; -// you can redistribute it and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. 
Threading Building Blocks is -// distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. You should have received a copy of -// the GNU General Public License along with Threading Building Blocks; if not, write to the -// Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// As a special exception, you may use this file as part of a free software library without -// restriction. Specifically, if other files instantiate templates or use macros or inline -// functions from this file, or you compile this file and link it with other files to produce -// an executable, this file does not by itself cause the resulting executable to be covered -// by the GNU General Public License. This exception does not however invalidate any other -// reasons why the executable file might be covered by the GNU General Public License. - -// Microsoft Visual C++ generated resource script. +// http://www.apache.org/licenses/LICENSE-2.0 // -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NO_MFC 1 -#define _APS_NEXT_RESOURCE_VALUE 102 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1001 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#define APSTUDIO_READONLY_SYMBOLS ///////////////////////////////////////////////////////////////////////////// // -// Generated from the TEXTINCLUDE 2 resource. +// Includes // #include <winresrc.h> -#define ENDL "\r\n" -#include "tbb/tbb_version.h" - -#define TBBMALLOC_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD -#define TBBMALLOC_VERSION __TBB_STRING(TBBMALLOC_VERNUMBERS) - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS +#include "../../include/oneapi/tbb/version.h" ///////////////////////////////////////////////////////////////////////////// // Neutral resources -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU) #ifdef _WIN32 LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL #pragma code_page(1252) #endif //_WIN32 -///////////////////////////////////////////////////////////////////////////// -// manifest integration -#ifdef TBB_MANIFEST -#include "winuser.h" -2 RT_MANIFEST tbbmanifest.exe.manifest -#endif - ///////////////////////////////////////////////////////////////////////////// // // Version // +#define TBB_VERNUMBERS TBB_VERSION_MAJOR,TBB_VERSION_MINOR,TBB_VERSION_PATCH +#define TBB_VERSION TBB_VERSION_STRING VS_VERSION_INFO VERSIONINFO - FILEVERSION TBBMALLOC_VERNUMBERS + FILEVERSION TBB_VERNUMBERS PRODUCTVERSION TBB_VERNUMBERS FILEFLAGSMASK 0x17L #ifdef _DEBUG @@ -82,16 +52,16 @@ BEGIN BLOCK "000004b0" BEGIN VALUE "CompanyName", "Intel Corporation\0" - VALUE "FileDescription", "Scalable Allocator library\0" - VALUE "FileVersion", TBBMALLOC_VERSION "\0" - VALUE "LegalCopyright", "Copyright 2005-2014 Intel Corporation. 
All Rights Reserved.\0" + VALUE "FileDescription", "oneAPI Threading Building Blocks (oneTBB) library\0" + VALUE "FileVersion", TBB_VERSION "\0" + VALUE "LegalCopyright", "Copyright 2005-2024 Intel Corporation. All Rights Reserved.\0" VALUE "LegalTrademarks", "\0" #ifndef TBB_USE_DEBUG VALUE "OriginalFilename", "tbbmalloc.dll\0" #else VALUE "OriginalFilename", "tbbmalloc_debug.dll\0" #endif - VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0" + VALUE "ProductName", "oneAPI Threading Building Blocks (oneTBB)\0" VALUE "ProductVersion", TBB_VERSION "\0" VALUE "PrivateBuild", "\0" VALUE "SpecialBuild", "\0" @@ -102,18 +72,3 @@ BEGIN VALUE "Translation", 0x0, 1200 END END - -#endif // Neutral resources -///////////////////////////////////////////////////////////////////////////// - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. -// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/src/tbb/src/tbbmalloc/tbbmalloc_internal.h b/src/tbb/src/tbbmalloc/tbbmalloc_internal.h index d0f44ea14..a8b538c7d 100644 --- a/src/tbb/src/tbbmalloc/tbbmalloc_internal.h +++ b/src/tbb/src/tbbmalloc/tbbmalloc_internal.h @@ -1,26 +1,21 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #ifndef __TBB_tbbmalloc_internal_H -#define __TBB_tbbmalloc_internal_H 1 - +#define __TBB_tbbmalloc_internal_H #include "TypeDefinitions.h" /* Also includes customization layer Customize.h */ @@ -29,13 +24,20 @@ #include <pthread.h> typedef pthread_key_t tls_key_t; #elif USE_WINTHREAD - #include "tbb/machine/windows_api.h" + #include <windows.h> typedef DWORD tls_key_t; #else #error Must define USE_PTHREAD or USE_WINTHREAD #endif -#include "tbb/tbb_config.h" // for __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN +#include <atomic> + +// TODO: *BSD also has it +#define BACKEND_HAS_MREMAP __linux__ +#define CHECK_ALLOCATION_RANGE MALLOC_DEBUG || MALLOC_ZONE_OVERLOAD_ENABLED || MALLOC_UNIXLIKE_OVERLOAD_ENABLED + +#include "oneapi/tbb/detail/_config.h" // for __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN +#include "oneapi/tbb/detail/_template_helpers.h" #if __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN #define _EXCEPTION_PTR_H /* prevents exception_ptr.h inclusion */ #define _GLIBCXX_NESTED_EXCEPTION_H /* prevents nested_exception.h inclusion */ @@ -48,7 +50,7 @@ #if MALLOC_CHECK_RECURSION #include <new> /* for placement new */ #endif -#include "tbb/scalable_allocator.h" +#include "oneapi/tbb/scalable_allocator.h" #include "tbbmalloc_internal_api.h" /********* Various compile-time options **************/ @@ -65,11 +67,11 @@ #define TRACEF(x) ((void)0) #endif /* MALLOC_TRACE */ -#define ASSERT_TEXT NULL +#define ASSERT_TEXT nullptr #define COLLECT_STATISTICS ( MALLOC_DEBUG && MALLOCENV_COLLECT_STATISTICS ) #ifndef USE_INTERNAL_TID -#define USE_INTERNAL_TID COLLECT_STATISTICS +#define USE_INTERNAL_TID COLLECT_STATISTICS || MALLOC_TRACE #endif #include "Statistics.h" @@ -95,10 +97,23 @@ extern intptr_t memAllocKB, memHitKB; template<typename T> void suppress_unused_warning( const T& ) {} +/********** Various global default constants ********/ + +/* + * Default huge page size + */ +#if defined __loongarch64 +static const size_t HUGE_PAGE_SIZE = 32 * 1024 * 1024; +#else +static const size_t HUGE_PAGE_SIZE = 2 * 1024 * 1024; +#endif + +/********** End of global default constants *********/ + /********** Various numeric parameters controlling allocations ********/ /* - * smabSize - the size of a block for allocation of small objects, + * slabSize - the size of a block for allocation of small objects, * it must be larger than maxSegregatedObjectSize. */ const uintptr_t slabSize = 16*1024; @@ -110,26 +125,19 @@ const uintptr_t slabSize = 16*1024; const unsigned cacheCleanupFreq = 256; /* - * Best estimate of cache line size, for the purpose of avoiding false sharing. - * Too high causes memory overhead, too low causes false-sharing overhead. - * Because, e.g., 32-bit code might run on a 64-bit system with a larger cache line size, - * it would probably be better to probe at runtime where possible and/or allow for an environment variable override, - * but currently this is still used for compile-time layout of class Block, so the change is not entirely trivial. + * Alignment of large (>= minLargeObjectSize) objects. */ -#if __powerpc64__ || __ppc64__ || __bgp__ -const uint32_t estimatedCacheLineSize = 128; -#else -const uint32_t estimatedCacheLineSize = 64; -#endif +const size_t largeObjectAlignment = estimatedCacheLineSize; /* - * Alignment of large (>= minLargeObjectSize) objects. + * This number of bins in the TLS that leads to blocks that we can allocate in. 
*/ -const size_t largeObjectAlignment = estimatedCacheLineSize; +const uint32_t numBlockBinLimit = 31; /********** End of numeric parameters controlling allocations *********/ class BlockI; +class Block; struct LargeMemoryBlock; struct ExtMemoryPool; struct MemRegion; @@ -140,47 +148,54 @@ class MemoryPool; struct CacheBinOperation; extern const uint32_t minLargeObjectSize; +enum DecreaseOrIncrease { + decrease, increase +}; + class TLSKey { tls_key_t TLS_pointer_key; public: - TLSKey(); - ~TLSKey(); + bool init(); + bool destroy(); TLSData* getThreadMallocTLS() const; void setThreadMallocTLS( TLSData * newvalue ); TLSData* createTLS(MemoryPool *memPool, Backend *backend); }; template<typename Arg, typename Compare> -inline void AtomicUpdate(Arg &location, Arg newVal, const Compare &cmp) +inline void AtomicUpdate(std::atomic<Arg>& location, Arg newVal, const Compare &cmp) { - MALLOC_STATIC_ASSERT(sizeof(Arg) == sizeof(intptr_t), - "Type of argument must match AtomicCompareExchange type."); - for (Arg old = location; cmp(old, newVal); ) { - Arg val = AtomicCompareExchange((intptr_t&)location, (intptr_t)newVal, old); - if (val == old) + static_assert(sizeof(Arg) == sizeof(intptr_t), "Type of argument must match AtomicCompareExchange type."); + Arg old = location.load(std::memory_order_acquire); + for (; cmp(old, newVal); ) { + if (location.compare_exchange_strong(old, newVal)) break; // TODO: do we need backoff after unsuccessful CAS? - old = val; + //old = val; } } // TODO: make BitMaskBasic more general +// TODO: check that BitMaskBasic is not used for synchronization // (currently, it fits BitMaskMin well, but not as suitable for BitMaskMax) template<unsigned NUM> class BitMaskBasic { static const unsigned SZ = (NUM-1)/(CHAR_BIT*sizeof(uintptr_t))+1; static const unsigned WORD_LEN = CHAR_BIT*sizeof(uintptr_t); - uintptr_t mask[SZ]; + + std::atomic<uintptr_t> mask[SZ]; + protected: void set(size_t idx, bool val) { MALLOC_ASSERT(idx<NUM, ASSERT_TEXT); size_t i = idx / WORD_LEN; int pos = WORD_LEN - idx % WORD_LEN - 1; - if (val) - AtomicOr(&mask[i], 1ULL << pos); - else - AtomicAnd(&mask[i], ~(1ULL << pos)); + if (val) { + mask[i].fetch_or(1ULL << pos); + } else { + mask[i].fetch_and(~(1ULL << pos)); + } } int getMinTrue(unsigned startIdx) const { unsigned idx = startIdx / WORD_LEN; @@ -189,19 +204,19 @@ class BitMaskBasic { if (startIdx % WORD_LEN) { // only interested in part of a word, clear bits before startIdx pos = WORD_LEN - startIdx % WORD_LEN; - uintptr_t actualMask = mask[idx] & (((uintptr_t)1<<pos) - 1); + uintptr_t actualMask = mask[idx].load(std::memory_order_relaxed) & (((uintptr_t)1<<pos) - 1); idx++; if (-1 != (pos = BitScanRev(actualMask))) return idx*WORD_LEN - pos - 1; } while (idx<SZ) - if (-1 != (pos = BitScanRev(mask[idx++]))) + if (-1 != (pos = BitScanRev(mask[idx++].load(std::memory_order_relaxed)))) return idx*WORD_LEN - pos - 1; return -1; } public: - void reset() { for (unsigned i=0; i<SZ; i++) mask[i] = 0; } + void reset() { for (unsigned i=0; i<SZ; i++) mask[i].store(0, std::memory_order_relaxed); } }; template<unsigned NUM> @@ -217,9 +232,13 @@ template<unsigned NUM> class BitMaskMax : public BitMaskBasic<NUM> { public: void set(size_t idx, bool val) { + MALLOC_ASSERT(NUM >= idx + 1, ASSERT_TEXT); + BitMaskBasic<NUM>::set(NUM - 1 - idx, val); } int getMaxTrue(unsigned startIdx) const { + MALLOC_ASSERT(NUM >= startIdx + 1, ASSERT_TEXT); + int p = BitMaskBasic<NUM>::getMinTrue(NUM-startIdx-1); return -1==p? 
-1 : (int)NUM - 1 - p; } @@ -240,315 +259,87 @@ class AllLocalCaches { public: void registerThread(TLSRemote *tls); void unregisterThread(TLSRemote *tls); - bool cleanup(ExtMemoryPool *extPool, bool cleanOnlyUnused); + bool cleanup(bool cleanOnlyUnused); void markUnused(); - void reset() { head = NULL; } + void reset() { head = nullptr; } }; -/* cache blocks in range [MinSize; MaxSize) in bins with CacheStep - TooLargeFactor -- when cache size treated "too large" in comparison to user data size - OnMissFactor -- If cache miss occurred and cache was cleaned, - set ageThreshold to OnMissFactor * the difference - between current time and last time cache was cleaned. - LongWaitFactor -- to detect rarely-used bins and forget about their usage history -*/ -template<size_t MIN_SIZE, size_t MAX_SIZE, uint32_t CACHE_STEP, int TOO_LARGE, - int ON_MISS, int LONG_WAIT> -struct LargeObjectCacheProps { - static const size_t MinSize = MIN_SIZE, MaxSize = MAX_SIZE; - static const uint32_t CacheStep = CACHE_STEP; - static const int TooLargeFactor = TOO_LARGE, OnMissFactor = ON_MISS, - LongWaitFactor = LONG_WAIT; -}; +class LifoList { +public: + inline LifoList(); + inline void push(Block *block); + inline Block *pop(); + inline Block *grab(); -template<typename Props> -class LargeObjectCacheImpl { private: - // The number of bins to cache large objects. - static const uint32_t numBins = (Props::MaxSize-Props::MinSize)/Props::CacheStep; - - typedef BitMaskMax<numBins> BinBitMask; - - // Current sizes of used and cached objects. It's calculated while we are - // traversing bins, and used for isLOCTooLarge() check at the same time. - class BinsSummary { - size_t usedSz; - size_t cachedSz; - public: - BinsSummary() : usedSz(0), cachedSz(0) {} - // "too large" criteria - bool isLOCTooLarge() const { return cachedSz > Props::TooLargeFactor*usedSz; } - void update(size_t usedSize, size_t cachedSize) { - usedSz += usedSize; - cachedSz += cachedSize; - } - void reset() { usedSz = cachedSz = 0; } - }; - - // 2-linked list of same-size cached blocks ordered by age (oldest on top) - // TODO: are we really want the list to be 2-linked? This allows us - // reduce memory consumption and do less operations under lock. - // TODO: try to switch to 32-bit logical time to save space in CacheBin - // and move bins to different cache lines. - class CacheBin { - private: - LargeMemoryBlock *first, - *last; - /* age of an oldest block in the list; equal to last->age, if last defined, - used for quick cheching it without acquiring the lock. */ - uintptr_t oldest; - /* currAge when something was excluded out of list because of the age, - not because of cache hit */ - uintptr_t lastCleanedAge; - /* Current threshold value for the blocks of a particular size. - Set on cache miss. */ - intptr_t ageThreshold; - - /* total size of all objects corresponding to the bin and allocated by user */ - size_t usedSize, - /* total size of all objects cached in the bin */ - cachedSize; - /* mean time of presence of block in the bin before successful reuse */ - intptr_t meanHitRange; - /* time of last get called for the bin */ - uintptr_t lastGet; - - /* The functor called by the aggregator for the operation list */ - class CacheBinFunctor { - CacheBin *const bin; - ExtMemoryPool *const extMemPool; - BinBitMask *const bitMask; - const int idx; - - LargeMemoryBlock *toRelease; - bool needCleanup; - uintptr_t currTime; - - /* Do preprocessing under the operation list. */ - /* All the OP_PUT_LIST operations are merged in the one operation. 
- All OP_GET operations are merged with the OP_PUT_LIST operations but - it demands the update of the moving average value in the bin. - Only the last OP_CLEAN_TO_THRESHOLD operation has sense. - The OP_CLEAN_ALL operation also should be performed only once. - Moreover it cancels the OP_CLEAN_TO_THRESHOLD operation. */ - class OperationPreprocessor { - // TODO: remove the dependency on CacheBin. - CacheBin *const bin; - - /* Contains the relative time in the operation list. - It counts in the reverse order since the aggregator also - provides operations in the reverse order. */ - uintptr_t lclTime; - - /* opGet contains only OP_GET operations which cannot be merge with OP_PUT operations - opClean contains all OP_CLEAN_TO_THRESHOLD and OP_CLEAN_ALL operations. */ - CacheBinOperation *opGet, *opClean; - /* The time of the last OP_CLEAN_TO_THRESHOLD operations */ - uintptr_t cleanTime; - - /* lastGetOpTime - the time of the last OP_GET operation. - lastGet - the same meaning as CacheBin::lastGet */ - uintptr_t lastGetOpTime, lastGet; - - /* The total sum of all usedSize decrements requested with CBOP_DECR_USED_SIZE operations. */ - size_t decrUsedSize; - - /* The list of blocks for the OP_PUT_LIST operation. */ - LargeMemoryBlock *head, *tail; - int putListNum; - - /* if the OP_CLEAN_ALL is requested. */ - bool isCleanAll; - - inline void commitOperation(CacheBinOperation *op) const; - inline void addOpToOpList(CacheBinOperation *op, CacheBinOperation **opList) const; - bool getFromPutList(CacheBinOperation* opGet, uintptr_t currTime); - void addToPutList( LargeMemoryBlock *head, LargeMemoryBlock *tail, int num ); - - public: - OperationPreprocessor(CacheBin *bin) : - bin(bin), lclTime(0), opGet(NULL), opClean(NULL), cleanTime(0), - lastGetOpTime(0), decrUsedSize(0), head(NULL), isCleanAll(false) {} - void operator()(CacheBinOperation* opList); - uintptr_t getTimeRange() const { return -lclTime; } - - friend class CacheBinFunctor; - }; - - public: - CacheBinFunctor(CacheBin *bin, ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx) : - bin(bin), extMemPool(extMemPool), bitMask(bitMask), idx(idx), toRelease(NULL), needCleanup(false) {} - void operator()(CacheBinOperation* opList); - - bool isCleanupNeeded() const { return needCleanup; } - LargeMemoryBlock *getToRelease() const { return toRelease; } - uintptr_t getCurrTime() const { return currTime; } - }; - - typename MallocAggregator<CacheBinOperation>::type aggregator; - - void ExecuteOperation(CacheBinOperation *op, ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx, bool longLifeTime = true); - /* ---------- unsafe methods used with the aggregator ---------- */ - void forgetOutdatedState(uintptr_t currTime); - LargeMemoryBlock *putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, int idx, int num); - LargeMemoryBlock *get(); - LargeMemoryBlock *cleanToThreshold(uintptr_t currTime, BinBitMask *bitMask, int idx); - LargeMemoryBlock *cleanAll(BinBitMask *bitMask, int idx); - void updateUsedSize(size_t size, BinBitMask *bitMask, int idx) { - if (!usedSize) bitMask->set(idx, true); - usedSize += size; - if (!usedSize && !first) bitMask->set(idx, false); - } - void updateMeanHitRange( intptr_t hitRange ) { - hitRange = hitRange >= 0 ? hitRange : 0; - meanHitRange = meanHitRange ? 
(meanHitRange + hitRange)/2 : hitRange; - } - void updateAgeThreshold( uintptr_t currTime ) { - if (lastCleanedAge) - ageThreshold = Props::OnMissFactor*(currTime - lastCleanedAge); - } - void updateCachedSize(size_t size) { cachedSize += size; } - void setLastGet( uintptr_t newLastGet ) { lastGet = newLastGet; } - /* -------------------------------------------------------- */ - - /* should be placed in zero-initialized memory, ctor not needed. */ - CacheBin(); - public: - void init() { memset(this, 0, sizeof(CacheBin)); } - void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *head, BinBitMask *bitMask, int idx); - LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx); - bool cleanToThreshold(ExtMemoryPool *extMemPool, BinBitMask *bitMask, uintptr_t currTime, int idx); - bool releaseAllToBackend(ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx); - void decrUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx); - - void decreaseThreshold() { - if (ageThreshold) - ageThreshold = (ageThreshold + meanHitRange)/2; - } - void updateBinsSummary(BinsSummary *binsSummary) const { - binsSummary->update(usedSize, cachedSize); - } - size_t getSize() const { return cachedSize; } - size_t getUsedSize() const { return usedSize; } - size_t reportStat(int num, FILE *f); - }; - - intptr_t tooLargeLOC; // how many times LOC was "too large" - // for fast finding of used bins and bins with non-zero usedSize; - // indexed from the end, as we need largest 1st - BinBitMask bitMask; - // bins with lists of recently freed large blocks cached for re-use - CacheBin bin[numBins]; + std::atomic<Block*> top; + MallocMutex lock; +}; + +/* + * When a block that is not completely free is returned for reuse by other threads + * this is where the block goes. + * + * LifoList assumes zero initialization; so below its constructors are omitted, + * to avoid linking with C++ libraries on Linux. 
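   For orientation, the LifoList declared above has the shape of a classic
   Treiber stack: push() swings the atomic `top` pointer with compare-and-swap,
   while the mutex serializes removals. One plausible sketch of the two
   operations, under the assumption that Block carries an intrusive `next`
   pointer (the bodies below are illustrative, not the patch's actual
   out-of-line definitions):

       void LifoList::push(Block* block) {
           Block* oldTop = top.load();
           do {
               block->next = oldTop;              // link in front of the current head
           } while (!top.compare_exchange_weak(oldTop, block));
       }

       Block* LifoList::pop() {
           MallocMutex::scoped_lock scoped(lock); // one popper at a time: avoids ABA
           Block* oldTop = top.load();
           while (oldTop && !top.compare_exchange_weak(oldTop, oldTop->next))
               ;                                  // a racing push() moved top; retry
           return oldTop;
       }

   Serializing poppers while leaving push() lock-free is what makes the plain
   CAS safe here: while the lock is held no other thread can unlink a node,
   so a block's `next` field cannot change behind the popper's back.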
+ */ +class OrphanedBlocks { + LifoList bins[numBlockBinLimit]; public: - static int sizeToIdx(size_t size) { - MALLOC_ASSERT(Props::MinSize <= size && size < Props::MaxSize, ASSERT_TEXT); - return (size-Props::MinSize)/Props::CacheStep; - } - static int getNumBins() { return numBins; } + Block *get(TLSData *tls, unsigned int size); + void put(intptr_t binTag, Block *block); + void reset(); + bool cleanup(Backend* backend); +}; - void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *largeBlock); - LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size); +/* Large objects entities */ +#include "large_objects.h" - void rollbackCacheState(ExtMemoryPool *extMemPool, size_t size); - bool regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currAge, bool doThreshDecr); - bool cleanAll(ExtMemoryPool *extMemPool); - void reset() { - tooLargeLOC = 0; - for (int i = numBins-1; i >= 0; i--) - bin[i].init(); - bitMask.reset(); - } -#if __TBB_MALLOC_LOCACHE_STAT - void reportStat(FILE *f); -#endif -#if __TBB_MALLOC_WHITEBOX_TEST - size_t getLOCSize() const; - size_t getUsedSize() const; -#endif +// select index size for BackRefMain based on word size: default is uint32_t, +// uint16_t for 32-bit platforms +template<bool> +struct MainIndexSelect { + typedef uint32_t main_type; }; -class LargeObjectCache { - static const size_t minLargeSize = 8*1024, - maxLargeSize = 8*1024*1024, - // There are benchmarks of interest that should work well with objects of this size - maxHugeSize = 129*1024*1024; -public: - // Difference between object sizes in large block bins - static const uint32_t largeBlockCacheStep = 8*1024, - hugeBlockCacheStep = 512*1024; -private: - typedef LargeObjectCacheProps<minLargeSize, maxLargeSize, largeBlockCacheStep, 2, 2, 16> LargeCacheTypeProps; - typedef LargeObjectCacheProps<maxLargeSize, maxHugeSize, hugeBlockCacheStep, 1, 1, 4> HugeCacheTypeProps; - typedef LargeObjectCacheImpl< LargeCacheTypeProps > LargeCacheType; - typedef LargeObjectCacheImpl< HugeCacheTypeProps > HugeCacheType; - - // beginning of largeCache is more actively used and smaller than hugeCache, - // so put hugeCache first to prevent false sharing - // with LargeObjectCache's predecessor - HugeCacheType hugeCache; - LargeCacheType largeCache; - - /* logical time, incremented on each put/get operation - To prevent starvation between pools, keep separately for each pool. - Overflow is OK, as we only want difference between - its current value and some recent. - - Both malloc and free should increment logical time, as in - a different case multiple cached blocks would have same age, - and accuracy of predictors suffers. 
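   A note on the logical-time scheme described above: since only differences
   between timestamps are ever used, unsigned wrap-around is harmless. A tiny
   standalone illustration of overflow-tolerant age arithmetic (hypothetical
   names, not part of the patch):

       #include <cstdint>

       typedef std::uintptr_t LogicalTime;

       // Well defined even after `now` wraps past zero: unsigned subtraction
       // is performed modulo 2^N, so the distance survives the overflow.
       inline LogicalTime age(LogicalTime now, LogicalTime stamp) {
           return now - stamp;
       }

       inline bool isOlderThan(LogicalTime now, LogicalTime stamp,
                               LogicalTime threshold) {
           return age(now, stamp) > threshold;
       }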
- */ - uintptr_t cacheCurrTime; - - // memory pool that owns this LargeObjectCache, - ExtMemoryPool *extMemPool; // strict 1:1 relation, never changed - - static int sizeToIdx(size_t size); -public: - void init(ExtMemoryPool *memPool) { extMemPool = memPool; } - void put(LargeMemoryBlock *largeBlock); - void putList(LargeMemoryBlock *head); - LargeMemoryBlock *get(size_t size); - - void rollbackCacheState(size_t size); - bool isCleanupNeededOnRange(uintptr_t range, uintptr_t currTime); - bool doCleanup(uintptr_t currTime, bool doThreshDecr); - - bool decreasingCleanup(); - bool regularCleanup(); - bool cleanAll(); - void reset() { - largeCache.reset(); - hugeCache.reset(); - } -#if __TBB_MALLOC_LOCACHE_STAT - void reportStat(FILE *f); -#endif -#if __TBB_MALLOC_WHITEBOX_TEST - size_t getLOCSize() const; - size_t getUsedSize() const; -#endif - static size_t alignToBin(size_t size) { - return size<maxLargeSize? alignUp(size, largeBlockCacheStep) - : alignUp(size, hugeBlockCacheStep); - } - - uintptr_t getCurrTime() { return (uintptr_t)AtomicIncrement((intptr_t&)cacheCurrTime); } - uintptr_t getCurrTimeRange(uintptr_t range) { return (uintptr_t)AtomicAdd((intptr_t&)cacheCurrTime, range)+1; } +template<> +struct MainIndexSelect<false> { + typedef uint16_t main_type; }; class BackRefIdx { // composite index to backreference array +public: + typedef MainIndexSelect<4 < sizeof(uintptr_t)>::main_type main_t; private: - uint16_t master; // index in BackRefMaster + static const main_t invalid = ~main_t(0); + main_t main; // index in BackRefMain uint16_t largeObj:1; // is this object "large"? uint16_t offset :15; // offset from beginning of BackRefBlock public: - BackRefIdx() : master((uint16_t)-1) {} - bool isInvalid() const { return master == (uint16_t)-1; } + BackRefIdx() : main(invalid), largeObj(0), offset(0) {} + bool isInvalid() const { return main == invalid; } bool isLargeObject() const { return largeObj; } - uint16_t getMaster() const { return master; } + main_t getMain() const { return main; } uint16_t getOffset() const { return offset; } +#if __TBB_USE_THREAD_SANITIZER + friend + __attribute__((no_sanitize("thread"))) + BackRefIdx dereference(const BackRefIdx* ptr) { + BackRefIdx idx; + idx.main = ptr->main; + idx.largeObj = ptr->largeObj; + idx.offset = ptr->offset; + return idx; + } +#else + friend + BackRefIdx dereference(const BackRefIdx* ptr) { + return *ptr; + } +#endif + // only newBackRef can modify BackRefIdx static BackRefIdx newBackRef(bool largeObj); }; @@ -556,10 +347,18 @@ class BackRefIdx { // composite index to backreference array // Block header is used during block coalescing // and must be preserved in used blocks. 
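// For illustration: the BackRefIdx introduced above packs a back reference
// into one word: a "main" index whose width MainIndexSelect picks from the
// platform word size, a 1-bit large-object flag, and a 15-bit offset. A
// standalone sketch of the width selection; the static_asserts are
// illustrative additions, not part of the patch:

#include <cstdint>

template<bool /*use the wide index*/>
struct MainIndexSelect { typedef std::uint32_t main_type; };

template<>
struct MainIndexSelect<false> { typedef std::uint16_t main_type; };

// 4 < sizeof(uintptr_t) holds exactly on 64-bit targets, which therefore
// get the 32-bit index; 32-bit targets take the uint16_t specialization.
typedef MainIndexSelect<4 < sizeof(std::uintptr_t)>::main_type main_t;

static_assert(sizeof(std::uintptr_t) != 8 || sizeof(main_t) == 4,
              "64-bit builds are expected to use a 32-bit main index");
static_assert(sizeof(std::uintptr_t) != 4 || sizeof(main_t) == 2,
              "32-bit builds are expected to use a 16-bit main index");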
class BlockI { +#if __clang__ && !__INTEL_COMPILER + // #pragma clang diagnostic push + // #pragma clang diagnostic ignored "-Wunused-private-field" +#endif intptr_t blockState[2]; +#if __clang__ && !__INTEL_COMPILER + // #pragma clang diagnostic pop // "-Wunused-private-field" +#endif }; struct LargeMemoryBlock : public BlockI { + MemoryPool *pool; // owner pool LargeMemoryBlock *next, // ptrs in list of cached blocks *prev, // 2-linked list of pool's large objects @@ -569,300 +368,195 @@ struct LargeMemoryBlock : public BlockI { *gNext; uintptr_t age; // age of block while in cache size_t objectSize; // the size requested by a client - size_t unalignedSize; // the size requested from getMemory + size_t unalignedSize; // the size requested from backend BackRefIdx backRefIdx; // cached here, used copy is in LargeObjectHdr }; -// global state of blocks currently in processing -class BackendSync { - // Class instances should reside in zero-initialized memory! - // The number of blocks currently removed from a bin and not returned back - intptr_t blocksInProcessing; // to another - intptr_t binsModifications; // incremented on every bin modification +// Classes and methods for backend.cpp +#include "backend.h" + +// An TBB allocator mode that can be controlled by user +// via API/environment variable. Must be placed in zero-initialized memory. +// External synchronization assumed. +// TODO: TBB_VERSION support +class AllocControlledMode { + intptr_t val; + bool setDone; + public: - void blockConsumed() { AtomicIncrement(blocksInProcessing); } - void binsModified() { AtomicIncrement(binsModifications); } - void blockReleased() { -#if __TBB_MALLOC_BACKEND_STAT - MALLOC_ITT_SYNC_RELEASING(&blocksInProcessing); -#endif - AtomicIncrement(binsModifications); - intptr_t prev = AtomicAdd(blocksInProcessing, -1); - MALLOC_ASSERT(prev > 0, ASSERT_TEXT); - suppress_unused_warning(prev); + intptr_t get() const { + MALLOC_ASSERT(setDone, ASSERT_TEXT); + return val; } - intptr_t getNumOfMods() const { return FencedLoad(binsModifications); } - // return true if need re-do the blocks search - bool waitTillBlockReleased(intptr_t startModifiedCnt) { -#if __TBB_MALLOC_BACKEND_STAT - MALLOC_ITT_SYNC_PREPARE(&blocksInProcessing); -#endif - for (intptr_t myBlocksNum = FencedLoad(blocksInProcessing); - // no blocks in processing, stop waiting - myBlocksNum; ) { - SpinWaitWhileEq(blocksInProcessing, myBlocksNum); - WhiteboxTestingYield(); - intptr_t newBlocksNum = FencedLoad(blocksInProcessing); - // stop waiting iff blocks were removed from processing, - // if blocks were added, there is no reason to stop waiting - if (newBlocksNum < myBlocksNum) - break; - myBlocksNum = newBlocksNum; - } -#if __TBB_MALLOC_BACKEND_STAT - MALLOC_ITT_SYNC_ACQUIRED(&blocksInProcessing); -#endif - // were bins modified since scanned? 
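/* An aside on the pattern at work here: binsModifications serves as a
   version counter. A caller snapshots it before scanning the bins and
   compares afterwards to learn whether it raced with a concurrent change.
   A minimal standalone sketch of the same idea with std::atomic
   (hypothetical names, not the allocator's own types):

       #include <atomic>

       struct VersionedBins {
           std::atomic<long> modifications{0};

           long snapshot() const {
               return modifications.load(std::memory_order_acquire);
           }
           void modify() {                      // call on every bin update
               modifications.fetch_add(1, std::memory_order_release);
           }
           bool changedSince(long snap) const { // true => redo the search
               return modifications.load(std::memory_order_acquire) != snap;
           }
       };

   Typical use, mirroring waitTillBlockReleased(): take snapshot(), scan the
   free bins, then redo the search only if changedSince() reports a change. */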
- return startModifiedCnt != getNumOfMods(); + + // Note: set() can be called before init() + void set(intptr_t newVal) { + val = newVal; + setDone = true; } -}; -class CoalRequestQ { // queue of free blocks that coalescing was delayed -private: - FreeBlock *blocksToFree; -public: - FreeBlock *getAll(); // return current list of blocks and make queue empty - void putBlock(FreeBlock *fBlock); -}; + bool ready() const { + return setDone; + } -class MemExtendingSema { - intptr_t active; -public: - bool wait() { - bool rescanBins = false; - // up to 3 threads can add more memory from OS simultaneously, - // rest of threads have to wait - for (;;) { - intptr_t prevCnt = FencedLoad(active); - if (prevCnt < 3) { - intptr_t n = AtomicCompareExchange(active, prevCnt+1, prevCnt); - if (n == prevCnt) - break; - } else { - SpinWaitWhileEq(active, prevCnt); - rescanBins = true; - break; - } + // envName - environment variable to get controlled mode + void initReadEnv(const char *envName, intptr_t defaultVal) { + if (!setDone) { + // unreferenced formal parameter warning + tbb::detail::suppress_unused_warning(envName); +#if !__TBB_WIN8UI_SUPPORT + // TODO: use strtol to get the actual value of the envirable + const char *envVal = getenv(envName); + if (envVal && !strcmp(envVal, "1")) + val = 1; + else +#endif + val = defaultVal; + setDone = true; } - return rescanBins; } - void signal() { AtomicAdd(active, -1); } }; -enum MemRegionType { - // The region does not guarantee the block size. - MEMREG_FLEXIBLE_SIZE = 0, - // The region can hold exact number of blocks with the size of the - // first reqested block. - MEMREG_SEVERAL_BLOCKS, - // The region holds only one block with a reqested size. - MEMREG_ONE_BLOCK +// Page type to be used inside MapMemory. +// Regular (4KB aligned), Huge and Transparent Huge Pages (2MB aligned). +enum PageType { + REGULAR = 0, + PREALLOCATED_HUGE_PAGE, + TRANSPARENT_HUGE_PAGE }; -class Backend { +// init() and printStatus() is called only under global initialization lock. +// Race is possible between registerAllocation() and registerReleasing(), +// harm is that up to single huge page releasing is missed (because failure +// to get huge page is registered only 1st time), that is negligible. +// setMode is also can be called concurrently. +// Object must reside in zero-initialized memory +// TODO: can we check for huge page presence during every 10th mmap() call +// in case huge page is released by another process? +class HugePagesStatus { private: -/* Blocks in range [minBinnedSize; getMaxBinnedSize()] are kept in bins, - one region can contains several blocks. Larger blocks are allocated directly - and one region always contains one block. -*/ - enum { - minBinnedSize = 8*1024UL, - /* If huge pages are available, maxBinned_HugePage used. - If not, maxBinned_SmallPage is the threshold. - TODO: use pool's granularity for upper bound setting.*/ - maxBinned_SmallPage = 1024*1024UL, - // TODO: support other page sizes - maxBinned_HugePage = 4*1024*1024UL - }; - enum { - VALID_BLOCK_IN_BIN = 1 // valid block added to bin, not returned as result - }; -public: - static const int freeBinsNum = - (maxBinned_HugePage-minBinnedSize)/LargeObjectCache::largeBlockCacheStep + 1; - - // if previous access missed per-thread slabs pool, - // allocate numOfSlabAllocOnMiss blocks in advance - static const int numOfSlabAllocOnMiss = 2; - - enum { - NO_BIN = -1, - HUGE_BIN = freeBinsNum-1 - }; - - // Bin keeps 2-linked list of free blocks. 
It must be 2-linked - // because during coalescing a block it's removed from a middle of the list. - struct Bin { - FreeBlock *head, - *tail; - MallocMutex tLock; - - void removeBlock(FreeBlock *fBlock); - void reset() { head = tail = 0; } -#if __TBB_MALLOC_BACKEND_STAT - size_t countFreeBlocks(); - void reportStat(FILE *f); -#endif - bool empty() const { return !head; } - }; - - typedef BitMaskMin<Backend::freeBinsNum> BitMaskBins; - - // array of bins supplemented with bitmask for fast finding of non-empty bins - class IndexedBins { - BitMaskBins bitMask; - Bin freeBins[Backend::freeBinsNum]; - FreeBlock *getFromBin(int binIdx, BackendSync *sync, size_t size, - bool resSlabAligned, bool alignedBin, bool wait, - int *resLocked); - public: - FreeBlock *findBlock(int nativeBin, BackendSync *sync, size_t size, - bool resSlabAligned, bool alignedBin, int *numOfLockedBins); - bool tryReleaseRegions(int binIdx, Backend *backend); - void lockRemoveBlock(int binIdx, FreeBlock *fBlock); - void addBlock(int binIdx, FreeBlock *fBlock, size_t blockSz, bool addToTail); - bool tryAddBlock(int binIdx, FreeBlock *fBlock, bool addToTail); - int getMinNonemptyBin(unsigned startBin) const { - int p = bitMask.getMinTrue(startBin); - return p == -1 ? Backend::freeBinsNum : p; + AllocControlledMode requestedMode; // changed only by user + // to keep enabled and requestedMode consistent + MallocMutex setModeLock; + size_t pageSize; + std::atomic<intptr_t> needActualStatusPrint; + + static void doPrintStatus(bool state, const char *stateName) { + // Under macOS* fprintf/snprintf acquires an internal lock, so when + // 1st allocation is done under the lock, we got a deadlock. + // Do not use fprintf etc during initialization. + fputs("TBBmalloc: huge pages\t", stderr); + if (!state) + fputs("not ", stderr); + fputs(stateName, stderr); + fputs("\n", stderr); + } + + void parseSystemMemInfo() { + bool hpAvailable = false; + bool thpAvailable = false; + long long hugePageSize = -1; + +#if __unix__ + // Check huge pages existence + long long meminfoHugePagesTotal = 0; + + parseFileItem meminfoItems[] = { + // Parse system huge page size + { "Hugepagesize: %lld kB", hugePageSize }, + // Check if there are preallocated huge pages on the system + // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt + { "HugePages_Total: %lld", meminfoHugePagesTotal } }; + + parseFile</*BUFF_SIZE=*/100>("/proc/meminfo", meminfoItems); + + // Double check another system information regarding preallocated + // huge pages if there are no information in /proc/meminfo + long long vmHugePagesTotal = 0; + + parseFileItem vmItem[] = { { "%lld", vmHugePagesTotal } }; + + // We parse a counter number, it can't be huge + parseFile</*BUFF_SIZE=*/100>("/proc/sys/vm/nr_hugepages", vmItem); + + if (hugePageSize > -1 && (meminfoHugePagesTotal > 0 || vmHugePagesTotal > 0)) { + MALLOC_ASSERT(hugePageSize != 0, "Huge Page size can't be zero if we found preallocated."); + + // Any non zero value clearly states that there are preallocated + // huge pages on the system + hpAvailable = true; } - void verify(); -#if __TBB_MALLOC_BACKEND_STAT - void reportStat(FILE *f); -#endif - void reset(); - }; -private: - class AdvRegionsBins { - BitMaskBins bins; - public: - void registerBin(int regBin) { bins.set(regBin, 1); } - int getMinUsedBin(int start) const { return bins.getMinTrue(start); } - void reset() { bins.reset(); } - }; - // auxiliary class to atomic maximum request finding - class MaxRequestComparator { - const Backend *backend; - public: - 
MaxRequestComparator(const Backend *be) : backend(be) {} - inline bool operator()(size_t oldMaxReq, size_t requestSize) const; - }; - - ExtMemoryPool *extMemPool; - // used for release every region on pool destroying - MemRegion *regionList; - MallocMutex regionListLock; - - CoalRequestQ coalescQ; // queue of coalescing requests - BackendSync bkndSync; - // semaphore protecting adding more more memory from OS - MemExtendingSema memExtendingSema; - size_t totalMemSize, - memSoftLimit; - // Fixed pools request memory once per lifetime, during pool_create. - // Status of memory acquisition for such pool keeps here. - // So value is changed only for fixed pools, and without synchronization, - // as pool is not available till returning from pool_create. - bool rawMemReceived; - - // Using of maximal observed requested size allows decrease - // memory consumption for small requests and decrease fragmentation - // for workloads when small and large allocation requests are mixed. - // TODO: decrease, not only increase it - size_t maxRequestedSize; - - FreeBlock *addNewRegion(size_t size, MemRegionType type, bool addToBin); - FreeBlock *findBlockInRegion(MemRegion *region, size_t exactBlockSize); - void startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin); - void releaseRegion(MemRegion *region); - - FreeBlock *askMemFromOS(size_t totalReqSize, intptr_t startModifiedCnt, - int *lockedBinsThreshold, int numOfLockedBins, - bool *splittable); - FreeBlock *genericGetBlock(int num, size_t size, bool resSlabAligned); - void genericPutBlock(FreeBlock *fBlock, size_t blockSz); - FreeBlock *splitUnalignedBlock(FreeBlock *fBlock, int num, size_t size, - bool needAlignedRes); - FreeBlock *splitAlignedBlock(FreeBlock *fBlock, int num, size_t size, - bool needAlignedRes); - - FreeBlock *doCoalesc(FreeBlock *fBlock, MemRegion **memRegion); - bool coalescAndPutList(FreeBlock *head, bool forceCoalescQDrop); - bool scanCoalescQ(bool forceCoalescQDrop); - void coalescAndPut(FreeBlock *fBlock, size_t blockSz); - - void removeBlockFromBin(FreeBlock *fBlock); - - void *allocRawMem(size_t &size) const; - void freeRawMem(void *object, size_t size) const; - - void putLargeBlock(LargeMemoryBlock *lmb); - void releaseCachesToLimit(); -public: - void verify(); -#if __TBB_MALLOC_BACKEND_STAT - void reportStat(FILE *f); + // Check if there is transparent huge pages support on the system + long long thpPresent = 'n'; + parseFileItem thpItem[] = { { "[alwa%cs] madvise never\n", thpPresent } }; + parseFile</*BUFF_SIZE=*/100>("/sys/kernel/mm/transparent_hugepage/enabled", thpItem); + + if (hugePageSize > -1 && thpPresent == 'y') { + MALLOC_ASSERT(hugePageSize != 0, "Huge Page size can't be zero if we found thp existence."); + thpAvailable = true; + } #endif - bool bootstrap(ExtMemoryPool *extMemoryPool); - void reset(); - bool destroy(); - bool clean(); // clean on caches cleanup + MALLOC_ASSERT(!pageSize, "Huge page size can't be set twice. 
Double initialization."); - BlockI *getSlabBlock(int num) { - BlockI *b = (BlockI*) - genericGetBlock(num, slabSize, /*resSlabAligned=*/true); - MALLOC_ASSERT(isAligned(b, slabSize), ASSERT_TEXT); - return b; - } - void putSlabBlock(BlockI *block) { - genericPutBlock((FreeBlock *)block, slabSize); + // Initialize object variables + if (hugePageSize > -1) { + pageSize = hugePageSize * 1024; // was read in KB from meminfo + } else { + pageSize = 0; + } + isHPAvailable = hpAvailable; + isTHPAvailable = thpAvailable; } - void *getBackRefSpace(size_t size, bool *rawMemUsed); - void putBackRefSpace(void *b, size_t size, bool rawMemUsed); - bool inUserPool() const; - - LargeMemoryBlock *getLargeBlock(size_t size); - void returnLargeObject(LargeMemoryBlock *lmb); +public: - void setRecommendedMaxSize(size_t softLimit) { - memSoftLimit = softLimit; - releaseCachesToLimit(); - } - inline size_t getMaxBinnedSize() const; + // System information + bool isHPAvailable; + bool isTHPAvailable; -#if __TBB_MALLOC_WHITEBOX_TEST - size_t getTotalMemSize() const { return totalMemSize; } -#endif -private: - static int sizeToBin(size_t size) { - if (size >= maxBinned_HugePage) - return HUGE_BIN; - else if (size < minBinnedSize) - return NO_BIN; + // User defined value + bool isEnabled; - int bin = (size - minBinnedSize)/LargeObjectCache::largeBlockCacheStep; + void init() { + parseSystemMemInfo(); + MallocMutex::scoped_lock lock(setModeLock); + requestedMode.initReadEnv("TBB_MALLOC_USE_HUGE_PAGES", 0); + isEnabled = (isHPAvailable || isTHPAvailable) && requestedMode.get(); + } - MALLOC_ASSERT(bin < HUGE_BIN, "Invalid size."); - return bin; + // Could be set from user code at any place. + // If we didn't call init() at this place, isEnabled will be false + void setMode(intptr_t newVal) { + MallocMutex::scoped_lock lock(setModeLock); + requestedMode.set(newVal); + isEnabled = (isHPAvailable || isTHPAvailable) && newVal; } -#if __TBB_MALLOC_BACKEND_STAT - static size_t binToSize(int bin) { - MALLOC_ASSERT(bin <= HUGE_BIN, "Invalid bin."); - return bin*LargeObjectCache::largeBlockCacheStep + minBinnedSize; + void reset() { + needActualStatusPrint.store(0, std::memory_order_relaxed); + pageSize = 0; + isEnabled = isHPAvailable = isTHPAvailable = false; } -#endif - static bool toAlignedBin(FreeBlock *block, size_t size) { - return isAligned((char*)block+size, slabSize) - && size >= slabSize; + + // If memory mapping size is a multiple of huge page size, some OS kernels + // can use huge pages transparently. Use this when huge pages are requested. + size_t getGranularity() const { + if (requestedMode.ready()) + return requestedMode.get() ? pageSize : 0; + else + return HUGE_PAGE_SIZE; // the mode is not yet known; assume typical 2MB huge pages } - // register bins related to advance regions - AdvRegionsBins advRegBins; - IndexedBins freeLargeBins, - freeAlignedBins; + void printStatus() { + doPrintStatus(requestedMode.get(), "requested"); + if (requestedMode.get()) { // report actual status iff requested + if (pageSize) + needActualStatusPrint.store(1, std::memory_order_release); + else + doPrintStatus(/*state=*/false, "available"); + } + } }; class AllLargeBlocksList { @@ -878,9 +572,11 @@ struct ExtMemoryPool { Backend backend; LargeObjectCache loc; AllLocalCaches allLocalCaches; + OrphanedBlocks orphanedBlocks; intptr_t poolId; - // to find all large objects + // To find all large objects. Used during user pool destruction, + // to release all backreferences in large blocks (slab blocks do not have them). 
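/* An illustration of the HugePagesStatus::getGranularity() logic above: the
   pool asks the backend for mappings in multiples of the huge page size
   whenever huge pages are requested, so the kernel may back them with huge
   pages transparently. A compressed standalone sketch; the wrapper function
   names are hypothetical, while the environment variable and the "1" check
   mirror initReadEnv():

       #include <cstddef>
       #include <cstdlib>
       #include <cstring>

       const std::size_t DEFAULT_HUGE_PAGE_SIZE = 2 * 1024 * 1024; // typical 2MB

       bool hugePagesRequested() {
           const char* env = std::getenv("TBB_MALLOC_USE_HUGE_PAGES");
           return env && 0 == std::strcmp(env, "1");
       }

       // 0 means plain pages; nonzero asks for huge-page-sized multiples.
       std::size_t chooseGranularity(std::size_t systemHugePageSize) {
           if (!hugePagesRequested())
               return 0;
           return systemHugePageSize ? systemHugePageSize
                                     : DEFAULT_HUGE_PAGE_SIZE; // not probed yet
       }
*/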
AllLargeBlocksList lmbList; // Callbacks to be used instead of MapMemory/UnmapMemory. rawAllocType rawAlloc; @@ -892,9 +588,12 @@ struct ExtMemoryPool { fixedPool; TLSKey tlsPointerKey; // per-pool TLS key + std::atomic<int> softCachesCleanupInProgress; + std::atomic<int> hardCachesCleanupInProgress; + bool init(intptr_t poolId, rawAllocType rawAlloc, rawFreeType rawFree, size_t granularity, bool keepAllMemory, bool fixedPool); - void initTLS(); + bool initTLS(); // i.e., not system default pool for scalable_malloc/scalable_free bool userPool() const { return rawAlloc; } @@ -902,29 +601,42 @@ struct ExtMemoryPool { // true if something has been released bool softCachesCleanup(); bool releaseAllLocalCaches(); - bool hardCachesCleanup(); - void reset() { + bool hardCachesCleanup(bool wait); + void *remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment); + bool reset() { loc.reset(); allLocalCaches.reset(); - tlsPointerKey.~TLSKey(); + orphanedBlocks.reset(); + bool ret = tlsPointerKey.destroy(); backend.reset(); - } - void destroy() { - loc.reset(); - allLocalCaches.reset(); + return ret; + } + bool destroy() { + MALLOC_ASSERT(isPoolValid(), + "Possible double pool_destroy or heap corruption"); + if (!userPool()) { + loc.reset(); + allLocalCaches.reset(); + } // pthread_key_dtors must be disabled before memory unmapping // TODO: race-free solution - tlsPointerKey.~TLSKey(); + bool ret = tlsPointerKey.destroy(); if (rawFree || !userPool()) - backend.destroy(); + ret &= backend.destroy(); + // pool is not valid after this point + granularity = 0; + return ret; } - bool mustBeAddedToGlobalLargeBlockList() const { return userPool(); } void delayRegionsReleasing(bool mode) { delayRegsReleasing = mode; } inline bool regionsAreReleaseable() const; - LargeMemoryBlock *mallocLargeObject(size_t allocationSize); + LargeMemoryBlock *mallocLargeObject(MemoryPool *pool, size_t allocationSize); void freeLargeObject(LargeMemoryBlock *lmb); void freeLargeObjectList(LargeMemoryBlock *head); +#if MALLOC_DEBUG + // use granulatity as marker for pool validity + bool isPoolValid() const { return granularity; } +#endif }; inline bool Backend::inUserPool() const { return extMemPool->userPool(); } @@ -940,87 +652,15 @@ struct FreeObject { FreeObject *next; }; -// An TBB allocator mode that can be controlled by user -// via API/environment variable. Must be placed in zero-initialized memory. -// External synchronization assumed. -// TODO: TBB_VERSION support -class AllocControlledMode { - intptr_t val; - bool setDone; -public: - intptr_t get() const { - MALLOC_ASSERT(setDone, ASSERT_TEXT); - return val; - } - void set(intptr_t newVal) { // note set() can be called before init() - val = newVal; - setDone = true; - } - // envName - environment variable to get controlled mode - void initReadEnv(const char *envName, intptr_t defaultVal); -}; - -// init() and printStatus() is called only under global initialization lock. -// Race is possible between registerAllocation() and registerReleasing(), -// harm is that up to single huge page releasing is missed (because failure -// to get huge page is registered only 1st time), that is negligible. -// setMode is also can be called concurrently. 
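// An aside on the pool-validity trick in ExtMemoryPool above: destroy()
// zeroes `granularity` once the pool is torn down, so isPoolValid() can
// cheaply flag double destruction or heap corruption. A standalone sketch
// of the same sentinel-field pattern (hypothetical names):

#include <cassert>
#include <cstddef>

struct Pool {
    std::size_t granularity;    // nonzero exactly while the pool is alive

    explicit Pool(std::size_t g) : granularity(g) { assert(g != 0); }

    bool isValid() const { return granularity != 0; }

    void destroy() {
        assert(isValid() && "possible double pool_destroy or heap corruption");
        // ... release caches, TLS keys, and backend memory here ...
        granularity = 0;        // mark the object invalid from here on
    }
};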
-// Object must reside in zero-initialized memory -class HugePagesStatus { -private: - AllocControlledMode requestedMode; // changed only by user - // to keep enabled and requestedMode consistent - MallocMutex setModeLock; - size_t pageSize; - intptr_t needActualStatusPrint; - - static void doPrintStatus(bool state, const char *stateName); -public: - // both variables are changed only inside HugePagesStatus - intptr_t enabled; - // Have we got huge pages at all? It's used when large hugepage-aligned - // region is releasing, to find can it release some huge pages or not. - intptr_t wasObserved; - - size_t getSize() const { - MALLOC_ASSERT(pageSize, ASSERT_TEXT); - return pageSize; - } - void printStatus(); - void registerAllocation(bool available); - void registerReleasing(size_t size); - - void init(size_t hugePageSize) { - MALLOC_ASSERT(!hugePageSize || isPowerOfTwo(hugePageSize), - "Only memory pages of a power-of-two size are supported."); - MALLOC_ASSERT(!pageSize, "Huge page size can't be set twice."); - pageSize = hugePageSize; - - MallocMutex::scoped_lock lock(setModeLock); - requestedMode.initReadEnv("TBB_MALLOC_USE_HUGE_PAGES", 0); - enabled = pageSize && requestedMode.get(); - } - void setMode(intptr_t newVal) { - MallocMutex::scoped_lock lock(setModeLock); - requestedMode.set(newVal); - enabled = pageSize && newVal; - } - void reset() { - pageSize = 0; - needActualStatusPrint = enabled = wasObserved = 0; - } -}; - -extern HugePagesStatus hugePages; /******* A helper class to support overriding malloc with scalable_malloc *******/ #if MALLOC_CHECK_RECURSION class RecursiveMallocCallProtector { // pointer to an automatic data of holding thread - static void *autoObjPtr; + static std::atomic<void*> autoObjPtr; static MallocMutex rmc_mutex; - static pthread_t owner_thread; + static std::atomic<pthread_t> owner_thread; /* Under FreeBSD 8.0 1st call to any pthread function including pthread_self leads to pthread initialization, that causes malloc calls. As 1st usage of RecursiveMallocCallProtector can be before pthread initialized, pthread calls @@ -1042,32 +682,31 @@ class RecursiveMallocCallProtector { MallocMutex::scoped_lock* lock_acquired; char scoped_lock_space[sizeof(MallocMutex::scoped_lock)+1]; - - static uintptr_t absDiffPtr(void *x, void *y) { - uintptr_t xi = (uintptr_t)x, yi = (uintptr_t)y; - return xi > yi ? xi - yi : yi - xi; - } + public: - - RecursiveMallocCallProtector() : lock_acquired(NULL) { + RecursiveMallocCallProtector() : lock_acquired(nullptr) { lock_acquired = new (scoped_lock_space) MallocMutex::scoped_lock( rmc_mutex ); if (canUsePthread) - owner_thread = pthread_self(); - autoObjPtr = &scoped_lock_space; + owner_thread.store(pthread_self(), std::memory_order_relaxed); + autoObjPtr.store(&scoped_lock_space, std::memory_order_relaxed); } + + RecursiveMallocCallProtector(RecursiveMallocCallProtector&) = delete; + RecursiveMallocCallProtector& operator=(RecursiveMallocCallProtector) = delete; + ~RecursiveMallocCallProtector() { if (lock_acquired) { - autoObjPtr = NULL; + autoObjPtr.store(nullptr, std::memory_order_relaxed); lock_acquired->~scoped_lock(); } } static bool sameThreadActive() { - if (!autoObjPtr) // fast path + if (!autoObjPtr.load(std::memory_order_relaxed)) // fast path return false; // Some thread has an active recursive call protector; check if the current one. 
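/* The check that follows combines two tests: an exact one, pthread_equal()
   against the recorded owner thread, and a fallback heuristic that treats
   the protector as belonging to the current thread when a local variable
   lies within one stack's distance of the recorded automatic object. A
   standalone sketch of the heuristic alone; the function is hypothetical,
   but the 2MB bound matches the threadStackSz constant used below:

       #include <atomic>
       #include <cstdint>

       static std::atomic<void*> guardAddr{nullptr}; // address of the guard local

       bool probablySameThread() {
           void* p = guardAddr.load(std::memory_order_relaxed);
           if (!p)
               return false;                         // no protector is active
           int probe;                                // lives on the caller's stack
           std::uintptr_t a = (std::uintptr_t)p;
           std::uintptr_t b = (std::uintptr_t)&probe;
           std::uintptr_t diff = a > b ? a - b : b - a;
           return diff < 2u * 1024u * 1024u;         // within one thread's stack?
       }
*/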
// Exact pthread_self based test if (canUsePthread) { - if (pthread_equal( owner_thread, pthread_self() )) { + if (pthread_equal( owner_thread.load(std::memory_order_relaxed), pthread_self() )) { mallocRecursionDetected = true; return true; } else @@ -1076,9 +715,13 @@ class RecursiveMallocCallProtector { // inexact stack size based test const uintptr_t threadStackSz = 2*1024*1024; int dummy; - return absDiffPtr(autoObjPtr, &dummy)<threadStackSz; + + uintptr_t xi = (uintptr_t)autoObjPtr.load(std::memory_order_relaxed), yi = (uintptr_t)&dummy; + uintptr_t diffPtr = xi > yi ? xi - yi : yi - xi; + + return diffPtr < threadStackSz; } - static bool noRecursion(); + /* The function is called on 1st scalable_malloc call to check if malloc calls scalable_malloc (nested call must set mallocRecursionDetected). */ static void detectNaiveOverload() { @@ -1088,7 +731,7 @@ class RecursiveMallocCallProtector { is already on, so can do it. */ if (!canUsePthread) { canUsePthread = true; - owner_thread = pthread_self(); + owner_thread.store(pthread_self(), std::memory_order_relaxed); } #endif free(malloc(1)); @@ -1106,14 +749,10 @@ class RecursiveMallocCallProtector { #endif /* MALLOC_CHECK_RECURSION */ -bool isMallocInitializedExt(); - -bool isLargeObject(void *object); - unsigned int getThreadId(); -bool initBackRefMaster(Backend *backend); -void destroyBackRefMaster(Backend *backend); +bool initBackRefMain(Backend *backend); +void destroyBackRefMain(Backend *backend); void removeBackRef(BackRefIdx backRefIdx); void setBackRef(BackRefIdx backRefIdx, void *newPtr); void *getBackRef(BackRefIdx backRefIdx); diff --git a/src/tbb/src/tbbmalloc/tbbmalloc_internal_api.h b/src/tbb/src/tbbmalloc/tbbmalloc_internal_api.h index 053e3bac2..0b36c8556 100644 --- a/src/tbb/src/tbbmalloc/tbbmalloc_internal_api.h +++ b/src/tbb/src/tbbmalloc/tbbmalloc_internal_api.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_tbbmalloc_internal_api_H @@ -31,7 +27,8 @@ typedef enum { TBBMALLOC_INTERNAL_SOURCE_INCLUDED = 65536 } AllocationModeInternalParam; -void __TBB_mallocProcessShutdownNotification(); +void MallocInitializeITT(); +void __TBB_mallocProcessShutdownNotification(bool); #if _WIN32||_WIN64 void __TBB_mallocThreadShutdownNotification(); #endif diff --git a/src/tbb/src/tbbmalloc/win32-gcc-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/win32-gcc-tbbmalloc-export.def deleted file mode 100644 index ae7b5815f..000000000 --- a/src/tbb/src/tbbmalloc/win32-gcc-tbbmalloc-export.def +++ /dev/null @@ -1,51 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -{ -global: -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -scalable_msize; -scalable_allocation_mode; -scalable_allocation_command; -__TBB_malloc_safer_free; -__TBB_malloc_safer_realloc; -__TBB_malloc_safer_msize; -__TBB_malloc_safer_aligned_msize; -__TBB_malloc_safer_aligned_realloc; -/* memory pool stuff */ -_ZN3rml10pool_resetEPNS_10MemoryPoolE; -_ZN3rml11pool_createEiPKNS_13MemPoolPolicyE; -_ZN3rml14pool_create_v1EiPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; -_ZN3rml11pool_mallocEPNS_10MemoryPoolEj; -_ZN3rml12pool_destroyEPNS_10MemoryPoolE; -_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; -_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvj; -_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvjj; -_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEjj; - -local:*; -}; diff --git a/src/tbb/src/tbbmalloc/win32-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/win32-tbbmalloc-export.def deleted file mode 100644 index bda89b955..000000000 --- a/src/tbb/src/tbbmalloc/win32-tbbmalloc-export.def +++ /dev/null @@ -1,46 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. 
-; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -EXPORTS - -; frontend.cpp -scalable_calloc -scalable_free -scalable_malloc -scalable_realloc -scalable_posix_memalign -scalable_aligned_malloc -scalable_aligned_realloc -scalable_aligned_free -scalable_msize -scalable_allocation_mode -scalable_allocation_command -__TBB_malloc_safer_free -__TBB_malloc_safer_realloc -__TBB_malloc_safer_msize -__TBB_malloc_safer_aligned_msize -__TBB_malloc_safer_aligned_realloc -?pool_create@rml@@YAPAVMemoryPool@1@HPBUMemPoolPolicy@1@@Z -?pool_create_v1@rml@@YA?AW4MemPoolError@1@HPBUMemPoolPolicy@1@PAPAVMemoryPool@1@@Z -?pool_destroy@rml@@YA_NPAVMemoryPool@1@@Z -?pool_malloc@rml@@YAPAXPAVMemoryPool@1@I@Z -?pool_free@rml@@YA_NPAVMemoryPool@1@PAX@Z -?pool_reset@rml@@YA_NPAVMemoryPool@1@@Z -?pool_realloc@rml@@YAPAXPAVMemoryPool@1@PAXI@Z -?pool_aligned_realloc@rml@@YAPAXPAVMemoryPool@1@PAXII@Z -?pool_aligned_malloc@rml@@YAPAXPAVMemoryPool@1@II@Z diff --git a/src/tbb/src/tbbmalloc/win64-gcc-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/win64-gcc-tbbmalloc-export.def deleted file mode 100644 index f25bd66a9..000000000 --- a/src/tbb/src/tbbmalloc/win64-gcc-tbbmalloc-export.def +++ /dev/null @@ -1,51 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -{ -global: -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -scalable_msize; -scalable_allocation_mode; -scalable_allocation_command; -__TBB_malloc_safer_free; -__TBB_malloc_safer_realloc; -__TBB_malloc_safer_msize; -__TBB_malloc_safer_aligned_msize; -__TBB_malloc_safer_aligned_realloc; -/* memory pool stuff */ -_ZN3rml10pool_resetEPNS_10MemoryPoolE; -_ZN3rml11pool_createExPKNS_13MemPoolPolicyE; -_ZN3rml14pool_create_v1ExPKNS_13MemPoolPolicyEPPNS_10MemoryPoolE; -_ZN3rml11pool_mallocEPNS_10MemoryPoolEy; -_ZN3rml12pool_destroyEPNS_10MemoryPoolE; -_ZN3rml9pool_freeEPNS_10MemoryPoolEPv; -_ZN3rml12pool_reallocEPNS_10MemoryPoolEPvy; -_ZN3rml20pool_aligned_reallocEPNS_10MemoryPoolEPvyy; -_ZN3rml19pool_aligned_mallocEPNS_10MemoryPoolEyy; - -local:*; -}; diff --git a/src/tbb/src/tbbmalloc/win64-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/win64-tbbmalloc-export.def deleted file mode 100644 index 135639328..000000000 --- a/src/tbb/src/tbbmalloc/win64-tbbmalloc-export.def +++ /dev/null @@ -1,47 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. 
- -EXPORTS - -; frontend.cpp -scalable_calloc -scalable_free -scalable_malloc -scalable_realloc -scalable_posix_memalign -scalable_aligned_malloc -scalable_aligned_realloc -scalable_aligned_free -scalable_msize -scalable_allocation_mode -scalable_allocation_command -__TBB_malloc_safer_free -__TBB_malloc_safer_realloc -__TBB_malloc_safer_msize -__TBB_malloc_safer_aligned_msize -__TBB_malloc_safer_aligned_realloc -; memory pool stuff -?pool_create@rml@@YAPEAVMemoryPool@1@_JPEBUMemPoolPolicy@1@@Z -?pool_create_v1@rml@@YA?AW4MemPoolError@1@_JPEBUMemPoolPolicy@1@PEAPEAVMemoryPool@1@@Z -?pool_destroy@rml@@YA_NPEAVMemoryPool@1@@Z -?pool_malloc@rml@@YAPEAXPEAVMemoryPool@1@_K@Z -?pool_free@rml@@YA_NPEAVMemoryPool@1@PEAX@Z -?pool_reset@rml@@YA_NPEAVMemoryPool@1@@Z -?pool_realloc@rml@@YAPEAXPEAVMemoryPool@1@PEAX_K@Z -?pool_aligned_realloc@rml@@YAPEAXPEAVMemoryPool@1@PEAX_K2@Z -?pool_aligned_malloc@rml@@YAPEAXPEAVMemoryPool@1@_K1@Z diff --git a/src/tbb/src/tbbmalloc/xbox360-tbbmalloc-export.def b/src/tbb/src/tbbmalloc/xbox360-tbbmalloc-export.def deleted file mode 100644 index 88545c884..000000000 --- a/src/tbb/src/tbbmalloc/xbox360-tbbmalloc-export.def +++ /dev/null @@ -1,35 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -EXPORTS - -; MemoryAllocator.cpp -scalable_calloc @1 -scalable_free @2 -scalable_malloc @3 -scalable_realloc @4 -scalable_posix_memalign @5 -scalable_aligned_malloc @6 -scalable_aligned_realloc @7 -scalable_aligned_free @8 -__TBB_malloc_safer_free @9 -__TBB_malloc_safer_realloc @10 -scalable_msize @11 -__TBB_malloc_safer_msize @12 -__TBB_malloc_safer_aligned_realloc @13 -__TBB_malloc_safer_aligned_msize @14 diff --git a/src/tbb/src/tbbmalloc_proxy/CMakeLists.txt b/src/tbb/src/tbbmalloc_proxy/CMakeLists.txt new file mode 100644 index 000000000..609e4f37a --- /dev/null +++ b/src/tbb/src/tbbmalloc_proxy/CMakeLists.txt @@ -0,0 +1,99 @@ +# Copyright (c) 2020-2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if (NOT BUILD_SHARED_LIBS) + return() +endif() + +add_library(tbbmalloc_proxy + function_replacement.cpp + proxy.cpp) + +if (WIN32) + target_sources(tbbmalloc_proxy PRIVATE tbbmalloc_proxy.rc) +endif() + +add_library(TBB::tbbmalloc_proxy ALIAS tbbmalloc_proxy) + +target_compile_definitions(tbbmalloc_proxy + PUBLIC + $<$<CONFIG:DEBUG>:TBB_USE_DEBUG> + PRIVATE + __TBBMALLOCPROXY_BUILD) + +target_include_directories(tbbmalloc_proxy + PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../../include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>) + +if (NOT APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + # gcc 5.0 and later have -Wno-sized-deallocation options + set(TBB_WARNING_SUPPRESS ${TBB_WARNING_SUPPRESS} + $<$<NOT:$<VERSION_LESS:${CMAKE_CXX_COMPILER_VERSION},5.0>>:-Wno-sized-deallocation>) +endif() + +target_compile_options(tbbmalloc_proxy + PRIVATE + ${TBB_CXX_STD_FLAG} # TODO: consider making it PUBLIC. + ${TBB_MMD_FLAG} + ${TBB_DSE_FLAG} + ${TBB_WARNING_LEVEL} + ${TBB_WARNING_SUPPRESS} + ${TBB_LIB_COMPILE_FLAGS} + ${TBB_COMMON_COMPILE_FLAGS} +) + +if (UNIX AND NOT APPLE) + # Avoid use of target_link_libraries here as it changes /DEF option to \DEF on Windows. + set_target_properties(tbbmalloc_proxy PROPERTIES + LINK_FLAGS "${TBB_LINK_DEF_FILE_FLAG}\"${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-proxy.def\"" + LINK_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/def/${TBB_DEF_FILE_PREFIX}-proxy.def" + DEFINE_SYMBOL "") +endif() + +# Prefer using target_link_options instead of target_link_libraries to specify link options because +# target_link_libraries may incorrectly handle some options (on Windows, for example). +if (COMMAND target_link_options) + target_link_options(tbbmalloc_proxy + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) +else() + target_link_libraries(tbbmalloc_proxy + PRIVATE + ${TBB_LIB_LINK_FLAGS} + ${TBB_COMMON_LINK_FLAGS} + ) +endif() + +target_link_libraries(tbbmalloc_proxy + PRIVATE + TBB::tbbmalloc + Threads::Threads + ${TBB_LIB_LINK_LIBS} + ${TBB_COMMON_LINK_LIBS} +) + +if(TBB_BUILD_APPLE_FRAMEWORKS) + set_target_properties(tbbmalloc_proxy PROPERTIES + FRAMEWORK TRUE + FRAMEWORK_VERSION ${TBBMALLOC_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION} + XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER com.intel.tbbmalloc-proxy + MACOSX_FRAMEWORK_IDENTIFIER com.intel.tbbmalloc-proxy + MACOSX_FRAMEWORK_BUNDLE_VERSION ${TBBMALLOC_BINARY_VERSION}.${TBB_BINARY_MINOR_VERSION} + MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${TBBMALLOC_BINARY_VERSION}) +endif() + +tbb_install_target(tbbmalloc_proxy) diff --git a/src/tbb/src/tbbmalloc_proxy/def/lin32-proxy.def b/src/tbb/src/tbbmalloc_proxy/def/lin32-proxy.def new file mode 100644 index 000000000..d044325d9 --- /dev/null +++ b/src/tbb/src/tbbmalloc_proxy/def/lin32-proxy.def @@ -0,0 +1,55 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +{ +global: +calloc; +free; +malloc; +realloc; +posix_memalign; +memalign; +aligned_alloc; +valloc; +pvalloc; +mallinfo; +mallopt; +malloc_usable_size; +__libc_malloc; +__libc_realloc; +__libc_calloc; +__libc_free; +__libc_memalign; +__libc_pvalloc; +__libc_valloc; +__TBB_malloc_proxy; +_ZdaPv; /* next ones are new/delete */ +_ZdaPvRKSt9nothrow_t; +_ZdlPv; +_ZdlPvRKSt9nothrow_t; +_Znaj; +_ZnajRKSt9nothrow_t; +_Znwj; +_ZnwjRKSt9nothrow_t; + +local: + +/* TBB symbols */ +*3rml8internal*; +*3tbb*; +*__TBB*; + +}; diff --git a/src/tbb/src/tbbmalloc_proxy/def/lin64-proxy.def b/src/tbb/src/tbbmalloc_proxy/def/lin64-proxy.def new file mode 100644 index 000000000..f0764d443 --- /dev/null +++ b/src/tbb/src/tbbmalloc_proxy/def/lin64-proxy.def @@ -0,0 +1,55 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +{ +global: +calloc; +free; +malloc; +realloc; +posix_memalign; +memalign; +aligned_alloc; +valloc; +pvalloc; +mallinfo; +mallopt; +malloc_usable_size; +__libc_malloc; +__libc_realloc; +__libc_calloc; +__libc_free; +__libc_memalign; +__libc_pvalloc; +__libc_valloc; +__TBB_malloc_proxy; +_ZdaPv; /* next ones are new/delete */ +_ZdaPvRKSt9nothrow_t; +_ZdlPv; +_ZdlPvRKSt9nothrow_t; +_Znam; +_ZnamRKSt9nothrow_t; +_Znwm; +_ZnwmRKSt9nothrow_t; + +local: + +/* TBB symbols */ +*3rml8internal*; +*3tbb*; +*__TBB*; + +}; diff --git a/src/tbb/src/tbbmalloc/tbb_function_replacement.cpp b/src/tbb/src/tbbmalloc_proxy/function_replacement.cpp similarity index 53% rename from src/tbb/src/tbbmalloc/tbb_function_replacement.cpp rename to src/tbb/src/tbbmalloc_proxy/function_replacement.cpp index 2531d4934..ad05354fa 100644 --- a/src/tbb/src/tbbmalloc/tbb_function_replacement.cpp +++ b/src/tbb/src/tbbmalloc_proxy/function_replacement.cpp @@ -1,38 +1,93 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
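The lin32 and lin64 version scripts differ only in the Itanium-ABI mangling of std::size_t inside the operator new/new[] symbols: 'j' (unsigned int) on 32-bit Linux versus 'm' (unsigned long) on 64-bit. A small stand-alone check with the GNU demangler (GCC/Clang-specific <cxxabi.h>) confirms the correspondence:

```cpp
// Demangle the operator-new/delete symbols from the lin32/lin64 version
// scripts. GCC/Clang only; abi::__cxa_demangle mallocs the result string.
#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
    const char* symbols[] = {
        "_Znwj", "_Znaj",    // 32-bit: operator new(unsigned int), new[]
        "_Znwm", "_Znam",    // 64-bit: operator new(unsigned long), new[]
        "_ZdlPv", "_ZdaPv"   // operator delete(void*), delete[]
    };
    for (const char* sym : symbols) {
        int status = 0;
        char* readable = abi::__cxa_demangle(sym, nullptr, nullptr, &status);
        std::printf("%-8s -> %s\n", sym, status == 0 ? readable : "(demangling failed)");
        std::free(readable);
    }
    return 0;
}
```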
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ -#include "tbb/tbb_config.h" +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/detail/_assert.h" +#include "../tbb/assert_impl.h" #if !__TBB_WIN8UI_SUPPORT && defined(_WIN32) +#ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE 1 -#define __TBB_NO_IMPLICIT_LINKAGE 1 +#endif + +// no standard-conforming implementation of snprintf prior to VS 2015 +#if !defined(_MSC_VER) || _MSC_VER>=1900 +#define LOG_PRINT(s, n, format, ...) snprintf(s, n, format, __VA_ARGS__) +#else +#define LOG_PRINT(s, n, format, ...) _snprintf_s(s, n, _TRUNCATE, format, __VA_ARGS__) +#endif #include <windows.h> #include <new> #include <stdio.h> -#include "tbb_function_replacement.h" +#include <string.h> + +#include "function_replacement.h" -#include "tbb/tbb_config.h" -#include "tbb/tbb_stddef.h" -#include "../tbb/tbb_assert_impl.h" +// The information about a standard memory allocation function for the replacement log +struct FunctionInfo { + const char* funcName; + const char* dllName; +}; + +// Namespace that processes and manages the output of records to the Log journal +// that will be provided to user by TBB_malloc_replacement_log() +namespace Log { + // Value of RECORDS_COUNT is set due to the fact that we maximally + // scan 8 modules, and in every module we can swap 6 opcodes. 
(rounded to 8) + static const unsigned RECORDS_COUNT = 8 * 8; + static const unsigned RECORD_LENGTH = MAX_PATH; + + // Need to add 1 to count of records, because last record must be always nullptr + static char *records[RECORDS_COUNT + 1]; + static bool replacement_status = true; + + // Internal counter that contains number of next string for record + static unsigned record_number = 0; + + // Function that writes info about (not)found opcodes to the Log journal + // functionInfo - information about a standard memory allocation function for the replacement log + // opcodeString - string, that contain byte code of this function + // status - information about function replacement status + static void record(FunctionInfo functionInfo, const char * opcodeString, bool status) { + __TBB_ASSERT(functionInfo.dllName, "Empty DLL name value"); + __TBB_ASSERT(functionInfo.funcName, "Empty function name value"); + __TBB_ASSERT(opcodeString, "Empty opcode"); + __TBB_ASSERT(record_number <= RECORDS_COUNT, "Incorrect record number"); + + //If some replacement failed -> set status to false + replacement_status &= status; + + // If we reach the end of the log, write this message to the last line + if (record_number == RECORDS_COUNT) { + // %s - workaround to fix empty variable argument parsing behavior in GCC + LOG_PRINT(records[RECORDS_COUNT - 1], RECORD_LENGTH, "%s", "Log was truncated."); + return; + } + + char* entry = (char*)HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, RECORD_LENGTH); + __TBB_ASSERT(entry, "Invalid memory was returned"); + + LOG_PRINT(entry, RECORD_LENGTH, "%s: %s (%s), byte pattern: <%s>", + status ? "Success" : "Fail", functionInfo.funcName, functionInfo.dllName, opcodeString); + + records[record_number++] = entry; + } +}; inline UINT_PTR Ptr2Addrint(LPVOID ptr) { @@ -66,7 +121,7 @@ inline bool IsInDistance(UINT_PTR addr1, UINT_PTR addr2, __int64 dist) * doesn't allocate memory dynamically. * * The struct MemoryBuffer holds the data about a page in the memory used for - * replacing functions in Intel64 where the target is too far to be replaced + * replacing functions in 64-bit code where the target is too far to be replaced * with a short jump. All the calculations of m_base and m_next are in a multiple * of SIZE_OF_ADDRESS (which is 8 in Win64). */ @@ -180,68 +235,89 @@ static MemoryProvider memProvider; // Compare opcodes from dictionary (str1) and opcodes from code (str2) // str1 might contain '*' to mask addresses -// RETURN: NULL if opcodes did not match, string length of str1 on success +// RETURN: 0 if opcodes did not match, 1 on success size_t compareStrings( const char *str1, const char *str2 ) { - size_t str1Length = strlen(str1); - for (size_t i=0; i<str1Length; i++){ - if( str1[i] != '*' && str1[i] != str2[i] ) return 0; + for (size_t i=0; str1[i]!=0; i++){ + if( str1[i]!='*' && str1[i]!='#' && str1[i]!=str2[i] ) return 0; } - return str1Length; + return 1; } -// Check function prologue with know prologues from the dictionary +// Check function prologue with known prologues from the dictionary // opcodes - dictionary // inpAddr - pointer to function prologue // Dictionary contains opcodes for several full asm instructions // + one opcode byte for the next asm instruction for safe address processing -// RETURN: number of bytes for safe bytes replacement -// (matched_pattern/2-1) -UINT CheckOpcodes( const char ** opcodes, void *inpAddr ) +// RETURN: 1 + the index of the matched pattern, or 0 if no match found. 
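compareStrings() now answers a plain match/no-match, treating '*' and '#' as wildcards over single hex digits ('#' additionally marks a relative address that needs correcting after the code is moved). The translate-then-compare scheme, reduced to a stand-alone sketch with made-up prologue bytes and patterns:

```cpp
// Sketch of the opcode matching above: print the prologue bytes as
// uppercase hex, then compare against patterns where '*' and '#'
// match any single hex digit.
#include <cstdio>

// Returns true if 'pattern' matches the start of 'hex'.
static bool matchPattern(const char* pattern, const char* hex) {
    for (unsigned i = 0; pattern[i] != 0; ++i)
        if (pattern[i] != '*' && pattern[i] != '#' && pattern[i] != hex[i])
            return false;
    return true;
}

int main() {
    // Pretend this is a function prologue read from memory.
    const unsigned char prologue[] = { 0x8B, 0xFF, 0x55, 0x8B, 0xEC, 0x83 };
    char hex[2 * sizeof(prologue) + 1];
    for (unsigned i = 0; i < sizeof(prologue); ++i)
        std::sprintf(hex + 2 * i, "%.2X", prologue[i]); // same "%.2X" as CheckOpcodes

    const char* patterns[] = { "8BFF558BEC83", "8BFF558BEC**", "E9########", nullptr };
    for (unsigned i = 0; patterns[i]; ++i)
        std::printf("%-14s -> %s\n", patterns[i],
                    matchPattern(patterns[i], hex) ? "match" : "no match");
    return 0;
}
```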
+static UINT CheckOpcodes( const char ** opcodes, void *inpAddr, bool abortOnError, const FunctionInfo* functionInfo = nullptr) { static size_t opcodesStringsCount = 0; static size_t maxOpcodesLength = 0; static size_t opcodes_pointer = (size_t)opcodes; - char opcodeString[61]; + char opcodeString[2*MAX_PATTERN_SIZE+1]; size_t i; - size_t result; + size_t result = 0; // Get the values for static variables // max length and number of patterns if( !opcodesStringsCount || opcodes_pointer != (size_t)opcodes ){ - while( *(opcodes + opcodesStringsCount)!= NULL ){ + while( *(opcodes + opcodesStringsCount)!= nullptr ){ if( (i=strlen(*(opcodes + opcodesStringsCount))) > maxOpcodesLength ) maxOpcodesLength = i; opcodesStringsCount++; } opcodes_pointer = (size_t)opcodes; - __TBB_ASSERT( maxOpcodesLength < 61, "Limit is 30 opcodes/60 symbols per pattern" ); + __TBB_ASSERT( maxOpcodesLength/2 <= MAX_PATTERN_SIZE, "Pattern exceeded the limit of 28 opcodes/56 symbols" ); } // Translate prologue opcodes to string format to compare - for( i=0; i< maxOpcodesLength/2; i++ ){ + for( i=0; i<maxOpcodesLength/2 && i<MAX_PATTERN_SIZE; ++i ){ sprintf( opcodeString + 2*i, "%.2X", *((unsigned char*)inpAddr+i) ); } - opcodeString[maxOpcodesLength] = 0; + opcodeString[2*i] = 0; // Compare translated opcodes with patterns - for( i=0; i< opcodesStringsCount; i++ ){ - result = compareStrings( opcodes[i],opcodeString ); - if( result ) - return (UINT)(result/2-1); + for( UINT idx=0; idx<opcodesStringsCount; ++idx ){ + result = compareStrings( opcodes[idx],opcodeString ); + if( result ) { + if (functionInfo) { + Log::record(*functionInfo, opcodeString, /*status*/ true); + } + return idx + 1; // avoid 0 which indicates a failure + } + } + if (functionInfo) { + Log::record(*functionInfo, opcodeString, /*status*/ false); + } + if (abortOnError) { + // Impossibility to find opcodes in the dictionary is a serious issue, + // as if we unable to call original function, leak or crash is expected result. + __TBB_ASSERT_RELEASE( false, "CheckOpcodes failed" ); } - // TODO: to add more stuff to patterns - __TBB_ASSERT( false, "CheckOpcodes failed" ); - - // No matches found just do not store original calls return 0; } +// Modify offsets in original code after moving it to a trampoline. +// We do not have more than one offset to correct in existing opcode patterns. 
+static void CorrectOffset( UINT_PTR address, const char* pattern, UINT distance ) +{ + const char* pos = strstr(pattern, "#*******"); + if( pos ) { + address += (pos - pattern)/2; // compute the offset position + UINT value; + // UINT assignment is not used to avoid potential alignment issues + memcpy(&value, Addrint2Ptr(address), sizeof(value)); + value += distance; + memcpy(Addrint2Ptr(address), &value, sizeof(value)); + } +} + // Insert jump relative instruction to the input address // RETURN: the size of the trampoline or 0 on failure -static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char ** opcodes, void** storedAddr) +static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char* pattern, void** storedAddr) { - UINT opcodesNumber = SIZE_OF_RELJUMP; + size_t bytesToMove = SIZE_OF_RELJUMP; UINT_PTR srcAddr = Ptr2Addrint(inpAddr); UINT_PTR tgtAddr = Ptr2Addrint(targetAddr); // Check that the target fits in 32 bits @@ -252,28 +328,25 @@ static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char ** o UINT offset32; UCHAR *codePtr = (UCHAR *)inpAddr; - // If requested, store original function code - if ( storedAddr ){ - opcodesNumber = CheckOpcodes( opcodes, inpAddr ); - if( opcodesNumber >= SIZE_OF_RELJUMP ){ - UINT_PTR strdAddr = memProvider.GetLocation(srcAddr); - if (!strdAddr) - return 0; - *storedAddr = Addrint2Ptr(strdAddr); - // Set 'executable' flag for original instructions in the new place - DWORD pageFlags = PAGE_EXECUTE_READWRITE; - if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0; - // Copy original instructions to the new place - memcpy(*storedAddr, codePtr, opcodesNumber); - // Set jump to the code after replacement - offset = srcAddr - strdAddr - SIZE_OF_RELJUMP; - offset32 = (UINT)((offset & 0xFFFFFFFF)); - *((UCHAR*)*storedAddr+opcodesNumber) = 0xE9; - memcpy(((UCHAR*)*storedAddr+opcodesNumber+1), &offset32, sizeof(offset32)); - }else{ - // No matches found just do not store original calls - *storedAddr = NULL; - } + if ( storedAddr ){ // If requested, store original function code + bytesToMove = strlen(pattern)/2-1; // The last byte matching the pattern must not be copied + __TBB_ASSERT_RELEASE( bytesToMove >= SIZE_OF_RELJUMP, "Incorrect bytecode pattern?" ); + UINT_PTR trampAddr = memProvider.GetLocation(srcAddr); + if (!trampAddr) + return 0; + *storedAddr = Addrint2Ptr(trampAddr); + // Set 'executable' flag for original instructions in the new place + DWORD pageFlags = PAGE_EXECUTE_READWRITE; + if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0; + // Copy original instructions to the new place + memcpy(*storedAddr, codePtr, bytesToMove); + offset = srcAddr - trampAddr; + offset32 = (UINT)(offset & 0xFFFFFFFF); + CorrectOffset( trampAddr, pattern, offset32 ); + // Set jump to the code after replacement + offset32 -= SIZE_OF_RELJUMP; + *(UCHAR*)(trampAddr+bytesToMove) = 0xE9; + memcpy((UCHAR*)(trampAddr+bytesToMove+1), &offset32, sizeof(offset32)); } // The following will work correctly even if srcAddr>tgtAddr, as long as @@ -285,7 +358,7 @@ static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char ** o memcpy(codePtr+1, &offset32, sizeof(offset32)); // Fill the rest with NOPs to correctly see disassembler of old code in debugger. 
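For the near case, the patch written by InsertTrampoline32 is the 5-byte instruction E9 <rel32>, with the displacement taken relative to the end of the instruction; the rest of the moved pattern is padded with 0x90 NOPs so the old code still disassembles cleanly. The displacement arithmetic in isolation (made-up addresses; nothing is executed):

```cpp
// The rel32 jump: opcode 0xE9 followed by (target - source - 5), because
// x86 relative displacements are counted from the next instruction.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    const unsigned SIZE_OF_RELJUMP = 5;               // e9 xx xx xx xx
    std::uintptr_t src = 0x7FFE0000;                  // patched prologue (made up)
    std::uintptr_t tgt = 0x7FFD8000;                  // replacement function (made up)

    // Unsigned subtraction wraps to the right two's-complement value for
    // backward jumps, which is why the code masks instead of branching.
    std::uint32_t rel32 = (std::uint32_t)((tgt - src - SIZE_OF_RELJUMP) & 0xFFFFFFFF);

    unsigned char jmp[5] = { 0xE9 };
    std::memcpy(jmp + 1, &rel32, sizeof(rel32));      // memcpy avoids alignment issues

    std::printf("jmp rel32: %02X %02X %02X %02X %02X (disp = 0x%08X)\n",
                jmp[0], jmp[1], jmp[2], jmp[3], jmp[4], (unsigned)rel32);
    return 0;
}
```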
- for( unsigned i=SIZE_OF_RELJUMP; i<opcodesNumber; i++ ){ + for( unsigned i=SIZE_OF_RELJUMP; i<bytesToMove; i++ ){ *(codePtr+i) = 0x90; } @@ -297,9 +370,9 @@ static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char ** o // 2 Put jump RIP relative indirect through the address in the close page // 3 Put the absolute address of the target in the allocated location // RETURN: the size of the trampoline or 0 on failure -static DWORD InsertTrampoline64(void *inpAddr, void *targetAddr, const char ** opcodes, void** storedAddr) +static DWORD InsertTrampoline64(void *inpAddr, void *targetAddr, const char* pattern, void** storedAddr) { - UINT opcodesNumber = SIZE_OF_INDJUMP; + size_t bytesToMove = SIZE_OF_INDJUMP; UINT_PTR srcAddr = Ptr2Addrint(inpAddr); UINT_PTR tgtAddr = Ptr2Addrint(targetAddr); @@ -317,39 +390,36 @@ static DWORD InsertTrampoline64(void *inpAddr, void *targetAddr, const char ** o UINT_PTR *locPtr = (UINT_PTR *)Addrint2Ptr(location); *locPtr = tgtAddr; - // If requested, store original function code - if( storedAddr ){ - opcodesNumber = CheckOpcodes( opcodes, inpAddr ); - if( opcodesNumber >= SIZE_OF_INDJUMP ){ - UINT_PTR strdAddr = memProvider.GetLocation(srcAddr); - if (!strdAddr) - return 0; - *storedAddr = Addrint2Ptr(strdAddr); - // Set 'executable' flag for original instructions in the new place - DWORD pageFlags = PAGE_EXECUTE_READWRITE; - if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0; - // Copy original instructions to the new place - memcpy(*storedAddr, codePtr, opcodesNumber); - // Set jump to the code after replacement. It is within the distance of relative jump! - offset = srcAddr - strdAddr - SIZE_OF_RELJUMP; - offset32 = (UINT)((offset & 0xFFFFFFFF)); - *((UCHAR*)*storedAddr+opcodesNumber) = 0xE9; - memcpy(((UCHAR*)*storedAddr+opcodesNumber+1), &offset32, sizeof(offset32)); - }else{ - // No matches found just do not store original calls - *storedAddr = NULL; - } + if ( storedAddr ){ // If requested, store original function code + bytesToMove = strlen(pattern)/2-1; // The last byte matching the pattern must not be copied + __TBB_ASSERT_RELEASE( bytesToMove >= SIZE_OF_INDJUMP, "Incorrect bytecode pattern?" ); + UINT_PTR trampAddr = memProvider.GetLocation(srcAddr); + if (!trampAddr) + return 0; + *storedAddr = Addrint2Ptr(trampAddr); + // Set 'executable' flag for original instructions in the new place + DWORD pageFlags = PAGE_EXECUTE_READWRITE; + if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0; + // Copy original instructions to the new place + memcpy(*storedAddr, codePtr, bytesToMove); + offset = srcAddr - trampAddr; + offset32 = (UINT)(offset & 0xFFFFFFFF); + CorrectOffset( trampAddr, pattern, offset32 ); + // Set jump to the code after replacement. It is within the distance of relative jump! + offset32 -= SIZE_OF_RELJUMP; + *(UCHAR*)(trampAddr+bytesToMove) = 0xE9; + memcpy((UCHAR*)(trampAddr+bytesToMove+1), &offset32, sizeof(offset32)); } // Fill the buffer - offset = location - srcAddr - SIZE_OF_INDJUMP; - offset32 = (UINT)(offset & 0xFFFFFFFF); + offset = location - srcAddr - SIZE_OF_INDJUMP; + offset32 = (UINT)(offset & 0xFFFFFFFF); *(codePtr) = 0xFF; *(codePtr+1) = 0x25; memcpy(codePtr+2, &offset32, sizeof(offset32)); // Fill the rest with NOPs to correctly see disassembler of old code in debugger. 
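When the replacement lies beyond the reach of a 32-bit displacement, InsertTrampoline64 instead stores the absolute 8-byte target in a page obtained within range of the patched function and emits the 6-byte RIP-relative indirect jump FF 25 <disp32>. The same encoding as a stand-alone sketch, again with made-up addresses:

```cpp
// The RIP-relative indirect jump:
//   FF 25 <disp32>   ; jmp qword ptr [rip + disp32]
// where [rip + disp32] holds the absolute 8-byte target address.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    const unsigned SIZE_OF_INDJUMP = 6;               // ff 25 xx xx xx xx
    std::uintptr_t src      = 0x00007F0000001000;     // patched prologue (made up)
    std::uintptr_t location = 0x00007F0000002000;     // nearby slot holding the target
    std::uint64_t  target   = 0x00007A1234567890;     // absolute replacement address

    // disp32 is relative to the end of the 6-byte instruction.
    std::uint32_t disp32 =
        (std::uint32_t)((location - src - SIZE_OF_INDJUMP) & 0xFFFFFFFF);

    unsigned char jmp[6] = { 0xFF, 0x25 };
    std::memcpy(jmp + 2, &disp32, sizeof(disp32));

    std::printf("jmp [rip+0x%X]:", (unsigned)disp32);
    for (unsigned i = 0; i < SIZE_OF_INDJUMP; ++i) std::printf(" %02X", jmp[i]);
    std::printf("  ; *0x%llX = 0x%llX\n",
                (unsigned long long)location, (unsigned long long)target);
    return 0;
}
```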
- for( unsigned i=SIZE_OF_INDJUMP; i<opcodesNumber; i++ ){ + for( unsigned i=SIZE_OF_INDJUMP; i<bytesToMove; i++ ){ *(codePtr+i) = 0x90; } @@ -369,9 +439,27 @@ static bool InsertTrampoline(void *inpAddr, void *targetAddr, const char ** opco DWORD origProt = 0; if (!VirtualProtect(inpAddr, MAX_PROBE_SIZE, PAGE_EXECUTE_WRITECOPY, &origProt)) return FALSE; - probeSize = InsertTrampoline32(inpAddr, targetAddr, opcodes, origFunc); + + const char* pattern = nullptr; + if ( origFunc ){ // Need to store original function code + UCHAR * const codePtr = (UCHAR *)inpAddr; + if ( *codePtr == 0xE9 ){ // JMP relative instruction + // For the special case when a system function consists of a single near jump, + // instead of moving it somewhere we use the target of the jump as the original function. + unsigned offsetInJmp = *(unsigned*)(codePtr + 1); + *origFunc = (void*)(Ptr2Addrint(inpAddr) + offsetInJmp + SIZE_OF_RELJUMP); + origFunc = nullptr; // now it must be ignored by InsertTrampoline32/64 + } else { + // find the right opcode pattern + UINT opcodeIdx = CheckOpcodes( opcodes, inpAddr, /*abortOnError=*/true ); + __TBB_ASSERT( opcodeIdx > 0, "abortOnError ignored in CheckOpcodes?" ); + pattern = opcodes[opcodeIdx-1]; // -1 compensates for +1 in CheckOpcodes + } + } + + probeSize = InsertTrampoline32(inpAddr, targetAddr, pattern, origFunc); if (!probeSize) - probeSize = InsertTrampoline64(inpAddr, targetAddr, opcodes, origFunc); + probeSize = InsertTrampoline64(inpAddr, targetAddr, pattern, origFunc); // Restore original protection VirtualProtect(inpAddr, MAX_PROBE_SIZE, origProt, &origProt); @@ -467,4 +555,28 @@ FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR return FRR_OK; } +bool IsPrologueKnown(const char* dllName, const char *funcName, const char **opcodes, HMODULE module) +{ + FARPROC inpFunc = GetProcAddress(module, funcName); + FunctionInfo functionInfo = { funcName, dllName }; + + if (!inpFunc) { + Log::record(functionInfo, "unknown", /*status*/ false); + return false; + } + + return CheckOpcodes( opcodes, (void*)inpFunc, /*abortOnError=*/false, &functionInfo) != 0; +} + +// Public Windows API +extern "C" __declspec(dllexport) int TBB_malloc_replacement_log(char *** function_replacement_log_ptr) +{ + if (function_replacement_log_ptr != nullptr) { + *function_replacement_log_ptr = Log::records; + } + + // If we have no logs -> return false status + return Log::replacement_status && Log::records[0] != nullptr ? 0 : -1; +} + #endif /* !__TBB_WIN8UI_SUPPORT && defined(_WIN32) */ diff --git a/src/tbb/src/tbbmalloc/tbb_function_replacement.h b/src/tbb/src/tbbmalloc_proxy/function_replacement.h similarity index 53% rename from src/tbb/src/tbbmalloc/tbb_function_replacement.h rename to src/tbb/src/tbbmalloc_proxy/function_replacement.h index 34f8556fe..a3ec138a6 100644 --- a/src/tbb/src/tbbmalloc/tbb_function_replacement.h +++ b/src/tbb/src/tbbmalloc_proxy/function_replacement.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
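TBB_malloc_replacement_log() is the new public window onto the journal built by Log::record(): it returns 0 only when every attempted replacement succeeded, and optionally hands back the nullptr-terminated record array. A hypothetical consumer might resolve the symbol at run time so the program still loads when the proxy DLL is absent; the function name and return convention below come from this diff, while the module name and lookup approach are assumptions:

```cpp
// Hypothetical Windows consumer of the replacement log. Assumes the proxy
// ships as "tbbmalloc_proxy.dll"; resolving at run time avoids a hard link
// dependency. Error handling kept minimal.
#include <windows.h>
#include <cstdio>

int main() {
    typedef int (*log_fn)(char***);
    HMODULE proxy = GetModuleHandleA("tbbmalloc_proxy.dll");   // assumed DLL name
    log_fn get_log = proxy
        ? (log_fn)GetProcAddress(proxy, "TBB_malloc_replacement_log")
        : nullptr;
    if (!get_log) {
        std::puts("tbbmalloc_proxy is not loaded");
        return 1;
    }
    char** records = nullptr;
    int status = get_log(&records);      // 0 => every replacement succeeded
    std::printf("replacement status: %d\n", status);
    for (char** rec = records; rec && *rec; ++rec)
        std::puts(*rec);                 // array is nullptr-terminated
    return status == 0 ? 0 : 1;
}
```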
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #ifndef __TBB_function_replacement_H @@ -42,8 +38,10 @@ typedef void (*FUNCPTR)(); #define ReplaceFunction ReplaceFunctionW #endif //UNICODE -FRR_TYPE ReplaceFunctionA(const char *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=NULL); -FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=NULL); +FRR_TYPE ReplaceFunctionA(const char *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=nullptr); +FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=nullptr); + +bool IsPrologueKnown(const char* dllName, const char *funcName, const char **opcodes, HMODULE module); // Utilities to convert between ADDRESS and LPVOID union Int2Ptr { @@ -54,7 +52,7 @@ union Int2Ptr { inline UINT_PTR Ptr2Addrint(LPVOID ptr); inline LPVOID Addrint2Ptr(UINT_PTR ptr); -// Use this value as the maximum size the trampoline region +// The size of a trampoline region const unsigned MAX_PROBE_SIZE = 32; // The size of a jump relative instruction "e9 00 00 00 00" @@ -66,6 +64,10 @@ const unsigned SIZE_OF_INDJUMP = 6; // The size of address we put in the location (in Intel64) const unsigned SIZE_OF_ADDRESS = 8; +// The size limit (in bytes) for an opcode pattern to fit into a trampoline +// There should be enough space left for a relative jump; +1 is for the extra pattern byte. +const unsigned MAX_PATTERN_SIZE = MAX_PROBE_SIZE - SIZE_OF_RELJUMP + 1; + // The max distance covered in 32 bits: 2^31 - 1 - C // where C should not be smaller than the size of a probe. // The latter is important to correctly handle "backward" jumps. diff --git a/src/tbb/src/tbbmalloc_proxy/proxy.cpp b/src/tbb/src/tbbmalloc_proxy/proxy.cpp new file mode 100644 index 000000000..9dd17d91e --- /dev/null +++ b/src/tbb/src/tbbmalloc_proxy/proxy.cpp @@ -0,0 +1,796 @@ +/* + Copyright (c) 2005-2024 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
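The new MAX_PATTERN_SIZE constant encodes the sizing rule the trampolines rely on: the last pattern byte is never copied, so at most MAX_PATTERN_SIZE - 1 = 27 prologue bytes are moved, leaving exactly the 5 bytes needed for the jump back inside the 32-byte probe region. That is the "28 opcodes / 56 symbols" limit cited by the pattern table in proxy.cpp; restated as compile-time checks:

```cpp
// Compile-time restatement of the sizing rule from function_replacement.h.
constexpr unsigned MAX_PROBE_SIZE   = 32;  // trampoline region per patched function
constexpr unsigned SIZE_OF_RELJUMP  = 5;   // e9 + rel32
constexpr unsigned MAX_PATTERN_SIZE = MAX_PROBE_SIZE - SIZE_OF_RELJUMP + 1;

static_assert(MAX_PATTERN_SIZE == 28,
              "28 opcodes, i.e. 56 hex symbols per pattern line");
static_assert((MAX_PATTERN_SIZE - 1) + SIZE_OF_RELJUMP <= MAX_PROBE_SIZE,
              "moved prologue bytes plus the jump back fit in the trampoline");

int main() { return 0; }
```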
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if __unix__ && !__ANDROID__ +// include <bits/c++config.h> indirectly so that <cstdlib> is not included +#include <cstddef> +// include <features.h> indirectly so that <stdlib.h> is not included +#include <unistd.h> +// Working around compiler issue with Anaconda's gcc 7.3 compiler package. +// New gcc ported for old libc may provide their inline implementation +// of aligned_alloc as required by new C++ standard, this makes it hard to +// redefine aligned_alloc here. However, running on systems with new libc +// version, it still needs it to be redefined, thus tricking system headers +#if defined(__GLIBC_PREREQ) +#if !__GLIBC_PREREQ(2, 16) && _GLIBCXX_HAVE_ALIGNED_ALLOC +// tell <cstdlib> that there is no aligned_alloc +#undef _GLIBCXX_HAVE_ALIGNED_ALLOC +// trick <stdlib.h> to define another symbol instead +#define aligned_alloc __hidden_redefined_aligned_alloc +// Fix the state and undefine the trick +#include <cstdlib> +#undef aligned_alloc +#endif // !__GLIBC_PREREQ(2, 16) && _GLIBCXX_HAVE_ALIGNED_ALLOC +#endif // defined(__GLIBC_PREREQ) +#include <cstdlib> +#endif // __unix__ && !__ANDROID__ + +#include "proxy.h" + +#include "oneapi/tbb/detail/_config.h" +#include "oneapi/tbb/scalable_allocator.h" +#include "../tbb/environment.h" + +#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) + #if TBB_USE_EXCEPTIONS + #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. 
+ #elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 0 + #endif +#elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 1 +#endif + +#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED || _WIN32 && !__TBB_WIN8UI_SUPPORT +/*** internal global operator new implementation (Linux, Windows) ***/ +#include <new> + +// Synchronization primitives to protect original library pointers and new_handler +#include "../tbbmalloc/Synchronize.h" +// Use MallocMutex implementation +typedef MallocMutex ProxyMutex; + +// Adds aliasing and copy attributes to function if available +#if defined(__has_attribute) + #if __has_attribute(__copy__) + #define __TBB_ALIAS_ATTR_COPY(name) __attribute__((alias (#name), __copy__(name))) + #endif +#endif + +#ifndef __TBB_ALIAS_ATTR_COPY + #define __TBB_ALIAS_ATTR_COPY(name) __attribute__((alias (#name))) +#endif + +// In case there is no std::get_new_handler function +// which provides synchronized access to std::new_handler +#if !__TBB_CPP11_GET_NEW_HANDLER_PRESENT +static ProxyMutex new_lock; +#endif + +static inline void* InternalOperatorNew(size_t sz) { + void* res = scalable_malloc(sz); +#if TBB_USE_EXCEPTIONS + while (!res) { + std::new_handler handler; +#if __TBB_CPP11_GET_NEW_HANDLER_PRESENT + handler = std::get_new_handler(); +#else + { + ProxyMutex::scoped_lock lock(new_lock); + handler = std::set_new_handler(0); + std::set_new_handler(handler); + } +#endif + if (handler) { + (*handler)(); + } else { + throw std::bad_alloc(); + } + res = scalable_malloc(sz); +} +#endif /* TBB_USE_EXCEPTIONS */ + return res; +} +/*** end of internal global operator new implementation ***/ +#endif // MALLOC_UNIXLIKE_OVERLOAD_ENABLED || _WIN32 && !__TBB_WIN8UI_SUPPORT + +#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED + +#ifndef __THROW +#define __THROW +#endif + +/*** service functions and variables ***/ +#include <string.h> // for memset +#include <unistd.h> // for sysconf + +static long memoryPageSize; + +static inline void initPageSize() +{ + memoryPageSize = sysconf(_SC_PAGESIZE); +} + +#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED +#include <dlfcn.h> +#include <malloc.h> // mallinfo + +/* __TBB_malloc_proxy used as a weak symbol by libtbbmalloc for: + 1) detection that the proxy library is loaded + 2) check that dlsym("malloc") found something different from our replacement malloc +*/ + +extern "C" void *__TBB_malloc_proxy(size_t) __TBB_ALIAS_ATTR_COPY(malloc); + +static void *orig_msize; + +#elif MALLOC_ZONE_OVERLOAD_ENABLED + +#include "proxy_overload_osx.h" + +#endif // MALLOC_ZONE_OVERLOAD_ENABLED + +// Original (i.e., replaced) functions, +// they are never changed for MALLOC_ZONE_OVERLOAD_ENABLED. +static void *orig_free, + *orig_realloc; + +#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED +#define ZONE_ARG +#define PREFIX(name) name + +static void *orig_libc_free, + *orig_libc_realloc; + +// We already tried to find ptr to original functions. 
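InternalOperatorNew() implements the standard operator new contract on top of scalable_malloc(): on failure, fetch the installed new_handler, invoke it, and retry, throwing std::bad_alloc when no handler is installed (the pre-C++11 branch reads the handler under a mutex via paired set_new_handler() calls, since std::get_new_handler() may be unavailable). The same loop restated over plain malloc as a minimal C++11 sketch:

```cpp
// The standard operator-new retry loop, over plain malloc for clarity.
// Each failed attempt consults the installed new_handler; no handler
// means std::bad_alloc.
#include <cstdlib>
#include <new>

static void* retrying_allocate(std::size_t sz) {
    void* p = std::malloc(sz);
    while (!p) {
        std::new_handler handler = std::get_new_handler(); // synchronized accessor, C++11
        if (!handler)
            throw std::bad_alloc();
        handler();              // may free memory, install another handler, or throw
        p = std::malloc(sz);    // retry after the handler ran
    }
    return p;
}

int main() {
    void* p = retrying_allocate(64);
    std::free(p);
    return 0;
}
```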
+static std::atomic<bool> origFuncSearched{false}; + +inline void InitOrigPointers() +{ + // race is OK here, as different threads found same functions + if (!origFuncSearched.load(std::memory_order_acquire)) { + orig_free = dlsym(RTLD_NEXT, "free"); + orig_realloc = dlsym(RTLD_NEXT, "realloc"); + orig_msize = dlsym(RTLD_NEXT, "malloc_usable_size"); + orig_libc_free = dlsym(RTLD_NEXT, "__libc_free"); + orig_libc_realloc = dlsym(RTLD_NEXT, "__libc_realloc"); + + origFuncSearched.store(true, std::memory_order_release); + } +} + +/*** replacements for malloc and the family ***/ +extern "C" { +#elif MALLOC_ZONE_OVERLOAD_ENABLED + +// each impl_* function has such 1st argument, it's unused +#define ZONE_ARG struct _malloc_zone_t *, +#define PREFIX(name) impl_##name +// not interested in original functions for zone overload +inline void InitOrigPointers() {} + +#endif // MALLOC_UNIXLIKE_OVERLOAD_ENABLED and MALLOC_ZONE_OVERLOAD_ENABLED + +void *PREFIX(malloc)(ZONE_ARG size_t size) __THROW +{ + return scalable_malloc(size); +} + +void *PREFIX(calloc)(ZONE_ARG size_t num, size_t size) __THROW +{ + return scalable_calloc(num, size); +} + +void PREFIX(free)(ZONE_ARG void *object) __THROW +{ + InitOrigPointers(); + __TBB_malloc_safer_free(object, (void (*)(void*))orig_free); +} + +void *PREFIX(realloc)(ZONE_ARG void* ptr, size_t sz) __THROW +{ + InitOrigPointers(); + return __TBB_malloc_safer_realloc(ptr, sz, orig_realloc); +} + +/* The older *NIX interface for aligned allocations; + it's formally substituted by posix_memalign and deprecated, + so we do not expect it to cause cyclic dependency with C RTL. */ +void *PREFIX(memalign)(ZONE_ARG size_t alignment, size_t size) __THROW +{ + return scalable_aligned_malloc(size, alignment); +} + +/* valloc allocates memory aligned on a page boundary */ +void *PREFIX(valloc)(ZONE_ARG size_t size) __THROW +{ + if (! memoryPageSize) initPageSize(); + + return scalable_aligned_malloc(size, memoryPageSize); +} + +#undef ZONE_ARG +#undef PREFIX + +#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED + +// match prototype from system headers +#if __ANDROID__ +size_t malloc_usable_size(const void *ptr) __THROW +#else +size_t malloc_usable_size(void *ptr) __THROW +#endif +{ + InitOrigPointers(); + return __TBB_malloc_safer_msize(const_cast<void*>(ptr), (size_t (*)(void*))orig_msize); +} + +int posix_memalign(void **memptr, size_t alignment, size_t size) __THROW +{ + return scalable_posix_memalign(memptr, alignment, size); +} + +/* pvalloc allocates smallest set of complete pages which can hold + the requested number of bytes. Result is aligned on page boundary. */ +void *pvalloc(size_t size) __THROW +{ + if (! memoryPageSize) initPageSize(); + // align size up to the page size, + // pvalloc(0) returns 1 page, see man libmpatrol + size = size? ((size-1) | (memoryPageSize-1)) + 1 : memoryPageSize; + + return scalable_aligned_malloc(size, memoryPageSize); +} + +int mallopt(int /*param*/, int /*value*/) __THROW +{ + return 1; +} + +#if defined(__GLIBC__) || defined(__ANDROID__) +struct mallinfo mallinfo() __THROW +{ + struct mallinfo m; + memset(&m, 0, sizeof(struct mallinfo)); + + return m; +} +#endif + +#if __ANDROID__ +// Android doesn't have malloc_usable_size, provide it to be compatible +// with Linux, in addition overload dlmalloc_usable_size() that presented +// under Android. 
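InitOrigPointers() is the classic dlsym(RTLD_NEXT, ...) idiom: resolve the next definition of each replaced routine once, publish the pointers with a release store, and let the safer_free/safer_realloc wrappers forward foreign blocks to them. A single-function LD_PRELOAD sketch of the same idiom (glibc; illustrative logging only, and unlike __TBB_malloc_safer_free it does not check whether a block belongs to the scalable allocator):

```cpp
// Minimal dlsym(RTLD_NEXT) interposition: wrap free(), lazily resolve the
// next definition, forward to it. Build as a shared object and inject
// with LD_PRELOAD (link with -ldl on older glibc).
#ifndef _GNU_SOURCE
#define _GNU_SOURCE           // for RTLD_NEXT
#endif
#include <dlfcn.h>
#include <atomic>
#include <cstdio>

extern "C" void free(void* ptr) {
    typedef void (*free_fn)(void*);
    static std::atomic<free_fn> next_free{nullptr};

    free_fn fn = next_free.load(std::memory_order_acquire);
    if (!fn) {
        // Racing threads may both call dlsym; both resolve the same address.
        fn = (free_fn)dlsym(RTLD_NEXT, "free");
        next_free.store(fn, std::memory_order_release);
    }
    if (ptr)
        std::fprintf(stderr, "free(%p)\n", ptr);  // stand-in instrumentation
    fn(ptr);                                      // forward to the real free
}
```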
+size_t dlmalloc_usable_size(const void *ptr) __TBB_ALIAS_ATTR_COPY(malloc_usable_size); +#else // __ANDROID__ +// TODO: consider using __typeof__ to guarantee the correct declaration types +// C11 function, supported starting GLIBC 2.16 +void *aligned_alloc(size_t alignment, size_t size) __TBB_ALIAS_ATTR_COPY(memalign); +// Those non-standard functions are exported by GLIBC, and might be used +// in conjunction with standard malloc/free, so we must overload them. +// Bionic doesn't have them. Not removing from the linker scripts, +// as absent entry points are ignored by the linker. + +void *__libc_malloc(size_t size) __TBB_ALIAS_ATTR_COPY(malloc); +void *__libc_calloc(size_t num, size_t size) __TBB_ALIAS_ATTR_COPY(calloc); +void *__libc_memalign(size_t alignment, size_t size) __TBB_ALIAS_ATTR_COPY(memalign); +void *__libc_pvalloc(size_t size) __TBB_ALIAS_ATTR_COPY(pvalloc); +void *__libc_valloc(size_t size) __TBB_ALIAS_ATTR_COPY(valloc); + +// call original __libc_* to support naive replacement of free via __libc_free etc +void __libc_free(void *ptr) +{ + InitOrigPointers(); + __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_libc_free); +} + +void *__libc_realloc(void *ptr, size_t size) +{ + InitOrigPointers(); + return __TBB_malloc_safer_realloc(ptr, size, orig_libc_realloc); +} +#endif // !__ANDROID__ + +} /* extern "C" */ + +/*** replacements for global operators new and delete ***/ + +void* operator new(size_t sz) { + return InternalOperatorNew(sz); +} +void* operator new[](size_t sz) { + return InternalOperatorNew(sz); +} +void operator delete(void* ptr) noexcept { + InitOrigPointers(); + __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); +} +void operator delete[](void* ptr) noexcept { + InitOrigPointers(); + __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); +} +void* operator new(size_t sz, const std::nothrow_t&) noexcept { + return scalable_malloc(sz); +} +void* operator new[](std::size_t sz, const std::nothrow_t&) noexcept { + return scalable_malloc(sz); +} +void operator delete(void* ptr, const std::nothrow_t&) noexcept { + InitOrigPointers(); + __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); +} +void operator delete[](void* ptr, const std::nothrow_t&) noexcept { + InitOrigPointers(); + __TBB_malloc_safer_free(ptr, (void (*)(void*))orig_free); +} + +#endif /* MALLOC_UNIXLIKE_OVERLOAD_ENABLED */ +#endif /* MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED */ + +#ifdef _WIN32 +#include <windows.h> + +#if !__TBB_WIN8UI_SUPPORT + +#include <stdio.h> + +#include "function_replacement.h" + +template<typename T, size_t N> // generic function to find length of array +inline size_t arrayLength(const T(&)[N]) { + return N; +} + +void __TBB_malloc_safer_delete( void *ptr) +{ + __TBB_malloc_safer_free( ptr, nullptr ); +} + +void* safer_aligned_malloc( size_t size, size_t alignment ) +{ + // workaround for "is power of 2 pow N" bug that accepts zeros + return scalable_aligned_malloc( size, alignment>sizeof(size_t*)?alignment:sizeof(size_t*) ); +} + +// we do not support _expand(); +void* safer_expand( void *, size_t ) +{ + return nullptr; +} + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(CRTLIB) \ +void (*orig_free_##CRTLIB)(void*); \ +void __TBB_malloc_safer_free_##CRTLIB(void *ptr) \ +{ \ + __TBB_malloc_safer_free( ptr, orig_free_##CRTLIB ); \ +} \ + \ +void (*orig__aligned_free_##CRTLIB)(void*); \ +void __TBB_malloc_safer__aligned_free_##CRTLIB(void *ptr) \ +{ \ + __TBB_malloc_safer_free( ptr, orig__aligned_free_##CRTLIB ); \ +} \ + \ 
+size_t (*orig__msize_##CRTLIB)(void*); \ +size_t __TBB_malloc_safer__msize_##CRTLIB(void *ptr) \ +{ \ + return __TBB_malloc_safer_msize( ptr, orig__msize_##CRTLIB ); \ +} \ + \ +size_t (*orig__aligned_msize_##CRTLIB)(void*, size_t, size_t); \ +size_t __TBB_malloc_safer__aligned_msize_##CRTLIB( void *ptr, size_t alignment, size_t offset) \ +{ \ + return __TBB_malloc_safer_aligned_msize( ptr, alignment, offset, orig__aligned_msize_##CRTLIB ); \ +} \ + \ +void* __TBB_malloc_safer_realloc_##CRTLIB( void *ptr, size_t size ) \ +{ \ + orig_ptrs func_ptrs = {orig_free_##CRTLIB, orig__msize_##CRTLIB}; \ + return __TBB_malloc_safer_realloc( ptr, size, &func_ptrs ); \ +} \ + \ +void* __TBB_malloc_safer__aligned_realloc_##CRTLIB( void *ptr, size_t size, size_t alignment ) \ +{ \ + orig_aligned_ptrs func_ptrs = {orig__aligned_free_##CRTLIB, orig__aligned_msize_##CRTLIB}; \ + return __TBB_malloc_safer_aligned_realloc( ptr, size, alignment, &func_ptrs ); \ +} + +// Only for ucrtbase: substitution for _o_free +void (*orig__o_free)(void*); +void __TBB_malloc__o_free(void *ptr) +{ + __TBB_malloc_safer_free( ptr, orig__o_free ); +} +// Only for ucrtbase: substitution for _free_base +void(*orig__free_base)(void*); +void __TBB_malloc__free_base(void *ptr) +{ + __TBB_malloc_safer_free(ptr, orig__free_base); +} + +// Size limit is MAX_PATTERN_SIZE (28) byte codes / 56 symbols per line. +// * can be used to match any digit in byte codes. +// # followed by several * indicate a relative address that needs to be corrected. +// Purpose of the pattern is to mark an instruction bound; it should consist of several +// full instructions plus one extra byte code. It's not required for the patterns +// to be unique (i.e., it's OK to have same pattern for unrelated functions). +// TODO: use hot patch prologues if exist +const char* known_bytecodes[] = { +#if _WIN64 +// "========================================================" - 56 symbols + "E9********CCCC", // multiple - jmp(0xE9) with address followed by empty space (0xCC - INT 3) + "4883EC284885C974", // release free() + "4883EC284885C975", // release _msize() + "4885C974375348", // release free() 8.0.50727.42, 10.0 + "C7442410000000008B", // release free() ucrtbase.dll 10.0.14393.33 + "48895C24085748", // release _aligned_msize() ucrtbase.dll 10.0.14393.33 + "48894C24084883EC28BA", // debug prologue + "4C894424184889542410", // debug _aligned_msize() 10.0 + "48894C24084883EC2848", // debug _aligned_free 10.0 + "488BD1488D0D#*******E9", // _o_free(), ucrtbase.dll + #if __TBB_OVERLOAD_OLD_MSVCR + "48895C2408574883EC3049", // release _aligned_msize 9.0 + "4883EC384885C975", // release _msize() 9.0 + "4C8BC1488B0DA6E4040033", // an old win64 SDK + #endif +#else // _WIN32 +// "========================================================" - 56 symbols + "8BFF558BEC8B", // multiple + "8BFF558BEC83", // release free() & _msize() 10.0.40219.325, _msize() ucrtbase.dll + "8BFF558BECFF", // release _aligned_msize ucrtbase.dll + "8BFF558BEC51", // release free() & _msize() ucrtbase.dll 10.0.14393.33 + "558BEC8B450885C074", // release _aligned_free 11.0 + "558BEC837D08000F", // release _msize() 11.0.51106.1 + "558BEC837D08007419FF", // release free() 11.0.50727.1 + "558BEC8B450885C075", // release _aligned_msize() 11.0.50727.1 + "558BEC6A018B", // debug free() & _msize() 11.0 + "558BEC8B451050", // debug _aligned_msize() 11.0 + "558BEC8B450850", // debug _aligned_free 11.0 + "8BFF558BEC6A", // debug free() & _msize() 10.0.40219.325 + #if __TBB_OVERLOAD_OLD_MSVCR + "6A1868********E8", 
// release free() 8.0.50727.4053, 9.0 + "6A1C68********E8", // release _msize() 8.0.50727.4053, 9.0 + #endif +#endif // _WIN64/_WIN32 + nullptr + }; + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY(CRT_VER,function_name,dbgsuffix) \ + ReplaceFunctionWithStore( #CRT_VER #dbgsuffix ".dll", #function_name, \ + (FUNCPTR)__TBB_malloc_safer_##function_name##_##CRT_VER##dbgsuffix, \ + known_bytecodes, (FUNCPTR*)&orig_##function_name##_##CRT_VER##dbgsuffix ); + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY_NO_FALLBACK(CRT_VER,function_name,dbgsuffix) \ + ReplaceFunctionWithStore( #CRT_VER #dbgsuffix ".dll", #function_name, \ + (FUNCPTR)__TBB_malloc_safer_##function_name##_##CRT_VER##dbgsuffix, 0, nullptr ); + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY_REDIRECT(CRT_VER,function_name,dest_func,dbgsuffix) \ + ReplaceFunctionWithStore( #CRT_VER #dbgsuffix ".dll", #function_name, \ + (FUNCPTR)__TBB_malloc_safer_##dest_func##_##CRT_VER##dbgsuffix, 0, nullptr ); + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_IMPL(CRT_VER,dbgsuffix) \ + if (BytecodesAreKnown(#CRT_VER #dbgsuffix ".dll")) { \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY(CRT_VER,free,dbgsuffix) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY(CRT_VER,_msize,dbgsuffix) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY_NO_FALLBACK(CRT_VER,realloc,dbgsuffix) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY(CRT_VER,_aligned_free,dbgsuffix) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY(CRT_VER,_aligned_msize,dbgsuffix) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_ENTRY_NO_FALLBACK(CRT_VER,_aligned_realloc,dbgsuffix) \ + } else \ + SkipReplacement(#CRT_VER #dbgsuffix ".dll"); + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_RELEASE(CRT_VER) __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_IMPL(CRT_VER,) +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_DEBUG(CRT_VER) __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_IMPL(CRT_VER,d) + +#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_RELEASE(CRT_VER) \ + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_DEBUG(CRT_VER) + +#if __TBB_OVERLOAD_OLD_MSVCR +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90); +#endif +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr110d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr110); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr120d); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr120); +__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(ucrtbase); + +/*** replacements for global operators new and delete ***/ + +#if _MSC_VER && !defined(__INTEL_COMPILER) +// #pragma warning( push ) +// #pragma warning( disable : 4290 ) +#endif + +/*** operator new overloads internals (Linux, Windows) ***/ + +void* operator_new(size_t sz) { + return InternalOperatorNew(sz); +} +void* operator_new_arr(size_t sz) { + return InternalOperatorNew(sz); +} +void operator_delete(void* ptr) noexcept { + __TBB_malloc_safer_delete(ptr); +} +#if _MSC_VER && !defined(__INTEL_COMPILER) +// #pragma warning( pop ) +#endif + +void operator_delete_arr(void* ptr) noexcept { + __TBB_malloc_safer_delete(ptr); +} +void* 
operator_new_t(size_t sz, const std::nothrow_t&) noexcept { + return scalable_malloc(sz); +} +void* operator_new_arr_t(std::size_t sz, const std::nothrow_t&) noexcept { + return scalable_malloc(sz); +} +void operator_delete_t(void* ptr, const std::nothrow_t&) noexcept { + __TBB_malloc_safer_delete(ptr); +} +void operator_delete_arr_t(void* ptr, const std::nothrow_t&) noexcept { + __TBB_malloc_safer_delete(ptr); +} + +struct Module { + const char *name; + bool doFuncReplacement; // do replacement in the DLL +}; + +Module modules_to_replace[] = { + {"msvcr100d.dll", true}, + {"msvcr100.dll", true}, + {"msvcr110d.dll", true}, + {"msvcr110.dll", true}, + {"msvcr120d.dll", true}, + {"msvcr120.dll", true}, + {"ucrtbase.dll", true}, +// "ucrtbased.dll" is not supported because of problems with _dbg functions +#if __TBB_OVERLOAD_OLD_MSVCR + {"msvcr90d.dll", true}, + {"msvcr90.dll", true}, + {"msvcr80d.dll", true}, + {"msvcr80.dll", true}, + {"msvcr70d.dll", true}, + {"msvcr70.dll", true}, + {"msvcr71d.dll", true}, + {"msvcr71.dll", true}, +#endif +#if __TBB_TODO + // TODO: Try enabling replacement for non-versioned system binaries below + {"msvcrtd.dll", true}, + {"msvcrt.dll", true}, +#endif + }; + +/* +We need to replace following functions: +malloc +calloc +_aligned_malloc +_expand (by dummy implementation) +??2@YAPAXI@Z operator new (ia32) +??_U@YAPAXI@Z void * operator new[] (size_t size) (ia32) +??3@YAXPAX@Z operator delete (ia32) +??_V@YAXPAX@Z operator delete[] (ia32) +??2@YAPEAX_K@Z void * operator new(unsigned __int64) (intel64) +??_V@YAXPEAX@Z void * operator new[](unsigned __int64) (intel64) +??3@YAXPEAX@Z operator delete (intel64) +??_V@YAXPEAX@Z operator delete[] (intel64) +??2@YAPAXIABUnothrow_t@std@@@Z void * operator new (size_t sz, const std::nothrow_t&) noexcept (optional) +??_U@YAPAXIABUnothrow_t@std@@@Z void * operator new[] (size_t sz, const std::nothrow_t&) noexcept (optional) + +and these functions have runtime-specific replacement: +realloc +free +_msize +_aligned_realloc +_aligned_free +_aligned_msize +*/ + +typedef struct FRData_t { + //char *_module; + const char *_func; + FUNCPTR _fptr; + FRR_ON_ERROR _on_error; +} FRDATA; + +FRDATA c_routines_to_replace[] = { + { "malloc", (FUNCPTR)scalable_malloc, FRR_FAIL }, + { "calloc", (FUNCPTR)scalable_calloc, FRR_FAIL }, + { "_aligned_malloc", (FUNCPTR)safer_aligned_malloc, FRR_FAIL }, + { "_expand", (FUNCPTR)safer_expand, FRR_IGNORE }, +}; + +FRDATA cxx_routines_to_replace[] = { +#if _WIN64 + { "??2@YAPEAX_K@Z", (FUNCPTR)operator_new, FRR_FAIL }, + { "??_U@YAPEAX_K@Z", (FUNCPTR)operator_new_arr, FRR_FAIL }, + { "??3@YAXPEAX@Z", (FUNCPTR)operator_delete, FRR_FAIL }, + { "??_V@YAXPEAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL }, +#else + { "??2@YAPAXI@Z", (FUNCPTR)operator_new, FRR_FAIL }, + { "??_U@YAPAXI@Z", (FUNCPTR)operator_new_arr, FRR_FAIL }, + { "??3@YAXPAX@Z", (FUNCPTR)operator_delete, FRR_FAIL }, + { "??_V@YAXPAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL }, +#endif + { "??2@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_t, FRR_IGNORE }, + { "??_U@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_arr_t, FRR_IGNORE } +}; + +#ifndef UNICODE +typedef char unicode_char_t; +#define WCHAR_SPEC "%s" +#else +typedef wchar_t unicode_char_t; +#define WCHAR_SPEC "%ls" +#endif + +// Check that we recognize bytecodes that should be replaced by trampolines. +// If some functions have unknown prologue patterns, replacement should not be done. 
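
For orientation before the BytecodesAreKnown() check that follows: a minimal sketch of
how one entry from known_bytecodes could be matched against the first bytes of a
function. The helper name here is hypothetical; the real check is IsPrologueKnown(),
declared in function_replacement.h, which additionally resolves the '#' relative-address
fixups that this sketch ignores.

#include <cstddef>

// Two pattern symbols encode one byte of machine code; '*' matches any single
// hex digit. Returns true when every byte covered by the pattern matches the
// start of 'code' (the pattern marks an instruction boundary, per the comment
// above the table).
static bool PrologueMatchesPattern(const char* pattern, const unsigned char* code)
{
    static const char hexDigits[] = "0123456789ABCDEF";
    for (std::size_t i = 0; pattern[i] != '\0' && pattern[i + 1] != '\0'; i += 2) {
        const unsigned char byte = code[i / 2];
        if (pattern[i] != '*' && pattern[i] != hexDigits[byte >> 4])
            return false;
        if (pattern[i + 1] != '*' && pattern[i + 1] != hexDigits[byte & 0x0F])
            return false;
    }
    return true;
}
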
+bool BytecodesAreKnown(const unicode_char_t *dllName) +{ + const char *funcName[] = {"free", "_msize", "_aligned_free", "_aligned_msize", 0}; + HMODULE module = GetModuleHandle(dllName); + + if (!module) + return false; + for (int i=0; funcName[i]; i++) + if (! IsPrologueKnown(dllName, funcName[i], known_bytecodes, module)) { + fprintf(stderr, "TBBmalloc: skip allocation functions replacement in " WCHAR_SPEC + ": unknown prologue for function " WCHAR_SPEC "\n", dllName, funcName[i]); + return false; + } + return true; +} + +void SkipReplacement(const unicode_char_t *dllName) +{ +#ifndef UNICODE + const char *dllStr = dllName; +#else + const size_t sz = 128; // all DLL name must fit + + char buffer[sz]; + size_t real_sz; + char *dllStr = buffer; + + errno_t ret = wcstombs_s(&real_sz, dllStr, sz, dllName, sz-1); + __TBB_ASSERT(!ret, "Dll name conversion failed"); +#endif + + for (size_t i=0; i<arrayLength(modules_to_replace); i++) + if (!strcmp(modules_to_replace[i].name, dllStr)) { + modules_to_replace[i].doFuncReplacement = false; + break; + } +} + +void ReplaceFunctionWithStore( const unicode_char_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc, FRR_ON_ERROR on_error = FRR_FAIL ) +{ + FRR_TYPE res = ReplaceFunction( dllName, funcName, newFunc, opcodes, origFunc ); + + if (res == FRR_OK || res == FRR_NODLL || (res == FRR_NOFUNC && on_error == FRR_IGNORE)) + return; + + fprintf(stderr, "Failed to %s function %s in module %s\n", + res==FRR_NOFUNC? "find" : "replace", funcName, dllName); + + // Unable to replace a required function + // Aborting because incomplete replacement of memory management functions + // may leave the program in an invalid state + abort(); +} + +void doMallocReplacement() +{ + // Replace functions and keep backup of original code (separate for each runtime) +#if __TBB_OVERLOAD_OLD_MSVCR + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr70) + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr71) + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr80) + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr90) +#endif + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr100) + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr110) + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr120) + __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL_RELEASE(ucrtbase) + + // Replace functions without storing original code + for (size_t j = 0; j < arrayLength(modules_to_replace); j++) { + if (!modules_to_replace[j].doFuncReplacement) + continue; + for (size_t i = 0; i < arrayLength(c_routines_to_replace); i++) + { + ReplaceFunctionWithStore( modules_to_replace[j].name, c_routines_to_replace[i]._func, c_routines_to_replace[i]._fptr, nullptr, nullptr, c_routines_to_replace[i]._on_error ); + } + if ( strcmp(modules_to_replace[j].name, "ucrtbase.dll") == 0 ) { + HMODULE ucrtbase_handle = GetModuleHandle("ucrtbase.dll"); + if (!ucrtbase_handle) + continue; + // If _o_free function is present and patchable, redirect it to tbbmalloc as well + // This prevents issues with other _o_* functions which might allocate memory with malloc + if ( IsPrologueKnown("ucrtbase.dll", "_o_free", known_bytecodes, ucrtbase_handle)) { + ReplaceFunctionWithStore( "ucrtbase.dll", "_o_free", (FUNCPTR)__TBB_malloc__o_free, known_bytecodes, (FUNCPTR*)&orig__o_free, FRR_FAIL ); + } + // Similarly for _free_base + if (IsPrologueKnown("ucrtbase.dll", "_free_base", known_bytecodes, ucrtbase_handle)) { + ReplaceFunctionWithStore("ucrtbase.dll", "_free_base", (FUNCPTR)__TBB_malloc__free_base, known_bytecodes, (FUNCPTR*)&orig__free_base, 
FRR_FAIL); + } + // ucrtbase.dll does not export operator new/delete, so skip the rest of the loop. + continue; + } + + for (size_t i = 0; i < arrayLength(cxx_routines_to_replace); i++) + { +#if !_WIN64 + // in Microsoft* Visual Studio* 2012 and 2013 32-bit operator delete consists of 2 bytes only: short jump to free(ptr); + // replacement should be skipped for this particular case. + if ( ((strcmp(modules_to_replace[j].name, "msvcr110.dll") == 0) || (strcmp(modules_to_replace[j].name, "msvcr120.dll") == 0)) && (strcmp(cxx_routines_to_replace[i]._func, "??3@YAXPAX@Z") == 0) ) continue; + // in Microsoft* Visual Studio* 2013 32-bit operator delete[] consists of 2 bytes only: short jump to free(ptr); + // replacement should be skipped for this particular case. + if ( (strcmp(modules_to_replace[j].name, "msvcr120.dll") == 0) && (strcmp(cxx_routines_to_replace[i]._func, "??_V@YAXPAX@Z") == 0) ) continue; +#endif + ReplaceFunctionWithStore( modules_to_replace[j].name, cxx_routines_to_replace[i]._func, cxx_routines_to_replace[i]._fptr, nullptr, nullptr, cxx_routines_to_replace[i]._on_error ); + } + } +} + +#endif // !__TBB_WIN8UI_SUPPORT + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Suppress warning for UWP build ('main' signature found without threading model) + // #pragma warning(push) + // #pragma warning(disable:4447) +#endif + +extern "C" BOOL WINAPI DllMain( HINSTANCE hInst, DWORD callReason, LPVOID reserved ) +{ + + if ( callReason==DLL_PROCESS_ATTACH && reserved && hInst ) { +#if !__TBB_WIN8UI_SUPPORT + if (!tbb::detail::r1::GetBoolEnvironmentVariable("TBB_MALLOC_DISABLE_REPLACEMENT")) + { + doMallocReplacement(); + } +#endif // !__TBB_WIN8UI_SUPPORT + } + + return TRUE; +} + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // #pragma warning(pop) +#endif + +// Just to make the linker happy and link the DLL to the application +extern "C" __declspec(dllexport) void __TBB_malloc_proxy() +{ + +} + +#endif //_WIN32 diff --git a/src/tbb/src/tbbmalloc_proxy/proxy.h b/src/tbb/src/tbbmalloc_proxy/proxy.h new file mode 100644 index 000000000..5f0133f9e --- /dev/null +++ b/src/tbb/src/tbbmalloc_proxy/proxy.h @@ -0,0 +1,55 @@ +/* + Copyright (c) 2005-2021 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
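
For context on how the replacement machinery above is activated: applications opt in
explicitly. On Linux the documented route is preloading or linking the proxy library,
e.g. LD_PRELOAD=libtbbmalloc_proxy.so.2 (the exact soname depends on the installed
TBB). On Windows, including the proxy header pulls in the __TBB_malloc_proxy entry
point exported above via a linker directive, and replacement can still be vetoed at
run time with the TBB_MALLOC_DISABLE_REPLACEMENT environment variable checked in
DllMain(). A sketch of the documented Windows usage, assuming oneTBB's include layout:

#include "oneapi/tbb/tbbmalloc_proxy.h" // forces tbbmalloc_proxy.dll to load

int main()
{
    int* p = new int[100]; // serviced by the scalable allocator once replaced
    delete[] p;
    return 0;
}
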
+*/ + +#ifndef _TBB_malloc_proxy_H_ +#define _TBB_malloc_proxy_H_ + +#define MALLOC_UNIXLIKE_OVERLOAD_ENABLED __linux__ +#define MALLOC_ZONE_OVERLOAD_ENABLED __APPLE__ + +// MALLOC_UNIXLIKE_OVERLOAD_ENABLED depends on MALLOC_CHECK_RECURSION stuff +// TODO: limit MALLOC_CHECK_RECURSION to *_OVERLOAD_ENABLED only +#if __unix__ || __APPLE__ || MALLOC_UNIXLIKE_OVERLOAD_ENABLED +#define MALLOC_CHECK_RECURSION 1 +#endif + +#include "oneapi/tbb/detail/_config.h" +#include <stddef.h> + +extern "C" { + TBBMALLOC_EXPORT void __TBB_malloc_safer_free( void *ptr, void (*original_free)(void*)); + TBBMALLOC_EXPORT void * __TBB_malloc_safer_realloc( void *ptr, size_t, void* ); + TBBMALLOC_EXPORT void * __TBB_malloc_safer_aligned_realloc( void *ptr, size_t, size_t, void* ); + TBBMALLOC_EXPORT size_t __TBB_malloc_safer_msize( void *ptr, size_t (*orig_msize_crt80d)(void*)); + TBBMALLOC_EXPORT size_t __TBB_malloc_safer_aligned_msize( void *ptr, size_t, size_t, size_t (*orig_msize_crt80d)(void*,size_t,size_t)); + +#if MALLOC_ZONE_OVERLOAD_ENABLED + void __TBB_malloc_free_definite_size(void *object, size_t size); +#endif +} // extern "C" + +// Struct with original free() and _msize() pointers +struct orig_ptrs { + void (*free) (void*); + size_t (*msize)(void*); +}; + +struct orig_aligned_ptrs { + void (*aligned_free) (void*); + size_t (*aligned_msize)(void*,size_t,size_t); +}; + +#endif /* _TBB_malloc_proxy_H_ */ diff --git a/src/tbb/src/tbbmalloc/proxy_overload_osx.h b/src/tbb/src/tbbmalloc_proxy/proxy_overload_osx.h similarity index 73% rename from src/tbb/src/tbbmalloc/proxy_overload_osx.h rename to src/tbb/src/tbbmalloc_proxy/proxy_overload_osx.h index d9dd412cd..695829837 100644 --- a/src/tbb/src/tbbmalloc/proxy_overload_osx.h +++ b/src/tbb/src/tbbmalloc_proxy/proxy_overload_osx.h @@ -1,21 +1,17 @@ /* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. + Copyright (c) 2005-2022 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ // The original source for this code is @@ -50,6 +46,8 @@ #include <AvailabilityMacros.h> #include <malloc/malloc.h> +#include <mach/mach.h> +#include <stdlib.h> static kern_return_t enumerator(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t) @@ -93,7 +91,13 @@ static void impl_zone_discharge(malloc_zone_t *, void *) {} static void impl_zone_destroy(struct _malloc_zone_t *) {} /* note: impl_malloc_usable_size() is called for each free() call, so it must be fast */ -static size_t impl_malloc_usable_size(struct _malloc_zone_t *, const void *ptr); +static size_t impl_malloc_usable_size(struct _malloc_zone_t *, const void *ptr) +{ + // malloc_usable_size() is used by macOS* to recognize which memory manager + // allocated the address, so our wrapper must not redirect to the original function. + return __TBB_malloc_safer_msize(const_cast<void*>(ptr), nullptr); +} + static void *impl_malloc(struct _malloc_zone_t *, size_t size); static void *impl_calloc(struct _malloc_zone_t *, size_t num_items, size_t size); static void *impl_valloc(struct _malloc_zone_t *, size_t size); @@ -108,12 +112,12 @@ static void impl_free_definite_size(struct _malloc_zone_t*, void *ptr, size_t si } /* Empty out caches in the face of memory pressure. */ -static size_t impl_pressure_relief(struct _malloc_zone_t *, size_t goal) +static size_t impl_pressure_relief(struct _malloc_zone_t *, size_t /* goal */) { return 0; } -static malloc_zone_t *system_zone; +static malloc_zone_t *system_zone = nullptr; struct DoMallocReplacement { DoMallocReplacement() { @@ -151,11 +155,30 @@ struct DoMallocReplacement { // make sure that default purgeable zone is initialized malloc_default_purgeable_zone(); - // after unregistration of system zone, our zone became default + void* ptr = malloc(1); + // get all registered memory zones + unsigned zcount = 0; + malloc_zone_t** zone_array = nullptr; + kern_return_t errorcode = malloc_get_all_zones(mach_task_self(),nullptr,(vm_address_t**)&zone_array,&zcount); + if (!errorcode && zone_array && zcount>0) { + // find the zone that allocated ptr + for (unsigned i=0; i<zcount; ++i) { + malloc_zone_t* z = zone_array[i]; + if (z && z->size(z,ptr)>0) { // the right one is found + system_zone = z; + break; + } + } + } + free(ptr); + malloc_zone_register(&zone); - system_zone = malloc_default_zone(); - malloc_zone_unregister(system_zone); - malloc_zone_register(system_zone); + if (system_zone) { + // after unregistration of the system zone, the last registered (i.e. our) zone becomes the default + malloc_zone_unregister(system_zone); + // register the system zone back + malloc_zone_register(system_zone); + } } }; diff --git a/src/tbb/src/tbbmalloc_proxy/tbbmalloc_proxy.rc b/src/tbb/src/tbbmalloc_proxy/tbbmalloc_proxy.rc new file mode 100644 index 000000000..1884b119a --- /dev/null +++ b/src/tbb/src/tbbmalloc_proxy/tbbmalloc_proxy.rc @@ -0,0 +1,74 @@ +// Copyright (c) 2005-2024 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +///////////////////////////////////////////////////////////////////////////// +// +// Includes +// +#include <winresrc.h> +#include "../../include/oneapi/tbb/version.h" + +///////////////////////////////////////////////////////////////////////////// +// Neutral resources + +#ifdef _WIN32 +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +#pragma code_page(1252) +#endif //_WIN32 + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// +#define TBB_VERNUMBERS TBB_VERSION_MAJOR,TBB_VERSION_MINOR,TBB_VERSION_PATCH +#define TBB_VERSION TBB_VERSION_STRING + +VS_VERSION_INFO VERSIONINFO + FILEVERSION TBB_VERNUMBERS + PRODUCTVERSION TBB_VERNUMBERS + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004b0" + BEGIN + VALUE "CompanyName", "Intel Corporation\0" + VALUE "FileDescription", "oneAPI Threading Building Blocks (oneTBB) library\0" + VALUE "FileVersion", TBB_VERSION "\0" + VALUE "LegalCopyright", "Copyright 2005-2024 Intel Corporation. All Rights Reserved.\0" + VALUE "LegalTrademarks", "\0" +#ifndef TBB_USE_DEBUG + VALUE "OriginalFilename", "tbbmalloc_proxy.dll\0" +#else + VALUE "OriginalFilename", "tbbmalloc_proxy_debug.dll\0" +#endif + VALUE "ProductName", "oneAPI Threading Building Blocks (oneTBB)\0" + VALUE "ProductVersion", TBB_VERSION "\0" + VALUE "PrivateBuild", "\0" + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0, 1200 + END +END diff --git a/src/tbb/src/tbbproxy/tbbproxy-windows.asm b/src/tbb/src/tbbproxy/tbbproxy-windows.asm deleted file mode 100644 index 890d6f9c3..000000000 --- a/src/tbb/src/tbbproxy/tbbproxy-windows.asm +++ /dev/null @@ -1,113 +0,0 @@ -; Copyright 2005-2014 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. Threading Building Blocks is free software; -; you can redistribute it and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. Threading Building Blocks is -; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -; See the GNU General Public License for more details. You should have received a copy of -; the GNU General Public License along with Threading Building Blocks; if not, write to the -; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software library without -; restriction. Specifically, if other files instantiate templates or use macros or inline -; functions from this file, or you compile this file and link it with other files to produce -; an executable, this file does not by itself cause the resulting executable to be covered -; by the GNU General Public License. 
This exception does not however invalidate any other -; reasons why the executable file might be covered by the GNU General Public License. - -#include "tbb/tbb_config.h" - -// __TBB_STRING macro defined in "tbb_stddef.h". However, we cannot include "tbb_stddef.h" -// because it contains a lot of C/C++ definitions. So, we have to define __TBB_STRING here: -#define __TBB_STRING_AUX( x ) #x -#define __TBB_STRING( x ) __TBB_STRING_AUX( x ) - -// Eliminate difference between IA-32 and Intel 64: AWORD is a type of pointer, LANG is language -// specification for extern directive. -#ifdef ARCH_ia32 - #define AWORD dword - #define LANG c -#else - #define AWORD qword - #define LANG -#endif - -#ifdef ARCH_ia32 - // These directives are required for IA32 architecture only. - .686 - .model flat, syscall -#endif - -/* - Symbol names. -*/ - -// Note: masm for IA-32 does not like symbols defined as "name:" in data sections, -// so we have to define symbols with "name label type" directive instead. - -fname macro sym:req - align sizeof AWORD - Ln_&sym& label byte - byte "&sym&", 0 -endm - -.const // Symbol names are constants. -#define __TBB_SYMBOL( sym ) fname sym -#include __TBB_STRING( __TBB_LST ) - -/* - Symbol descriptors. -*/ - -extern LANG __tbb_internal_runtime_loader_stub : AWORD - -fsymbol macro sym:req - Ls_&sym& label AWORD - AWORD __tbb_internal_runtime_loader_stub - AWORD Ln_&sym& - dword sizeof AWORD - dword 1 -endm - -.data -align sizeof AWORD -public LANG __tbb_internal_runtime_loader_symbols -__tbb_internal_runtime_loader_symbols label AWORD -#define __TBB_SYMBOL( sym ) fsymbol sym -#include __TBB_STRING( __TBB_LST ) -AWORD 0, 0 // Terminator of the __tbb_internal_runtime_loader_symbols array. -dword 0, 0 - -/* - Generate functions. -*/ - -// Helper assembler macro to handle different naming conventions on IA-32 and Intel 64: -// IA-32: C++ names preserved, C names require leading underscore. -// Intel 64: All names preserved. -mangle macro name:req - #ifdef ARCH_ia32 - if @instr( 1, name, <?> ) - exitm @catstr( name ) - else - exitm @catstr( <_>, name ) - endif - #else - exitm @catstr( name ) - #endif -endm - -function macro sym:req - mangle( sym ) proc - jmp AWORD ptr Ls_&sym& - mangle( sym ) endp -endm - -.code -#define __TBB_SYMBOL( sym ) function sym -#include __TBB_STRING( __TBB_LST ) - -end - -// end of file // diff --git a/src/tbb/src/tbbproxy/tbbproxy.cpp b/src/tbb/src/tbbproxy/tbbproxy.cpp deleted file mode 100644 index 26932f881..000000000 --- a/src/tbb/src/tbbproxy/tbbproxy.cpp +++ /dev/null @@ -1,610 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#if !__TBB_WIN8UI_SUPPORT -#define TBB_PREVIEW_RUNTIME_LOADER 1 -#include "tbb/runtime_loader.h" -#include "tbb/tbb_stddef.h" - -// C standard headers. -#include <cctype> // isspace -#include <cstdarg> // va_list, etc. -#include <cstdio> // fprintf, stderr, etc. -#include <cstdlib> // malloc, free, abort. -#include <cstring> // strlen, etc. - -// C++ standard headers. -#include <typeinfo> - -// OS-specific includes. -#if _WIN32 || _WIN64 - #include <windows.h> - #define snprintf _snprintf - #undef max -#else - #include <dlfcn.h> // dlopen, dlsym, dlclose, dlerror. -#endif - -#if TBB_USE_ASSERT - // We cannot use __TBB_ASSERT as it is because it calls a function from tbb library which may - // be not yet loaded. Redefine __TBB_ASSERT not to call tbb functions. - #undef __TBB_ASSERT - #define __TBB_ASSERT( cond, msg ) { \ - if ( ! (cond) ) { \ - say( "%s:%d: Assertion failed: %s.", __FILE__, __LINE__, (msg) ); \ - } /* if */ \ - /* TODO: abort? */ \ - } -#endif - -// Declare here, define at the bottom. -extern "C" int __tbb_internal_runtime_loader_stub(); - -namespace tbb { - -namespace interface6 { - -namespace internal { - -namespace runtime_loader { - - -/* - ------------------------------------------------------------------------------------------------ - User interaction utilities. - ------------------------------------------------------------------------------------------------ -*/ - - -// Print message to stderr. Do not call it directly, use say() or tell() instead. -static void _say( char const * format, va_list args ) { - /* - On Linux Intel 64, vsnprintf() modifies args argument, so vsnprintf() crashes if it - is called for the second time with the same args. To prevent the crash, we have to - pass a fresh intact copy of args to vsnprintf() each time. - - On Windows, unfortunately, standard va_copy() macro is not available. However, it - seems vsnprintf() does not modify args argument. - */ - #if ! ( _WIN32 || _WIN64 ) - va_list _args; - __va_copy( _args, args ); // Make copy of args. - #define args _args // Substitute args with its copy, _args. - #endif - int len = vsnprintf( NULL, 0, format, args ); - #if ! ( _WIN32 || _WIN64 ) - #undef args // Remove substitution. - va_end( _args ); - #endif - char * buf = reinterpret_cast< char * >( malloc( len + 1 ) ); - if ( buf == NULL ) { - abort(); - } // if - vsnprintf( buf, len + 1, format, args ); - fprintf( stderr, "TBB: %s\n", buf ); - free( buf ); -} // _say - - -// Debug/test/troubleshooting printing controlled by TBB_VERSION environment variable. -// To enable printing, the variable must be set and not empty. -// Do not call it directly, use tell() instead. -static void _tell( char const * format, va_list args ) { - char const * var = getenv( "TBB_VERSION" ); - if ( var != NULL && var[ 0 ] != 0 ) { - _say( format, args ); - } // if -} // _tell - - -// Print message to stderr unconditionally. -static void say( char const * format, ... 
) { - va_list args; - va_start( args, format ); - _say( format, args ); - va_end( args ); -} // say - - -// Debug/test/troubleshooting printing controlled by TBB_VERSION environment variable. -// To enable printing, the variable must be set and not empty. -static void tell( char const * format, ... ) { - va_list args; - va_start( args, format ); - _tell( format, args ); - va_end( args ); -} // tell - - -// Error reporting utility. Behavior depends on mode. -static tbb::runtime_loader::error_code error( tbb::runtime_loader::error_mode mode, tbb::runtime_loader::error_code err, char const * format, ... ) { - va_list args; - va_start( args, format ); - if ( mode == tbb::runtime_loader::em_abort ) { - // In em_abort mode error message printed unconditionally. - _say( format, args ); - } else { - // In other modes printing depends on TBB_VERSION environment variable. - _tell( format, args ); - } // if - va_end( args ); - switch ( mode ) { - case tbb::runtime_loader::em_abort : { - say( "Aborting..." ); - #if TBB_USE_DEBUG && ( _WIN32 || _WIN64 ) - DebugBreak(); - #endif - abort(); - } break; - case tbb::runtime_loader::em_throw : { - throw err; - } break; - case tbb::runtime_loader::em_status : { - // Do nothing. - } break; - } // switch - return err; -} // error - - -/* - ------------------------------------------------------------------------------------------------ - General-purpose string manipulation utilities. - ------------------------------------------------------------------------------------------------ -*/ - - -// Delete character ch from string str in-place. -static void strip( char * str, char ch ) { - int in = 0; // Input character index. - int out = 0; // Output character index. - for ( ; ; ) { - if ( str[ in ] != ch ) { - str[ out ] = str[ in ]; - ++ out; - } // if - if ( str[ in ] == 0 ) { - break; - } // if - ++ in; - } // forever -} // func strip - - -// Strip trailing whitespaces in-place. -static void trim( char * str ) { - size_t len = strlen( str ); - while ( len > 0 && isspace( str[ len - 1 ] ) ) { - -- len; - } // while - str[ len ] = 0; -} // func trim - - -#if _WIN32 || _WIN64 - // "When specifying a path, be sure to use backslashes (\), not forward slashes (/)." - // (see http://msdn.microsoft.com/en-us/library/ms886736.aspx). - const char proper_slash = '\\'; - inline char char_or_slash( char c ) { return c=='/'? '\\': c; } -#else - const char proper_slash = '/'; - inline char char_or_slash( char c ) { return c; } -#endif - -// Concatenate name of directory and name of file. -void cat_file( char const * dir, char const * file, char * buffer, size_t len ) { - size_t i = 0; - // Copy directory name - for( ; i<len && *dir; ++i, ++dir ) { - buffer[i] = char_or_slash(*dir); - } - // Append trailing slash if missed. - if( i>0 && i<len && buffer[i-1]!=proper_slash ) { - buffer[i++] = proper_slash; - } - // Copy file name - __TBB_ASSERT( char_or_slash(*file)!=proper_slash, "File name starts with a slash" ); - for( ; i<len && *file; ++i, ++file ) { - buffer[i] = *file; - } - // Append null terminator - buffer[ i<len? i: len-1 ] = '\0'; -} // cat_file - - -/* - ------------------------------------------------------------------------------------------------ - Windows implementation of dlopen, dlclose, dlsym, dlerror. - ------------------------------------------------------------------------------------------------ -*/ - - -#if _WIN32 || _WIN64 - - // Implement Unix-like interface (dlopen, dlclose, dlsym, dlerror) via Win32 API functions. - - // Type of dlopen result. 
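
The shim below maps each Unix dl* call onto its Win32 counterpart one-to-one. A
self-contained usage sketch under that mapping (the DLL name is illustrative;
TBB_runtime_interface_version is the same exported version probe that _load()
resolves later in this file):

#include <windows.h>
#include <cstdio>

int main()
{
    // dlopen(name, RTLD_NOW) becomes LoadLibrary(name).
    HMODULE lib = LoadLibraryA("tbb.dll"); // illustrative DLL name
    if (!lib) { std::fprintf(stderr, "load failed\n"); return 1; }

    // dlsym(lib, sym) becomes GetProcAddress(lib, sym).
    typedef int (*version_fn)();
    version_fn get_version =
        (version_fn) GetProcAddress(lib, "TBB_runtime_interface_version");
    if (get_version)
        std::printf("interface version: %d\n", get_version());

    FreeLibrary(lib); // dlclose(lib)
    return 0;
}
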
- typedef HMODULE handle_t; - - enum rtld_flags_t { - RTLD_NOW, - RTLD_GLOBAL - }; // enum rtld_flags_t - - // Unix-like dlopen(). - static handle_t dlopen( char const * name, rtld_flags_t ) { - return LoadLibrary( name ); - } // dlopen - - // Unix-like dlsym(). - static void * dlsym( handle_t lib, char const * sym ) { - return (void*)GetProcAddress( lib, sym ); - } // dlsym - - // Unix-like dlclose(). - static int dlclose( handle_t lib ) { - return ! FreeLibrary( lib ); - } // dlclose - - // The function mimics Unix dlerror() function. - // Note: Not thread-safe due to statically allocated buffer. - static char * dlerror() { - - static char buffer[ 2048 ]; // Note: statically allocated buffer. - - DWORD err = GetLastError(); - if ( err == ERROR_SUCCESS ) { - return NULL; - } // if - - DWORD rc; - rc = - FormatMessage( - FORMAT_MESSAGE_FROM_SYSTEM, - NULL, - err, - MAKELANGID( LANG_NEUTRAL, SUBLANG_DEFAULT ), // Default language. - reinterpret_cast< LPTSTR >( & buffer ), - sizeof( buffer ), - NULL - ); - if ( rc == 0 ) { - // FormatMessage() failed to format system error message. Buffer to short or another issue. - snprintf( buffer, sizeof( buffer ), "System error %u.", err ); - } else { - /* - FormatMessage() returns Windows-style end-of-lines, "\r\n". When string is printed, - printf() also replaces all the occurrences of "\n" with "\r\n" (again!), so sequences - like "\r\r\r\n" appear in output. It is not too good. Stripping all "\r" normalizes - string and returns it to canonical form, so printf() will produce correct end-of-line - sequences. - */ - strip( buffer, '\r' ); // Delete carriage returns if any. - trim( buffer ); // Delete trailing newlines and spaces. - } // if - - return buffer; - - } // dlerror - -#else - - // Type of dlopen() result. - typedef void * handle_t; - -#endif - - -/* - ------------------------------------------------------------------------------------------------ - Runtime loader stuff. - ------------------------------------------------------------------------------------------------ -*/ - - -// Descriptor table declaration. It is defined in assembler file. -enum symbol_type_t { - st_object = 0, - st_function = 1 -}; // enum symbol_type_t -struct symbol_t { - void * addr; - char const * name; - int size; - symbol_type_t type; -}; // symbol_t -extern "C" symbol_t __tbb_internal_runtime_loader_symbols[]; - -// Hooks for internal use (e. g. for testing). -tbb::runtime_loader::error_mode stub_mode = tbb::runtime_loader::em_abort; - -static char const * tbb_dll_name = __TBB_STRING(__TBB_DLL_NAME); // Name of TBB library. -static handle_t handle = NULL; // Handle of loaded TBB library or NULL. -static int version = 0; // Version of the loaded library. -static int counter = 0; // Number of runtime_loader objects using the loaded library. - -#define ANOTHER_RTL "probably multiple runtime_loader objects work in parallel" - - -// One attempt to load library (dll_name can be a full path or just a file name). -static tbb::runtime_loader::error_code _load( char const * dll_name, int min_ver, int max_ver ) { - - tbb::runtime_loader::error_mode mode = tbb::runtime_loader::em_status; - tbb::runtime_loader::error_code code = tbb::runtime_loader::ec_ok; - - /* - If these variables declared at the first usage, Intel compiler (on Windows IA-32) isues - warning(s): - transfer of control [goto error] bypasses initialization of: ... - Declaring variables at the beginning of the function eliminates warnings. 
- */ - typedef int (*int_func_t)( void ); - char const * get_ver_name = "TBB_runtime_interface_version"; // Name of function. - int_func_t get_ver_func = NULL; // Pointer to function. - handle_t _handle = NULL; - int _version = 0; - int total = 0; - int not_found = 0; - - // This function should be called iff there is no loaded library. - __TBB_ASSERT( handle == NULL, "Handle is invalid; " ANOTHER_RTL ); - __TBB_ASSERT( version == 0, "Version is invalid; " ANOTHER_RTL ); - __TBB_ASSERT( counter == 0, "Counter is invalid; " ANOTHER_RTL ); - - tell( "Loading \"%s\"...", dll_name ); - - // First load the library. - _handle = dlopen( dll_name, RTLD_NOW ); - if ( _handle == NULL ) { - const char * msg = dlerror(); - code = error( mode, tbb::runtime_loader::ec_no_lib, "Loading \"%s\" failed; system error: %s", dll_name, msg ); - goto error; - } // if - - // Then try to find out its version. - /* - g++ 3.4 issues error: - ISO C++ forbids casting between pointer-to-function and pointer-to-object - on reinterpret_cast<>. Thus, we have no choice but using C-style type cast. - */ - get_ver_func = (int_func_t) dlsym( _handle, get_ver_name ); - if ( get_ver_func == NULL ) { - code = error( mode, tbb::runtime_loader::ec_bad_lib, "Symbol \"%s\" not found; library rejected.", get_ver_name ); - goto error; - } // if - _version = get_ver_func(); - if ( ! ( min_ver <= _version && _version <= max_ver ) ) { - code = error( mode, tbb::runtime_loader::ec_bad_ver, "Version %d is out of requested range; library rejected.", _version ); - goto error; - } // if - - // Library is suitable. Mark it as loaded. - handle = _handle; - version = _version; - counter += 1; - __TBB_ASSERT( counter == 1, "Counter is invalid; " ANOTHER_RTL ); - - // Now search for all known symbols. - for ( int i = 0; __tbb_internal_runtime_loader_symbols[ i ].name != NULL; ++ i ) { - symbol_t & symbol = __tbb_internal_runtime_loader_symbols[ i ]; - // Verify symbol descriptor. - __TBB_ASSERT( symbol.type == st_object || symbol.type == st_function, "Invalid symbol type" ); - #if _WIN32 || _WIN64 - __TBB_ASSERT( symbol.type == st_function, "Should not be symbols of object type on Windows" ); - #endif - if ( symbol.type == st_object ) { - __TBB_ASSERT( symbol.addr != NULL, "Object address invalid" ); - __TBB_ASSERT( symbol.size > 0, "Symbol size must be > 0" ); - __TBB_ASSERT( symbol.size <= 0x1000, "Symbol size too big" ); - } else { // Function - // __TBB_ASSERT( symbol.addr == reinterpret_cast< void * >( & stub ), "Invalid symbol address" ); - __TBB_ASSERT( symbol.size == sizeof( void * ), "Invalid symbol size" ); - } // if - void * addr = dlsym( _handle, symbol.name ); - if ( addr != NULL ) { - if ( symbol.type == st_object ) { - if ( strncmp( symbol.name, "_ZTS", 4 ) == 0 ) { - // If object name begins with "_ZTS", it is a string, mangled type name. - // Its value must equal to name of symbol without "_ZTS" prefix. - char const * name = static_cast< char const * >( addr ); - __TBB_ASSERT( strlen( name ) + 1 == size_t( symbol.size ), "Unexpected size of typeinfo name" ); - __TBB_ASSERT( strcmp( symbol.name + 4, name ) == 0, "Unexpected content of typeinfo name" ); - strncpy( reinterpret_cast< char * >( symbol.addr ), name, symbol.size ); - reinterpret_cast< char * >( symbol.addr )[ symbol.size - 1 ] = 0; - } else { - #if TBB_USE_ASSERT - // If object name begins with "_ZTI", it is an object of std::type_info class. - // Its protected value must equal to name of symbol without "_ZTI" prefix. 
- if ( strncmp( symbol.name, "_ZTI", 4 ) == 0 ) { - std::type_info const * info = static_cast< std::type_info const * >( addr ); - __TBB_ASSERT( size_t( symbol.size ) >= sizeof( std::type_info ), "typeinfo size is too small" ); - // std::type_info::name is not a virtual method, it is safe to call it. - __TBB_ASSERT( strcmp( symbol.name + 4, info->name() ) == 0, "Unexpected content of typeinfo" ); - } // if - #endif - // Copy object content from libtbb into runtime_loader. - memcpy( symbol.addr, addr, symbol.size ); - }; // if - } else { // Function - symbol.addr = addr; - } // if - } else { - char const * msg = dlerror(); - tell( "Symbol \"%s\" not found; system error: %s", symbol.name, msg ); - ++ not_found; - } // if - ++ total; - } // for i - - if ( not_found > 0 ) { - tell( "%d of %d symbols not found.", not_found, total ); - } // if - - tell( "The library successfully loaded." ); - return code; - - error: - if ( _handle != NULL ) { - int rc = dlclose( _handle ); - if ( rc != 0 ) { - // Error occurred. - __TBB_ASSERT( rc != 0, "Unexpected error: dlclose() failed" ); - } // if - } // if - _handle = NULL; - return code; - -} // _load - - -static tbb::runtime_loader::error_code load( tbb::runtime_loader::error_mode mode, char const * path[], int min_ver, int max_ver ) { - // Check arguments first. - if ( min_ver <= 0 ) { - return error( mode, tbb::runtime_loader::ec_bad_arg, "tbb::runtime_loader::load(): Invalid value of min_ver argument: %d.", min_ver ); - } // if - if ( max_ver <= 0 ) { - return error( mode, tbb::runtime_loader::ec_bad_arg, "tbb::runtime_loader::load(): Invalid value of max_ver argument: %d.", max_ver ); - } // if - if ( min_ver > max_ver ) { - return error( mode, tbb::runtime_loader::ec_bad_arg, "tbb::runtime_loader::load(): min_ver and max_ver specify empty range: [%d, %d].", min_ver, max_ver ); - } // if - if ( min_ver == max_ver ) { - tell( "Searching for \"%s\" version %d...", tbb_dll_name, min_ver ); - } else if ( max_ver == INT_MAX ) { - tell( "Searching for \"%s\" version %d+...", tbb_dll_name, min_ver ); - } else { - tell( "Searching for \"%s\" version in range [%d, %d]...", tbb_dll_name, min_ver, max_ver ); - } // if - // Then check whether a library already loaded. - if ( handle != NULL ) { - // Library already loaded. Check whether the version is compatible. - __TBB_ASSERT( version > 0, "Version is invalid; " ANOTHER_RTL ); - __TBB_ASSERT( counter > 0, "Counter is invalid; " ANOTHER_RTL ); - if ( min_ver <= version && version <= max_ver ) { - // Version is ok, let us use this library. - tell( "Library version %d is already loaded.", version ); - counter += 1; - return tbb::runtime_loader::ec_ok; - } else { - // Version is not suitable. - return error( mode, tbb::runtime_loader::ec_bad_ver, "Library version %d is already loaded.", version ); - } // if - } // if - // There is no loaded library, try to load it using provided directories. 
- __TBB_ASSERT( version == 0, "Version is invalid; " ANOTHER_RTL ); - __TBB_ASSERT( counter == 0, "Counter is invalid; " ANOTHER_RTL ); - size_t namelen = strlen(tbb_dll_name); - size_t buflen = 0; - char * buffer = NULL; - for ( int i = 0; path[i] != NULL; ++ i ) { - size_t len = strlen(path[i]) + namelen + 2; // 1 for slash and 1 for null terminator - if( buflen<len ) { - free( buffer ); - buflen = len; - buffer = (char*)malloc( buflen ); - } - cat_file( path[i], tbb_dll_name, buffer, buflen ); - __TBB_ASSERT(strstr(buffer,tbb_dll_name), "Name concatenation error"); - tbb::runtime_loader::error_code ec = _load( buffer, min_ver, max_ver ); - if ( ec == tbb::runtime_loader::ec_ok ) { - return ec; // Success. Exiting... - } // if - } // for i - free( buffer ); - return error( mode, tbb::runtime_loader::ec_no_lib, "No suitable library found." ); -} // load - - - - -// Supress "defined but not used" compiler warnings. -static void const * dummy[] = { - (void *) & strip, - (void *) & trim, - & dummy, - NULL -}; - - -} // namespace runtime_loader - -} // namespace internal - - -runtime_loader::runtime_loader( error_mode mode ) : - my_mode( mode ), - my_status( ec_ok ), - my_loaded( false ) -{ -} // ctor - - -runtime_loader::runtime_loader( char const * path[], int min_ver, int max_ver, error_mode mode ) : - my_mode( mode ), - my_status( ec_ok ), - my_loaded( false ) -{ - load( path, min_ver, max_ver ); -} // ctor - - -runtime_loader::~runtime_loader() { -} // dtor - - -tbb::runtime_loader::error_code runtime_loader::load( char const * path[], int min_ver, int max_ver ) { - if ( my_loaded ) { - my_status = tbb::interface6::internal::runtime_loader::error( my_mode, ec_bad_call, "tbb::runtime_loader::load(): Library already loaded by this runtime_loader object." ); - } else { - my_status = internal::runtime_loader::load( my_mode, path, min_ver, max_ver ); - if ( my_status == ec_ok ) { - my_loaded = true; - } // if - } // if - return my_status; -} // load - - - - -tbb::runtime_loader::error_code runtime_loader::status() { - return my_status; -} // status - - -} // namespace interface6 - -} // namespace tbb - - -// Stub function replaces all TBB entry points when no library is loaded. -int __tbb_internal_runtime_loader_stub() { - char const * msg = NULL; - if ( tbb::interface6::internal::runtime_loader::handle == NULL ) { - msg = "A function is called while TBB library is not loaded"; - } else { - msg = "A function is called which is not present in loaded TBB library"; - } // if - return tbb::interface6::internal::runtime_loader::error( tbb::interface6::internal::runtime_loader::stub_mode, tbb::runtime_loader::ec_no_lib, msg ); -} // stub - -#endif // !__TBB_WIN8UI_SUPPORT // -// end of file // diff --git a/src/tbb/src/test/harness.h b/src/tbb/src/test/harness.h deleted file mode 100644 index 5b1b6d0ce..000000000 --- a/src/tbb/src/test/harness.h +++ /dev/null @@ -1,691 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Declarations for rock-bottom simple test harness. -// Just include this file to use it. -// Every test is presumed to have a command line of the form "test [-v] [MinThreads[:MaxThreads]]" -// The default for MinThreads is 1, for MaxThreads 4. -// The defaults can be overridden by defining macros HARNESS_DEFAULT_MIN_THREADS -// and HARNESS_DEFAULT_MAX_THREADS before including harness.h - -#ifndef tbb_tests_harness_H -#define tbb_tests_harness_H - -#include "tbb/tbb_config.h" -#include "harness_defs.h" - -namespace Harness { - enum TestResult { - Done, - Skipped, - Unknown - }; -} - -//! Entry point to a TBB unit test application -/** It MUST be defined by the test application. - - If HARNESS_NO_PARSE_COMMAND_LINE macro was not explicitly set before including harness.h, - then global variables MinThread, and MaxThread will be available and - initialized when it is called. - - Returns Harness::Done when the tests passed successfully. When the test fail, it must - not return, calling exit(errcode) or abort() instead. When the test is not supported - for the given platform/compiler/etc, it should return Harness::Skipped. - - To provide non-standard variant of main() for the test, define HARNESS_CUSTOM_MAIN - before including harness.h **/ -int TestMain (); - -#if __SUNPRO_CC - #include <stdlib.h> - #include <string.h> - #include <ucontext.h> -#else /* !__SUNPRO_CC */ - #include <cstdlib> -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - #include <cstring> -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif -#endif /* !__SUNPRO_CC */ - -#include <new> - -#if __TBB_MIC_NATIVE - #include "harness_mic.h" -#else - #define HARNESS_EXPORT - #define REPORT_FATAL_ERROR REPORT -#endif /* !__MIC__ */ - -#if _WIN32||_WIN64 - #include "tbb/machine/windows_api.h" - #if _WIN32_WINNT > 0x0501 && _MSC_VER && !_M_ARM - #include <dbghelp.h> - #pragma comment (lib, "dbghelp.lib") - #endif - #if _XBOX - #undef HARNESS_NO_PARSE_COMMAND_LINE - #define HARNESS_NO_PARSE_COMMAND_LINE 1 - #endif - #if __TBB_WIN8UI_SUPPORT - #include <thread> - #endif - #if _MSC_VER - #include <crtdbg.h> - #endif - #include <process.h> -#else - #include <pthread.h> -#endif - -#if __linux__ - #include <sys/utsname.h> /* for uname */ - #include <errno.h> /* for use in LinuxKernelVersion() */ - #include <features.h> -#endif -// at least GLIBC 2.1 or OSX 10.5 -#if __GLIBC__>2 || ( __GLIBC__==2 && __GLIBC_MINOR__ >= 1) || __APPLE__ - #include <execinfo.h> /*backtrace*/ - #define BACKTRACE_FUNCTION_AVAILABLE 1 -#endif - -#include "harness_runtime_loader.h" -#include "harness_report.h" - -//! 
Prints current call stack -void print_call_stack() { - fflush(stdout); fflush(stderr); - #if BACKTRACE_FUNCTION_AVAILABLE - const int sz = 100; // max number of frames to capture - void *buff[sz]; - int n = backtrace(buff, sz); - REPORT("Call stack info (%d):\n", n); - backtrace_symbols_fd(buff, n, fileno(stdout)); - #elif __SUNPRO_CC - REPORT("Call stack info:\n"); - printstack(fileno(stdout)); - #elif _WIN32_WINNT > 0x0501 && _MSC_VER && !__TBB_WIN8UI_SUPPORT - const int sz = 62; // XP limitation for number of frames - void *buff[sz]; - int n = CaptureStackBackTrace(0, sz, buff, NULL); - REPORT("Call stack info (%d):\n", n); - static LONG once = 0; - if( !InterlockedExchange(&once, 1) ) - SymInitialize(GetCurrentProcess(), NULL, TRUE); - const int len = 255; // just some reasonable string buffer size - union { SYMBOL_INFO sym; char pad[sizeof(SYMBOL_INFO)+len]; }; - sym.MaxNameLen = len; - sym.SizeOfStruct = sizeof( SYMBOL_INFO ); - DWORD64 offset; - for(int i = 1; i < n; i++) { // skip current frame - if(!SymFromAddr( GetCurrentProcess(), DWORD64(buff[i]), &offset, &sym )) { - sym.Address = ULONG64(buff[i]); offset = 0; sym.Name[0] = 0; - } - REPORT("[%d] %016I64LX+%04I64LX: %s\n", i, sym.Address, offset, sym.Name); //TODO: print module name - } - #endif /*BACKTRACE_FUNCTION_AVAILABLE*/ -} - -#if !HARNESS_NO_ASSERT - #include <exception> //for set_terminate - #include "harness_assert.h" - #if TEST_USES_TBB - #include <tbb/tbb_stddef.h> /*set_assertion_handler*/ - #endif - - struct InitReporter { - void (*default_terminate_handler)() ; - InitReporter(): default_terminate_handler(NULL) { - #if TEST_USES_TBB - #if TBB_USE_ASSERT - tbb::set_assertion_handler(ReportError); - #endif - ASSERT_WARNING(TBB_INTERFACE_VERSION <= tbb::TBB_runtime_interface_version(), "runtime version mismatch"); - #endif - #if TBB_USE_EXCEPTIONS - default_terminate_handler = std::set_terminate(handle_terminate); - #endif - } - static void handle_terminate(); - }; - static InitReporter InitReportError; - - void InitReporter::handle_terminate(){ - REPORT("std::terminate called.\n"); - print_call_stack(); - if (InitReportError.default_terminate_handler){ - InitReportError.default_terminate_handler(); - } - } - - typedef void (*test_error_extra_t)(void); - static test_error_extra_t ErrorExtraCall; - //! Set additional handler to process failed assertions - void SetHarnessErrorProcessing( test_error_extra_t extra_call ) { - ErrorExtraCall = extra_call; - } - - //! Reports errors issued by failed assertions - void ReportError( const char* filename, int line, const char* expression, const char * message ) { - print_call_stack(); - #if __TBB_ICL_11_1_CODE_GEN_BROKEN - printf("%s:%d, assertion %s: %s\n", filename, line, expression, message ? message : "failed" ); - #else - REPORT_FATAL_ERROR("%s:%d, assertion %s: %s\n", filename, line, expression, message ? message : "failed" ); - #endif - - if( ErrorExtraCall ) - (*ErrorExtraCall)(); - fflush(stdout); fflush(stderr); - #if HARNESS_TERMINATE_ON_ASSERT - TerminateProcess(GetCurrentProcess(), 1); - #elif HARNESS_EXIT_ON_ASSERT - exit(1); - #elif HARNESS_CONTINUE_ON_ASSERT - // continue testing - #elif _MSC_VER && _DEBUG - // aligned with tbb_assert_impl.h behavior - if(1 == _CrtDbgReport(_CRT_ASSERT, filename, line, NULL, "%s\r\n%s", expression, message?message:"")) - _CrtDbgBreak(); - #else - abort(); - #endif /* HARNESS_EXIT_ON_ASSERT */ - } - //! 
Reports warnings issued by failed warning assertions - void ReportWarning( const char* filename, int line, const char* expression, const char * message ) { - REPORT("Warning: %s:%d, assertion %s: %s\n", filename, line, expression, message ? message : "failed" ); - } - -#else /* !HARNESS_NO_ASSERT */ - - #define ASSERT(p,msg) (Harness::suppress_unused_warning(p), (void)0) - #define ASSERT_WARNING(p,msg) (Harness::suppress_unused_warning(p), (void)0) - -#endif /* !HARNESS_NO_ASSERT */ - -namespace Harness { - //TODO: unify with utility::internal::array_length from examples common utilities - template<typename T, size_t N> - inline size_t array_length(const T(&)[N]) - { - return N; - } - - template<typename T, size_t N> - inline T* end( T(& array)[N]) - { - return array+ array_length(array) ; - } - -} //namespace Harness - -#if TEST_USES_TBB - #include "tbb/blocked_range.h" - - namespace Harness { - template<typename T, size_t N> - tbb::blocked_range<T*> make_blocked_range( T(& array)[N]){ return tbb::blocked_range<T*>(array, array + N);} - } -#endif - -#if !HARNESS_NO_PARSE_COMMAND_LINE - -//! Controls level of commentary printed via printf-like REMARK() macro. -/** If true, makes the test print commentary. If false, test should print "done" and nothing more. */ -static bool Verbose; - -#ifndef HARNESS_DEFAULT_MIN_THREADS - #define HARNESS_DEFAULT_MIN_THREADS 1 -#endif - -//! Minimum number of threads -static int MinThread = HARNESS_DEFAULT_MIN_THREADS; - -#ifndef HARNESS_DEFAULT_MAX_THREADS - #define HARNESS_DEFAULT_MAX_THREADS 4 -#endif - -//! Maximum number of threads -static int MaxThread = HARNESS_DEFAULT_MAX_THREADS; - -//! Parse command line of the form "name [-v] [MinThreads[:MaxThreads]]" -/** Sets Verbose, MinThread, and MaxThread accordingly. - The nthread argument can be a single number or a range of the form m:n. - A single number m is interpreted as if written m:m. - The numbers must be non-negative. - Clients often treat the value 0 as "run sequentially." 
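    For example (the test binary name is illustrative):
        test_foo            uses the defaults, MinThread=1 and MaxThread=4
        test_foo 8          is interpreted as the range 8:8
        test_foo -v 2:16    verbose run, sweeping thread counts 2 through 16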
*/ -static void ParseCommandLine( int argc, char* argv[] ) { - if( !argc ) REPORT("Command line with 0 arguments\n"); - int i = 1; - if( i<argc ) { - if( strncmp( argv[i], "-v", 2 )==0 ) { - Verbose = true; - ++i; - } - } - if( i<argc ) { - char* endptr; - MinThread = strtol( argv[i], &endptr, 0 ); - if( *endptr==':' ) - MaxThread = strtol( endptr+1, &endptr, 0 ); - else if( *endptr=='\0' ) - MaxThread = MinThread; - if( *endptr!='\0' ) { - REPORT_FATAL_ERROR("garbled nthread range\n"); - exit(1); - } - if( MinThread<0 ) { - REPORT_FATAL_ERROR("nthread must be nonnegative\n"); - exit(1); - } - if( MaxThread<MinThread ) { - REPORT_FATAL_ERROR("nthread range is backwards\n"); - exit(1); - } - ++i; - } -#if __TBB_STDARGS_BROKEN - if ( !argc ) - argc = 1; - else { - while ( i < argc && argv[i][0] == 0 ) - ++i; - } -#endif /* __TBB_STDARGS_BROKEN */ - if( i!=argc ) { - REPORT_FATAL_ERROR("Usage: %s [-v] [nthread|minthread:maxthread]\n", argv[0] ); - exit(1); - } -} -#endif /* HARNESS_NO_PARSE_COMMAND_LINE */ - -#if !HARNESS_CUSTOM_MAIN - -#if __TBB_MPI_INTEROP -#undef SEEK_SET -#undef SEEK_CUR -#undef SEEK_END -#include "mpi.h" -#endif - -HARNESS_EXPORT -#if HARNESS_NO_PARSE_COMMAND_LINE -int main() { -#if __TBB_MPI_INTEROP - MPI_Init(NULL,NULL); -#endif -#else -int main(int argc, char* argv[]) { - ParseCommandLine( argc, argv ); -#if __TBB_MPI_INTEROP - MPI_Init(&argc,&argv); -#endif -#endif -#if HARNESS_SKIP_TEST - REPORT( "skip\n" ); - return 0; -#else -#if __TBB_MPI_INTEROP - // Simple TBB/MPI interoperability harness for most of tests - // Worker processes send blocking messages to the master process about their rank and group size - // Master process receives this info and print it in verbose mode - int rank, size, myrank; - MPI_Status status; - MPI_Comm_size(MPI_COMM_WORLD,&size); - MPI_Comm_rank(MPI_COMM_WORLD,&myrank); - if (myrank == 0) { -#if !HARNESS_NO_PARSE_COMMAND_LINE - REMARK("Hello mpi world. I am %d of %d\n", myrank, size); -#endif - for ( int i = 1; i < size; i++ ) { - MPI_Recv (&rank, 1, MPI_INT, i, 1, MPI_COMM_WORLD, &status); - MPI_Recv (&size, 1, MPI_INT, i, 1, MPI_COMM_WORLD, &status); -#if !HARNESS_NO_PARSE_COMMAND_LINE - REMARK("Hello mpi world. I am %d of %d\n", rank, size); -#endif - } - } else { - MPI_Send (&myrank, 1, MPI_INT, 0, 1, MPI_COMM_WORLD); - MPI_Send (&size, 1, MPI_INT, 0, 1, MPI_COMM_WORLD); - } -#endif - - int res = Harness::Unknown; -#if __TBB_MIC_OFFLOAD - // "mic:-1" or "mandatory" specifies execution on the target. The runtime - // system chooses the specific target. Execution on the CPU is not allowed. -#if __INTEL_COMPILER < 1400 - #pragma offload target(mic:-1) out(res) -#else - #pragma offload target(mic) out(res) mandatory -#endif -#endif - res = TestMain (); - - ASSERT( res==Harness::Done || res==Harness::Skipped, "Wrong return code by TestMain"); -#if __TBB_MPI_INTEROP - if (myrank == 0) { - REPORT( res==Harness::Done ? "done\n" : "skip\n" ); - } - MPI_Finalize(); -#else - REPORT( res==Harness::Done ? "done\n" : "skip\n" ); -#endif - return 0; -#endif /* HARNESS_SKIP_TEST */ -} - -#endif /* !HARNESS_CUSTOM_MAIN */ - -//! Base class for prohibiting compiler-generated operator= -class NoAssign { - //! Assignment not allowed - void operator=( const NoAssign& ); -public: -#if __GNUC__ - //! Explicitly define default construction, because otherwise gcc issues gratuitous warning. - NoAssign() {} -#endif /* __GNUC__ */ -}; - -//! Base class for prohibiting compiler-generated copy constructor or operator= -class NoCopy: NoAssign { - //! 
Copy construction not allowed - NoCopy( const NoCopy& ); -public: - NoCopy() {} -}; - -#if HARNESS_TBBMALLOC_THREAD_SHUTDOWN && __TBB_SOURCE_DIRECTLY_INCLUDED && (_WIN32||_WIN64) -#include "../tbbmalloc/tbbmalloc_internal_api.h" -#endif - -//! For internal use by template function NativeParallelFor -template<typename Index, typename Body> -class NativeParallelForTask: NoCopy { -public: - NativeParallelForTask( Index index_, const Body& body_ ) : - index(index_), - body(body_) - {} - - //! Start task - void start() { -#if _WIN32||_WIN64 - unsigned thread_id; -#if __TBB_WIN8UI_SUPPORT - std::thread* thread_tmp=new std::thread(thread_function, this); - thread_handle = thread_tmp->native_handle(); - thread_id = 0; -#else - thread_handle = (HANDLE)_beginthreadex( NULL, 0, thread_function, this, 0, &thread_id ); -#endif - ASSERT( thread_handle!=0, "NativeParallelFor: _beginthreadex failed" ); -#else -#if __ICC==1100 - #pragma warning (push) - #pragma warning (disable: 2193) -#endif /* __ICC==1100 */ - // Some machines may have very large hard stack limit. When the test is - // launched by make, the default stack size is set to the hard limit, and - // calls to pthread_create fail with out-of-memory error. - // Therefore we set the stack size explicitly (as for TBB worker threads). -// TODO: make a single definition of MByte used by all tests. - const size_t MByte = 1024*1024; -#if __i386__||__i386||__arm__ - const size_t stack_size = 1*MByte; -#elif __x86_64__ - const size_t stack_size = 2*MByte; -#else - const size_t stack_size = 4*MByte; -#endif - pthread_attr_t attr_stack; - int status = pthread_attr_init(&attr_stack); - ASSERT(0==status, "NativeParallelFor: pthread_attr_init failed"); - status = pthread_attr_setstacksize( &attr_stack, stack_size ); - ASSERT(0==status, "NativeParallelFor: pthread_attr_setstacksize failed"); - status = pthread_create(&thread_id, &attr_stack, thread_function, this); - ASSERT(0==status, "NativeParallelFor: pthread_create failed"); - pthread_attr_destroy(&attr_stack); -#if __ICC==1100 - #pragma warning (pop) -#endif -#endif /* _WIN32||_WIN64 */ - } - - //! Wait for task to finish - void wait_to_finish() { -#if _WIN32||_WIN64 - DWORD status = WaitForSingleObjectEx( thread_handle, INFINITE, FALSE ); - ASSERT( status!=WAIT_FAILED, "WaitForSingleObject failed" ); - CloseHandle( thread_handle ); -#else - int status = pthread_join( thread_id, NULL ); - ASSERT( !status, "pthread_join failed" ); -#endif -#if HARNESS_NO_ASSERT - (void)status; -#endif - } - -private: -#if _WIN32||_WIN64 - HANDLE thread_handle; -#else - pthread_t thread_id; -#endif - - //! Range over which task will invoke the body. - const Index index; - - //! Body to invoke over the range. - const Body body; - -#if _WIN32||_WIN64 - static unsigned __stdcall thread_function( void* object ) -#else - static void* thread_function(void* object) -#endif - { - NativeParallelForTask& self = *static_cast<NativeParallelForTask*>(object); - (self.body)(self.index); -#if HARNESS_TBBMALLOC_THREAD_SHUTDOWN && __TBB_SOURCE_DIRECTLY_INCLUDED && (_WIN32||_WIN64) - // in those cases can't release per-thread cache automatically, - // so do it manually - // TODO: investigate less-intrusive way to do it, for example via FLS keys - __TBB_mallocThreadShutdownNotification(); -#endif - return 0; - } -}; - -//! Execute body(i) in parallel for i in the interval [0,n). -/** Each iteration is performed by a separate thread. 
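A usage sketch under the harness conventions shown above (PrintIndex is a hypothetical body type; harness.h supplies main(), which calls TestMain() and prints "done" on Harness::Done):

```cpp
#include "harness.h"

struct PrintIndex {
    void operator()(int i) const { REMARK("hello from native thread %d\n", i); }
};

int TestMain() {
    NativeParallelFor(4, PrintIndex());  // one OS thread per index in [0, 4)
    return Harness::Done;
}
```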
*/ -template<typename Index, typename Body> -void NativeParallelFor( Index n, const Body& body ) { - typedef NativeParallelForTask<Index,Body> task; - - if( n>0 ) { - // Allocate array to hold the tasks - task* array = static_cast<task*>(operator new( n*sizeof(task) )); - - // Construct the tasks - for( Index i=0; i!=n; ++i ) - new( &array[i] ) task(i,body); - - // Start the tasks - for( Index i=0; i!=n; ++i ) - array[i].start(); - - // Wait for the tasks to finish and destroy each one. - for( Index i=n; i; --i ) { - array[i-1].wait_to_finish(); - array[i-1].~task(); - } - - // Deallocate the task array - operator delete(array); - } -} - -//! The function to zero-initialize arrays; useful to avoid warnings -template <typename T> -void zero_fill(void* array, size_t n) { - memset(array, 0, sizeof(T)*n); -} - -#if __SUNPRO_CC && defined(min) -#undef min -#undef max -#endif - -#ifndef min -//! Utility template function returning lesser of the two values. -/** Provided here to avoid including not strict safe <algorithm>.\n - In case operands cause signed/unsigned or size mismatch warnings it is caller's - responsibility to do the appropriate cast before calling the function. **/ -template<typename T1, typename T2> -T1 min ( const T1& val1, const T2& val2 ) { - return val1 < val2 ? val1 : val2; -} -#endif /* !min */ - -#ifndef max -//! Utility template function returning greater of the two values. -/** Provided here to avoid including not strict safe <algorithm>.\n - In case operands cause signed/unsigned or size mismatch warnings it is caller's - responsibility to do the appropriate cast before calling the function. **/ -template<typename T1, typename T2> -T1 max ( const T1& val1, const T2& val2 ) { - return val1 < val2 ? val2 : val1; -} -#endif /* !max */ - -#if __linux__ -inline unsigned LinuxKernelVersion() -{ - unsigned digit1, digit2, digit3; - struct utsname utsnameBuf; - - if (-1 == uname(&utsnameBuf)) { - REPORT_FATAL_ERROR("Can't call uname: errno %d\n", errno); - exit(1); - } - if (3 != sscanf(utsnameBuf.release, "%u.%u.%u", &digit1, &digit2, &digit3)) { - REPORT_FATAL_ERROR("Unable to parse OS release '%s'\n", utsnameBuf.release); - exit(1); - } - return 1000000*digit1+1000*digit2+digit3; -} -#endif - -namespace Harness { - -#if !HARNESS_NO_ASSERT -//! Base class that asserts that no operations are made with the object after its destruction. 
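For illustration, a hypothetical value type built on the NoAfterlife checker defined just below; every accessor calls AssertLive(), so touching a destroyed instance trips an assertion instead of silently reading freed memory:

```cpp
struct Tracked : Harness::NoAfterlife {
    int payload;
    explicit Tracked(int p) : payload(p) {}
    int get() const { AssertLive(); return payload; }  // asserts m_state == LIVE
};
```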
-class NoAfterlife { -protected: - enum state_t { - LIVE=0x56781234, - DEAD=0xDEADBEEF - } m_state; - -public: - NoAfterlife() : m_state(LIVE) {} - NoAfterlife( const NoAfterlife& src ) : m_state(LIVE) { - ASSERT( src.IsLive(), "Constructing from the dead source" ); - } - ~NoAfterlife() { - ASSERT( IsLive(), "Repeated destructor call" ); - m_state = DEAD; - } - const NoAfterlife& operator=( const NoAfterlife& src ) { - ASSERT( IsLive(), NULL ); - ASSERT( src.IsLive(), NULL ); - return *this; - } - void AssertLive() const { - ASSERT( IsLive(), "Already dead" ); - } - bool IsLive() const { - return m_state == LIVE; - } -}; // NoAfterlife -#endif /* !HARNESS_NO_ASSERT */ - -#if _WIN32 || _WIN64 - void Sleep ( int ms ) { -#if !__TBB_WIN8UI_SUPPORT - ::Sleep(ms); -#else - std::chrono::milliseconds sleep_time( ms ); - std::this_thread::sleep_for( sleep_time ); -#endif - - } - - typedef DWORD tid_t; - tid_t CurrentTid () { return GetCurrentThreadId(); } - -#else /* !WIN */ - - void Sleep ( int ms ) { - timespec requested = { ms / 1000, (ms % 1000)*1000000 }; - timespec remaining = { 0, 0 }; - nanosleep(&requested, &remaining); - } - - typedef pthread_t tid_t; - tid_t CurrentTid () { return pthread_self(); } -#endif /* !WIN */ - - static const unsigned Primes[] = { - 0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b, - 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231, 0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, - 0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3, - 0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b, - 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9, 0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, - 0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7, - 0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b, - 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3, 0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f - }; - - class FastRandom { - unsigned x, a; - public: - unsigned short get() { - unsigned short r = (unsigned short)(x >> 16); - x = x*a + 1; - return r; - } - FastRandom( unsigned seed ) { - x = seed; - a = Primes[seed % (sizeof(Primes) / sizeof(Primes[0]))]; - } - }; -} // namespace Harness - -#endif /* tbb_tests_harness_H */ diff --git a/src/tbb/src/test/harness_allocator.h b/src/tbb/src/test/harness_allocator.h deleted file mode 100644 index af72455dc..000000000 --- a/src/tbb/src/test/harness_allocator.h +++ /dev/null @@ -1,615 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
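Stepping back to harness.h for a moment: the Harness::FastRandom generator removed above is a simple multiplicative congruential scheme. A stand-alone restatement of its recurrence (seed value illustrative; the real constructor selects the multiplier as Primes[seed % 64]):

```cpp
#include <cstdio>

int main() {
    unsigned x = 0;                  // seed
    const unsigned a = 0x9e3779b1;   // Primes[0] from the table above
    for (int i = 0; i < 4; ++i) {
        unsigned short r = (unsigned short)(x >> 16);  // output: high 16 bits
        x = x * a + 1;                                  // advance: x' = x*a + 1
        std::printf("%u\n", (unsigned)r);
    }
    return 0;
}
```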
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Declarations for simple estimate of the memory being used by a program. -// Not yet implemented for OS X*. -// This header is an optional part of the test harness. -// It assumes that "harness_assert.h" has already been included. - -#ifndef tbb_test_harness_allocator_H -#define tbb_test_harness_allocator_H - -#if __linux__ || __APPLE__ || __sun -#include <unistd.h> -#elif _WIN32 -#include "tbb/machine/windows_api.h" -#endif /* OS specific */ -#include <memory> -#include <new> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <stdexcept> - -#include <utility> // for std::swap - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "tbb/atomic.h" -#include "harness_defs.h" - -#if __SUNPRO_CC -using std::printf; -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) -#if defined(_Wp64) - #pragma warning (disable: 4267) -#endif - #pragma warning (disable: 4512) -#endif - -namespace Harness { -#if __TBB_ALLOCATOR_TRAITS_PRESENT - using std::true_type; - using std::false_type; -#else - using tbb::internal::true_type; - using tbb::internal::false_type; -#endif //__TBB_ALLOCATOR_TRAITS_PRESENT -} - -template<typename counter_type = size_t> -struct arena_data { - char * const my_buffer; - size_t const my_size; //in bytes - counter_type my_allocated; // in bytes - - template<typename T> - arena_data(T * a_buffer, size_t a_size) __TBB_NOEXCEPT(true) - : my_buffer(reinterpret_cast<char *>(a_buffer)) - , my_size(a_size * sizeof(T) ) - { - my_allocated =0; - } -}; - -template<typename T, typename pocma = Harness::false_type, typename counter_type = size_t> -struct arena { - typedef arena_data<counter_type> arena_data_t; -private: - arena_data_t * my_data; -public: - typedef T value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template<typename U> struct rebind { - typedef arena<U, pocma, counter_type> other; - }; - - typedef pocma propagate_on_container_move_assignment; - - arena(arena_data_t & data) __TBB_NOEXCEPT(true) : my_data(&data) {} - - template<typename U1, typename U2, typename U3> - friend struct arena; - - template<typename U1, typename U2 > - arena(arena<U1, U2, counter_type> const& other) __TBB_NOEXCEPT(true) : my_data(other.my_data) {} - - friend void swap(arena & lhs ,arena & rhs){ - std::swap(lhs.my_data, rhs.my_data); - } - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects, starting on a cache/sector line. 
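A usage sketch for the arena allocator above (assuming harness_allocator.h is included; the buffer size is arbitrary). allocate(), defined next, only bumps the my_allocated counter, and deallocate() only sanity-checks, so storage lives until the underlying buffer does:

```cpp
char buffer[1024];
arena_data<> data(buffer, sizeof(buffer));  // counter_type defaults to size_t
arena<int> a(data);                         // allocator view over the buffer

int* p = a.allocate(16);                    // bumps my_allocated by 16*sizeof(int)
a.construct(p, 7);                          // placement-new an int
a.destroy(p);
a.deallocate(p, 16);                        // asserts bounds; nothing is freed
```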
- pointer allocate( size_type n, const void* =0) { - size_t new_size = (my_data->my_allocated += n*sizeof(T)); - __TBB_ASSERT(my_data->my_allocated <= my_data->my_size,"trying to allocate more than was reserved"); - char* result = &(my_data->my_buffer[new_size - n*sizeof(T)]); - return reinterpret_cast<pointer>(result); - } - - //! Free block of memory that starts on a cache line - void deallocate( pointer p_arg, size_type n) { - char* p = reinterpret_cast<char*>(p_arg); - __TBB_ASSERT(p >=my_data->my_buffer && p <= my_data->my_buffer + my_data->my_size, "trying to deallocate pointer not from arena ?"); - __TBB_ASSERT(p + n*sizeof(T) <= my_data->my_buffer + my_data->my_size, "trying to deallocate incorrect number of items?"); - tbb::internal::suppress_unused_warning(p_arg); - tbb::internal::suppress_unused_warning(p); - tbb::internal::suppress_unused_warning(n); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - return my_data->my_size / sizeof(T); - } - - //! Copy-construct value at location pointed to by p. -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - template<typename U, typename... Args> - void construct(U *p, Args&&... args) - { ::new((void *)p) U(std::forward<Args>(args)...); } -#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC -#if __TBB_CPP11_RVALUE_REF_PRESENT - void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} -#endif - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} -#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) { - p->~value_type(); -#if _MSC_VER == 1800 - tbb::internal::suppress_unused_warning(p); -#endif - } - - friend bool operator==(arena const& lhs, arena const& rhs){ - return lhs.my_data == rhs.my_data; - } - - friend bool operator!=(arena const& lhs, arena const& rhs){ - return !(lhs== rhs); - } -}; - -template <typename count_t = tbb::atomic<size_t> > -struct allocator_counters { - count_t items_allocated; - count_t items_freed; - count_t allocations; - count_t frees; - - friend bool operator==(allocator_counters const & lhs, allocator_counters const & rhs){ - return lhs.items_allocated == rhs.items_allocated - && lhs.items_freed == rhs.items_freed - && lhs.allocations == rhs.allocations - && lhs.frees == rhs.frees - ; - } -}; - -template <typename base_alloc_t, typename count_t = tbb::atomic<size_t> > -class static_counting_allocator : public base_alloc_t -{ -public: - typedef typename base_alloc_t::pointer pointer; - typedef typename base_alloc_t::const_pointer const_pointer; - typedef typename base_alloc_t::reference reference; - typedef typename base_alloc_t::const_reference const_reference; - typedef typename base_alloc_t::value_type value_type; - typedef typename base_alloc_t::size_type size_type; - typedef typename base_alloc_t::difference_type difference_type; - template<typename U> struct rebind { - typedef static_counting_allocator<typename base_alloc_t::template rebind<U>::other,count_t> other; - }; - - typedef allocator_counters<count_t> counters_t; - - static size_t max_items; - static count_t items_allocated; - static count_t items_freed; - static count_t allocations; - static count_t frees; - static bool verbose, throwing; - - static_counting_allocator() throw() { } - - static_counting_allocator(const base_alloc_t& src) throw() - : base_alloc_t(src) { } - - static_counting_allocator(const static_counting_allocator& src) throw() - : 
base_alloc_t(src) { } - - template<typename U, typename C> - static_counting_allocator(const static_counting_allocator<U, C>& src) throw() - : base_alloc_t(src) { } - - pointer allocate(const size_type n) - { - if(verbose) printf("\t+%d|", int(n)); - if(max_items && items_allocated + n >= max_items) { - if(verbose) printf("items limit hits!"); - if(throwing) - __TBB_THROW( std::bad_alloc() ); - return NULL; - } - allocations++; - items_allocated += n; - return base_alloc_t::allocate(n, pointer(0)); - } - - pointer allocate(const size_type n, const void * const) - { return allocate(n); } - - void deallocate(const pointer ptr, const size_type n) - { - if(verbose) printf("\t-%d|", int(n)); - frees++; - items_freed += n; - base_alloc_t::deallocate(ptr, n); - } - - static counters_t counters(){ - counters_t c = {items_allocated, items_freed, allocations, frees} ; - return c; - } - - static void init_counters(bool v = false) { - verbose = v; - if(verbose) printf("\n------------------------------------------- Allocations:\n"); - items_allocated = 0; - items_freed = 0; - allocations = 0; - frees = 0; - max_items = 0; - } - - static void set_limits(size_type max = 0, bool do_throw = true) { - max_items = max; - throwing = do_throw; - } -}; - -template <typename base_alloc_t, typename count_t> -size_t static_counting_allocator<base_alloc_t, count_t>::max_items; -template <typename base_alloc_t, typename count_t> -count_t static_counting_allocator<base_alloc_t, count_t>::items_allocated; -template <typename base_alloc_t, typename count_t> -count_t static_counting_allocator<base_alloc_t, count_t>::items_freed; -template <typename base_alloc_t, typename count_t> -count_t static_counting_allocator<base_alloc_t, count_t>::allocations; -template <typename base_alloc_t, typename count_t> -count_t static_counting_allocator<base_alloc_t, count_t>::frees; -template <typename base_alloc_t, typename count_t> -bool static_counting_allocator<base_alloc_t, count_t>::verbose; -template <typename base_alloc_t, typename count_t> -bool static_counting_allocator<base_alloc_t, count_t>::throwing; - - -template <typename tag, typename count_t = tbb::atomic<size_t> > -class static_shared_counting_allocator_base -{ -public: - typedef allocator_counters<count_t> counters_t; - - static size_t max_items; - static count_t items_allocated; - static count_t items_freed; - static count_t allocations; - static count_t frees; - static bool verbose, throwing; - - static counters_t counters(){ - counters_t c = {items_allocated, items_freed, allocations, frees} ; - return c; - } - - static void init_counters(bool v = false) { - verbose = v; - if(verbose) printf("\n------------------------------------------- Allocations:\n"); - items_allocated = 0; - items_freed = 0; - allocations = 0; - frees = 0; - max_items = 0; - } - - static void set_limits(size_t max = 0, bool do_throw = true) { - max_items = max; - throwing = do_throw; - } -}; - -template <typename tag, typename count_t> -size_t static_shared_counting_allocator_base<tag, count_t>::max_items; - -template <typename tag, typename count_t> -count_t static_shared_counting_allocator_base<tag, count_t>::items_allocated; - -template <typename tag, typename count_t> -count_t static_shared_counting_allocator_base<tag, count_t>::items_freed; - -template <typename tag, typename count_t> -count_t static_shared_counting_allocator_base<tag, count_t>::allocations; - -template <typename tag, typename count_t> -count_t static_shared_counting_allocator_base<tag, count_t>::frees; - -template 
<typename tag, typename count_t> -bool static_shared_counting_allocator_base<tag, count_t>::verbose; - -template <typename tag, typename count_t> -bool static_shared_counting_allocator_base<tag, count_t>::throwing; - -template <typename tag, typename base_alloc_t, typename count_t = tbb::atomic<size_t> > -class static_shared_counting_allocator : public static_shared_counting_allocator_base<tag, count_t>, public base_alloc_t -{ - typedef static_shared_counting_allocator_base<tag, count_t> base_t; -public: - typedef typename base_alloc_t::pointer pointer; - typedef typename base_alloc_t::const_pointer const_pointer; - typedef typename base_alloc_t::reference reference; - typedef typename base_alloc_t::const_reference const_reference; - typedef typename base_alloc_t::value_type value_type; - typedef typename base_alloc_t::size_type size_type; - typedef typename base_alloc_t::difference_type difference_type; - template<typename U> struct rebind { - typedef static_shared_counting_allocator<tag, typename base_alloc_t::template rebind<U>::other, count_t> other; - }; - - static_shared_counting_allocator() throw() { } - - static_shared_counting_allocator(const base_alloc_t& src) throw() - : base_alloc_t(src) { } - - static_shared_counting_allocator(const static_shared_counting_allocator& src) throw() - : base_alloc_t(src) { } - - template<typename U, typename C> - static_shared_counting_allocator(const static_shared_counting_allocator<tag, U, C>& src) throw() - : base_alloc_t(src) { } - - pointer allocate(const size_type n) - { - if(base_t::verbose) printf("\t+%d|", int(n)); - if(base_t::max_items && base_t::items_allocated + n >= base_t::max_items) { - if(base_t::verbose) printf("items limit hits!"); - if(base_t::throwing) - __TBB_THROW( std::bad_alloc() ); - return NULL; - } - base_t::allocations++; - base_t::items_allocated += n; - return base_alloc_t::allocate(n, pointer(0)); - } - - pointer allocate(const size_type n, const void * const) - { return allocate(n); } - - void deallocate(const pointer ptr, const size_type n) - { - if(base_t::verbose) printf("\t-%d|", int(n)); - base_t::frees++; - base_t::items_freed += n; - base_alloc_t::deallocate(ptr, n); - } -}; - -template <typename base_alloc_t, typename count_t = tbb::atomic<size_t> > -class local_counting_allocator : public base_alloc_t -{ -public: - typedef typename base_alloc_t::pointer pointer; - typedef typename base_alloc_t::const_pointer const_pointer; - typedef typename base_alloc_t::reference reference; - typedef typename base_alloc_t::const_reference const_reference; - typedef typename base_alloc_t::value_type value_type; - typedef typename base_alloc_t::size_type size_type; - typedef typename base_alloc_t::difference_type difference_type; - template<typename U> struct rebind { - typedef local_counting_allocator<typename base_alloc_t::template rebind<U>::other,count_t> other; - }; - - count_t items_allocated; - count_t items_freed; - count_t allocations; - count_t frees; - size_t max_items; - - void set_counters(const count_t & a_items_allocated, const count_t & a_items_freed, const count_t & a_allocations, const count_t & a_frees, const count_t & a_max_items){ - items_allocated = a_items_allocated; - items_freed = a_items_freed; - allocations = a_allocations; - frees = a_frees; - max_items = a_max_items; - } - - template< typename allocator_t> - void set_counters(const allocator_t & a){ - this->set_counters(a.items_allocated, a.items_freed, a.allocations, a.frees, a.max_items); - } - - void clear_counters(){ - count_t zero; - zero 
= 0; - this->set_counters(zero,zero,zero,zero,zero); - } - - local_counting_allocator() throw() { - this->clear_counters(); - } - - local_counting_allocator(const local_counting_allocator &a) throw() - : base_alloc_t(a) - , items_allocated(a.items_allocated) - , items_freed(a.items_freed) - , allocations(a.allocations) - , frees(a.frees) - , max_items(a.max_items) - { } - - template<typename U, typename C> - local_counting_allocator(const static_counting_allocator<U,C> & a) throw() { - this->set_counters(a); - } - - template<typename U, typename C> - local_counting_allocator(const local_counting_allocator<U,C> &a) throw() - : items_allocated(a.items_allocated) - , items_freed(a.items_freed) - , allocations(a.allocations) - , frees(a.frees) - , max_items(a.max_items) - { } - - bool operator==(const local_counting_allocator &a) const - { return static_cast<const base_alloc_t&>(a) == *this; } - - pointer allocate(const size_type n) - { - if(max_items && items_allocated + n >= max_items) - __TBB_THROW( std::bad_alloc() ); - ++allocations; - items_allocated += n; - return base_alloc_t::allocate(n, pointer(0)); - } - - pointer allocate(const size_type n, const void * const) - { return allocate(n); } - - void deallocate(const pointer ptr, const size_type n) - { - ++frees; - items_freed += n; - base_alloc_t::deallocate(ptr, n); - } - - void set_limits(size_type max = 0) { - max_items = max; - } -}; - -template <typename T, template<typename X> class Allocator = std::allocator> -class debug_allocator : public Allocator<T> -{ -public: - typedef Allocator<T> base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - typedef typename base_allocator_type::reference reference; - typedef typename base_allocator_type::const_reference const_reference; - typedef typename base_allocator_type::size_type size_type; - typedef typename base_allocator_type::difference_type difference_type; - template<typename U> struct rebind { - typedef debug_allocator<U, Allocator> other; - }; - - debug_allocator() throw() { } - debug_allocator(const debug_allocator &a) throw() : base_allocator_type( a ) { } - template<typename U> - debug_allocator(const debug_allocator<U> &a) throw() : base_allocator_type( Allocator<U>( a ) ) { } - - pointer allocate(const size_type n, const void *hint = 0 ) { - pointer ptr = base_allocator_type::allocate( n, hint ); - std::memset( ptr, 0xE3E3E3E3, n * sizeof(value_type) ); - return ptr; - } -}; - -//! 
Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<template<typename T> class Allocator> -class debug_allocator<void, Allocator> : public Allocator<void> { -public: - typedef Allocator<void> base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - template<typename U> struct rebind { - typedef debug_allocator<U, Allocator> other; - }; -}; - -template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2> -inline bool operator==( const debug_allocator<T1,B1> &a, const debug_allocator<T2,B2> &b) { - return static_cast< B1<T1> >(a) == static_cast< B2<T2> >(b); -} -template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2> -inline bool operator!=( const debug_allocator<T1,B1> &a, const debug_allocator<T2,B2> &b) { - return static_cast< B1<T1> >(a) != static_cast< B2<T2> >(b); -} - -template <typename T, typename pocma = Harness::false_type, template<typename X> class Allocator = std::allocator> -class stateful_allocator : public Allocator<T> -{ - void* unique_pointer; - - template<typename T1, typename pocma1, template<typename X1> class Allocator1> - friend class stateful_allocator; -public: - typedef Allocator<T> base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - typedef typename base_allocator_type::reference reference; - typedef typename base_allocator_type::const_reference const_reference; - typedef typename base_allocator_type::size_type size_type; - typedef typename base_allocator_type::difference_type difference_type; - template<typename U> struct rebind { - typedef stateful_allocator<U, pocma, Allocator> other; - }; - typedef pocma propagate_on_container_move_assignment; - - stateful_allocator() throw() : unique_pointer(this) { } - - template<typename U> - stateful_allocator(const stateful_allocator<U, pocma> &a) throw() : base_allocator_type( Allocator<U>( a ) ), unique_pointer(a.uniqe_pointer) { } - - friend bool operator==(stateful_allocator const& lhs, stateful_allocator const& rhs){ - return lhs.unique_pointer == rhs.unique_pointer; - } - - friend bool operator!=(stateful_allocator const& rhs, stateful_allocator const& lhs){ - return !(lhs == rhs); - } - -}; - -#if defined(_MSC_VER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (pop) -#endif // warning 4267,4512 is back - -namespace Harness { - - struct IsEqual { -#if __TBB_CPP11_SMART_POINTERS_PRESENT - template <typename T> - static bool compare( const std::weak_ptr<T> &t1, const std::weak_ptr<T> &t2 ) { - // Compare real pointers. - return t1.lock().get() == t2.lock().get(); - } - template <typename T1, typename T2> - static bool compare( const std::pair< const std::weak_ptr<T1>, std::weak_ptr<T2> > &t1, - const std::pair< const std::weak_ptr<T1>, std::weak_ptr<T2> > &t2 ) { - // Compare real pointers. 
- return t1.first.lock().get() == t2.first.lock().get() && - t1.second.lock().get() == t2.second.lock().get(); - } -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ - template <typename T1, typename T2> - static bool compare( const T1 &t1, const T2 &t2 ) { - return t1 == t2; - } - template <typename T1, typename T2> - bool operator()( T1 &t1, T2 &t2) const { - return compare( (const T1&)t1, (const T2&)t2 ); - } - }; - -} // Harness -#endif // tbb_test_harness_allocator_H diff --git a/src/tbb/src/test/harness_assert.h b/src/tbb/src/test/harness_assert.h deleted file mode 100644 index 47df3e30f..000000000 --- a/src/tbb/src/test/harness_assert.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Just the assertion portion of the harness. -// This is useful for writing portions of tests that include -// the minimal number of necessary header files. -// -// The full "harness.h" must be included later. - -#ifndef harness_assert_H -#define harness_assert_H - -void ReportError( const char* filename, int line, const char* expression, const char* message); -void ReportWarning( const char* filename, int line, const char* expression, const char* message); - -#define ASSERT(p,message) ((p)?(void)0:ReportError(__FILE__,__LINE__,#p,message)) -#define ASSERT_WARNING(p,message) ((p)?(void)0:ReportWarning(__FILE__,__LINE__,#p,message)) - -//! Compile-time error if x and y have different types -template<typename T> -void AssertSameType( const T& /*x*/, const T& /*y*/ ) {} - -#endif /* harness_assert_H */ diff --git a/src/tbb/src/test/harness_bad_expr.h b/src/tbb/src/test/harness_bad_expr.h deleted file mode 100644 index 7d20aa20f..000000000 --- a/src/tbb/src/test/harness_bad_expr.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
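A hypothetical fragment showing the harness_assert.h macros above in use (CheckInvariants is illustrative; ReportError/ReportWarning are supplied by harness.h):

```cpp
#include "harness_assert.h"

void CheckInvariants(int n) {
    ASSERT(n >= 0, "count went negative");            // fatal, via ReportError
    ASSERT_WARNING(n < 1000, "suspiciously large");   // non-fatal, via ReportWarning
    AssertSameType(n, 0);                             // compiles only if both are int
}
```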
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Declarations for checking __TBB_ASSERT checks inside TBB. -// This header is an optional part of the test harness. -// It assumes that "harness.h" has already been included. - -#define TRY_BAD_EXPR_ENABLED (TBB_USE_ASSERT && TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN) - -#if TRY_BAD_EXPR_ENABLED - -//! Check that expression x raises assertion failure with message containing given substring. -/** Assumes that tbb::set_assertion_handler( AssertionFailureHandler ) was called earlier. */ -#define TRY_BAD_EXPR(x,substr) \ - { \ - const char* message = NULL; \ - bool okay = false; \ - try { \ - x; \ - } catch( AssertionFailure a ) { \ - okay = true; \ - message = a.message; \ - } \ - CheckAssertionFailure(__LINE__,#x,okay,message,substr); \ - } - -//! Exception object that holds a message. -struct AssertionFailure { - const char* message; - AssertionFailure( const char* filename, int line, const char* expression, const char* comment ); -}; - -AssertionFailure::AssertionFailure( const char* filename, int line, const char* expression, const char* comment ) : - message(comment) -{ - ASSERT(filename,"missing filename"); - ASSERT(0<line,"line number must be positive"); - // All of our current files have fewer than 4000 lines. - ASSERT(line<5000,"dubiously high line number"); - ASSERT(expression,"missing expression"); -} - -void AssertionFailureHandler( const char* filename, int line, const char* expression, const char* comment ) { - throw AssertionFailure(filename,line,expression,comment); -} - -void CheckAssertionFailure( int line, const char* expression, bool okay, const char* message, const char* substr ) { - if( !okay ) { - REPORT("Line %d, %s failed to fail\n", line, expression ); - abort(); - } else if( !message ) { - REPORT("Line %d, %s failed without a message\n", line, expression ); - abort(); - } else if( strstr(message,substr)==0 ) { - REPORT("Line %d, %s failed with message '%s' missing substring '%s'\n", __LINE__, expression, message, substr ); - abort(); - } -} - -#endif /* TRY_BAD_EXPR_ENABLED */ diff --git a/src/tbb/src/test/harness_barrier.h b/src/tbb/src/test/harness_barrier.h deleted file mode 100644 index ba1728e0d..000000000 --- a/src/tbb/src/test/harness_barrier.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
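A sketch of how a test drives TRY_BAD_EXPR above. The failing expression and expected substring are illustrative; as the comments note, tbb::set_assertion_handler(AssertionFailureHandler) must be installed first, and the block compiles only when TRY_BAD_EXPR_ENABLED holds:

```cpp
#include "tbb/blocked_range.h"

#if TRY_BAD_EXPR_ENABLED
void TestBadGrainsize() {
    tbb::set_assertion_handler( AssertionFailureHandler );
    // Expect the zero grainsize to trip a __TBB_ASSERT mentioning "grainsize".
    TRY_BAD_EXPR( tbb::blocked_range<int>(0, 10, 0), "grainsize" );
}
#endif
```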
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -#ifndef harness_barrier_H -#define harness_barrier_H - -namespace Harness { - -//! Spin WHILE the value of the variable is equal to a given value -/** T and U should be comparable types. */ -class TimedWaitWhileEq { - //! Assignment not allowed - void operator=( const TimedWaitWhileEq& ); - double &my_limit; -public: - TimedWaitWhileEq(double &n_seconds) : my_limit(n_seconds) {} - TimedWaitWhileEq(const TimedWaitWhileEq &src) : my_limit(src.my_limit) {} - template<typename T, typename U> - void operator()( const volatile T& location, U value ) const { - tbb::tick_count start = tbb::tick_count::now(); - double time_passed; - do { - time_passed = (tbb::tick_count::now()-start).seconds(); - if( time_passed < 0.0001 ) __TBB_Pause(10); else __TBB_Yield(); - } while( time_passed < my_limit && location == value); - my_limit -= time_passed; - } -}; -//! Spin WHILE the value of the variable is equal to a given value -/** T and U should be comparable types. */ -class WaitWhileEq { - //! 
Assignment not allowed - void operator=( const WaitWhileEq& ); -public: - template<typename T, typename U> - void operator()( const volatile T& location, U value ) const { - tbb::internal::spin_wait_while_eq(location, value); - } -}; -class SpinBarrier -{ - unsigned numThreads; - tbb::atomic<unsigned> numThreadsFinished; /* threads reached barrier in this epoch */ - tbb::atomic<unsigned> epoch; /* how many times this barrier used - XXX move to a separate cache line */ - - struct DummyCallback { - void operator() () const {} - template<typename T, typename U> - void operator()( const T&, U) const {} - }; - - SpinBarrier( const SpinBarrier& ); // no copy ctor - void operator=( const SpinBarrier& ); // no assignment -public: - SpinBarrier( unsigned nthreads = 0 ) { initialize(nthreads); }; - - void initialize( unsigned nthreads ) { - numThreads = nthreads; - numThreadsFinished = 0; - epoch = 0; - }; - - // onOpenBarrierCallback is called by last thread arrived on a barrier - template<typename WaitEq, typename Callback> - bool custom_wait(const WaitEq &onWaitCallback, const Callback &onOpenBarrierCallback) - { // return true if last thread - unsigned myEpoch = epoch; - int threadsLeft = numThreads - numThreadsFinished.fetch_and_increment() - 1; - ASSERT(threadsLeft>=0, "Broken barrier"); - if (threadsLeft > 0) { - /* not the last threading reaching barrier, wait until epoch changes & return 0 */ - onWaitCallback(epoch, myEpoch); - return false; - } - onOpenBarrierCallback(); - /* No more threads left to enter, so I'm the last one reaching this epoch; - reset the barrier, increment epoch, and return non-zero */ - threadsLeft = numThreadsFinished -= numThreads; - ASSERT( threadsLeft == 0, "Broken barrier"); - /* wakes up threads waiting to exit this epoch */ - myEpoch -= epoch++; - ASSERT( myEpoch == 0, "Broken barrier"); - return true; - } - bool timed_wait(double n_seconds, const char *msg="Time is out while waiting on a barrier") { - bool is_last = custom_wait(TimedWaitWhileEq(n_seconds), DummyCallback()); - ASSERT( n_seconds >= 0, msg); // TODO: refactor to avoid passing msg here and rising assertion - return is_last; - } - //! onOpenBarrierCallback is called by last thread arrived on a barrier - template<typename Callback> - bool wait(const Callback &onOpenBarrierCallback) { - return custom_wait(WaitWhileEq(), onOpenBarrierCallback); - } - bool wait(){ - return wait(DummyCallback()); - } - //! signal to the barrier, rather a semaphore functionality - bool signal_nowait() { - return custom_wait(DummyCallback(),DummyCallback()); - } -}; - -} - -#endif //harness_barrier_H diff --git a/src/tbb/src/test/harness_checktype.h b/src/tbb/src/test/harness_checktype.h deleted file mode 100644 index 8fd5c8692..000000000 --- a/src/tbb/src/test/harness_checktype.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
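A usage sketch pairing the SpinBarrier above with NativeParallelFor (thread count and phases are illustrative):

```cpp
Harness::SpinBarrier barrier(4);

struct PhasedBody {
    void operator()(int i) const {
        // ... phase 1 work for thread i ...
        if (barrier.wait())           // true in exactly one (the last) thread
            REMARK("thread %d opened the barrier\n", i);
        // every participant has finished phase 1 once wait() returns
    }
};
// elsewhere in the test: NativeParallelFor(4, PhasedBody());
```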
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef tbb_tests_harness_checktype_H -#define tbb_tests_harness_checktype_H - -// type that checks construction and destruction. - -#ifndef __HARNESS_CHECKTYPE_DEFAULT_CTOR - #define __HARNESS_CHECKTYPE_DEFAULT_CTOR 1 -#endif - -template<class Counter> -class check_type : Harness::NoAfterlife { - Counter id; - bool am_ready; -public: - static tbb::atomic<int> check_type_counter; - // if only non-default constructors are desired, set __HARNESS_CHECKTYPE_NODEFAULT_CTOR - check_type(Counter _n -#if __HARNESS_CHECKTYPE_DEFAULT_CTOR - = 0 -#endif - ) : id(_n), am_ready(false) { - ++check_type_counter; - } - - check_type(const check_type& other) : Harness::NoAfterlife(other) { - other.AssertLive(); - AssertLive(); - id = other.id; - am_ready = other.am_ready; - ++check_type_counter; - } - - operator int() const { return (int)my_id(); } - check_type& operator++() { ++id; return *this;; } - - ~check_type() { - AssertLive(); - --check_type_counter; - ASSERT(check_type_counter >= 0, "too many destructions"); - } - - check_type &operator=(const check_type &other) { - other.AssertLive(); - AssertLive(); - id = other.id; - am_ready = other.am_ready; - return *this; - } - - Counter my_id() const { AssertLive(); return id; } - bool is_ready() { AssertLive(); return am_ready; } - void function() { - AssertLive(); - if( id == (Counter)0 ) { - id = (Counter)1; - am_ready = true; - } - } - -}; - -template<class Counter> -tbb::atomic<int> check_type<Counter>::check_type_counter; - -// provide a class that for a check_type will initialize the counter on creation, and on -// destruction will check that the constructions and destructions of check_type match. -template<class MyClass> -struct Check { - Check() {} // creation does nothing - ~Check() {} // destruction checks nothing -}; - -template<class Counttype> -struct Check<check_type< Counttype > > { - Check() { check_type<Counttype>::check_type_counter = 0; } - ~Check() { ASSERT(check_type<Counttype>::check_type_counter == 0, "check_type constructions and destructions don't match"); } -}; - -#endif // tbb_tests_harness_checktype_H diff --git a/src/tbb/src/test/harness_concurrency.h b/src/tbb/src/test/harness_concurrency.h deleted file mode 100644 index 21f1bf6ae..000000000 --- a/src/tbb/src/test/harness_concurrency.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
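A usage sketch for check_type and its Check guard above (the vector is illustrative): the guard zeroes the shared construction counter when created and asserts it is back to zero when destroyed:

```cpp
#include <vector>

void TestValueLifetimes() {
    Check< check_type<int> > guard;            // resets check_type_counter to 0
    {
        std::vector< check_type<int> > v(10);  // constructions bump the counter
        v[0].function();                        // flips id 0 -> 1, marks it ready
    }                                           // destructions decrement it
}                                               // ~Check asserts the counter is 0
```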
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef tbb_tests_harness_concurrency_H -#define tbb_tests_harness_concurrency_H - -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" -#elif __linux__ -#include <unistd.h> -#include <sys/sysinfo.h> -#include <string.h> -#include <sched.h> -#elif __FreeBSD__ -#include <unistd.h> -#include <errno.h> -#include <string.h> -#include <sys/param.h> // Required by <sys/cpuset.h> -#include <sys/cpuset.h> -#endif - -#include <limits.h> - -namespace Harness { - static int maxProcs = 0; - static int GetMaxProcs() { - if ( !maxProcs ) { -#if _WIN32||_WIN64 - SYSTEM_INFO si; - GetNativeSystemInfo(&si); - maxProcs = si.dwNumberOfProcessors; -#elif __linux__ - maxProcs = get_nprocs(); -#else /* __FreeBSD__ */ - maxProcs = sysconf(_SC_NPROCESSORS_ONLN); -#endif - } - return maxProcs; - } - - int LimitNumberOfThreads(int max_threads) { - ASSERT( max_threads >= 1 , "The limited number of threads should be positive." ); - maxProcs = GetMaxProcs(); - if ( maxProcs < max_threads ) - // Suppose that process mask is not set so the number of available threads equals maxProcs - return maxProcs; - -#if _WIN32||_WIN64 - ASSERT( max_threads <= 64 , "LimitNumberOfThreads doesn't support max_threads to be more than 64 on Windows." ); - DWORD_PTR mask = 1; - for ( int i = 1; i < max_threads; ++i ) - mask |= mask << 1; - bool err = !SetProcessAffinityMask( GetCurrentProcess(), mask ); -#else /* !WIN */ -#if __linux__ - typedef cpu_set_t mask_t; -#if __TBB_MAIN_THREAD_AFFINITY_BROKEN -#define setaffinity(mask) sched_setaffinity(0 /*get the mask of the calling thread*/, sizeof(mask_t), &mask) -#else -#define setaffinity(mask) sched_setaffinity(getpid(), sizeof(mask_t), &mask) -#endif -#else /* __FreeBSD__ */ - typedef cpuset_t mask_t; -#if __TBB_MAIN_THREAD_AFFINITY_BROKEN -#define setaffinity(mask) cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(mask_t), &mask) -#else -#define setaffinity(mask) cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(mask_t), &mask) -#endif -#endif /* __FreeBSD__ */ - mask_t newMask; - CPU_ZERO(&newMask); - - int maskSize = (int)sizeof(mask_t) * CHAR_BIT; - ASSERT_WARNING( maskSize >= maxProcs, "The mask size doesn't seem to be big enough to call setaffinity. The call may return an error." ); - - ASSERT( max_threads <= (int)sizeof(mask_t) * CHAR_BIT , "The mask size is not enough to set the requested number of threads." 
); - for ( int i = 0; i < max_threads; ++i ) - CPU_SET( i, &newMask ); - int err = setaffinity( newMask ); -#endif /* !WIN */ - ASSERT( !err, "Setting process affinity failed" ); - - return max_threads; - } - -} // namespace Harness - -#endif /* tbb_tests_harness_concurrency_H */ diff --git a/src/tbb/src/test/harness_concurrency_tracker.h b/src/tbb/src/test/harness_concurrency_tracker.h deleted file mode 100644 index 75702d325..000000000 --- a/src/tbb/src/test/harness_concurrency_tracker.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef tbb_tests_harness_concurrency_tracker_H -#define tbb_tests_harness_concurrency_tracker_H - -#include "harness.h" -#include "tbb/atomic.h" -#include "../tbb/tls.h" - -namespace Harness { - -static tbb::atomic<unsigned> ctInstantParallelism; -static tbb::atomic<unsigned> ctPeakParallelism; -static tbb::internal::tls<uintptr_t> ctNested; - -class ConcurrencyTracker { - bool m_Outer; - - static void Started () { - unsigned p = ++ctInstantParallelism; - unsigned q = ctPeakParallelism; - while( q<p ) { - q = ctPeakParallelism.compare_and_swap(p,q); - } - } - - static void Stopped () { - ASSERT ( ctInstantParallelism > 0, "Mismatched call to ConcurrencyTracker::Stopped()" ); - --ctInstantParallelism; - } -public: - ConcurrencyTracker() : m_Outer(false) { - uintptr_t nested = ctNested; - ASSERT (nested == 0 || nested == 1, NULL); - if ( !ctNested ) { - Started(); - m_Outer = true; - ctNested = 1; - } - } - ~ConcurrencyTracker() { - if ( m_Outer ) { - Stopped(); - ctNested = 0; - } - } - - static unsigned PeakParallelism() { return ctPeakParallelism; } - static unsigned InstantParallelism() { return ctInstantParallelism; } - - static void Reset() { - ASSERT (ctInstantParallelism == 0, "Reset cannot be called when concurrency tracking is underway"); - ctInstantParallelism = ctPeakParallelism = 0; - } -}; // ConcurrencyTracker - -} // namespace Harness - -#endif /* tbb_tests_harness_concurrency_tracker_H */ diff --git a/src/tbb/src/test/harness_cpu.h b/src/tbb/src/test/harness_cpu.h deleted file mode 100644 index f5bd03738..000000000 --- a/src/tbb/src/test/harness_cpu.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. 
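Returning to the ConcurrencyTracker removed above, a sketch of the intended pattern (the parallel loop, iteration count, and bound are illustrative):

```cpp
#include "tbb/parallel_for.h"

struct TrackedBody {
    void operator()(int) const {
        Harness::ConcurrencyTracker ct;   // counts this thread while in scope
        // ... simulated work ...
    }
};

void TestPeakParallelism(unsigned maxThreads) {
    Harness::ConcurrencyTracker::Reset();
    tbb::parallel_for(0, 1000, TrackedBody());
    ASSERT(Harness::ConcurrencyTracker::PeakParallelism() <= maxThreads,
           "observed more concurrency than requested");
}
```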
- - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Declarations for simple estimate of CPU time being used by a program. -// This header is an optional part of the test harness. -// It assumes that "harness_assert.h" has already been included. - -#if _WIN32 -#if !_XBOX - #include <windows.h> -#endif -#else - #include <sys/time.h> - #include <sys/resource.h> -#endif - -//! Return time (in seconds) spent by the current process in user mode. -/* Returns 0 if not implemented on platform. */ -static double GetCPUUserTime() { -#if _XBOX || __TBB_WIN8UI_SUPPORT - return 0; -#elif _WIN32 - FILETIME my_times[4]; - bool status = GetProcessTimes(GetCurrentProcess(), my_times, my_times+1, my_times+2, my_times+3)!=0; - ASSERT( status, NULL ); - LARGE_INTEGER usrtime; - usrtime.LowPart = my_times[3].dwLowDateTime; - usrtime.HighPart = my_times[3].dwHighDateTime; - return double(usrtime.QuadPart)*1E-7; -#else - // Generic UNIX, including __APPLE__ - - // On Linux, there is no good way to get CPU usage info for the current process: - // getrusage(RUSAGE_SELF, ...) that is used now only returns info for the calling thread; - // getrusage(RUSAGE_CHILDREN, ...) only counts for finished children threads; - // tms_utime and tms_cutime got with times(struct tms*) are equivalent to the above items; - // finally, /proc/self/task/<task_id>/stat doesn't exist on older kernels - // and it isn't quite convenient to read it for every task_id. - - struct rusage resources; - bool status = getrusage(RUSAGE_SELF, &resources)==0; - ASSERT( status, NULL ); - return (double(resources.ru_utime.tv_sec)*1E6 + double(resources.ru_utime.tv_usec))*1E-6; -#endif -} - -#include "tbb/tick_count.h" -#include <cstdio> - -// The resolution of GetCPUUserTime is 10-15 ms or so; waittime should be a few times bigger. -const double WAITTIME = 0.1; // in seconds, i.e. 100 ms -const double THRESHOLD = WAITTIME/100; - -static void TestCPUUserTime( int nthreads, int nactive = 1 ) { - // The test will always pass on Linux; read the comments in GetCPUUserTime for details - // Also it will not detect spinning issues on systems with only one processing core. 
- - int nworkers = nthreads-nactive; - if( !nworkers ) return; - double lastusrtime = GetCPUUserTime(); - if( !lastusrtime ) return; - - static double minimal_waittime = WAITTIME, - maximal_waittime = WAITTIME * 10; - double usrtime_delta; - double waittime_delta; - tbb::tick_count stamp = tbb::tick_count::now(); - volatile intptr_t k = (intptr_t)&usrtime_delta; - // wait for GetCPUUserTime update - while( (usrtime_delta=GetCPUUserTime()-lastusrtime) < THRESHOLD ) { - for ( int i = 0; i < 1000; ++i ) ++k; // do fake work without which user time can stall - if ( (waittime_delta = (tbb::tick_count::now()-stamp).seconds()) > maximal_waittime ) { - REPORT( "Warning: %.2f sec elapsed but user mode time is still below its threshold (%g < %g)\n", - waittime_delta, usrtime_delta, THRESHOLD ); - break; - } - } - lastusrtime += usrtime_delta; - - // Wait for workers to go sleep - stamp = tbb::tick_count::now(); - while( ((waittime_delta=(tbb::tick_count::now()-stamp).seconds()) < minimal_waittime) - || ((usrtime_delta=GetCPUUserTime()-lastusrtime) < THRESHOLD) ) - { - for ( int i = 0; i < 1000; ++i ) ++k; // do fake work without which user time can stall - if ( waittime_delta > maximal_waittime ) { - REPORT( "Warning: %.2f sec elapsed but GetCPUUserTime reported only %g sec\n", waittime_delta, usrtime_delta ); - break; - } - } - - // Test that all workers sleep when no work. - while( nactive>1 && usrtime_delta-nactive*waittime_delta<0 ) { - // probably the number of active threads was mispredicted - --nactive; ++nworkers; - } - double avg_worker_usrtime = (usrtime_delta-nactive*waittime_delta)/nworkers; - - if( avg_worker_usrtime > waittime_delta/2 ) - REPORT( "ERROR: %d worker threads are spinning; waittime: %g; usrtime: %g; avg worker usrtime: %g\n", - nworkers, waittime_delta, usrtime_delta, avg_worker_usrtime); - else - REMARK("%d worker threads; waittime: %g; usrtime: %g; avg worker usrtime: %g\n", - nworkers, waittime_delta, usrtime_delta, avg_worker_usrtime); -} diff --git a/src/tbb/src/test/harness_defs.h b/src/tbb/src/test/harness_defs.h deleted file mode 100644 index 199279467..000000000 --- a/src/tbb/src/test/harness_defs.h +++ /dev/null @@ -1,174 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
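For completeness, a sketch of the typical call pattern for the TestCPUUserTime helper above, run after the scenario under test goes idle (the task_scheduler_init loop is an assumption based on common TBB 4.x test structure, not taken from this file):

```cpp
for (int p = MinThread; p <= MaxThread; ++p) {
    tbb::task_scheduler_init init(p);
    // ... run the scenario under test with p threads ...
    TestCPUUserTime(p);   // warns if the (p-1) idle workers keep spinning
}
```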
-*/ - -#ifndef __TBB_harness_defs_H -#define __TBB_harness_defs_H - -#include "tbb/tbb_config.h" -#if __FreeBSD__ -#include <sys/param.h> // for __FreeBSD_version -#endif - -#if __TBB_TEST_PIC && !__PIC__ -#define __TBB_TEST_SKIP_PIC_MODE 1 -#else -#define __TBB_TEST_SKIP_PIC_MODE 0 -#endif - -// no need to test gcc builtins mode on ICC -#define __TBB_TEST_SKIP_GCC_BUILTINS_MODE ( __TBB_TEST_BUILTINS && (!__TBB_GCC_BUILTIN_ATOMICS_PRESENT || __INTEL_COMPILER) ) - -#define __TBB_TEST_SKIP_ICC_BUILTINS_MODE ( __TBB_TEST_BUILTINS && !__TBB_ICC_BUILTIN_ATOMICS_PRESENT ) - -#ifndef TBB_USE_GCC_BUILTINS - //Force TBB to use GCC intrinsics port, but not on ICC, as no need - #define TBB_USE_GCC_BUILTINS ( __TBB_TEST_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT && !__INTEL_COMPILER ) -#endif - -#ifndef TBB_USE_ICC_BUILTINS - //Force TBB to use ICC c++11 style intrinsics port - #define TBB_USE_ICC_BUILTINS ( __TBB_TEST_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT ) -#endif - -//ICC has a bug in assumptions of the modifications made via atomic pointer -#define __TBB_ICC_BUILTIN_ATOMICS_POINTER_ALIASING_BROKEN (TBB_USE_ICC_BUILTINS && __INTEL_COMPILER < 1400 && __INTEL_COMPILER > 1200) - -#if (_WIN32 && !__TBB_WIN8UI_SUPPORT) || (__linux__ && !__ANDROID__ && !__bg__) || __FreeBSD_version >= 701000 -#define __TBB_TEST_SKIP_AFFINITY 0 -#else -#define __TBB_TEST_SKIP_AFFINITY 1 -#endif - -#if __INTEL_COMPILER - #define __TBB_LAMBDAS_PRESENT ( _TBB_CPP0X && __INTEL_COMPILER > 1100 ) - #define __TBB_CPP11_SMART_POINTERS_PRESENT ( _TBB_CPP0X && __INTEL_COMPILER >= 1200 && \ - ( _MSC_VER >= 1600 || __TBB_GCC_VERSION >= 40400 || ( __clang__ && __cplusplus >= 201103L ) ) ) - #define __TBB_CPP11_REFERENCE_WRAPPER_PRESENT ( _TBB_CPP0X && __INTEL_COMPILER >= 1200 && \ - ( _MSC_VER >= 1600 || __TBB_GCC_VERSION >= 40400 || ( __clang__ && __cplusplus >= 201103L ) ) ) - #define __TBB_RANGE_BASED_FOR_PRESENT ( _TBB_CPP0X && __INTEL_COMPILER >= 1300 ) - #define __TBB_SCOPED_ENUM_PRESENT ( _TBB_CPP0X && __INTEL_COMPILER > 1100 ) -#elif __clang__ - #define __TBB_LAMBDAS_PRESENT ( _TBB_CPP0X && __has_feature(cxx_lambdas) ) - #define __TBB_CPP11_SMART_POINTERS_PRESENT ( _TBB_CPP0X && __cplusplus >= 201103L && (__TBB_GCC_VERSION >= 40400 || _LIBCPP_VERSION) ) - #define __TBB_CPP11_REFERENCE_WRAPPER_PRESENT ( _TBB_CPP0X && __cplusplus >= 201103L && (__TBB_GCC_VERSION >= 40400 || _LIBCPP_VERSION) ) - #define __TBB_RANGE_BASED_FOR_PRESENT ( _TBB_CPP0X && __has_feature(__cxx_range_for) ) - #define __TBB_SCOPED_ENUM_PRESENT ( _TBB_CPP0X && __has_feature(cxx_strong_enums) ) -#elif __GNUC__ - #define __TBB_LAMBDAS_PRESENT ( _TBB_CPP0X && __TBB_GCC_VERSION >= 40500 ) - #define __TBB_CPP11_SMART_POINTERS_PRESENT ( _TBB_CPP0X && __TBB_GCC_VERSION >= 40400 ) - #define __TBB_CPP11_REFERENCE_WRAPPER_PRESENT ( _TBB_CPP0X && __TBB_GCC_VERSION >= 40400 ) - #define __TBB_RANGE_BASED_FOR_PRESENT ( _TBB_CPP0X && __TBB_GCC_VERSION >= 40500 ) - #define __TBB_SCOPED_ENUM_PRESENT ( _TBB_CPP0X && __TBB_GCC_VERSION >= 40400 ) -#elif _MSC_VER - #define __TBB_LAMBDAS_PRESENT ( _MSC_VER >= 1600 ) - #define __TBB_CPP11_SMART_POINTERS_PRESENT ( _MSC_VER >= 1600 ) - #define __TBB_CPP11_REFERENCE_WRAPPER_PRESENT ( _MSC_VER >= 1600 ) - #define __TBB_RANGE_BASED_FOR_PRESENT ( _MSC_VER >= 1700 ) - #define __TBB_SCOPED_ENUM_PRESENT ( _MSC_VER >= 1700 ) -#endif - -#define __TBB_TEST_SKIP_LAMBDA (__TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN || !__TBB_LAMBDAS_PRESENT) - -#if __GNUC__ && __ANDROID__ - /** Android GCC does not support _thread keyword **/ - #define 
__TBB_THREAD_LOCAL_VARIABLES_PRESENT 0 -#else - #define __TBB_THREAD_LOCAL_VARIABLES_PRESENT 1 -#endif - -#if __ANDROID__ - /** Android Bionic library does not support posix_memalign() **/ - #define __TBB_POSIX_MEMALIGN_PRESENT 0 - /** Android Bionic library does not support pvalloc() **/ - #define __TBB_PVALLOC_PRESENT 0 -#else - #define __TBB_POSIX_MEMALIGN_PRESENT 1 - #define __TBB_PVALLOC_PRESENT 1 -#endif - -//MSVC 2013 is unable to properly resolve call to overloaded operator= with std::initializer_list argument for std::pair list elements -#define __TBB_CPP11_INIT_LIST_ASSIGN_OP_RESOLUTION_BROKEN (_MSC_FULL_VER <= 180030501 && _MSC_VER && !__INTEL_COMPILER) -//MSVC 2013 is unable to manage lifetime of temporary objects passed to a std::initializer_list constructor properly -#define __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN (_MSC_FULL_VER < 180030501 && _MSC_VER && !__INTEL_COMPILER) -//Implementation of C++11 std::placeholders in libstdc++ coming with gcc prior to 4.5 reveals bug in Intel Compiler 13 causing "multiple definition" link errors. -#define __TBB_CPP11_STD_PLACEHOLDERS_LINKAGE_BROKEN ((__INTEL_COMPILER == 1300 || __INTEL_COMPILER == 1310 )&& __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION < 40500) - -#if __GNUC__ && __ANDROID__ - #define __TBB_EXCEPTION_TYPE_INFO_BROKEN ( __TBB_GCC_VERSION < 40600 ) -#elif _MSC_VER - #define __TBB_EXCEPTION_TYPE_INFO_BROKEN ( _MSC_VER < 1400 ) -#else - #define __TBB_EXCEPTION_TYPE_INFO_BROKEN 0 -#endif - -//! a function ptr cannot be converted to const T& template argument without explicit cast -#define __TBB_FUNC_PTR_AS_TEMPL_PARAM_BROKEN ( ((__linux__ || __APPLE__) && __INTEL_COMPILER && __INTEL_COMPILER < 1100) || __SUNPRO_CC ) -#define __TBB_UNQUALIFIED_CALL_OF_DTOR_BROKEN (__GNUC__==3 && __GNUC_MINOR__<=3) - -#define __TBB_CAS_8_CODEGEN_BROKEN (__TBB_x86_32 && __PIC__ && __TBB_GCC_VERSION == 40102 && !__INTEL_COMPILER) - -#define __TBB_THROW_FROM_DTOR_BROKEN (__clang__ && (__apple_build_version__ && __apple_build_version__ < 5000279 || __TBB_CLANG_VERSION && __TBB_CLANG_VERSION < 50000)) - -#if __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN - #define _EXCEPTION_PTR_H /* prevents exception_ptr.h inclusion */ - #define _GLIBCXX_NESTED_EXCEPTION_H /* prevents nested_exception.h inclusion */ -#endif - -// The tuple-based tests with more inputs take a long time to compile. If changes -// are made to the tuple implementation or any switch that controls it, or if testing -// with a new platform implementation of std::tuple, the test should be compiled with -// MAX_TUPLE_TEST_SIZE >= 10 (or the largest number of elements supported) to ensure -// all tuple sizes are tested. Expect a very long compile time. -#ifndef MAX_TUPLE_TEST_SIZE - #if TBB_USE_DEBUG - #define MAX_TUPLE_TEST_SIZE 3 - #else - #define MAX_TUPLE_TEST_SIZE 5 - #endif -#else - #if _MSC_VER -// test sizes <= 8 don't get "decorated name length exceeded" errors. (disable : 4503) - #if MAX_TUPLE_TEST_SIZE > 8 - #undef MAX_TUPLE_TEST_SIZE - #define MAX_TUPLE_TEST_SIZE 8 - #endif - #endif - #if MAX_TUPLE_TEST_SIZE > __TBB_VARIADIC_MAX - #undef MAX_TUPLE_TEST_SIZE - #define MAX_TUPLE_TEST_SIZE __TBB_VARIADIC_MAX - #endif -#endif - -#ifndef TBB_PREVIEW_FLOW_GRAPH_FEATURES - #if __TBB_CPF_BUILD - #define TBB_PREVIEW_FLOW_GRAPH_FEATURES 1 - #endif -#endif - -namespace Harness { - //! Utility template function to prevent "unused" warnings by various compilers. - template<typename T> void suppress_unused_warning( const T& ) {} - - //TODO: unify with one in tbb::internal - //! 
Utility helper structure to ease overload resolution - template<int > struct int_to_type {}; -} - -#endif /* __TBB_harness_defs_H */ diff --git a/src/tbb/src/test/harness_dynamic_libs.h b/src/tbb/src/test/harness_dynamic_libs.h deleted file mode 100644 index e3ddb160a..000000000 --- a/src/tbb/src/test/harness_dynamic_libs.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -// Include this header file before harness.h for HARNESS_SKIP_TEST to take effect -#if !__TBB_DYNAMIC_LOAD_ENABLED -#define HARNESS_SKIP_TEST 1 -#else - -#if _WIN32 || _WIN64 -#include "tbb/machine/windows_api.h" -#else -#include <dlfcn.h> -#endif -#include "harness_assert.h" - -namespace Harness { - -#if TBB_USE_DEBUG -#define SUFFIX1 "_debug" -#define SUFFIX2 -#else -#define SUFFIX1 -#define SUFFIX2 "_debug" -#endif /* TBB_USE_DEBUG */ - -#if _WIN32||_WIN64 -#define PREFIX -#define EXT ".dll" -#else -#define PREFIX "lib" -#if __APPLE__ -#define EXT ".dylib" -// Android SDK build system does not support .so file name versioning -#elif __FreeBSD__ || __NetBSD__ || __sun || _AIX || __ANDROID__ -#define EXT ".so" -#elif __linux__ // Order of these elif's matters! -#define EXT __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION) -#else -#error Unknown OS -#endif -#endif - -// Form the names of the TBB memory allocator binaries. 
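The naming macros above and the OpenLibrary/GetAddress/CloseLibrary helpers defined below combine as in this hypothetical usage sketch. ProbeMallocLibrary is illustrative only; scalable_malloc is a real symbol exported by tbbmalloc, and error handling is delegated to the ASSERTs inside the helpers.

    void ProbeMallocLibrary() {
        // MALLOCLIB_NAME1 expands to e.g. "libtbbmalloc.so.2" on a Linux release build.
        Harness::LIBRARY_HANDLE lib = Harness::OpenLibrary( MALLOCLIB_NAME1 );
        Harness::FunctionAddress fn = Harness::GetAddress( lib, "scalable_malloc" );
        (void)fn;   // resolved but deliberately not called in this sketch
        Harness::CloseLibrary( lib );
    }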
-#define MALLOCLIB_NAME1 PREFIX "tbbmalloc" SUFFIX1 EXT -#define MALLOCLIB_NAME2 PREFIX "tbbmalloc" SUFFIX2 EXT - -#if _WIN32 || _WIN64 -typedef HMODULE LIBRARY_HANDLE; -#else -typedef void *LIBRARY_HANDLE; -#endif - -#if _WIN32 || _WIN64 -#define TEST_LIBRARY_NAME(base) base".dll" -#elif __APPLE__ -#define TEST_LIBRARY_NAME(base) base".dylib" -#else -#define TEST_LIBRARY_NAME(base) base".so" -#endif - -LIBRARY_HANDLE OpenLibrary(const char *name) -{ -#if _WIN32 || _WIN64 -#if __TBB_WIN8UI_SUPPORT - TCHAR wlibrary[MAX_PATH]; - if ( MultiByteToWideChar(CP_UTF8, 0, name, -1, wlibrary, MAX_PATH) == 0 ) return false; - return :: LoadPackagedLibrary( wlibrary, 0 ); -#else - return ::LoadLibrary(name); -#endif -#else - return dlopen(name, RTLD_NOW|RTLD_GLOBAL); -#endif -} - -void CloseLibrary(LIBRARY_HANDLE lib) -{ -#if _WIN32 || _WIN64 - BOOL ret = FreeLibrary(lib); - ASSERT(ret, "FreeLibrary must be successful"); -#else - int ret = dlclose(lib); - ASSERT(ret == 0, "dlclose must be successful"); -#endif -} - -typedef void (*FunctionAddress)(); - -FunctionAddress GetAddress(Harness::LIBRARY_HANDLE lib, const char *name) -{ - union { FunctionAddress func; void *symb; } converter; -#if _WIN32 || _WIN64 - converter.symb = (void*)GetProcAddress(lib, name); -#else - converter.symb = (void*)dlsym(lib, name); -#endif - ASSERT(converter.func, "Can't find required symbol in dynamic library"); - return converter.func; -} - -} // namespace Harness - -#endif // __TBB_DYNAMIC_LOAD_ENABLED diff --git a/src/tbb/src/test/harness_eh.h b/src/tbb/src/test/harness_eh.h deleted file mode 100644 index 8cb15cae5..000000000 --- a/src/tbb/src/test/harness_eh.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include <typeinfo> -#include "tbb/tbb_exception.h" -#include "tbb/atomic.h" -#if USE_TASK_SCHEDULER_OBSERVER -#include "tbb/task_scheduler_observer.h" -#endif -#include "harness.h" -#include "harness_concurrency_tracker.h" - -int g_NumThreads = 0; -Harness::tid_t g_Master = 0; -const char * g_Orig_Wakeup_Msg = "Missed wakeup or machine is overloaded?"; -const char * g_Wakeup_Msg = g_Orig_Wakeup_Msg; - -tbb::atomic<intptr_t> g_CurExecuted, - g_ExecutedAtLastCatch, - g_ExecutedAtFirstCatch, - g_ExceptionsThrown, - g_MasterExecutedThrow, // number of times master entered exception code - g_NonMasterExecutedThrow, // number of times nonmaster entered exception code - g_PipelinesStarted; -volatile bool g_ExceptionCaught = false, - g_UnknownException = false; - -#if USE_TASK_SCHEDULER_OBSERVER -tbb::atomic<intptr_t> g_ActualMaxThreads; -tbb::atomic<intptr_t> g_ActualCurrentThreads; -#endif - -volatile bool g_ThrowException = true, - // g_Flog is true for nested construct tests with catches (exceptions are not allowed to - // propagate to the tbb construct itself.) - g_Flog = false, - g_MasterExecuted = false, - g_NonMasterExecuted = false; - -bool g_ExceptionInMaster = false; -bool g_SolitaryException = false; -bool g_NestedPipelines = false; - -//! Number of exceptions propagated into the user code (i.e. intercepted by the tests) -tbb::atomic<intptr_t> g_NumExceptionsCaught; - -//----------------------------------------------------------- - -#if USE_TASK_SCHEDULER_OBSERVER -class eh_test_observer : public tbb::task_scheduler_observer { -public: - /*override*/ - void on_scheduler_entry(bool is_worker) { - if(is_worker) { // we've already counted the master - size_t p = ++g_ActualCurrentThreads; - size_t q = g_ActualMaxThreads; - while(q < p) { - q = g_ActualMaxThreads.compare_and_swap(p,q); - } - } - else { - // size_t q = g_ActualMaxThreads; - } - } - /*override*/ - void on_scheduler_exit(bool is_worker) { - if(is_worker) { - --g_ActualCurrentThreads; - } - } -}; -#endif -//----------------------------------------------------------- - -inline void ResetEhGlobals ( bool throwException = true, bool flog = false ) { - Harness::ConcurrencyTracker::Reset(); - g_CurExecuted = g_ExecutedAtLastCatch = g_ExecutedAtFirstCatch = 0; - g_ExceptionCaught = false; - g_UnknownException = false; - g_NestedPipelines = false; - g_ThrowException = throwException; - g_MasterExecutedThrow = 0; - g_NonMasterExecutedThrow = 0; - g_Flog = flog; - g_MasterExecuted = false; - g_NonMasterExecuted = false; -#if USE_TASK_SCHEDULER_OBSERVER - g_ActualMaxThreads = 1; // count master - g_ActualCurrentThreads = 1; // count master -#endif - g_ExceptionsThrown = g_NumExceptionsCaught = g_PipelinesStarted = 0; -} - -#if TBB_USE_EXCEPTIONS -class test_exception : public std::exception { - const char* my_description; -public: - test_exception ( const char* description ) : my_description(description) {} - - const char* what() const throw() { return my_description; } -}; - -class solitary_test_exception : public test_exception { -public: - solitary_test_exception ( const char* description ) : test_exception(description) {} -}; - -#if TBB_USE_CAPTURED_EXCEPTION - typedef tbb::captured_exception PropagatedException; - #define EXCEPTION_NAME(e) e.name() -#else - typedef test_exception PropagatedException; - #define EXCEPTION_NAME(e) typeid(e).name() -#endif - -#define EXCEPTION_DESCR "Test exception" - -#if HARNESS_EH_SIMPLE_MODE - -static void ThrowTestException () { - ++g_ExceptionsThrown; - throw 
test_exception(EXCEPTION_DESCR); -} - -#else /* !HARNESS_EH_SIMPLE_MODE */ - -static void ThrowTestException ( intptr_t threshold ) { - bool inMaster = (Harness::CurrentTid() == g_Master); - if ( !g_ThrowException || // if we're not supposed to throw - (!g_Flog && // if we're not catching throw in bodies and - (g_ExceptionInMaster ^ inMaster)) ) { // we're the master and not expected to throw - // or are the master and the master is not the one to throw (??) - return; - } - while ( Existed() < threshold ) - __TBB_Yield(); - if ( !g_SolitaryException ) { - ++g_ExceptionsThrown; - if(inMaster) ++g_MasterExecutedThrow; else ++g_NonMasterExecutedThrow; - throw test_exception(EXCEPTION_DESCR); - } - // g_SolitaryException == true - if(g_NestedPipelines) { - // only throw exception if we have started at least two inner pipelines - // else return - if(g_PipelinesStarted >= 3) { - if ( g_ExceptionsThrown.compare_and_swap(1, 0) == 0 ) { - if(inMaster) ++g_MasterExecutedThrow; else ++g_NonMasterExecutedThrow; - throw solitary_test_exception(EXCEPTION_DESCR); - } - } - } - else { - if ( g_ExceptionsThrown.compare_and_swap(1, 0) == 0 ) { - if(inMaster) ++g_MasterExecutedThrow; else ++g_NonMasterExecutedThrow; - throw solitary_test_exception(EXCEPTION_DESCR); - } - } -} -#endif /* !HARNESS_EH_SIMPLE_MODE */ - -#define UPDATE_COUNTS() \ - { \ - ++g_CurExecuted; \ - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; \ - else g_NonMasterExecuted = true; \ - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; \ - } - -#define CATCH() \ - } catch ( PropagatedException& e ) { \ - g_ExecutedAtFirstCatch.compare_and_swap(g_CurExecuted,0); \ - g_ExecutedAtLastCatch = g_CurExecuted; \ - ASSERT( e.what(), "Empty what() string" ); \ - ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(EXCEPTION_NAME(e), (g_SolitaryException ? typeid(solitary_test_exception) : typeid(test_exception)).name() ) == 0, "Unexpected original exception name"); \ - ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR) == 0, "Unexpected original exception info"); \ - g_ExceptionCaught = l_ExceptionCaughtAtCurrentLevel = true; \ - ++g_NumExceptionsCaught; \ - } catch ( tbb::tbb_exception& e ) { \ - REPORT("Unexpected %s\n", e.name()); \ - ASSERT (g_UnknownException && !g_UnknownException, "Unexpected tbb::tbb_exception" ); \ - } catch ( std::exception& e ) { \ - REPORT("Unexpected %s\n", typeid(e).name()); \ - ASSERT (g_UnknownException && !g_UnknownException, "Unexpected std::exception" ); \ - } catch ( ... 
) { \ - g_ExceptionCaught = l_ExceptionCaughtAtCurrentLevel = true; \ - g_UnknownException = unknownException = true; \ - } \ - if ( !g_SolitaryException ) \ - REMARK_ONCE ("Multiple exceptions mode: %d throws", (intptr_t)g_ExceptionsThrown); - -#define ASSERT_EXCEPTION() \ - { \ - ASSERT (!g_ExceptionsThrown || g_ExceptionCaught, "throw without catch"); \ - ASSERT (!g_ExceptionCaught || g_ExceptionsThrown, "catch without throw"); \ - ASSERT (g_ExceptionCaught || (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow), "no exception occurred"); \ - ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || !g_UnknownException, "unknown exception was caught"); \ - } - -#define CATCH_AND_ASSERT() \ - CATCH() \ - ASSERT_EXCEPTION() - -#else /* !TBB_USE_EXCEPTIONS */ - -inline void ThrowTestException ( intptr_t ) {} - -#endif /* !TBB_USE_EXCEPTIONS */ - -#define TRY() \ - bool l_ExceptionCaughtAtCurrentLevel = false, unknownException = false; \ - __TBB_TRY { - -// "l_ExceptionCaughtAtCurrentLevel || unknownException" is used only to "touch" otherwise unused local variables -#define CATCH_AND_FAIL() } __TBB_CATCH(...) { \ - ASSERT (false, "Cancelling tasks must not cause any exceptions"); \ - (void)(l_ExceptionCaughtAtCurrentLevel && unknownException); \ - } - -const int c_Timeout = 1000000; - -void WaitUntilConcurrencyPeaks ( int expected_peak ) { - if ( g_Flog ) - return; - int n = 0; -retry: - while ( ++n < c_Timeout && (int)Harness::ConcurrencyTracker::PeakParallelism() < expected_peak ) - __TBB_Yield(); -#if USE_TASK_SCHEDULER_OBSERVER - ASSERT_WARNING( g_NumThreads == g_ActualMaxThreads, "Library did not provide sufficient threads"); -#endif - ASSERT_WARNING(n < c_Timeout,g_Wakeup_Msg); - // Workaround in case a missed wakeup takes place - if ( n == c_Timeout ) { - tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task(); - r.spawn(r); - n = 0; - goto retry; - } -} - -inline void WaitUntilConcurrencyPeaks () { WaitUntilConcurrencyPeaks(g_NumThreads); } - -inline bool IsMaster() { - return Harness::CurrentTid() == g_Master; -} - -inline bool IsThrowingThread() { - return g_ExceptionInMaster ^ IsMaster() ? true : false; -} - -class CancellatorTask : public tbb::task { - static volatile bool s_Ready; - tbb::task_group_context &m_groupToCancel; - intptr_t m_cancellationThreshold; - - tbb::task* execute () { - Harness::ConcurrencyTracker ct; - s_Ready = true; - while ( g_CurExecuted < m_cancellationThreshold ) - __TBB_Yield(); - m_groupToCancel.cancel_group_execution(); - g_ExecutedAtLastCatch = g_CurExecuted; - return NULL; - } -public: - CancellatorTask ( tbb::task_group_context& ctx, intptr_t threshold ) - : m_groupToCancel(ctx), m_cancellationThreshold(threshold) - { - s_Ready = false; - } - - static void Reset () { s_Ready = false; } - - static bool WaitUntilReady () { - const intptr_t limit = 10000000; - intptr_t n = 0; - do { - __TBB_Yield(); - } while( !s_Ready && ++n < limit ); - // should yield once, then continue if Cancellator is ready. 
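The cancellation pattern that CancellatorTask and RunCancellationTest (below) exercise with raw tasks can also be reproduced with only public TBB APIs. A minimal C++11 sketch, with illustrative names that are not part of the harness: one iteration cancels the shared context mid-run, and the caller checks that cancellation was observed rather than an error.

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/task.h"   // tbb::task_group_context

    inline bool SketchCancellation() {
        tbb::task_group_context ctx;
        tbb::parallel_for( tbb::blocked_range<int>( 0, 1000000 ),
            [&ctx]( const tbb::blocked_range<int> &r ) {
                if ( r.begin() >= 1000 )
                    ctx.cancel_group_execution();   // plays the CancellatorTask role
            },
            tbb::auto_partitioner(), ctx );
        return ctx.is_group_execution_cancelled();
    }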
- ASSERT( s_Ready || n == limit, NULL ); - return s_Ready; - } -}; - -volatile bool CancellatorTask::s_Ready = false; - -template<class LauncherTaskT, class CancellatorTaskT> -void RunCancellationTest ( intptr_t threshold = 1 ) -{ - tbb::task_group_context ctx; - tbb::empty_task &r = *new( tbb::task::allocate_root(ctx) ) tbb::empty_task; - r.set_ref_count(3); - r.spawn( *new( r.allocate_child() ) CancellatorTaskT(ctx, threshold) ); - __TBB_Yield(); - r.spawn( *new( r.allocate_child() ) LauncherTaskT(ctx) ); - TRY(); - r.wait_for_all(); - CATCH_AND_FAIL(); - r.destroy(r); -} diff --git a/src/tbb/src/test/harness_fp.h b/src/tbb/src/test/harness_fp.h deleted file mode 100644 index cc0511a5d..000000000 --- a/src/tbb/src/test/harness_fp.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// include system header to prevent standard library to be included under private=public first time -#include <cstddef> -#define private public -#include "tbb/tbb_machine.h" -#undef private -#include "harness_assert.h" - -#if ( __TBB_x86_32 || __TBB_x86_64 ) && __TBB_CPU_CTL_ENV_PRESENT && !defined(__TBB_WIN32_USE_CL_BUILTINS) - -const int FE_TONEAREST = 0x0000, - FE_DOWNWARD = 0x0400, - FE_UPWARD = 0x0800, - FE_TOWARDZERO = 0x0c00, - FE_RND_MODE_MASK = FE_TOWARDZERO, - SSE_RND_MODE_MASK = FE_RND_MODE_MASK << 3, - SSE_DAZ = 0x0040, - SSE_FTZ = 0x8000, - SSE_MODE_MASK = SSE_DAZ | SSE_FTZ, - SSE_STATUS_MASK = 0x3F; - -const int NumSseModes = 4; -const int SseModes[NumSseModes] = { 0, SSE_DAZ, SSE_FTZ, SSE_DAZ | SSE_FTZ }; - -#if _WIN64 && !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE && !__MINGW64__ -// MinGW uses inline implementation from tbb/machine/linux_intel64.h -// and when inline asm is not available, the library uses out of line assembly which is not exported -// thus reimplementing them here - -#include <float.h> - -inline void __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* fe ) { - fe->x87cw = short(_control87(0, 0) & _MCW_RC) << 2; - fe->mxcsr = _mm_getcsr(); -} -inline void __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* fe ) { - ASSERT( (fe->x87cw & FE_RND_MODE_MASK) == ((fe->x87cw & FE_RND_MODE_MASK) >> 2 & _MCW_RC) << 2, "Check float.h constants" ); - _control87( (fe->x87cw & FE_RND_MODE_MASK) >> 6, _MCW_RC ); - _mm_setcsr( fe->mxcsr ); -} - -#endif /* _WIN64 && !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE && !__MINGW64__ */ - -inline int GetRoundingMode ( bool checkConsistency = true ) { - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - ASSERT( !checkConsistency || (ctl.mxcsr & SSE_RND_MODE_MASK) >> 3 == (ctl.x87cw & FE_RND_MODE_MASK), NULL ); - return ctl.x87cw & FE_RND_MODE_MASK; -} - -inline void SetRoundingMode ( int mode ) { - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - ctl.mxcsr = (ctl.mxcsr & ~SSE_RND_MODE_MASK) | (mode & FE_RND_MODE_MASK) << 3; - ctl.x87cw = short((ctl.x87cw & ~FE_RND_MODE_MASK) | (mode & FE_RND_MODE_MASK)); - ctl.set_env(); -} - -inline int GetSseMode () { - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - return ctl.mxcsr & SSE_MODE_MASK; -} - -inline void SetSseMode ( int mode ) { - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - ctl.mxcsr = (ctl.mxcsr & ~SSE_MODE_MASK) | (mode & SSE_MODE_MASK); - ctl.set_env(); -} - -#elif defined(_M_ARM) || defined(__TBB_WIN32_USE_CL_BUILTINS) -const int NumSseModes = 1; -const int SseModes[NumSseModes] = { 0 }; - -inline int GetSseMode () { return 0; } -inline void SetSseMode ( int ) {} - -const int FE_TONEAREST = _RC_NEAR, - FE_DOWNWARD = _RC_DOWN, - FE_UPWARD = _RC_UP, - FE_TOWARDZERO = _RC_CHOP; - -inline int GetRoundingMode ( bool = true ) { - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - return ctl.my_ctl; -} -inline void SetRoundingMode ( int mode ) { - tbb::internal::cpu_ctl_env ctl; - ctl.my_ctl = mode; - ctl.set_env(); -} - -#else /* Other archs */ - -#include <fenv.h> - -const int RND_MODE_MASK = FE_TONEAREST | FE_DOWNWARD | FE_UPWARD | FE_TOWARDZERO; - -const int NumSseModes = 1; -const int SseModes[NumSseModes] = { 0 }; - -inline int GetRoundingMode ( bool = true ) { return fegetround(); } -inline void SetRoundingMode ( int rnd ) { fesetround(rnd); } - -inline int GetSseMode () { return 0; } -inline void SetSseMode ( int ) {} - -#endif /* Other archs */ - -const int NumRoundingModes = 4; -const int RoundingModes[NumRoundingModes] = { FE_TONEAREST, FE_DOWNWARD, FE_UPWARD, 
FE_TOWARDZERO }; -const int numFPModes = NumRoundingModes*NumSseModes; - -inline void SetFPMode( int mode ) { - SetRoundingMode( RoundingModes[mode/NumSseModes%NumRoundingModes] ); - SetSseMode( SseModes[mode%NumSseModes] ); -} - -#define AssertFPMode( mode ) { \ - ASSERT( GetRoundingMode() == RoundingModes[mode/NumSseModes%NumRoundingModes], "FPU control state has not been set correctly." ); \ - ASSERT( GetSseMode() == SseModes[mode%NumSseModes], "SSE control state has not been set correctly." ); \ -} - -inline int SetNextFPMode( int mode, int step = 1 ) { - const int nextMode = (mode+step)%numFPModes; - SetFPMode( nextMode ); - return nextMode; -} - -class FPModeContext { - int origSse, origRounding; - int currentMode; -public: - FPModeContext(int newMode) { - origSse = GetSseMode(); - origRounding = GetRoundingMode(); - SetFPMode(currentMode = newMode); - } - ~FPModeContext() { - assertFPMode(); - SetRoundingMode(origRounding); - SetSseMode(origSse); - } - int setNextFPMode() { - assertFPMode(); - return currentMode = SetNextFPMode(currentMode); - } - void assertFPMode() { - AssertFPMode(currentMode); - } -}; diff --git a/src/tbb/src/test/harness_graph.h b/src/tbb/src/test/harness_graph.h deleted file mode 100644 index afaa0983f..000000000 --- a/src/tbb/src/test/harness_graph.h +++ /dev/null @@ -1,960 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/** @file harness_graph.cpp - This contains common helper classes and functions for testing graph nodes -**/ - -#ifndef harness_graph_H -#define harness_graph_H - -#include "harness.h" -#include "tbb/flow_graph.h" -#include "tbb/null_rw_mutex.h" -#include "tbb/atomic.h" -#include "tbb/concurrent_unordered_map.h" -#include "tbb/task.h" - -// Needed conversion to and from continue_msg, but didn't want to add -// conversion operators to the class, since we don't want it in general, -// only in these tests. 
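Concretely, the convertor helpers defined just below let a single test template bridge ordinary value messages and continue_msg signals in either direction; an illustrative use (not from the harness itself):

    // Any value collapses to a continue_msg; a continue_msg expands to a
    // default-constructed value, so both directions are well-defined.
    tbb::flow::continue_msg m =
        convertor<int, tbb::flow::continue_msg>::convert_value( 42 );
    int zero = convertor<tbb::flow::continue_msg, int>::convert_value( m );   // int() == 0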
-template<typename InputType, typename OutputType> -struct convertor { - static OutputType convert_value(const InputType &i) { - return OutputType(i); - } -}; - -template<typename InputType> -struct convertor<InputType,tbb::flow::continue_msg> { - static tbb::flow::continue_msg convert_value(const InputType &/*i*/) { - return tbb::flow::continue_msg(); - } -}; - -template<typename OutputType> -struct convertor<tbb::flow::continue_msg,OutputType> { - static OutputType convert_value(const tbb::flow::continue_msg &/*i*/) { - return OutputType(); - } -}; - -// helper for multifunction_node tests. -template<size_t N> -struct mof_helper { - template<typename InputType, typename ports_type> - static inline void output_converted_value(const InputType &i, ports_type &p) { - (void)tbb::flow::get<N-1>(p).try_put(convertor<InputType,typename tbb::flow::tuple_element<N-1,ports_type>::type::output_type>::convert_value(i)); - output_converted_value<N-1>(i, p); - } -}; - -template<> -struct mof_helper<1> { - template<typename InputType, typename ports_type> - static inline void output_converted_value(const InputType &i, ports_type &p) { - // just emit a default-constructed object - (void)tbb::flow::get<0>(p).try_put(convertor<InputType,typename tbb::flow::tuple_element<0,ports_type>::type::output_type>::convert_value(i)); - } -}; - -template< typename InputType, typename OutputType > -struct harness_graph_default_functor { - static OutputType construct( InputType v ) { - return OutputType(v); - } -}; - -template< typename OutputType > -struct harness_graph_default_functor< tbb::flow::continue_msg, OutputType > { - static OutputType construct( tbb::flow::continue_msg ) { - return OutputType(); - } -}; - -template< typename InputType > -struct harness_graph_default_functor< InputType, tbb::flow::continue_msg > { - static tbb::flow::continue_msg construct( InputType ) { - return tbb::flow::continue_msg(); - } -}; - -template< > -struct harness_graph_default_functor< tbb::flow::continue_msg, tbb::flow::continue_msg > { - static tbb::flow::continue_msg construct( tbb::flow::continue_msg ) { - return tbb::flow::continue_msg(); - } -}; - -template<typename InputType, typename OutputSet> -struct harness_graph_default_multifunction_functor { - static const int N = tbb::flow::tuple_size<OutputSet>::value; - typedef typename tbb::flow::multifunction_node<InputType,OutputSet>::output_ports_type ports_type; - static void construct(const InputType &i, ports_type &p) { - mof_helper<N>::output_converted_value(i, p); - } -}; - -//! An executor that accepts InputType and generates OutputType -template< typename InputType, typename OutputType > -struct harness_graph_executor { - - typedef OutputType (*function_ptr_type)( InputType v ); - - template<typename RW> - struct mutex_holder { static RW mutex; }; - - static function_ptr_type fptr; - static tbb::atomic<size_t> execute_count; - static tbb::atomic<size_t> current_executors; - static size_t max_executors; - - static inline OutputType func( InputType v ) { - size_t c; // Declaration separate from initialization to avoid ICC internal error on IA-64 architecture - c = current_executors.fetch_and_increment(); - ASSERT( max_executors == 0 || c <= max_executors, NULL ); - ++execute_count; - OutputType v2 = (*fptr)(v); - current_executors.fetch_and_decrement(); - return v2; - } - - template< typename RW > - static inline OutputType tfunc( InputType v ) { - // Invocations allowed to be concurrent, the lock is acquired in shared ("read") mode. 
- // A test can take it exclusively, thus creating a barrier for invocations. - typename RW::scoped_lock l( mutex_holder<RW>::mutex, /*write=*/false ); - return func(v); - } - - template< typename RW > - struct tfunctor { - tbb::atomic<size_t> my_execute_count; - tfunctor() { my_execute_count = 0; } - tfunctor( const tfunctor &f ) { my_execute_count = f.my_execute_count; } - OutputType operator()( InputType i ) { - typename RW::scoped_lock l( harness_graph_executor::mutex_holder<RW>::mutex, /*write=*/false ); - my_execute_count.fetch_and_increment(); - return harness_graph_executor::func(i); - } - }; - typedef tfunctor<tbb::null_rw_mutex> functor; - -}; - -//! A multifunction executor that accepts InputType and has only one Output of OutputType. -template< typename InputType, typename OutputTuple > -struct harness_graph_multifunction_executor { - typedef typename tbb::flow::multifunction_node<InputType,OutputTuple>::output_ports_type ports_type; - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type OutputType; - - typedef void (*mfunction_ptr_type)( const InputType& v, ports_type &p ); - - template<typename RW> - struct mutex_holder { static RW mutex; }; - - static mfunction_ptr_type fptr; - static tbb::atomic<size_t> execute_count; - static tbb::atomic<size_t> current_executors; - static size_t max_executors; - - - static inline void func( const InputType &v, ports_type &p ) { - size_t c; // Declaration separate from initialization to avoid ICC internal error on IA-64 architecture - c = current_executors.fetch_and_increment(); - ASSERT( max_executors == 0 || c <= max_executors, NULL ); - ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 1, NULL); - ++execute_count; - (*fptr)(v,p); - current_executors.fetch_and_decrement(); - } - - template< typename RW > - static inline void tfunc( const InputType& v, ports_type &p ) { - // Shared lock in invocations, exclusive in a test; see a comment in harness_graph_executor. 
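That shared-versus-exclusive trick generalizes: any reader-writer mutex can serve as a gate that stalls every invocation at once. A standalone sketch with a real TBB mutex type; the global and function names are illustrative, not part of the harness.

    #include "tbb/spin_rw_mutex.h"

    tbb::spin_rw_mutex gate;   // illustrative global

    void worker_invocation() {
        // Shared ("read") lock: any number of invocations may run concurrently.
        tbb::spin_rw_mutex::scoped_lock l( gate, /*write=*/false );
        // ... body of the node runs here ...
    }

    void hold_all_invocations() {
        // Exclusive ("write") lock: while held, no worker_invocation() can enter.
        tbb::spin_rw_mutex::scoped_lock l( gate, /*write=*/true );
    }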
- typename RW::scoped_lock l( mutex_holder<RW>::mutex, /*write=*/false ); - func(v,p); - } - - template< typename RW > - struct tfunctor { - tbb::atomic<size_t> my_execute_count; - tfunctor() { my_execute_count = 0; } - tfunctor( const tfunctor &f ) { my_execute_count = f.my_execute_count; } - void operator()( const InputType &i, ports_type &p ) { - typename RW::scoped_lock l( harness_graph_multifunction_executor::mutex_holder<RW>::mutex, /*write=*/false ); - my_execute_count.fetch_and_increment(); - harness_graph_multifunction_executor::func(i,p); - } - }; - typedef tfunctor<tbb::null_rw_mutex> functor; - -}; - -// static vars for function_node tests -template< typename InputType, typename OutputType > -template< typename RW > -RW harness_graph_executor<InputType, OutputType>::mutex_holder<RW>::mutex; - -template< typename InputType, typename OutputType > -tbb::atomic<size_t> harness_graph_executor<InputType, OutputType>::execute_count; - -template< typename InputType, typename OutputType > -typename harness_graph_executor<InputType, OutputType>::function_ptr_type harness_graph_executor<InputType, OutputType>::fptr - = harness_graph_default_functor< InputType, OutputType >::construct; - -template< typename InputType, typename OutputType > -tbb::atomic<size_t> harness_graph_executor<InputType, OutputType>::current_executors; - -template< typename InputType, typename OutputType > -size_t harness_graph_executor<InputType, OutputType>::max_executors = 0; - -// static vars for multifunction_node tests -template< typename InputType, typename OutputTuple > -template< typename RW > -RW harness_graph_multifunction_executor<InputType, OutputTuple>::mutex_holder<RW>::mutex; - -template< typename InputType, typename OutputTuple > -tbb::atomic<size_t> harness_graph_multifunction_executor<InputType, OutputTuple>::execute_count; - -template< typename InputType, typename OutputTuple > -typename harness_graph_multifunction_executor<InputType, OutputTuple>::mfunction_ptr_type harness_graph_multifunction_executor<InputType, OutputTuple>::fptr - = harness_graph_default_multifunction_functor< InputType, OutputTuple >::construct; - -template< typename InputType, typename OutputTuple > -tbb::atomic<size_t> harness_graph_multifunction_executor<InputType, OutputTuple>::current_executors; - -template< typename InputType, typename OutputTuple > -size_t harness_graph_multifunction_executor<InputType, OutputTuple>::max_executors = 0; - -//! 
Counts the number of puts received -template< typename T > -struct harness_counting_receiver : public tbb::flow::receiver<T>, NoCopy { - - tbb::atomic< size_t > my_count; - T max_value; - size_t num_copies; - - harness_counting_receiver() : num_copies(1) { - my_count = 0; - } - - void initialize_map( const T& m, size_t c ) { - my_count = 0; - max_value = m; - num_copies = c; - } - - /* override */ tbb::task *try_put_task( const T & ) { - ++my_count; - return const_cast<tbb::task *>(tbb::flow::interface7::SUCCESSFULLY_ENQUEUED); - } - - void validate() { - size_t n = my_count; - ASSERT( n == num_copies*max_value, NULL ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(tbb::flow::sender<T> &) {} - /*override*/void internal_delete_built_predecessor(tbb::flow::sender<T> &) {} - /*override*/void copy_predecessors(std::vector<tbb::flow::sender<T> *> &) { } - /*override*/size_t predecessor_count() { return 0; } - /*override*/void reset_receiver(tbb::flow::reset_flags /*f*/) { my_count = 0; } -#else - /*override*/void reset_receiver() { my_count = 0; } -#endif - -}; - -//! Counts the number of puts received -template< typename T > -struct harness_mapped_receiver : public tbb::flow::receiver<T>, NoCopy { - - tbb::atomic< size_t > my_count; - T max_value; - size_t num_copies; - typedef tbb::concurrent_unordered_map< T, tbb::atomic< size_t > > map_type; - map_type *my_map; - - harness_mapped_receiver() : my_map(NULL) { - my_count = 0; - } - - ~harness_mapped_receiver() { - if ( my_map ) delete my_map; - } - - void initialize_map( const T& m, size_t c ) { - my_count = 0; - max_value = m; - num_copies = c; - if ( my_map ) delete my_map; - my_map = new map_type; - } - - /* override */ tbb::task * try_put_task( const T &t ) { - if ( my_map ) { - tbb::atomic<size_t> a; - a = 1; - std::pair< typename map_type::iterator, bool > r = (*my_map).insert( typename map_type::value_type( t, a ) ); - if ( r.second == false ) { - size_t v = r.first->second.fetch_and_increment(); - ASSERT( v < num_copies, NULL ); - } - } else { - ++my_count; - } - return const_cast<tbb::task *>(tbb::flow::interface7::SUCCESSFULLY_ENQUEUED); - } - - void validate() { - if ( my_map ) { - for ( size_t i = 0; i < (size_t)max_value; ++i ) { - size_t n = (*my_map)[(int)i]; - ASSERT( n == num_copies, NULL ); - } - } else { - size_t n = my_count; - ASSERT( n == num_copies*max_value, NULL ); - } - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(tbb::flow::sender<T> &) {} - /*override*/void internal_delete_built_predecessor(tbb::flow::sender<T> &) {} - /*override*/void copy_predecessors(std::vector<tbb::flow::sender<T> *> &) { } - /*override*/size_t predecessor_count() { return 0; } - /*override*/void reset_receiver(tbb::flow::reset_flags /*f*/) { my_count = 0; if(my_map) delete my_map; my_map = new map_type; } -#else - /*override*/void reset_receiver() { my_count = 0; if(my_map) delete my_map; my_map = new map_type; } -#endif - -}; - -//! 
Counts the number of items sent -template< typename T > -struct harness_counting_sender : public tbb::flow::sender<T>, NoCopy { - - typedef tbb::flow::receiver<T> successor_type; - tbb::atomic< successor_type * > my_receiver; - tbb::atomic< size_t > my_count; - tbb::atomic< size_t > my_received; - size_t my_limit; - - harness_counting_sender( ) : my_limit(~size_t(0)) { - my_receiver = NULL; - my_count = 0; - my_received = 0; - } - - harness_counting_sender( size_t limit ) : my_limit(limit) { - my_receiver = NULL; - my_count = 0; - my_received = 0; - } - - /* override */ bool register_successor( successor_type &r ) { - my_receiver = &r; - return true; - } - - /* override */ bool remove_successor( successor_type &r ) { - successor_type *s = my_receiver.fetch_and_store( NULL ); - ASSERT( s == &r, NULL ); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /* override */ void internal_add_built_successor( successor_type &) {} - /* override */ void internal_delete_built_successor( successor_type &) {} - /* override */ void copy_successors(std::vector<successor_type *> &) { } - /* override */ size_t successor_count() { return 0; } -#endif - - /* override */ bool try_get( T & v ) { - size_t i = my_count.fetch_and_increment(); - if ( i < my_limit ) { - v = T( i ); - ++my_received; - return true; - } else { - return false; - } - } - - bool try_put_once() { - successor_type *s = my_receiver; - size_t i = my_count.fetch_and_increment(); - if ( s->try_put( T(i) ) ) { - ++my_received; - return true; - } else { - return false; - } - } - - void try_put_until_false() { - successor_type *s = my_receiver; - size_t i = my_count.fetch_and_increment(); - - while ( s->try_put( T(i) ) ) { - ++my_received; - i = my_count.fetch_and_increment(); - } - } - - void try_put_until_limit() { - successor_type *s = my_receiver; - - for ( int i = 0; i < (int)my_limit; ++i ) { - ASSERT( s->try_put( T(i) ), NULL ); - ++my_received; - } - ASSERT( my_received == my_limit, NULL ); - } - -}; - -// test for resets of buffer-type nodes. -tbb::atomic<int> serial_fn_state0; -tbb::atomic<int> serial_fn_state1; -tbb::atomic<int> serial_continue_state0; - -template<typename T> -struct serial_fn_body { - tbb::atomic<int> *_flag; - serial_fn_body(tbb::atomic<int> &myatomic) : _flag(&myatomic) { } - T operator()(const T& in) { - if(*_flag == 0) { - *_flag = 1; - // wait until we are released - tbb::internal::atomic_backoff backoff; - do { - backoff.pause(); - } while(*_flag == 1); - } - // return value - return in; - } -}; - -template<typename T> -struct serial_continue_body { - tbb::atomic<int> *_flag; - serial_continue_body(tbb::atomic<int> &myatomic) : _flag(&myatomic) {} - T operator()(const tbb::flow::continue_msg& /*in*/) { - // signal we have received a value - *_flag = 1; - // wait until we are released - tbb::internal::atomic_backoff backoff; - do { - backoff.pause(); - } while(*_flag == 1); - // return value - return (T)1; - } -}; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - - -// walk two lists via iterator, match elements of each, in possibly-different order, and -// return true if all elements of sv appear in tv.
-template<typename SV, typename TV> -bool lists_match(SV &sv, TV &tv) { - if(sv.size() != tv.size()) return false; - std::vector<bool> bv(sv.size(), false); - for(typename TV::iterator itv = tv.begin(); itv != tv.end(); ++itv) { - int ibv = 0; - for(typename SV::iterator isv = sv.begin(); isv != sv.end(); ++isv) { - if(!bv[ibv]) { - if(*itv == *isv) { - bv[ibv] = true; - goto found_it; - } - } - ++ibv; - } - return false; -found_it: - continue; - } - return true; -} - -template<typename T, typename BufferType> -void test_resets() { - const int NN = 3; - tbb::task_group_context tgc; - tbb::flow::graph g(tgc); - BufferType b0(g); - tbb::flow::queue_node<T> q0(g); - T j; - bool nFound[NN]; - - // reset empties buffer - for(T i = 0; i < NN; ++i) { - b0.try_put(i); - nFound[(int)i] = false; - } - g.wait_for_all(); - g.reset(); - ASSERT(!b0.try_get(j), "reset did not empty buffer"); - - // reset doesn't delete edge - - tbb::flow::make_edge(b0,q0); - g.reset(); - for(T i = 0; i < NN; ++i) { - b0.try_put(i); - } - - g.wait_for_all(); - for( T i = 0; i < NN; ++i) { - ASSERT(q0.try_get(j), "Missing value from buffer"); - ASSERT(!nFound[(int)j], "Duplicate value found"); - nFound[(int)j] = true; - } - - for(int ii = 0; ii < NN; ++ii) { - ASSERT(nFound[ii], "missing value"); - } - ASSERT(!q0.try_get(j), "Extra values in output"); - - // reset reverses a reversed edge. - // we will use a serial rejecting node to get the edge to reverse. - tbb::flow::function_node<T, T, tbb::flow::rejecting> sfn(g, tbb::flow::serial, serial_fn_body<T>(serial_fn_state0)); - tbb::flow::queue_node<T> outq(g); - tbb::flow::remove_edge(b0,q0); - tbb::flow::make_edge(b0, sfn); - tbb::flow::make_edge(sfn,outq); - g.wait_for_all(); // wait until all the tasks started by building the graph are done. - serial_fn_state0 = 0; - - // b0 ------> sfn ------> outq - - for(int icnt = 0; icnt < 2; ++icnt) { - serial_fn_state0 = 0; - b0.try_put((T)0); // will start sfn - // wait until function_node starts - tbb::internal::atomic_backoff backoff; - do { - backoff.pause(); - } while(serial_fn_state0 == 0); - // now the function_node is executing. - // this will start a task to forward the second item - b0.try_put((T)1); // first item will be consumed by task completing the execution - // of the serial function node - b0.try_put((T)2); // second item will remain after cancellation - // now wait for the task that attempts to forward the buffer item to - // complete. - tbb::internal::atomic_backoff backoff2; - do { - backoff2.pause(); - } while(g.root_task()->ref_count() >= 3); - // now cancel the graph. - ASSERT(tgc.cancel_group_execution(), "task group already cancelled"); - serial_fn_state0 = 0; // release the function_node. - g.wait_for_all(); // wait for all the tasks to complete. - // check that at most one output reached the queue_node - T outt; - T outt2; - bool got_item1 = outq.try_get(outt); - bool got_item2 = outq.try_get(outt2); - // either the output queue was empty (if the function_node tested for cancellation before putting the - // result to the queue) or there was one element in the queue (the 0). - ASSERT(!got_item1 || ((int)outt == 0 && !got_item2), "incorrect output from function_node"); - // the edge between the buffer and the function_node should be reversed, and the last - // message we put in the buffer should still be there. We can't directly test for the - // edge reversal.
- got_item1 = b0.try_get(outt); - ASSERT(got_item1, " buffer lost a message"); - ASSERT(2 == (int)outt || 1 == (int)outt, " buffer had incorrect message"); // the one not consumed by the node. - ASSERT(g.is_cancelled(), "Graph was not cancelled"); - g.reset(); - } // icnt - - // reset with remove_edge removes edge. (icnt == 0 => forward edge, 1 => reversed edge) - for(int icnt = 0; icnt < 2; ++icnt) { - if(icnt == 1) { - // set up reversed edge - tbb::flow::make_edge(b0, sfn); - tbb::flow::make_edge(sfn,outq); - serial_fn_state0 = 0; - b0.try_put((T)0); // starts up the function node - b0.try_put((T)1); // should reverse the edge - tbb::internal::atomic_backoff backoff; - do { - backoff.pause(); - } while(serial_fn_state0 == 0); - ASSERT(tgc.cancel_group_execution(), "task group already cancelled"); - serial_fn_state0 = 0; // release the function_node. - g.wait_for_all(); // wait for all the tasks to complete. - } - g.reset(tbb::flow::rf_extract); - // test that no one is a successor to the buffer now. - serial_fn_state0 = 1; // let the function_node go if it gets an input message - b0.try_put((T)23); - g.wait_for_all(); - ASSERT((int)serial_fn_state0 == 1, "function_node executed when it shouldn't"); - T outt; - ASSERT(b0.try_get(outt) && (T)23 == outt, "node lost its input"); - } -} - -template< typename NODE_TYPE > -class test_buffer_base_extract { -protected: - tbb::flow::graph &g; - NODE_TYPE &in0; - NODE_TYPE &in1; - NODE_TYPE &middle; - NODE_TYPE &out0; - NODE_TYPE &out1; - NODE_TYPE *ins[2]; - NODE_TYPE *outs[2]; - typename NODE_TYPE::successor_type *ms_ptr; - typename NODE_TYPE::predecessor_type *mp_ptr; - - typename NODE_TYPE::predecessor_vector_type in0_p_vec; - typename NODE_TYPE::successor_vector_type in0_s_vec; - typename NODE_TYPE::predecessor_vector_type in1_p_vec; - typename NODE_TYPE::successor_vector_type in1_s_vec; - typename NODE_TYPE::predecessor_vector_type out0_p_vec; - typename NODE_TYPE::successor_vector_type out0_s_vec; - typename NODE_TYPE::predecessor_vector_type out1_p_vec; - typename NODE_TYPE::successor_vector_type out1_s_vec; - typename NODE_TYPE::predecessor_vector_type mp_vec; - typename NODE_TYPE::successor_vector_type ms_vec; - - virtual void set_up_vectors() { - in0_p_vec.clear(); - in0_s_vec.clear(); - in1_p_vec.clear(); - in1_s_vec.clear(); - mp_vec.clear(); - ms_vec.clear(); - out0_p_vec.clear(); - out0_s_vec.clear(); - out1_p_vec.clear(); - out1_s_vec.clear(); - in0.copy_predecessors(in0_p_vec); - in0.copy_successors(in0_s_vec); - in1.copy_predecessors(in1_p_vec); - in1.copy_successors(in1_s_vec); - middle.copy_predecessors(mp_vec); - middle.copy_successors(ms_vec); - out0.copy_predecessors(out0_p_vec); - out0.copy_successors(out0_s_vec); - out1.copy_predecessors(out1_p_vec); - out1.copy_successors(out1_s_vec); - } - - void make_and_validate_full_graph() { - /* in0 out0 */ - /* \ / */ - /* middle */ - /* / \ */ - /* in1 out1 */ - tbb::flow::make_edge( in0, middle ); - tbb::flow::make_edge( in1, middle ); - tbb::flow::make_edge( middle, out0 ); - tbb::flow::make_edge( middle, out1 ); - - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 1 && in0_s_vec.size() == 1 && in0_s_vec[0] == ms_ptr, "expected 1 successor" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 1 && in1_s_vec.size() == 1 && in1_s_vec[0] == ms_ptr, "expected 1 successor" ); - ASSERT( 
middle.predecessor_count() == 2 && mp_vec.size() == 2, "expected 2 predecessors" ); - ASSERT( middle.successor_count() == 2 && ms_vec.size() == 2, "expected 2 successors" ); - ASSERT( out0.predecessor_count() == 1 && out0_p_vec.size() == 1 && out0_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 1 && out1_p_vec.size() == 1 && out1_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - int first_pred = mp_vec[0] == ins[0] ? 0 : ( mp_vec[0] == ins[1] ? 1 : -1 ); - int second_pred = mp_vec[1] == ins[0] ? 0 : ( mp_vec[1] == ins[1] ? 1 : -1 ); - ASSERT( first_pred != -1 && second_pred != -1 && first_pred != second_pred, "bad predecessor(s) for middle" ); - - int first_succ = ms_vec[0] == outs[0] ? 0 : ( ms_vec[0] == outs[1] ? 1 : -1 ); - int second_succ = ms_vec[1] == outs[0] ? 0 : ( ms_vec[1] == outs[1] ? 1 : -1 ); - ASSERT( first_succ != -1 && second_succ != -1 && first_succ != second_succ, "bad successor(s) for middle" ); - - in0.try_put(1); - in1.try_put(2); - g.wait_for_all(); - - int r = 0; - int v = 0; - - ASSERT( in0.try_get(v) == false, "buffer should not have a value" ); - ASSERT( in1.try_get(v) == false, "buffer should not have a value" ); - ASSERT( middle.try_get(v) == false, "buffer should not have a value" ); - while ( out0.try_get(v) ) { - ASSERT( (v == 1 || v == 2) && (v&r) == 0, "duplicate value" ); - r |= v; - g.wait_for_all(); - } - while ( out1.try_get(v) ) { - ASSERT( (v == 1 || v == 2) && (v&r) == 0, "duplicate value" ); - r |= v; - g.wait_for_all(); - } - ASSERT( r == 3, "not all values received" ); - g.wait_for_all(); - } - - void validate_half_graph() { - /* in0 out0 */ - /* */ - /* middle */ - /* / \ */ - /* in1 out1 */ - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 0 && in0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 1 && in1_s_vec.size() == 1 && in1_s_vec[0] == ms_ptr, "expected 1 successor" ); - ASSERT( middle.predecessor_count() == 1 && mp_vec.size() == 1, "expected 1 predecessor" ); - ASSERT( middle.successor_count() == 1 && ms_vec.size() == 1, "expected 1 successor" ); - ASSERT( out0.predecessor_count() == 0 && out0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 1 && out1_p_vec.size() == 1 && out1_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - ASSERT( middle.predecessor_count() == 1 && mp_vec.size() == 1, "expected two predecessors" ); - ASSERT( middle.successor_count() == 1 && ms_vec.size() == 1, "expected two successors" ); - - ASSERT( mp_vec[0] == ins[1], "incorrect predecessor" ); - ASSERT( ms_vec[0] == outs[1], "incorrect successor" ); - - in0.try_put(1); - in1.try_put(2); - g.wait_for_all(); - - int v = 0; - ASSERT( in0.try_get(v) == true && v == 1, "buffer should have a value of 1" ); - ASSERT( in1.try_get(v) == false, "buffer should not have a value" ); - ASSERT( middle.try_get(v) == false, "buffer should not have a value" ); - ASSERT( out0.try_get(v) == false, "buffer should not have a 
value" ); - ASSERT( out1.try_get(v) == true && v == 2, "buffer should have a value of 2" ); - g.wait_for_all(); - } - - void validate_empty_graph() { - /* in0 out0 */ - /* */ - /* middle */ - /* */ - /* in1 out1 */ - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 0 && in0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 0 && in1_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( middle.predecessor_count() == 0 && mp_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( middle.successor_count() == 0 && ms_vec.size() == 0, "expected 0 successors" ); - ASSERT( out0.predecessor_count() == 0 && out0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 0 && out1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - ASSERT( middle.predecessor_count() == 0 && mp_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( middle.successor_count() == 0 && ms_vec.size() == 0, "expected 0 successors" ); - - in0.try_put(1); - in1.try_put(2); - g.wait_for_all(); - - int v = 0; - ASSERT( in0.try_get(v) == true && v == 1, "buffer should have a value of 1" ); - ASSERT( in1.try_get(v) == true && v == 2, "buffer should have a value of 2" ); - ASSERT( middle.try_get(v) == false, "buffer should not have a value" ); - ASSERT( out0.try_get(v) == false, "buffer should not have a value" ); - ASSERT( out1.try_get(v) == false, "buffer should not have a value" ); - g.wait_for_all(); - } - - // forbid the ecompiler generation of operator= (VS2012 warning) - test_buffer_base_extract& operator=(test_buffer_base_extract & /*other*/); - -public: - - test_buffer_base_extract(tbb::flow::graph &_g, NODE_TYPE &i0, NODE_TYPE &i1, NODE_TYPE &m, NODE_TYPE &o0, NODE_TYPE &o1) : - g(_g), in0(i0), in1(i1), middle(m), out0(o0), out1(o1) { - ins[0] = &in0; - ins[1] = &in1; - outs[0] = &out0; - outs[1] = &out1; - ms_ptr = static_cast< typename NODE_TYPE::successor_type * >(&middle); - mp_ptr = static_cast< typename NODE_TYPE::predecessor_type *>(&middle); - } - - virtual ~test_buffer_base_extract() {} - - void run_tests() { - make_and_validate_full_graph(); - - in0.extract(); - out0.extract(); - validate_half_graph(); - - in1.extract(); - out1.extract(); - validate_empty_graph(); - - make_and_validate_full_graph(); - - middle.extract(); - validate_empty_graph(); - - make_and_validate_full_graph(); - } - -}; - -template< typename NODE_TYPE > -class test_buffer_extract : public test_buffer_base_extract<NODE_TYPE> { -protected: - tbb::flow::graph my_g; - NODE_TYPE my_in0; - NODE_TYPE my_in1; - NODE_TYPE my_middle; - NODE_TYPE my_out0; - NODE_TYPE my_out1; -public: - test_buffer_extract() : test_buffer_base_extract<NODE_TYPE>( my_g, my_in0, my_in1, my_middle, my_out0, my_out1), - my_in0(my_g), my_in1(my_g), my_middle(my_g), my_out0(my_g), my_out1(my_g) { } -}; - -template< > -class test_buffer_extract< tbb::flow::sequencer_node<int> > : public test_buffer_base_extract< tbb::flow::sequencer_node<int> > { -protected: - typedef tbb::flow::sequencer_node<int> my_node_t; - tbb::flow::graph my_g; - my_node_t my_in0; - my_node_t my_in1; - my_node_t my_middle; - my_node_t my_out0; - my_node_t my_out1; - - 
typedef tbb::atomic<size_t> count_t; - count_t middle_count; - count_t out0_count; - count_t out1_count; - - struct always_zero { size_t operator()(int) { return 0; } }; - struct always_inc { - count_t *c; - always_inc(count_t &_c) : c(&_c) {} - size_t operator()(int) { - return c->fetch_and_increment(); - } - }; - - /*override*/void set_up_vectors() { - middle_count = 0; - out0_count = 0; - out1_count = 0; - my_g.reset(); // reset the sequencer nodes to start at 0 again - test_buffer_base_extract< my_node_t >::set_up_vectors(); - } - - -public: - test_buffer_extract() : test_buffer_base_extract<my_node_t>( my_g, my_in0, my_in1, my_middle, my_out0, my_out1), - my_in0(my_g, always_zero()), my_in1(my_g, always_zero()), my_middle(my_g, always_inc(middle_count)), - my_out0(my_g, always_inc(out0_count)), my_out1(my_g, always_inc(out1_count)) { - } -}; - -// test for simple node that has one input, one output (overwrite_node, write_once_node, limiter_node) -// decrement tests have to be done separately. -template<template< class > class NType, typename ItemType> -void test_extract_on_node() { - tbb::flow::graph g; - ItemType dont_care; - NType<ItemType> node0(g); - tbb::flow::queue_node<ItemType> q0(g); - tbb::flow::queue_node<ItemType> q1(g); - tbb::flow::queue_node<ItemType> q2(g); - for( int i = 0; i < 2; ++i) { - tbb::flow::make_edge(q0,node0); - tbb::flow::make_edge(q1,node0); - tbb::flow::make_edge(node0, q2); - q0.try_put(ItemType(i)); - g.wait_for_all(); - - /* q0 */ - /* \ */ - /* \ */ - /* node0 -- q2 */ - /* / */ - /* / */ - /* q1 */ - - ASSERT(node0.predecessor_count() == 2 && q0.successor_count() == 1 && q1.successor_count() == 1, "bad predecessor count"); - ASSERT(node0.successor_count() == 1 && q2.predecessor_count() == 1, "bad successor count"); - - ASSERT(q2.try_get(dont_care) && int(dont_care) == i, "item not forwarded"); - typename NType<ItemType>::successor_vector_type sv; - typename NType<ItemType>::predecessor_vector_type pv; - std::vector<tbb::flow::receiver<ItemType>*> sv1; - std::vector<tbb::flow::sender<ItemType>*> pv1; - - pv1.push_back(&q0); - pv1.push_back(&q1); - sv1.push_back(&q2); - node0.copy_predecessors(pv); - node0.copy_successors(sv); - ASSERT(lists_match(pv,pv1), "predecessor vector incorrect"); - ASSERT(lists_match(sv,sv1), "successor vector incorrect"); - - if(i == 0) { - node0.extract(); - } - else { - q0.extract(); - q1.extract(); - q2.extract(); - } - - q0.try_put(ItemType(2)); - g.wait_for_all(); - ASSERT(!q2.try_get(dont_care), "node0 not disconnected"); - ASSERT(q0.try_get(dont_care), "q0 empty (should have one item)"); - - node0.copy_predecessors(pv); - node0.copy_successors(sv); - ASSERT(node0.predecessor_count() == 0 && q0.successor_count() == 0 && q1.successor_count() == 0, "error in pred count after extract"); - ASSERT(pv.size() == 0, "error in pred array count after extract"); - ASSERT(node0.successor_count() == 0 && q2.predecessor_count() == 0, "error in succ count after extract"); - ASSERT(sv.size() == 0, "error in succ array count after extract"); - } -} - -#endif // TBB_PREVIEW_FLOW_GRAPH_FEATURES -#endif - - diff --git a/src/tbb/src/test/harness_inject_scheduler.h b/src/tbb/src/test/harness_inject_scheduler.h deleted file mode 100644 index 50124dba3..000000000 --- a/src/tbb/src/test/harness_inject_scheduler.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
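// A minimal standalone sketch (not part of the diff) of the preview extract()
// behavior the deleted tests above exercise, assuming a TBB 4.x build with
// TBB_PREVIEW_FLOW_GRAPH_FEATURES defined before including the flow graph
// header: extract() detaches all of a node's edges, so messages stop flowing
// through it.
#define TBB_PREVIEW_FLOW_GRAPH_FEATURES 1
#include "tbb/flow_graph.h"

void extract_sketch() {
    tbb::flow::graph g;
    tbb::flow::queue_node<int> src(g), dst(g);
    tbb::flow::make_edge(src, dst);
    src.try_put(1);
    g.wait_for_all();   // 1 is forwarded from src into dst
    src.extract();      // disconnect src from all predecessors and successors
    src.try_put(2);
    g.wait_for_all();   // 2 stays buffered in src; dst receives nothing
}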
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Used in tests that work with TBB scheduler but do not link to the TBB library. -// In other words it embeds the TBB library core into the test executable. - -#ifndef harness_inject_scheduler_H -#define harness_inject_scheduler_H - -// Suppress usage of #pragma comment -#define __TBB_NO_IMPLICIT_LINKAGE 1 - -// Enable preview features if any -#define __TBB_BUILD 1 - -#undef DO_ITT_NOTIFY - -#define __TBB_SOURCE_DIRECTLY_INCLUDED 1 -#include "../tbb/tbb_main.cpp" -#include "../tbb/dynamic_link.cpp" -#include "../tbb/tbb_misc_ex.cpp" - -// Tasking subsystem files -#include "../tbb/governor.cpp" -#include "../tbb/market.cpp" -#include "../tbb/arena.cpp" -#include "../tbb/scheduler.cpp" -#include "../tbb/observer_proxy.cpp" -#include "../tbb/task.cpp" -#include "../tbb/task_group_context.cpp" - -// Other dependencies -#include "../tbb/cache_aligned_allocator.cpp" -#include "../tbb/tbb_thread.cpp" -#include "../tbb/mutex.cpp" -#include "../tbb/spin_rw_mutex.cpp" -#include "../tbb/spin_mutex.cpp" -#include "../tbb/private_server.cpp" -#include "../tbb/concurrent_monitor.cpp" -#if _WIN32||_WIN64 -#include "../tbb/semaphore.cpp" -#endif -#include "../rml/client/rml_tbb.cpp" - -#if HARNESS_USE_RUNTIME_LOADER -#undef HARNESS_USE_RUNTIME_LOADER -#include "harness.h" - -int TestMain () { - // Tests that directly include sources make no sense in runtime loader testing mode. - return Harness::Skipped; -} -// Renaming the TestMain function avoids conditional compilation around same function in the test file -#define TestMain TestMainSkipped -#endif - -#endif /* harness_inject_scheduler_H */ diff --git a/src/tbb/src/test/harness_iterator.h b/src/tbb/src/test/harness_iterator.h deleted file mode 100644 index f2a00d691..000000000 --- a/src/tbb/src/test/harness_iterator.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef harness_iterator_H -#define harness_iterator_H - -#include <iterator> -#include <memory> - -namespace Harness { - -template <class T> -class InputIterator { - T * my_ptr; -public: -#if HARNESS_EXTENDED_STD_COMPLIANCE - typedef std::input_iterator_tag iterator_category; - typedef T value_type; - typedef typename std::allocator<T>::difference_type difference_type; - typedef typename std::allocator<T>::pointer pointer; - typedef typename std::allocator<T>::reference reference; -#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */ - - explicit InputIterator( T * ptr): my_ptr(ptr){} - - T& operator* () { return *my_ptr; } - - InputIterator& operator++ () { ++my_ptr; return *this; } - - bool operator== ( const InputIterator& r ) { return my_ptr == r.my_ptr; } -}; - -template <class T> -class ForwardIterator { - T * my_ptr; -public: -#if HARNESS_EXTENDED_STD_COMPLIANCE - typedef std::forward_iterator_tag iterator_category; - typedef T value_type; - typedef typename std::allocator<T>::difference_type difference_type; - typedef typename std::allocator<T>::pointer pointer; - typedef typename std::allocator<T>::reference reference; -#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */ - - explicit ForwardIterator ( T * ptr ) : my_ptr(ptr){} - - ForwardIterator ( const ForwardIterator& r ) : my_ptr(r.my_ptr){} - - T& operator* () { return *my_ptr; } - - ForwardIterator& operator++ () { ++my_ptr; return *this; } - - bool operator== ( const ForwardIterator& r ) { return my_ptr == r.my_ptr; } -}; - -template <class T> -class RandomIterator { - T * my_ptr; -#if !HARNESS_EXTENDED_STD_COMPLIANCE - typedef typename std::allocator<T>::difference_type difference_type; -#endif - -public: -#if HARNESS_EXTENDED_STD_COMPLIANCE - typedef std::random_access_iterator_tag iterator_category; - typedef T value_type; - typedef typename std::allocator<T>::pointer pointer; - typedef typename std::allocator<T>::reference reference; - typedef typename std::allocator<T>::difference_type difference_type; -#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */ - - explicit RandomIterator ( T * ptr ) : my_ptr(ptr){} - RandomIterator ( const RandomIterator& r ) : my_ptr(r.my_ptr){} - T& operator* () { return *my_ptr; } - RandomIterator& operator++ () { ++my_ptr; return *this; } - bool operator== ( const RandomIterator& r ) { return my_ptr == r.my_ptr; } - difference_type operator- (const RandomIterator &r) {return my_ptr - r.my_ptr;} - RandomIterator operator+ (difference_type n) {return RandomIterator(my_ptr + n);} -}; - -template <class T> -class ConstRandomIterator { - const T * my_ptr; -#if !HARNESS_EXTENDED_STD_COMPLIANCE - typedef typename std::allocator<T>::difference_type difference_type; -#endif - -public: -#if 
HARNESS_EXTENDED_STD_COMPLIANCE - typedef std::random_access_iterator_tag iterator_category; - typedef T value_type; - typedef typename std::allocator<T>::const_pointer pointer; - typedef typename std::allocator<T>::const_reference reference; - typedef typename std::allocator<T>::difference_type difference_type; -#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */ - - explicit ConstRandomIterator ( const T * ptr ) : my_ptr(ptr){} - ConstRandomIterator ( const ConstRandomIterator& r ) : my_ptr(r.my_ptr){} - const T& operator* () { return *my_ptr; } - ConstRandomIterator& operator++ () { ++my_ptr; return *this; } - bool operator== ( const ConstRandomIterator& r ) { return my_ptr == r.my_ptr; } - difference_type operator- (const ConstRandomIterator &r) {return my_ptr - r.my_ptr;} - ConstRandomIterator operator+ (difference_type n) {return ConstRandomIterator(my_ptr + n);} -}; - -} // namespace Harness - -#if !HARNESS_EXTENDED_STD_COMPLIANCE -namespace std { - template<typename T> - struct iterator_traits< Harness::InputIterator<T> > { - typedef std::input_iterator_tag iterator_category; - typedef T value_type; - typedef value_type& reference; - }; - - template<typename T> - struct iterator_traits< Harness::ForwardIterator<T> > { - typedef std::forward_iterator_tag iterator_category; - typedef T value_type; - typedef value_type& reference; - }; - - template<typename T> - struct iterator_traits< Harness::RandomIterator<T> > { - typedef std::random_access_iterator_tag iterator_category; - typedef T value_type; - typedef value_type& reference; - }; - - template<typename T> - struct iterator_traits< Harness::ConstRandomIterator<T> > { - typedef std::random_access_iterator_tag iterator_category; - typedef T value_type; - typedef const value_type& reference; - }; -} // namespace std -#endif /* !HARNESS_EXTENDED_STD_COMPLIANCE */ - -#endif //harness_iterator_H diff --git a/src/tbb/src/test/harness_m128.h b/src/tbb/src/test/harness_m128.h deleted file mode 100644 index db44d5c21..000000000 --- a/src/tbb/src/test/harness_m128.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Header that sets HAVE_m128/HAVE_m256 if vector types (__m128/__m256) are available - -//! Class for testing safety of using vector types. 
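// A short usage sketch (hypothetical, not from the diff) of the minimal
// iterators defined above: they deliberately expose only operator*, operator++
// and operator==, so code under test cannot silently rely on a stronger
// iterator category than the one it claims to require.
int sum_with_input_iterator() {
    int data[3] = { 1, 2, 3 };
    Harness::InputIterator<int> it(data), end(data + 3);
    int sum = 0;
    while ( !(it == end) ) {   // no operator!= is provided, only operator==
        sum += *it;
        ++it;
    }
    return sum;   // 6
}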
-/** Uses circuitous logic that forces the compiler to put __m128/__m256 objects on the stack while -    executing various methods, and thus tempts it to use aligned loads and stores -    on the stack. */ -// Do not create file-scope objects of the class, because MinGW (as of May 2010) -// did not always provide proper stack alignment in destructors of such objects. - -#if (_MSC_VER>=1600) -//TODO: handle /arch:AVX in the right way. -#pragma warning (push) -#pragma warning (disable: 4752) -#endif - -template<typename __Mvec> -class ClassWithVectorType { -    static const int n = 16; -    static const int F = sizeof(__Mvec)/sizeof(float); -    __Mvec field[n]; -    void init( int start ); -public: -    ClassWithVectorType() {init(-n);} -    ClassWithVectorType( int i ) {init(i);} -    void operator=( const ClassWithVectorType& src ) { -        __Mvec stack[n]; -        for( int i=0; i<n; ++i ) -            stack[i^5] = src.field[i]; -        for( int i=0; i<n; ++i ) -            field[i^5] = stack[i]; -    } -    ~ClassWithVectorType() {init(-2*n);} -    friend bool operator==( const ClassWithVectorType& x, const ClassWithVectorType& y ) { -        for( int i=0; i<F*n; ++i ) -            if( ((const float*)x.field)[i]!=((const float*)y.field)[i] ) -                return false; -        return true; -    } -    friend bool operator!=( const ClassWithVectorType& x, const ClassWithVectorType& y ) { -        return !(x==y); -    } -}; - -template<typename __Mvec> -void ClassWithVectorType<__Mvec>::init( int start ) { -    __Mvec stack[n]; -    for( int i=0; i<n; ++i ) { -        // Declaring value as a one-element array instead of a scalar quiets -        // gratuitous warnings about possible use of "value" before it was set. -        __Mvec value[1]; -        for( int j=0; j<F; ++j ) -            ((float*)value)[j] = float(n*start+F*i+j); -        stack[i^5] = value[0]; -    } -    for( int i=0; i<n; ++i ) -        field[i^5] = stack[i]; -} - -#if (__AVX__ || (_MSC_VER>=1600 && _M_X64)) && !defined(__sun) -#include <immintrin.h> -#define HAVE_m256 1 -typedef ClassWithVectorType<__m256> ClassWithAVX; -#if _MSC_VER -#include <intrin.h> // for __cpuid -#endif -bool have_AVX() { -    bool result = false; -    const int avx_mask = 1<<28; -#if _MSC_VER || __INTEL_COMPILER -    int info[4] = {0,0,0,0}; -    const int ECX = 2; -    __cpuid(info, 1); -    result = (info[ECX] & avx_mask)!=0; -#elif __GNUC__ -    int ECX; -    __asm__( "cpuid" -            : "=c"(ECX) -            : "a" (1) -            : "ebx", "edx" ); -    result = (ECX & avx_mask); -#endif -    return result; -} -#endif /* __AVX__ etc */ - -#if (__SSE__ || _M_IX86_FP || _M_X64) && !defined(__sun) -#include <xmmintrin.h> -#define HAVE_m128 1 -typedef ClassWithVectorType<__m128> ClassWithSSE; -#endif - -#if (_MSC_VER>=1600) -#pragma warning (pop) -#endif diff --git a/src/tbb/src/test/harness_memory.h b/src/tbb/src/test/harness_memory.h deleted file mode 100644 index 16c1ae71c..000000000 --- a/src/tbb/src/test/harness_memory.h +++ /dev/null @@ -1,98 +0,0 @@ -/* -    Copyright 2005-2014 Intel Corporation.  All Rights Reserved. - -    This file is part of Threading Building Blocks. Threading Building Blocks is free software; -    you can redistribute it and/or modify it under the terms of the GNU General Public License -    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is -    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -    implied warranty of  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -    See  the GNU General Public License for more details. 
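// An illustrative combination of the helpers above (a sketch, assuming a
// build where HAVE_m256 ended up defined, and the harness ASSERT macro):
// vector-type tests are compiled conditionally and additionally guarded by
// the runtime CPUID check, so the same binary still runs on non-AVX hardware.
#if HAVE_m256
void test_avx_stack_alignment_sketch() {
    if ( !have_AVX() ) return;     // skip gracefully on CPUs without AVX
    ClassWithAVX a(1), b;
    b = a;                         // copies via __m256 locals on the stack
    ASSERT( a == b, "stack-based vector copy corrupted data" );
}
#endif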
You should have received a copy of -    the  GNU General Public License along with Threading Building Blocks; if not, write to the -    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA - -    As a special exception,  you may use this file  as part of a free software library without -    restriction.  Specifically,  if other files instantiate templates  or use macros or inline -    functions from this file, or you compile this file and link it with other files to produce -    an executable,  this file does not by itself cause the resulting executable to be covered -    by the GNU General Public License. This exception does not however invalidate any other -    reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Declarations for simple estimate of the memory being used by a program. -// Not yet implemented for OS X*. -// This header is an optional part of the test harness. -// It assumes that "harness_assert.h" has already been included. - -#if __linux__ || __sun -#include <sys/resource.h> -#include <unistd.h> - -#elif __APPLE__ -#include <unistd.h> -#include <mach/mach.h> -#include <AvailabilityMacros.h> -#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060 -#include <mach/shared_region.h> -#else -#include <mach/shared_memory_server.h> -#endif -#if SHARED_TEXT_REGION_SIZE || SHARED_DATA_REGION_SIZE -const size_t shared_size = SHARED_TEXT_REGION_SIZE+SHARED_DATA_REGION_SIZE; -#else -const size_t shared_size = 0; -#endif - -#elif _WIN32 && !_XBOX && !__TBB_WIN8UI_SUPPORT -#include <windows.h> -#include <psapi.h> -#if _MSC_VER -#pragma comment(lib, "psapi") -#endif - -#endif /* OS selection */ - -//! Return estimate of number of bytes of memory that this program is currently using. -/* Returns 0 if not implemented on platform. */ -size_t GetMemoryUsage() { -#if _XBOX || __TBB_WIN8UI_SUPPORT -    return 0; -#elif _WIN32 -    PROCESS_MEMORY_COUNTERS mem; -    bool status = GetProcessMemoryInfo(GetCurrentProcess(), &mem, sizeof(mem))!=0; -    ASSERT(status, NULL); -    return mem.PagefileUsage; -#elif __linux__ -    FILE* statsfile = fopen("/proc/self/statm","r"); -    size_t pagesize = getpagesize(); -    ASSERT(statsfile, NULL); -    long total_mem; -    int n = fscanf(statsfile,"%ld",&total_mem); -    if( n!=1 ) { -        REPORT("Warning: memory usage statistics weren't obtained\n"); -        return 0; -    } -    fclose(statsfile); -    return total_mem*pagesize; -#elif __APPLE__ -    kern_return_t status; -    task_basic_info info; -    mach_msg_type_number_t msg_type = TASK_BASIC_INFO_COUNT; -    status = task_info(mach_task_self(), TASK_BASIC_INFO, reinterpret_cast<task_info_t>(&info), &msg_type); -    ASSERT(status==KERN_SUCCESS, NULL); -    return info.virtual_size - shared_size; -#else -    return 0; -#endif -} - -//! Use approximately a specified amount of stack space. -/** Recursion is used here instead of alloca because some implementations of alloca do not use the stack. */ -void UseStackSpace( size_t amount, char* top=0 ) { -    char x[1000]; -    memset( x, -1, sizeof(x) ); -    if( !top ) -        top = x; -    ASSERT( x<=top, "test assumes that stacks grow downwards" ); -    if( size_t(top-x)<amount ) -        UseStackSpace( amount, top ); -} diff --git a/src/tbb/src/test/harness_mic.h b/src/tbb/src/test/harness_mic.h deleted file mode 100644 index f2145e960..000000000 --- a/src/tbb/src/test/harness_mic.h +++ /dev/null @@ -1,46 +0,0 @@ -/* -    Copyright 2005-2014 Intel Corporation.  All Rights Reserved. - -    This file is part of Threading Building Blocks. 
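// A typical (hypothetical) way GetMemoryUsage() is consumed by leak checks:
// sample the estimate before and after a workload and allow some slack, since
// the value is page-granular and 0 on platforms where it is not implemented.
void check_for_leaks_sketch( void (*workload)() ) {
    const size_t slack = 2*1024*1024;
    size_t before = GetMemoryUsage();
    workload();
    size_t after = GetMemoryUsage();
    if ( before )   // 0 means "not implemented here"; skip the check then
        ASSERT( after <= before + slack, "memory usage grew unexpectedly" );
}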
Threading Building Blocks is free software; -    you can redistribute it and/or modify it under the terms of the GNU General Public License -    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is -    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -    implied warranty of  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -    See  the GNU General Public License for more details.   You should have received a copy of -    the  GNU General Public License along with Threading Building Blocks; if not, write to the -    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA - -    As a special exception,  you may use this file  as part of a free software library without -    restriction.  Specifically,  if other files instantiate templates  or use macros or inline -    functions from this file, or you compile this file and link it with other files to produce -    an executable,  this file does not by itself cause the resulting executable to be covered -    by the GNU General Public License. This exception does not however invalidate any other -    reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef tbb_test_harness_mic_H -#define tbb_test_harness_mic_H - -#if ! __TBB_DEFINE_MIC -    #error test/harness_mic.h should be included only when building for Intel(R) Many Integrated Core Architecture -#endif - -// test for unified sources. See makefiles -#undef HARNESS_INCOMPLETE_SOURCES - -#include <stdlib.h> -#include <stdio.h> - -#define TBB_TEST_LOW_WORKLOAD 1 - -#define REPORT_FATAL_ERROR  REPORT -#define HARNESS_EXPORT - -#if __TBB_MIC_NATIVE -    #define HARNESS_EXIT_ON_ASSERT 1 -    #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 -#else -    #define HARNESS_TERMINATE_ON_ASSERT 1 -#endif - -#endif /* tbb_test_harness_mic_H */ diff --git a/src/tbb/src/test/harness_report.h b/src/tbb/src/test/harness_report.h deleted file mode 100644 index a15fa040f..000000000 --- a/src/tbb/src/test/harness_report.h +++ /dev/null @@ -1,175 +0,0 @@ -/* -    Copyright 2005-2014 Intel Corporation.  All Rights Reserved. - -    This file is part of Threading Building Blocks. Threading Building Blocks is free software; -    you can redistribute it and/or modify it under the terms of the GNU General Public License -    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is -    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -    implied warranty of  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -    See  the GNU General Public License for more details.   You should have received a copy of -    the  GNU General Public License along with Threading Building Blocks; if not, write to the -    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA - -    As a special exception,  you may use this file  as part of a free software library without -    restriction.  Specifically,  if other files instantiate templates  or use macros or inline -    functions from this file, or you compile this file and link it with other files to produce -    an executable,  this file does not by itself cause the resulting executable to be covered -    by the GNU General Public License. This exception does not however invalidate any other -    reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Just the tracing portion of the harness. 
-// -// This header defines TRACE and TRACENL macros, which use REPORT-like syntax and -// are useful for duplicating trace output to the standard debug output on Windows. -// It is possible to add the ability to automatically extend messages with additional -// info (file, line, function, time, thread ID, ...). -// -// Macros output nothing when the test app runs in non-verbose mode (default). -// - -#ifndef tbb_tests_harness_report_H -#define tbb_tests_harness_report_H - -#if defined(MAX_TRACE_SIZE) && MAX_TRACE_SIZE < 1024 -    #undef MAX_TRACE_SIZE -#endif -#ifndef MAX_TRACE_SIZE -    #define MAX_TRACE_SIZE  1024 -#endif - -#if __SUNPRO_CC -#include <stdio.h> -#else -#include <cstdio> -#endif - -#include <cstdarg> - -#include "tbb/tbb_config.h" - -#if __TBB_DEFINE_MIC -#include "harness_mic.h" -#endif - -#ifdef HARNESS_INCOMPLETE_SOURCES -#error Source files are not complete. Check the build environment -#endif - -#if _MSC_VER -    #define snprintf _snprintf -#if _MSC_VER<=1400 -    #define vsnprintf _vsnprintf -#endif -#endif - -namespace Harness { -    namespace internal { - -#ifndef TbbHarnessReporter -        struct TbbHarnessReporter { -            void Report ( const char* msg ) { -                printf( "%s", msg ); -                fflush(stdout); -#ifdef _WINDOWS_ -                OutputDebugStringA(msg); -#endif -            } -        }; // struct TbbHarnessReporter -#endif /* !TbbHarnessReporter */ - -        class Tracer { -            int m_flags; -            const char  *m_file; -            const char  *m_func; -            size_t m_line; - -            TbbHarnessReporter m_reporter; - -        public: -            enum  { -                prefix = 1, -                need_lf = 2 -            }; - -            Tracer* set_trace_info ( int flags, const char *file, size_t line, const char *func ) { -                m_flags = flags; -                m_line = line; -                m_file = file; -                m_func = func; -                return  this; -            } - -            void  trace ( const char* fmt, ... ) { -                char    msg[MAX_TRACE_SIZE]; -                char    msg_fmt_buf[MAX_TRACE_SIZE]; -                const char  *msg_fmt = fmt; -                if ( m_flags & prefix ) { -                    snprintf (msg_fmt_buf, MAX_TRACE_SIZE, "[%s] %s", m_func, fmt); -                    msg_fmt = msg_fmt_buf; -                } -                std::va_list argptr; -                va_start (argptr, fmt); -                int len = vsnprintf (msg, MAX_TRACE_SIZE, msg_fmt, argptr); -                va_end (argptr); -                if ( m_flags & need_lf &&  -                     len < MAX_TRACE_SIZE - 1  &&  msg[len-1] != '\n' ) -                { -                    msg[len] = '\n'; -                    msg[len + 1] = 0; -                } -                m_reporter.Report(msg); -            } -        }; // class Tracer - -        static Tracer tracer; - -        template<int> -        bool not_the_first_call () { -            static bool first_call = false; -            bool res = first_call; -            first_call = true; -            return res; -        } - -    } // namespace internal -} // namespace Harness - -#if defined(_MSC_VER)  &&  _MSC_VER >= 1300  ||  defined(__GNUC__)  ||  defined(__GNUG__) -    #define HARNESS_TRACE_ORIG_INFO __FILE__, __LINE__, __FUNCTION__ -#else -    #define HARNESS_TRACE_ORIG_INFO __FILE__, __LINE__, "" -    #define __FUNCTION__ "" -#endif - - -//! printf style tracing macro -/** This variant of TRACE adds a trailing line-feed (new line) character if it is absent. **/ -#define TRACE  Harness::internal::tracer.set_trace_info(Harness::internal::Tracer::need_lf, HARNESS_TRACE_ORIG_INFO)->trace - -//! printf style tracing macro that does not add a new line character automatically -#define TRACENL  Harness::internal::tracer.set_trace_info(0, HARNESS_TRACE_ORIG_INFO)->trace - -//! printf style tracing macro with additional information prefix (e.g. current function name) -#define TRACEP  Harness::internal::tracer.set_trace_info(Harness::internal::Tracer::prefix | \ -                                    Harness::internal::Tracer::need_lf, HARNESS_TRACE_ORIG_INFO)->trace - -//! printf style remark macro -/** Produces output only when the test is run with the -v (verbose) option. 
**/ -#define REMARK !Verbose ? (void)0 : TRACENL - -//! printf style remark macro -/** Produces output only when invoked first time. - Only one instance of this macro is allowed per source code line. **/ -#define REMARK_ONCE (!Verbose || Harness::internal::not_the_first_call<__LINE__>()) ? (void)0 : TRACE - -//! printf style reporting macro -/** On heterogeneous platforms redirects its output to the host side. **/ -#define REPORT TRACENL - -//! printf style reporting macro -/** Produces output only when invoked first time. - Only one instance of this macro is allowed per source code line. **/ -#define REPORT_ONCE (Harness::internal::not_the_first_call<__LINE__>()) ? (void)0 : TRACENL - -#endif /* tbb_tests_harness_report_H */ diff --git a/src/tbb/src/test/harness_runtime_loader.h b/src/tbb/src/test/harness_runtime_loader.h deleted file mode 100644 index 4eced70d1..000000000 --- a/src/tbb/src/test/harness_runtime_loader.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef harness_runtime_loader_H -#define harness_runtime_loader_H - -#if HARNESS_USE_RUNTIME_LOADER - #if TEST_USES_TBB - #define TBB_PREVIEW_RUNTIME_LOADER 1 - #include "tbb/runtime_loader.h" - static char const * _path[] = { ".", NULL }; - // declaration must be placed before 1st TBB call - static tbb::runtime_loader _runtime_loader( _path ); - #else // TEST_USES_TBB - // if TBB library is not used, no need to test Runtime Loader - #define HARNESS_SKIP_TEST 1 - #endif // TEST_USES_TBB -#endif // HARNESS_USE_RUNTIME_LOADER - -#endif /* harness_runtime_loader_H */ diff --git a/src/tbb/src/test/harness_task.h b/src/tbb/src/test/harness_task.h deleted file mode 100644 index dd08ca31d..000000000 --- a/src/tbb/src/test/harness_task.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
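// How the macros above read at a call site (illustrative only): REMARK output
// appears only when the test is run with -v, while REPORT always prints and,
// on Windows, is mirrored to the debugger output via OutputDebugStringA.
void report_macros_sketch( int n ) {
    REMARK( "processed %d items so far\n", n );  // verbose-mode progress only
    TRACEP( "phase complete" );                  // prefixed with function name, '\n' appended
    REPORT( "final result: %d\n", n );           // always printed
}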
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/task.h" -#include "harness.h" - -//! Helper for verifying that old use cases of spawn syntax still work. -tbb::task* GetTaskPtr( int& counter ) { - ++counter; - return NULL; -} - -class TaskGenerator: public tbb::task { - int m_ChildCount; - int m_Depth; - -public: - TaskGenerator( int child_count, int _depth ) : m_ChildCount(child_count), m_Depth(_depth) {} - ~TaskGenerator( ) { m_ChildCount = m_Depth = -125; } - - /*override*/ tbb::task* execute() { - ASSERT( m_ChildCount>=0 && m_Depth>=0, NULL ); - if( m_Depth>0 ) { - recycle_as_safe_continuation(); - set_ref_count( m_ChildCount+1 ); - int k=0; - for( int j=0; j<m_ChildCount; ++j ) { - tbb::task& t = *new( allocate_child() ) TaskGenerator(m_ChildCount/2,m_Depth-1); - GetTaskPtr(k)->spawn(t); - } - ASSERT(k==m_ChildCount,NULL); - --m_Depth; - __TBB_Yield(); - ASSERT( state()==recycle && ref_count()>0, NULL); - } - return NULL; - } -}; diff --git a/src/tbb/src/test/harness_tbb_independence.h b/src/tbb/src/test/harness_tbb_independence.h deleted file mode 100644 index 7232cbe6e..000000000 --- a/src/tbb/src/test/harness_tbb_independence.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef harness_tbb_independence_H -#define harness_tbb_independence_H - -// The tests which include tbb/atomic.h gain a dependency on the __TBB_ASSERT -// implementation even if the test does not use anything from it. But almost all -// compilers optimize out unused inline functions, so they throw out the -// dependency. But to be pedantic with the standard, the __TBB_ASSERT -// implementation should be provided. Moreover the offload compiler really -// requires it. -#include "../tbb/tbb_assert_impl.h" - -#if __linux__  && __ia64__ - -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#include "tbb/tbb_machine.h" - -#include <pthread.h> - -// Can't use Intel compiler intrinsic due to internal error reported by 10.1 compiler -pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER; - -int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value) -{ -    pthread_mutex_lock(&counter_mutex); -    int32_t result = *(int32_t*)ptr; -    *(int32_t*)ptr = result + value; -    pthread_mutex_unlock(&counter_mutex); -    return result; -} - -int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value) -{ -    pthread_mutex_lock(&counter_mutex); -    int64_t result = *(int64_t*)ptr; -    *(int64_t*)ptr = result + value; -    pthread_mutex_unlock(&counter_mutex); -    return result; -} - -void __TBB_machine_pause(int32_t /*delay*/) {  __TBB_Yield(); } - -pthread_mutex_t cas_mutex = PTHREAD_MUTEX_INITIALIZER; - -extern "C" int64_t __TBB_machine_cmpswp8__TBB_full_fence(volatile void *ptr, int64_t value, int64_t comparand) -{ -    pthread_mutex_lock(&cas_mutex); -    int64_t result = *(int64_t*)ptr; -    if (result == comparand) -        *(int64_t*)ptr = value; -    pthread_mutex_unlock(&cas_mutex); -    return result; -} - -#endif /* __linux__ && __ia64__ */ - -#endif // harness_tbb_independence_H diff --git a/src/tbb/src/test/harness_test_cases_framework.h b/src/tbb/src/test/harness_test_cases_framework.h deleted file mode 100644 index db79eab16..000000000 --- a/src/tbb/src/test/harness_test_cases_framework.h +++ /dev/null @@ -1,234 +0,0 @@ -/* -    Copyright 2005-2014 Intel Corporation.  All Rights Reserved. - -    This file is part of Threading Building Blocks. Threading Building Blocks is free software; -    you can redistribute it and/or modify it under the terms of the GNU General Public License -    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is -    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -    implied warranty of  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -    See  the GNU General Public License for more details.   You should have received a copy of -    the  GNU General Public License along with Threading Building Blocks; if not, write to the -    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA - -    As a special exception,  you may use this file  as part of a free software library without -    restriction.  Specifically,  if other files instantiate templates  or use macros or inline -    functions from this file, or you compile this file and link it with other files to produce -    an executable,  this file does not by itself cause the resulting executable to be covered -    by the GNU General Public License. This exception does not however invalidate any other -    reasons why the executable file might be covered by the GNU General Public License. 
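// The same lock-based emulation pattern, reduced to a generic sketch: any
// atomic read-modify-write can be faked with a global mutex when the platform
// provides no native primitive, at the cost of serializing all callers.
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t rmw_mutex = PTHREAD_MUTEX_INITIALIZER;

int64_t emulated_fetch_add( volatile int64_t* ptr, int64_t value ) {
    pthread_mutex_lock( &rmw_mutex );
    int64_t old = *ptr;      // read ...
    *ptr = old + value;      // ... modify and write, all under one lock
    pthread_mutex_unlock( &rmw_mutex );
    return old;              // fetch-and-add returns the previous value
}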
-*/ - -#ifndef tbb_harness_test_cases_framework_H -#define tbb_harness_test_cases_framework_H - -#if defined(_MSC_VER) - #define _SCL_SECURE_NO_WARNINGS -#endif - -#undef DO_ITT_NOTIFY - -#include "harness.h" -//#include "harness_report.h" -#include "harness_assert.h" -//#include "harness_allocator.h" -#include "tbb/tbb_stddef.h" - -#include <cstdlib> - -#include <vector> -#include <algorithm> - -#include <string> -#include <sstream> -#include <iostream> - -namespace test_framework{ - template<typename test_class> - void run_test(){ - test_class()(); - } - - struct assertion_failure:std::exception{ - const char* my_filename; - int my_line; - const char* my_expression; - const char * my_comment; - assertion_failure(const char* filename, int line, const char* expression, const char * comment): - my_filename(filename), - my_line(line), - my_expression(expression), - my_comment(comment) - {} - virtual const char* what() const throw(){ - return "test assertion failed"; - } - }; - void throw_assertion_failure(){throw assertion_failure("",0,"","");} - void throw_assertion_failure(const char* filename, int line, const char* expression, const char * comment){ - throw assertion_failure(filename, line, expression, comment); - } - class test_suite{ - typedef void(*run_test_function_pointer_type)(); - typedef std::pair<std::string, run_test_function_pointer_type> tc_record_pair; - std::vector<tc_record_pair > test_cases; - public: - template<class test_class> - void register_test_case(std::string const& name, test_class * ){ - test_cases.push_back(tc_record_pair(name,& run_test<test_class>)); - } - std::string operator()(bool silent=false){ - std::stringstream str; - size_t failed=0; - for (size_t i=0;i<test_cases.size();++i){ - try{ - (test_cases[i].second)(); - }catch(std::exception& e){ - failed++; - str<<"test case \""<<test_cases[i].first<<"\" failed with exception. 
what():\""<<e.what()<<"\""<<std::endl; - } - } - if (!silent) { - str<<test_cases.size()<<" test cases are run; "<<failed<<" failed"<<std::endl; - } - return str.str(); - } - }; - test_suite& get_suite_ref(){static test_suite ts; return ts;} - void run_all_and_print_results(test_suite& ts,std::ostream& o , bool silent=false){ - o<<ts(silent); - } -} -using test_framework::get_suite_ref; -#define TEST_CASE_WITH_FIXTURE(TC_NAME,FIXTURE_NAME) \ - struct TC_NAME; \ - struct TC_NAME:FIXTURE_NAME { \ - /* explicitly implemented default constructor \ - is need here to please gcc 4.3.2*/ \ - TC_NAME(){} \ - void operator()(); \ - }; \ - bool TC_NAME##_registerd = (get_suite_ref().register_test_case(#TC_NAME,static_cast<TC_NAME*>(0)),true);\ - void TC_NAME::operator()() - -namespace test_framework_unit_tests{ - namespace test_helper{ - template <size_t id> struct tag{}; - template<typename tag> - struct test_case{ - static bool is_run; - void operator()(){ - is_run=true; - } - }; - template<typename tag> bool test_case<tag>::is_run = false; - - } - using namespace test_framework; - namespace test_test_suite_ref{ - void run_all_runs_all_registered_test_cases(){ - test_suite s; - using test_helper::tag; - test_helper::test_case<tag<__LINE__> > tc1; - test_helper::test_case<tag<__LINE__> > tc2; - s.register_test_case("tc1",&tc1); - s.register_test_case("tc2",&tc2); - s(); - ASSERT(tc1.is_run && tc2.is_run,"test_suite::operator() should run all the tests"); - } - - struct silent_switch_fixture{ - test_helper::test_case<test_helper::tag<__LINE__> > empty_test_case; - }; - struct run_all_and_print_results_should_respect_silent_mode: silent_switch_fixture{ - void operator()(){ - using test_helper::tag; - test_helper::test_case<tag<__LINE__> > do_nothing_tc; - test_suite ts; - ts.register_test_case("tc_name",&do_nothing_tc); - bool silent =true; - ASSERT(ts(silent).empty(),"in silent mode no message except error should be output"); - } - }; - struct run_all_and_print_results_should_respect_verbose_mode: silent_switch_fixture{ - void operator()(){ - using test_helper::tag; - test_helper::test_case<tag<__LINE__> > do_nothing_tc; - test_suite ts; - ts.register_test_case("tc_name",&do_nothing_tc); - bool silent =true; - ASSERT(!ts(!silent).empty(),"in verbose mode all messages should be outputed"); - } - }; - } - namespace test_test_case_macro{ - test_suite& get_suite_ref(){static test_suite ts; return ts;} - typedef test_helper::test_case<test_helper::tag<__LINE__> > unique_test_type; - TEST_CASE_WITH_FIXTURE(test_auto_registration,unique_test_type){ - unique_test_type::operator()(); - } - void run_test_test_case_macro(){ - get_suite_ref()(); - ASSERT(unique_test_type::is_run,"test case macro should register the test case in suite"); - } - void test_test_case_macro_does_not_create_test_case_object(){ - ASSERT(false,"to implement"); - } - } - namespace internal_assertions_failure_test_cases{ - - test_suite& get_suite_ref(){static test_suite ts; return ts;} - - //TODO: investigate compilation errors regarding tbb::set_assertion_handler -// struct empty_fixture{}; -// TEST_CASE_WITH_FIXTURE(test_internal_assertion_does_not_stop_test_suite,empty_fixture){ -// struct handler{ -// static void _( const char* /*filename*/, int /*line*/, const char* /*expression*/, const char * /*comment*/ ){ -// } -// }; -// -// tbb::assertion_handler_type previous = tbb::set_assertion_handler(handler::_); -// __TBB_ASSERT(false,"this assert should not stop the test suite run"); -// tbb::set_assertion_handler(previous ); -//// 
ASSERT(assertion_handler::is_called,"__TBB_ASSERT should call installed assertion handler"); -// } -// TEST_CASE_WITH_FIXTURE(test_internal_assertion_does_mark_the_test_as_failed,empty_fixture){ -// test_suite ts; -// struct _{ -//// static -// static void assertion_handler_type( const char* /*filename*/, int /*line*/, const char* /*expression*/, const char * /*comment*/ ){ -// } -// }; -// tbb::assertion_handler_type previous = tbb::set_assertion_handler(_::assertion_handler_type); -// __TBB_ASSERT(false,"this assert should not stop the test suite run"); -// tbb::set_assertion_handler(previous ); -// std::string result = ts(); -// std::size_t test_case_name_begin_pos = result.find("test case \""); -// std::size_t failed_begin_pos = result.find("failed"); -// ASSERT(test_case_name_begin_pos!=std::string::npos && failed_begin_pos!=std::string::npos && test_case_name_begin_pos<failed_begin_pos,"internal assertion should result in test failure"); -// } - - } - void run_all_test(){ - test_test_suite_ref::run_all_runs_all_registered_test_cases(); - test_test_suite_ref::run_all_and_print_results_should_respect_silent_mode()(); - test_test_suite_ref::run_all_and_print_results_should_respect_verbose_mode()(); - test_test_case_macro::run_test_test_case_macro(); - //TODO: uncomment and implement -// test_test_case_macro::test_test_case_macro_does_not_create_test_case_object(); - run_all_and_print_results(internal_assertions_failure_test_cases::get_suite_ref(),std::cout,!Verbose); - } -} - -int TestMain (){ - SetHarnessErrorProcessing(test_framework::throw_assertion_failure); - //TODO: deal with assertions during stack unwinding - //tbb::set_assertion_handler( test_framework::throw_assertion_failure ); - { - test_framework_unit_tests::run_all_test(); - } - bool silent = !Verbose; - run_all_and_print_results(test_framework::get_suite_ref(),std::cout,silent); - return Harness::Done; -} - -#endif //tbb_harness_test_cases_framework_H diff --git a/src/tbb/src/test/harness_tsx.h b/src/tbb/src/test/harness_tsx.h deleted file mode 100644 index fb5bc2397..000000000 --- a/src/tbb/src/test/harness_tsx.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
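// A minimal (hypothetical) test case written against the framework above: the
// macro declares a struct deriving from the fixture, so fixture members are
// directly visible in the body, and registers the case with the global suite.
#include <vector>

struct vector_fixture {
    std::vector<int> v;
};

TEST_CASE_WITH_FIXTURE(test_push_back_grows_vector, vector_fixture) {
    v.push_back(42);
    ASSERT(v.size() == 1, "push_back should add exactly one element");
}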
-*/ - -// Header that includes TSX-specific test functions - -#if __TBB_TSX_AVAILABLE -#define __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER (__INTEL_COMPILER || __GNUC__ || _MSC_VER || __SUNPRO_CC) -#if __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER - -#include "harness_defs.h" - -inline static bool IsInsideTx() -{ -    return __TBB_machine_is_in_transaction() != 0; -} - -#if _MSC_VER -#include <intrin.h> // for __cpuid -#endif -// TODO: consider reusing tbb_misc.cpp:cpu_has_speculation() instead of code duplication. -bool have_TSX() { -    bool result = false; -    const int hle_ebx_mask = 1<<4; -    const int rtm_ebx_mask = 1<<11; -#if _MSC_VER -    int info[4] = {0,0,0,0}; -    const int reg_ebx = 1; -    int old_ecx = 0; -    __cpuidex(info, 7, old_ecx); -    result = (info[reg_ebx] & hle_ebx_mask)!=0; -    if( result ) ASSERT( (info[reg_ebx] & rtm_ebx_mask)!=0, NULL ); -#elif __GNUC__ || __SUNPRO_CC -    int32_t reg_ebx = 0; -    int32_t reg_eax = 7; -    int32_t reg_ecx = 0; -    __asm__ __volatile__ ( "movl %%ebx, %%esi\n" -                           "cpuid\n" -                           "movl %%ebx, %0\n" -                           "movl %%esi, %%ebx\n" -                           : "=a"(reg_ebx) : "0" (reg_eax), "c" (reg_ecx) : "esi", -#if __TBB_x86_64 -                           "ebx", -#endif -                           "edx" -                           ); -    result = (reg_ebx & hle_ebx_mask)!=0 ; -    if( result ) ASSERT( (reg_ebx & rtm_ebx_mask)!=0, NULL ); -#endif -    return result; -} - -#endif /* __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER */ -#endif /* __TBB_TSX_AVAILABLE */ diff --git a/src/tbb/src/test/test_ScalableAllocator.cpp b/src/tbb/src/test/test_ScalableAllocator.cpp deleted file mode 100644 index 0877b0494..000000000 --- a/src/tbb/src/test/test_ScalableAllocator.cpp +++ /dev/null @@ -1,225 +0,0 @@ -/* -    Copyright 2005-2014 Intel Corporation.  All Rights Reserved. - -    This file is part of Threading Building Blocks. Threading Building Blocks is free software; -    you can redistribute it and/or modify it under the terms of the GNU General Public License -    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is -    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -    implied warranty of  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -    See  the GNU General Public License for more details.   You should have received a copy of -    the  GNU General Public License along with Threading Building Blocks; if not, write to the -    Free Software Foundation, Inc.,  51 Franklin St,  Fifth Floor,  Boston,  MA 02110-1301 USA - -    As a special exception,  you may use this file  as part of a free software library without -    restriction.  Specifically,  if other files instantiate templates  or use macros or inline -    functions from this file, or you compile this file and link it with other files to produce -    an executable,  this file does not by itself cause the resulting executable to be covered -    by the GNU General Public License. This exception does not however invalidate any other -    reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test whether scalable_allocator complies with the requirements in 20.1.5 of ISO C++ Standard (1998). - -#define __TBB_EXTRA_DEBUG 1 // enables additional checks -#define TBB_PREVIEW_MEMORY_POOL 1 - -#include "harness_assert.h" -#if !__TBB_SOURCE_DIRECTLY_INCLUDED -// harness_allocator.h requires atomics. We do not want a dependency -// on the TBB library just to get atomics, so we add a rudimentary implementation of them. 
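// An illustrative guard built from the helpers above: TSX tests compile only
// where the toolchain supports the intrinsics and are skipped at run time on
// CPUs that lack HLE/RTM, so one binary covers both kinds of hardware.
#if __TBB_TSX_AVAILABLE && __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER
void run_tsx_tests_sketch() {
    if ( !have_TSX() ) return;   // CPU cannot speculate; nothing to test
    // ... exercise speculative mutexes here ...
    ASSERT( !IsInsideTx(), "should have committed or aborted by now" );
}
#endif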
-#include "harness_tbb_independence.h" -#endif -#include "tbb/memory_pool.h" -#include "tbb/scalable_allocator.h" - -#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1 -// the actual body of the test is there: -#include "test_allocator.h" -#include "harness_allocator.h" - -#if _MSC_VER -#include "tbb/machine/windows_api.h" -#endif /* _MSC_VER */ - -typedef static_counting_allocator<tbb::memory_pool_allocator<char> > cnt_alloc_t; -typedef local_counting_allocator<std::allocator<char> > cnt_provider_t; -class MinimalAllocator : cnt_provider_t { -public: - typedef char value_type; - MinimalAllocator() { - REMARK("%p::ctor\n", this); - } - MinimalAllocator(const MinimalAllocator&s) : cnt_provider_t(s) { - REMARK("%p::ctor(%p)\n", this, &s); - } - ~MinimalAllocator() { - REMARK("%p::dtor: alloc=%u/%u free=%u/%u\n", this, - unsigned(items_allocated),unsigned(allocations), - unsigned(items_freed), unsigned(frees) ); - ASSERT(allocations==frees && items_allocated==items_freed,0); - if( allocations ) { // non-temporal copy - // TODO: describe consumption requirements - ASSERT(items_allocated>cnt_alloc_t::items_allocated, 0); - } - } - void *allocate(size_t sz) { - void *p = cnt_provider_t::allocate(sz); - REMARK("%p::allocate(%u) = %p\n", this, unsigned(sz), p); - return p; - } - void deallocate(void *p, size_t sz) { - ASSERT(allocations>frees,0); - REMARK("%p::deallocate(%p, %u)\n", this, p, unsigned(sz)); - cnt_provider_t::deallocate(cnt_provider_t::pointer(p), sz); - } -}; - -#if TBB_USE_EXCEPTIONS - -class NullAllocator { -public: - typedef char value_type; - NullAllocator() { } - NullAllocator(const NullAllocator&) { } - ~NullAllocator() { } - void *allocate(size_t) { return NULL; } - void deallocate(void *, size_t) { ASSERT(0, NULL); } -}; - -void TestZeroSpaceMemoryPool() -{ - try { - tbb::memory_pool<NullAllocator> pool; - ASSERT(0, "Useless allocator with no memory must not be created"); - } catch (std::bad_alloc) { - } catch (...) { - ASSERT(0, "wrong exception type; expected bad_alloc"); - } -} - -#else // TBB_USE_EXCEPTIONS - -void TestZeroSpaceMemoryPool() { } - -struct FixedPool { - void *buf; - size_t size; - bool used; - FixedPool(void *buf, size_t size) : buf(buf), size(size), used(false) {} -}; - -static void *fixedBufGetMem(intptr_t pool_id, size_t &bytes) -{ - if (((FixedPool*)pool_id)->used) - return NULL; - - ((FixedPool*)pool_id)->used = true; - bytes = ((FixedPool*)pool_id)->size; - return ((FixedPool*)pool_id)->buf; -} - -#endif // TBB_USE_EXCEPTIONS - -/* test that pools in small space are either usable or not created - (i.e., exception raised) */ -void TestSmallFixedSizePool() -{ - char *buf; - bool allocated = false; - - for (size_t sz = 0; sz < 64*1024; sz = sz? 3*sz : 3) { - buf = (char*)malloc(sz); -#if TBB_USE_EXCEPTIONS - try { - tbb::fixed_pool pool(buf, sz); -/* Check that pool is usable, i.e. such an allocation exists, - that can be fulfilled from the pool. 16B allocation fits in 16KB slabs, - so it requires at least 16KB. Requirement of 9KB allocation is more modest. -*/ - allocated = pool.malloc( 16 ) || pool.malloc( 9*1024 ); - ASSERT(allocated, "If pool created, it must be useful."); - } catch (std::bad_alloc) { - } catch (...) { - ASSERT(0, "wrong exception type; expected bad_alloc"); - } -#else -/* Do not test high-level pool interface because pool ctor emit exception - on creation failure. Instead test same functionality via low-level interface. - TODO: add support for configuration with disabled exceptions to pools. 
-*/ -        rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true, -                               /*keepMemTillDestroy=*/false); -        rml::MemoryPool *pool; -        FixedPool fixedPool(buf, sz); - -        rml::MemPoolError ret = pool_create_v1((intptr_t)&fixedPool, &pol, &pool); - -        if (ret == rml::POOL_OK) { -            allocated = pool_malloc(pool, 16) || pool_malloc(pool, 9*1024); -            ASSERT(allocated, "If pool created, it must be useful."); -            pool_destroy(pool); -        } else -            ASSERT(ret == rml::NO_MEMORY, "Expected that the pool is either valid " -                   "or has no memory to be created"); -#endif -        free(buf); -    } -    ASSERT(allocated, "Maximal buf size should be enough to create a working fixed_pool"); -#if TBB_USE_EXCEPTIONS -    try { -        tbb::fixed_pool pool(NULL, 10*1024*1024); -        ASSERT(0, "Useless allocator with no memory must not be created"); -    } catch (std::bad_alloc) { -    } catch (...) { -        ASSERT(0, "wrong exception type; expected bad_alloc"); -    } -#endif -} - -int TestMain () { -#if _MSC_VER && !__TBBMALLOC_NO_IMPLICIT_LINKAGE && !__TBB_WIN8UI_SUPPORT -    #ifdef _DEBUG -        ASSERT(!GetModuleHandle("tbbmalloc.dll") && GetModuleHandle("tbbmalloc_debug.dll"), -            "test linked with wrong (non-debug) tbbmalloc library"); -    #else -        ASSERT(!GetModuleHandle("tbbmalloc_debug.dll") && GetModuleHandle("tbbmalloc.dll"), -            "test linked with wrong (debug) tbbmalloc library"); -    #endif -#endif /* _MSC_VER && !__TBBMALLOC_NO_IMPLICIT_LINKAGE */ -    int result = TestMain<tbb::scalable_allocator<void> >(); -    { -        tbb::memory_pool<tbb::scalable_allocator<int> > pool; -        result += TestMain(tbb::memory_pool_allocator<void>(pool) ); -    }{ -        tbb::memory_pool<MinimalAllocator> pool; -        cnt_alloc_t alloc(( tbb::memory_pool_allocator<char>(pool) )); // double parentheses to avoid function declaration -        result += TestMain(alloc); -    }{ -        static char buf[1024*1024*4]; -        tbb::fixed_pool pool(buf, sizeof(buf)); -        const char *text = "this is a test";// 15 bytes -        char *p1 = (char*)pool.malloc( 16 ); -        ASSERT(p1, NULL); -        strcpy(p1, text); -        char *p2 = (char*)pool.realloc( p1, 15 ); -        ASSERT( p2 && !strcmp(p2, text), "realloc broke memory" ); - -        result += TestMain(tbb::memory_pool_allocator<void>(pool) ); - -        // try to allocate almost the entire buf, keeping some reasonable space for internals -        char *p3 = (char*)pool.realloc( p2, sizeof(buf)-128*1024 ); -        ASSERT( p3, "defragmentation failed" ); -        ASSERT( !strcmp(p3, text), "realloc broke memory" ); -        for( size_t sz = 10; sz < sizeof(buf); sz *= 2) { -            ASSERT( pool.malloc( sz ), NULL); -            pool.recycle(); -        } - -        result += TestMain(tbb::memory_pool_allocator<void>(pool) ); -    } -    TestSmallFixedSizePool(); -    TestZeroSpaceMemoryPool(); - -    ASSERT( !result, NULL ); -    return Harness::Done; -} diff --git a/src/tbb/src/test/test_ScalableAllocator_STL.cpp b/src/tbb/src/test/test_ScalableAllocator_STL.cpp deleted file mode 100644 index c8e8786eb..000000000 --- a/src/tbb/src/test/test_ScalableAllocator_STL.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* -    Copyright 2005-2014 Intel Corporation.  All Rights Reserved. - -    This file is part of Threading Building Blocks. Threading Building Blocks is free software; -    you can redistribute it and/or modify it under the terms of the GNU General Public License -    version 2  as  published  by  the  Free Software Foundation.  Threading Building Blocks is -    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the -    implied warranty of  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -    See  the GNU General Public License for more details. 
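// The core fixed_pool pattern the test above exercises, as a sketch (preview
// API, so TBB_PREVIEW_MEMORY_POOL must be defined): the pool carves all
// allocations out of one caller-supplied buffer, and recycle() returns every
// outstanding block to the pool at once.
#define TBB_PREVIEW_MEMORY_POOL 1
#include "tbb/memory_pool.h"

void fixed_pool_sketch() {
    static char buf[1024*1024];
    tbb::fixed_pool pool(buf, sizeof(buf));
    void *p = pool.malloc(64);     // served from buf, no OS allocation
    p = pool.realloc(p, 128);      // may move within the buffer
    pool.free(p);
    pool.recycle();                // reclaim everything still outstanding
}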
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test whether scalable_allocator works with some of the host's STL containers. - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#define __TBB_EXTRA_DEBUG 1 // enables additional checks -#define TBB_PREVIEW_MEMORY_POOL 1 - -#include "harness_assert.h" -#include "tbb/memory_pool.h" -#include "tbb/scalable_allocator.h" - -// The actual body of the test is there: -#include "test_allocator_STL.h" - -int TestMain () { - TestAllocatorWithSTL<tbb::scalable_allocator<void> >(); - tbb::memory_pool<tbb::scalable_allocator<int> > mpool; - TestAllocatorWithSTL(tbb::memory_pool_allocator<void>(mpool) ); - static char buf[1024*1024*4]; - tbb::fixed_pool fpool(buf, sizeof(buf)); - TestAllocatorWithSTL(tbb::memory_pool_allocator<void>(fpool) ); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_aggregator.cpp b/src/tbb/src/test/test_aggregator.cpp deleted file mode 100644 index ce7fcf20c..000000000 --- a/src/tbb/src/test/test_aggregator.cpp +++ /dev/null @@ -1,185 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
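
The file deleted here covers tbb::aggregator, whose basic pattern is worth recalling; a sketch assuming the preview API and lambda support, both of which the test itself enables (the struct name is hypothetical):

    #define TBB_PREVIEW_AGGREGATOR 1
    #include "tbb/aggregator.h"
    #include <queue>

    struct SharedQueue {
        std::priority_queue<int> pq;  // not thread-safe on its own
        tbb::aggregator agg;          // serializes all access to 'pq'

        void push(int value) {
            // execute() hands the functor to the aggregator, which applies
            // pending operations one at a time, so no lock around 'pq' is needed.
            agg.execute([this, value] { pq.push(value); });
        }
    };
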
-*/ - -#ifndef TBB_PREVIEW_AGGREGATOR - #define TBB_PREVIEW_AGGREGATOR 1 -#endif - -#include "tbb/aggregator.h" -#include "harness.h" -#include <queue> - -typedef std::priority_queue<int, std::vector<int>, std::less<int> > pq_t; - -int N; -int* shared_data; - -// Code for testing basic interface using function objects -class push_fnobj : NoAssign, Harness::NoAfterlife { - pq_t& pq; - int threadID; -public: - push_fnobj(pq_t& pq_, int tid) : pq(pq_), threadID(tid) {} - void operator()() const { - AssertLive(); - pq.push(threadID); - } -}; - -class pop_fnobj : NoAssign, Harness::NoAfterlife { - pq_t& pq; -public: - pop_fnobj(pq_t& pq_) : pq(pq_) {} - void operator()() const { - AssertLive(); - ASSERT(!pq.empty(), "queue should not be empty yet"); - int elem = pq.top(); - pq.pop(); - shared_data[elem]++; - } -}; - -class BasicBody : NoAssign { - pq_t& pq; - tbb::aggregator& agg; -public: - BasicBody(pq_t& pq_, tbb::aggregator& agg_) : pq(pq_), agg(agg_) {} - void operator()(const int threadID) const { - for (int i=0; i<N; ++i) agg.execute( push_fnobj(pq, threadID) ); - for (int i=0; i<N; ++i) agg.execute( pop_fnobj(pq) ); - } -}; - -void TestBasicInterface(int nThreads) { - pq_t my_pq; - tbb::aggregator agg; - for (int i=0; i<MaxThread; ++i) shared_data[i] = 0; - REMARK("Testing aggregator basic interface.\n"); - NativeParallelFor(nThreads, BasicBody(my_pq, agg)); - for (int i=0; i<nThreads; ++i) - ASSERT(shared_data[i] == N, "wrong number of elements pushed"); - REMARK("Done testing aggregator basic interface.\n"); -} -// End of code for testing basic interface using function objects - - -// Code for testing basic interface using lambda expressions -#if __TBB_LAMBDAS_PRESENT -void TestBasicLambdaInterface(int nThreads) { - pq_t my_pq; - tbb::aggregator agg; - for (int i=0; i<MaxThread; ++i) shared_data[i] = 0; - REMARK("Testing aggregator basic lambda interface.\n"); - NativeParallelFor(nThreads, [&agg, &my_pq](const int threadID) { - for (int i=0; i<N; ++i) - agg.execute( [&, threadID]() { my_pq.push(threadID); } ); - for (int i=0; i<N; ++i) { - agg.execute( [&]() { - ASSERT(!my_pq.empty(), "queue should not be empty yet"); - int elem = my_pq.top(); - my_pq.pop(); - shared_data[elem]++; - } ); - } - } ); - for (int i=0; i<nThreads; ++i) - ASSERT(shared_data[i] == N, "wrong number of elements pushed"); - REMARK("Done testing aggregator basic lambda interface.\n"); -} -#endif /* __TBB_LAMBDAS_PRESENT */ -// End of code for testing basic interface using lambda expressions - -// Code for testing expert interface -class op_data : public tbb::aggregator_operation, NoAssign { -public: - const int tid; - op_data(const int tid_=-1) : tbb::aggregator_operation(), tid(tid_) {} -}; - -class my_handler { - pq_t *pq; -public: - my_handler() {} - my_handler(pq_t *pq_) : pq(pq_) {} - void operator()(tbb::aggregator_operation* op_list) const { - while (op_list) { - op_data& request = static_cast<op_data&>(*op_list); - op_list = op_list->next(); - request.start(); - if (request.tid >= 0) pq->push(request.tid); - else { - ASSERT(!pq->empty(), "queue should not be empty!"); - int elem = pq->top(); - pq->pop(); - shared_data[elem]++; - } - request.finish(); - } - } -}; - -class ExpertBody : NoAssign { - pq_t& pq; - tbb::aggregator_ext<my_handler>& agg; -public: - ExpertBody(pq_t& pq_, tbb::aggregator_ext<my_handler>& agg_) : pq(pq_), agg(agg_) {} - void operator()(const int threadID) const { - for (int i=0; i<N; ++i) { - op_data to_push(threadID); - agg.process( &to_push ); - } - for (int i=0; i<N; ++i) { - 
op_data to_pop; - agg.process( &to_pop ); - } - } -}; - -void TestExpertInterface(int nThreads) { - pq_t my_pq; - tbb::aggregator_ext<my_handler> agg((my_handler(&my_pq))); - for (int i=0; i<MaxThread; ++i) shared_data[i] = 0; - REMARK("Testing aggregator expert interface.\n"); - NativeParallelFor(nThreads, ExpertBody(my_pq, agg)); - for (int i=0; i<nThreads; ++i) - ASSERT(shared_data[i] == N, "wrong number of elements pushed"); - REMARK("Done testing aggregator expert interface.\n"); -} -// End of code for testing expert interface - -int TestMain() { - if (MinThread < 1) - MinThread = 1; - shared_data = new int[MaxThread]; - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing on %d threads.\n", p); - N = 0; - while (N <= 100) { - REMARK("Testing with N=%d\n", N); - TestBasicInterface(p); -#if __TBB_LAMBDAS_PRESENT - TestBasicLambdaInterface(p); -#endif /* __TBB_LAMBDAS_PRESENT */ - TestExpertInterface(p); - N = N ? N*10 : 1; - } - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_aligned_space.cpp b/src/tbb/src/test/test_aligned_space.cpp deleted file mode 100644 index a7ac63d28..000000000 --- a/src/tbb/src/test/test_aligned_space.cpp +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -#if __TBB_GCC_STRICT_ALIASING_BROKEN - #pragma GCC diagnostic ignored "-Wstrict-aliasing" -#endif - -//! Wrapper around T where all members are private. -/** Used to prove that aligned_space<T,N> never calls member of T. */ -template<typename T> -class Minimal { - Minimal(); - Minimal( Minimal& min ); - ~Minimal(); - void operator=( const Minimal& ); - T pad; - template<typename U> - friend void AssignToCheckAlignment( Minimal<U>& dst, const Minimal<U>& src ) ; -}; - -template<typename T> -void AssignToCheckAlignment( Minimal<T>& dst, const Minimal<T>& src ) { - dst.pad = src.pad; -} - -#include "tbb/aligned_space.h" -#include "harness_assert.h" - -static bool SpaceWasted; - -template<typename U, size_t N> -void TestAlignedSpaceN() { - typedef Minimal<U> T; - struct { - //! Pad byte increases chance that subsequent member will be misaligned if there is a problem. 
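-        // (If aligned_space failed to carry T's alignment, the odd offset this
-        // pad byte creates would leave 'space' misaligned and the checks below
-        // would catch it.)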
- char pad; - tbb::aligned_space<T ,N> space; - } x; - AssertSameType( static_cast< T *>(0), x.space.begin() ); - AssertSameType( static_cast< T *>(0), x.space.end() ); - ASSERT( reinterpret_cast<void *>(x.space.begin())==reinterpret_cast< void *>(&x.space), NULL ); - ASSERT( x.space.end()-x.space.begin()==N, NULL ); - ASSERT( reinterpret_cast<void *>(x.space.begin())>=reinterpret_cast< void *>(&x.space), NULL ); - ASSERT( x.space.end()<=reinterpret_cast< T *>(&x.space+1), NULL ); - // Though not required, a good implementation of aligned_space<T,N> does not use any more space than a T[N]. - SpaceWasted |= sizeof(x.space)!=sizeof(T)*N; - for( size_t k=1; k<N; ++k ) - AssignToCheckAlignment( x.space.begin()[k-1], x.space.begin()[k] ); -} - -static void PrintSpaceWastingWarning( const char* type_name ); - -#include <typeinfo> - -template<typename T> -void TestAlignedSpace() { - SpaceWasted = false; - TestAlignedSpaceN<T,1>(); - TestAlignedSpaceN<T,2>(); - TestAlignedSpaceN<T,3>(); - TestAlignedSpaceN<T,4>(); - TestAlignedSpaceN<T,5>(); - TestAlignedSpaceN<T,6>(); - TestAlignedSpaceN<T,7>(); - TestAlignedSpaceN<T,8>(); - if( SpaceWasted ) - PrintSpaceWastingWarning( typeid(T).name() ); -} - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" - -#include "harness_m128.h" - -int TestMain () { - TestAlignedSpace<char>(); - TestAlignedSpace<short>(); - TestAlignedSpace<int>(); - TestAlignedSpace<float>(); - TestAlignedSpace<double>(); - TestAlignedSpace<long double>(); - TestAlignedSpace<size_t>(); -#if HAVE_m128 - TestAlignedSpace<__m128>(); -#endif -#if HAVE_m256 - if (have_AVX()) TestAlignedSpace<__m256>(); -#endif - return Harness::Done; -} - -static void PrintSpaceWastingWarning( const char* type_name ) { - REPORT("Consider rewriting aligned_space<%s,N> to waste less space\n", type_name ); -} - diff --git a/src/tbb/src/test/test_allocator.h b/src/tbb/src/test/test_allocator.h deleted file mode 100644 index 8e9808b66..000000000 --- a/src/tbb/src/test/test_allocator.h +++ /dev/null @@ -1,231 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Basic testing of an allocator -// Tests against requirements in 20.1.5 of ISO C++ Standard (1998). -// Does not check for thread safety or false sharing issues. 
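
As a reading aid for those requirements (ISO C++ 1998, 20.1.5, Table 32), the smallest conforming allocator has roughly the following shape; a hypothetical sketch for orientation, not part of the test suite:

    #include <cstddef>
    #include <new>

    template<typename T>
    struct minimal_allocator {
        typedef T              value_type;
        typedef T*             pointer;
        typedef const T*       const_pointer;
        typedef T&             reference;
        typedef const T&       const_reference;
        typedef std::size_t    size_type;
        typedef std::ptrdiff_t difference_type;
        template<typename U> struct rebind { typedef minimal_allocator<U> other; };

        pointer       address( reference x ) const       { return &x; }
        const_pointer address( const_reference x ) const { return &x; }
        pointer allocate( size_type n, const void* = 0 ) {
            return static_cast<pointer>(::operator new(n * sizeof(T)));
        }
        void deallocate( pointer p, size_type ) { ::operator delete(p); }
        size_type max_size() const { return size_type(-1) / sizeof(T); }
        void construct( pointer p, const T& val ) { new ((void*)p) T(val); }
        void destroy( pointer p ) { p->~T(); }
    };

    template<typename T, typename U>
    bool operator==( const minimal_allocator<T>&, const minimal_allocator<U>& ) { return true; }
    template<typename T, typename U>
    bool operator!=( const minimal_allocator<T>&, const minimal_allocator<U>& ) { return false; }
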
-// -// Tests for compatibility with the host's STL are in -// test_Allocator_STL.h. Those tests are in a separate file -// because they bring in lots of STL headers, and the tests here -// are supposed to work in the abscense of STL. - -#include "harness.h" -#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include <utility> //for std::pair -#endif - -template<typename A> -struct is_zero_filling { - static const bool value = false; -}; - -int NumberOfFoo; - -template<typename T, size_t N> -struct Foo { - T foo_array[N]; - Foo() { - zero_fill<T>(foo_array, N); - ++NumberOfFoo; - } - Foo( const Foo& x ) { - *this = x; - ++NumberOfFoo; - } - ~Foo() { - --NumberOfFoo; - } -}; - -inline char PseudoRandomValue( size_t j, size_t k ) { - return char(j*3 ^ j>>4 ^ k); -} - -//! T is type and A is allocator for that type -template<typename T, typename A> -void TestBasic( A& a ) { - T x; - const T cx = T(); - - // See Table 32 in ISO ++ Standard - typename A::pointer px = &x; - typename A::const_pointer pcx = &cx; - - typename A::reference rx = x; - ASSERT( &rx==&x, NULL ); - - typename A::const_reference rcx = cx; - ASSERT( &rcx==&cx, NULL ); - - typename A::value_type v = x; - - typename A::size_type size; - size = 0; - --size; - ASSERT( size>0, "not an unsigned integral type?" ); - - typename A::difference_type difference; - difference = 0; - --difference; - ASSERT( difference<0, "not an signed integral type?" ); - - // "rebind" tested by our caller - - ASSERT( a.address(rx)==px, NULL ); - - ASSERT( a.address(rcx)==pcx, NULL ); - - typename A::pointer array[100]; - size_t sizeof_T = sizeof(T); - for( size_t k=0; k<100; ++k ) { - array[k] = k&1 ? a.allocate(k,array[0]) : a.allocate(k); - char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[k])); - for( size_t j=0; j<k*sizeof_T; ++j ) - s[j] = PseudoRandomValue(j,k); - } - - // Test hint argument. This can't be compiled when hint is void*, It should be const void* - typename A::pointer a_ptr; - const void * const_hint = NULL; - a_ptr = a.allocate (1, const_hint); - a.deallocate(a_ptr, 1); - - // Test "a.deallocate(p,n) - for( size_t k=0; k<100; ++k ) { - char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[k])); - for( size_t j=0; j<k*sizeof_T; ++j ) - ASSERT( s[j] == PseudoRandomValue(j,k), NULL ); - a.deallocate(array[k],k); - } - - // Test "a.max_size()" - AssertSameType( a.max_size(), typename A::size_type(0) ); - // Following assertion catches case where max_size() is so large that computation of - // number of bytes for such an allocation would overflow size_type. - ASSERT( a.max_size()*typename A::size_type(sizeof(T))>=a.max_size(), "max_size larger than reasonable" ); - - // Test "a.construct(p,t)" - int n = NumberOfFoo; - typename A::pointer p = a.allocate(1); - a.construct( p, cx ); - ASSERT( NumberOfFoo==n+1, "constructor for Foo not called?" ); - - // Test "a.destroy(p)" - a.destroy( p ); - ASSERT( NumberOfFoo==n, "destructor for Foo not called?" ); - a.deallocate(p,1); - - #if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - { - typedef typename A:: template rebind<std::pair<typename A::value_type, typename A::value_type> >::other pair_allocator_type; - pair_allocator_type pair_allocator(a); - int NumberOfFooBeforeConstruct= NumberOfFoo; - typename pair_allocator_type::pointer pair_pointer = pair_allocator.allocate(1); - pair_allocator.construct( pair_pointer, cx, cx); - ASSERT( NumberOfFoo==NumberOfFooBeforeConstruct+2, "constructor for Foo not called appropriate number of times?" 
); - - pair_allocator.destroy( pair_pointer ); - ASSERT( NumberOfFoo==NumberOfFooBeforeConstruct, "destructor for Foo not called appropriate number of times?" ); - pair_allocator.deallocate(pair_pointer,1); - } - #endif - -} - -#include "tbb/blocked_range.h" - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "conditional expression is constant" warning in method check_allocate. - #pragma warning (disable: 4127) -#endif - -// A is an allocator for some type -template<typename A> -struct Body: NoAssign { - static const size_t max_k = 100000; - A &a; - Body(A &a_) : a(a_) {} - void check_allocate( typename A::pointer array[], size_t i, size_t t ) const - { - ASSERT(array[i] == 0, NULL); - size_t size = i * (i&3); - array[i] = i&1 ? a.allocate(size, array[i>>3]) : a.allocate(size); - ASSERT(array[i] != 0, "allocator returned null"); - char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[i])); - for( size_t j=0; j<size*sizeof(typename A::value_type); ++j ) { - if(is_zero_filling<typename A::template rebind<void>::other>::value) - ASSERT( !s[j], NULL); - s[j] = PseudoRandomValue(i, t); - } - } - - void check_deallocate( typename A::pointer array[], size_t i, size_t t ) const - { - ASSERT(array[i] != 0, NULL); - size_t size = i * (i&3); - char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[i])); - for( size_t j=0; j<size*sizeof(typename A::value_type); ++j ) - ASSERT( s[j] == PseudoRandomValue(i, t), "Thread safety test failed" ); - a.deallocate(array[i], size); - array[i] = 0; - } - - void operator()( size_t thread_id ) const { - typename A::pointer array[256]; - - for( size_t k=0; k<256; ++k ) - array[k] = 0; - for( size_t k=0; k<max_k; ++k ) { - size_t i = static_cast<unsigned char>(PseudoRandomValue(k,thread_id)); - if(!array[i]) check_allocate(array, i, thread_id); - else check_deallocate(array, i, thread_id); - } - for( size_t k=0; k<256; ++k ) - if(array[k]) - check_deallocate(array, k, thread_id); - } -}; - -// A is an allocator for some type, and U is another type -template<typename U, typename A> -void Test(A &a) { - typename A::template rebind<U>::other b(a); - TestBasic<U>(b); - TestBasic<typename A::value_type>(a); - - // thread safety - NativeParallelFor( 4, Body<A>(a) ); - ASSERT( NumberOfFoo==0, "Allocate/deallocate count mismatched" ); - - ASSERT( a==b, NULL ); - ASSERT( !(a!=b), NULL ); -} - -template<typename Allocator> -int TestMain(const Allocator &a = Allocator() ) { - NumberOfFoo = 0; - typename Allocator::template rebind<Foo<char,1> >::other a1(a); - typename Allocator::template rebind<Foo<double,1> >::other a2(a); - Test<Foo<int,17> >( a1 ); - Test<Foo<float,23> >( a2 ); - return 0; -} - diff --git a/src/tbb/src/test/test_allocator_STL.h b/src/tbb/src/test/test_allocator_STL.h deleted file mode 100644 index 12afee90e..000000000 --- a/src/tbb/src/test/test_allocator_STL.h +++ /dev/null @@ -1,146 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
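
The header being removed here drives the same allocators through real STL containers; the pattern under test is simply the standard allocator template parameter, e.g. (a sketch):

    #include <vector>
    #include "tbb/scalable_allocator.h"

    // Every element of 'v' is carved out by TBB's scalable allocator
    // rather than the default std::allocator.
    std::vector<int, tbb::scalable_allocator<int> > v(1000, 0);
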
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Tests for compatibility with the host's STL. - -#include "harness.h" - -template<typename Container> -void TestSequence(const typename Container::allocator_type &a) { - Container c(a); - for( int i=0; i<1000; ++i ) - c.push_back(i*i); - typename Container::const_iterator p = c.begin(); - for( int i=0; i<1000; ++i ) { - ASSERT( *p==i*i, NULL ); - ++p; - } - // regression test against compilation error for GCC 4.6.2 - c.resize(1000); -} - -template<typename Set> -void TestSet(const typename Set::allocator_type &a) { - Set s(typename Set::key_compare(), a); - typedef typename Set::value_type value_type; - for( int i=0; i<100; ++i ) - s.insert(value_type(3*i)); - for( int i=0; i<300; ++i ) { - ASSERT( s.erase(i)==size_t(i%3==0), NULL ); - } -} - -template<typename Map> -void TestMap(const typename Map::allocator_type &a) { - Map m(typename Map::key_compare(), a); - typedef typename Map::value_type value_type; - for( int i=0; i<100; ++i ) - m.insert(value_type(i,i*i)); - for( int i=0; i<100; ++i ) - ASSERT( m.find(i)->second==i*i, NULL ); -} - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <deque> -#include <list> -#include <map> -#include <set> -#include <vector> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if __TBB_CPP11_RVALUE_REF_PRESENT -struct MoveOperationTracker { - int my_value; - - MoveOperationTracker( int value = 0 ) : my_value( value ) {} - MoveOperationTracker(const MoveOperationTracker&) { - ASSERT( false, "Copy constructor is called" ); - } - MoveOperationTracker(MoveOperationTracker&& m) __TBB_NOEXCEPT( true ) : my_value( m.my_value ) { - } - MoveOperationTracker& operator=(MoveOperationTracker const&) { - ASSERT( false, "Copy assigment operator is called" ); - return *this; - } - MoveOperationTracker& operator=(MoveOperationTracker&& m) __TBB_NOEXCEPT( true ) { - my_value = m.my_value; - return *this; - } - - bool operator==(int value) const { - return my_value == value; - } - - bool operator==(const MoveOperationTracker& m) const { - return my_value == m.my_value; - } -}; -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - -template<typename Allocator> -void TestAllocatorWithSTL(const Allocator &a = Allocator() ) { - typedef typename Allocator::template rebind<int>::other Ai; - typedef typename Allocator::template rebind<std::pair<const int, int> >::other Acii; -#if _MSC_VER - typedef typename Allocator::template rebind<const int>::other Aci; - typedef typename Allocator::template rebind<std::pair<int, int> >::other Aii; -#endif - - // Sequenced containers - TestSequence<std::deque <int,Ai> >(a); - TestSequence<std::list 
<int,Ai> >(a); - TestSequence<std::vector<int,Ai> >(a); - -#if __TBB_CPP11_RVALUE_REF_PRESENT - typedef typename Allocator::template rebind<MoveOperationTracker>::other Amot; - TestSequence<std::deque <MoveOperationTracker, Amot> >(a); - TestSequence<std::list <MoveOperationTracker, Amot> >(a); - TestSequence<std::vector<MoveOperationTracker, Amot> >(a); -#endif - - // Associative containers - TestSet<std::set <int, std::less<int>, Ai> >(a); - TestSet<std::multiset<int, std::less<int>, Ai> >(a); - TestMap<std::map <int, int, std::less<int>, Acii> >(a); - TestMap<std::multimap<int, int, std::less<int>, Acii> >(a); - -#if _MSC_VER - // Test compatibility with Microsoft's implementation of std::allocator for some cases that - // are undefined according to the ISO standard but permitted by Microsoft. - TestSequence<std::deque <const int,Aci> >(a); -#if _CPPLIB_VER>=500 - TestSequence<std::list <const int,Aci> >(a); -#endif - TestSequence<std::vector<const int,Aci> >(a); - TestSet<std::set<const int, std::less<int>, Aci> >(a); - TestMap<std::map<int, int, std::less<int>, Aii> >(a); - TestMap<std::map<const int, int, std::less<int>, Acii> >(a); - TestMap<std::multimap<int, int, std::less<int>, Aii> >(a); - TestMap<std::multimap<const int, int, std::less<int>, Acii> >(a); -#endif /* _MSC_VER */ -} diff --git a/src/tbb/src/test/test_assembly.cpp b/src/tbb/src/test/test_assembly.cpp deleted file mode 100644 index 3a20f41de..000000000 --- a/src/tbb/src/test/test_assembly.cpp +++ /dev/null @@ -1,148 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Program for basic correctness testing of assembly-language routines. -#include "harness_defs.h" -//for ICC builtins mode the test will be skipped as -//macro __TBB_GCC_BUILTIN_ATOMICS_PRESENT used to define __TBB_TEST_SKIP_GCC_BUILTINS_MODE -//will not be defined (it is explicitly disabled for ICC) -#if __TBB_TEST_SKIP_GCC_BUILTINS_MODE -#include "harness.h" -int TestMain() { - REPORT("Known issue: GCC builtins aren't available\n"); - return Harness::Skipped; -} -#else - -#include "tbb/task.h" - -#include <new> -#include "harness.h" - -using tbb::internal::reference_count; - -//TODO: remove this function when atomic function __TBB_XXX are dropped -//! 
Test __TBB_CompareAndSwapW -static void TestCompareExchange() { - ASSERT( intptr_t(-10)<10, "intptr_t not a signed integral type?" ); - REMARK("testing __TBB_CompareAndSwapW\n"); - for( intptr_t a=-10; a<10; ++a ) - for( intptr_t b=-10; b<10; ++b ) - for( intptr_t c=-10; c<10; ++c ) { -// Workaround for a bug in GCC 4.3.0; and one more is below. -#if __TBB_GCC_OPTIMIZER_ORDERING_BROKEN - intptr_t x; - __TBB_store_with_release( x, a ); -#else - intptr_t x = a; -#endif - intptr_t y = __TBB_CompareAndSwapW(&x,b,c); - ASSERT( y==a, NULL ); - if( a==c ) - ASSERT( x==b, NULL ); - else - ASSERT( x==a, NULL ); - } -} - -//TODO: remove this function when atomic function __TBB_XXX are dropped -//! Test __TBB___TBB_FetchAndIncrement and __TBB___TBB_FetchAndDecrement -static void TestAtomicCounter() { - // "canary" is a value used to detect illegal overwrites. - const reference_count canary = ~(uintptr_t)0/3; - REMARK("testing __TBB_FetchAndIncrement\n"); - struct { - reference_count prefix, i, suffix; - } x; - x.prefix = canary; - x.i = 0; - x.suffix = canary; - for( int k=0; k<10; ++k ) { - reference_count j = __TBB_FetchAndIncrementWacquire((volatile void *)&x.i); - ASSERT( x.prefix==canary, NULL ); - ASSERT( x.suffix==canary, NULL ); - ASSERT( x.i==k+1, NULL ); - ASSERT( j==k, NULL ); - } - REMARK("testing __TBB_FetchAndDecrement\n"); - x.i = 10; - for( int k=10; k>0; --k ) { - reference_count j = __TBB_FetchAndDecrementWrelease((volatile void *)&x.i); - ASSERT( j==k, NULL ); - ASSERT( x.i==k-1, NULL ); - ASSERT( x.prefix==canary, NULL ); - ASSERT( x.suffix==canary, NULL ); - } -} - -static void TestTinyLock() { - REMARK("testing __TBB_LockByte\n"); - __TBB_atomic_flag flags[16]; - for( unsigned int i=0; i<16; ++i ) - flags[i] = (__TBB_Flag)i; -#if __TBB_GCC_OPTIMIZER_ORDERING_BROKEN - __TBB_store_with_release( flags[8], 0 ); -#else - flags[8] = 0; -#endif - __TBB_LockByte(flags[8]); - for( unsigned int i=0; i<16; ++i ) - #ifdef __sparc - ASSERT( flags[i]==(i==8?0xff:i), NULL ); - #else - ASSERT( flags[i]==(i==8?1:i), NULL ); - #endif - __TBB_UnlockByte(flags[8]); - for( unsigned int i=0; i<16; ++i ) - ASSERT( flags[i] == (i==8?0:i), NULL ); -} - -static void TestLog2() { - REMARK("testing __TBB_Log2\n"); - for( uintptr_t i=1; i; i<<=1 ) { - for( uintptr_t j=1; j<1<<16; ++j ) { - if( uintptr_t k = i*j ) { - uintptr_t actual = __TBB_Log2(k); - const uintptr_t ONE = 1; // warning suppression again - ASSERT( k >= ONE<<actual, NULL ); - ASSERT( k>>1 < ONE<<actual, NULL ); - } - } - } -} - -static void TestPause() { - REMARK("testing __TBB_Pause\n"); - __TBB_Pause(1); -} - -int TestMain () { - __TBB_TRY { - TestLog2(); - TestTinyLock(); - TestCompareExchange(); - TestAtomicCounter(); - TestPause(); - } __TBB_CATCH(...) { - ASSERT(0,"unexpected exception"); - } - return Harness::Done; -} -#endif // __TBB_TEST_SKIP_BUILTINS_MODE diff --git a/src/tbb/src/test/test_atomic.cpp b/src/tbb/src/test/test_atomic.cpp deleted file mode 100644 index 5d76d6293..000000000 --- a/src/tbb/src/test/test_atomic.cpp +++ /dev/null @@ -1,1603 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
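
The largest removal in this group, test_atomic.cpp, revolves around one contract: compare_and_swap(new_value, comparand) returns the value it observed, and performs the store only when that observation equals comparand. A sketch of the idiom (the helper name is hypothetical):

    #include "tbb/atomic.h"

    tbb::atomic<int> a;  // statics are zero-initialized

    bool try_update(int expected, int desired) {
        // Succeeds only if 'a' still held 'expected' at the moment of the CAS.
        return a.compare_and_swap(desired, expected) == expected;
    }
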
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" - -#if __TBB_TEST_SKIP_PIC_MODE || (__TBB_TEST_SKIP_GCC_BUILTINS_MODE && __TBB_TEST_SKIP_ICC_BUILTINS_MODE) -#include "harness.h" -int TestMain() { - REPORT("Known issue: %s\n", - __TBB_TEST_SKIP_PIC_MODE? "PIC mode is not supported" : "Compiler builtins for atomic operations aren't available"); - return Harness::Skipped; -} -#else - -// Put tbb/atomic.h first, so if it is missing a prerequisite header, we find out about it. -// The tests here do *not* test for atomicity, just serial correctness. */ - -#include "tbb/atomic.h" -#include "harness_assert.h" -#include <cstring> // memcmp -#include "tbb/aligned_space.h" -#include <new> //for placement new - -using std::memcmp; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Unary minus operator applied to unsigned type, result still unsigned - // Constant conditional expression - #pragma warning( disable: 4127 4310 ) -#endif - -#if __TBB_GCC_STRICT_ALIASING_BROKEN - #pragma GCC diagnostic ignored "-Wstrict-aliasing" -#endif - -// Intel(R) Compiler have an issue when a scoped enum with a specified underlying type has negative values. -#define __TBB_ICC_SCOPED_ENUM_WITH_UNDERLYING_TYPE_NEGATIVE_VALUE_BROKEN ( _MSC_VER && !__TBB_DEBUG && __INTEL_COMPILER && __INTEL_COMPILER <= 1500 ) -// Intel(R) Compiler have an issue with __atomic_load_explicit from a scoped enum with a specified underlying type. -#define __TBB_ICC_SCOPED_ENUM_WITH_UNDERLYING_TYPE_ATOMIC_LOAD_BROKEN ( TBB_USE_ICC_BUILTINS && !__TBB_DEBUG && __INTEL_COMPILER && __INTEL_COMPILER <= 1500 ) - -enum LoadStoreExpression { - UseOperators, - UseImplicitAcqRel, - UseExplicitFullyFenced, - UseExplicitAcqRel, - UseExplicitRelaxed, - UseGlobalHelperFullyFenced, - UseGlobalHelperAcqRel, - UseGlobalHelperRelaxed -}; - -//! Structure that holds an atomic<T> and some guard bytes around it. -template<typename T, LoadStoreExpression E = UseOperators> -struct TestStruct { - typedef unsigned char byte_type; - T prefix; - tbb::atomic<T> counter; - T suffix; - TestStruct( T i ) { - ASSERT( sizeof(*this)==3*sizeof(T), NULL ); - for (size_t j = 0; j < sizeof(T); ++j) { - reinterpret_cast<byte_type*>(&prefix)[j] = byte_type(0x11*(j+1)); - reinterpret_cast<byte_type*>(&suffix)[sizeof(T)-j-1] = byte_type(0x11*(j+1)); - } - if ( E == UseOperators ) - counter = i; - else if ( E == UseExplicitRelaxed ) - counter.template store<tbb::relaxed>(i); - else - tbb::store<tbb::full_fence>( counter, i ); - } - ~TestStruct() { - // Check for writes outside the counter. 
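-        // (The constructor filled 'prefix' and 'suffix' with the byte pattern
-        // 0x11,0x22,...; any store through the atomic that strayed outside
-        // 'counter' would break that pattern and trip the asserts below.)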
- for (size_t j = 0; j < sizeof(T); ++j) { - ASSERT( reinterpret_cast<byte_type*>(&prefix)[j] == byte_type(0x11*(j+1)), NULL ); - ASSERT( reinterpret_cast<byte_type*>(&suffix)[sizeof(T)-j-1] == byte_type(0x11*(j+1)), NULL ); - } - } - static tbb::atomic<T> gCounter; -}; - -// A global variable of type tbb::atomic<> -template<typename T, LoadStoreExpression E> tbb::atomic<T> TestStruct<T, E>::gCounter; - -//! Test compare_and_swap template members of class atomic<T> for memory_semantics=M -template<typename T,tbb::memory_semantics M> -void TestCompareAndSwapWithExplicitOrdering( T i, T j, T k ) { - ASSERT( i!=k, "values must be distinct" ); - // Test compare_and_swap that should fail - TestStruct<T> x(i); - T old = x.counter.template compare_and_swap<M>( j, k ); - ASSERT( old==i, NULL ); - ASSERT( x.counter==i, "old value not retained" ); - // Test compare and swap that should succeed - old = x.counter.template compare_and_swap<M>( j, i ); - ASSERT( old==i, NULL ); - ASSERT( x.counter==j, "value not updated?" ); -} - -//! i, j, k must be different values -template<typename T> -void TestCompareAndSwap( T i, T j, T k ) { - ASSERT( i!=k, "values must be distinct" ); - // Test compare_and_swap that should fail - TestStruct<T> x(i); - T old = x.counter.compare_and_swap( j, k ); - ASSERT( old==i, NULL ); - ASSERT( x.counter==i, "old value not retained" ); - // Test compare and swap that should succeed - old = x.counter.compare_and_swap( j, i ); - ASSERT( old==i, NULL ); - if( x.counter==i ) { - ASSERT( x.counter==j, "value not updated?" ); - } else { - ASSERT( x.counter==j, "value trashed" ); - } - // Check that atomic global variables work - TestStruct<T>::gCounter = i; - old = TestStruct<T>::gCounter.compare_and_swap( j, i ); - ASSERT( old==i, NULL ); - ASSERT( TestStruct<T>::gCounter==j, "value not updated?" ); - TestCompareAndSwapWithExplicitOrdering<T,tbb::full_fence>(i,j,k); - TestCompareAndSwapWithExplicitOrdering<T,tbb::acquire>(i,j,k); - TestCompareAndSwapWithExplicitOrdering<T,tbb::release>(i,j,k); - TestCompareAndSwapWithExplicitOrdering<T,tbb::relaxed>(i,j,k); -} - -//! memory_semantics variation on TestFetchAndStore -template<typename T, tbb::memory_semantics M> -void TestFetchAndStoreWithExplicitOrdering( T i, T j ) { - ASSERT( i!=j, "values must be distinct" ); - TestStruct<T> x(i); - T old = x.counter.template fetch_and_store<M>( j ); - ASSERT( old==i, NULL ); - ASSERT( x.counter==j, NULL ); -} - -//! i and j must be different values -template<typename T> -void TestFetchAndStore( T i, T j ) { - ASSERT( i!=j, "values must be distinct" ); - TestStruct<T> x(i); - T old = x.counter.fetch_and_store( j ); - ASSERT( old==i, NULL ); - ASSERT( x.counter==j, NULL ); - // Check that atomic global variables work - TestStruct<T>::gCounter = i; - old = TestStruct<T>::gCounter.fetch_and_store( j ); - ASSERT( old==i, NULL ); - ASSERT( TestStruct<T>::gCounter==j, "value not updated?" ); - TestFetchAndStoreWithExplicitOrdering<T,tbb::full_fence>(i,j); - TestFetchAndStoreWithExplicitOrdering<T,tbb::acquire>(i,j); - TestFetchAndStoreWithExplicitOrdering<T,tbb::release>(i,j); - TestFetchAndStoreWithExplicitOrdering<T,tbb::relaxed>(i,j); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // conversion from <bigger integer> to <smaller integer>, possible loss of data - // the warning seems a complete nonsense when issued for e.g. short+=short - #pragma warning( disable: 4244 ) -#endif - -//! 
Test fetch_and_add members of class atomic<T> for memory_semantics=M -template<typename T,tbb::memory_semantics M> -void TestFetchAndAddWithExplicitOrdering( T i ) { - TestStruct<T> x(i); - T actual; - T expected = i; - - // Test fetch_and_add member template - for( int j=0; j<10; ++j ) { - actual = x.counter.fetch_and_add(j); - ASSERT( actual==expected, NULL ); - expected += j; - } - for( int j=0; j<10; ++j ) { - actual = x.counter.fetch_and_add(-j); - ASSERT( actual==expected, NULL ); - expected -= j; - } - - // Test fetch_and_increment member template - ASSERT( x.counter==i, NULL ); - actual = x.counter.template fetch_and_increment<M>(); - ASSERT( actual==i, NULL ); - ASSERT( x.counter==T(i+1), NULL ); - - // Test fetch_and_decrement member template - actual = x.counter.template fetch_and_decrement<M>(); - ASSERT( actual==T(i+1), NULL ); - ASSERT( x.counter==i, NULL ); -} - -//! Test fetch_and_add and related operators -template<typename T> -void TestFetchAndAdd( T i ) { - TestStruct<T> x(i); - T value; - value = ++x.counter; - ASSERT( value==T(i+1), NULL ); - value = x.counter++; - ASSERT( value==T(i+1), NULL ); - value = x.counter--; - ASSERT( value==T(i+2), NULL ); - value = --x.counter; - ASSERT( value==i, NULL ); - T actual; - T expected = i; - for( int j=-100; j<=100; ++j ) { - expected += j; - actual = x.counter += j; - ASSERT( actual==expected, NULL ); - } - for( int j=-100; j<=100; ++j ) { - expected -= j; - actual = x.counter -= j; - ASSERT( actual==expected, NULL ); - } - // Test fetch_and_increment - ASSERT( x.counter==i, NULL ); - actual = x.counter.fetch_and_increment(); - ASSERT( actual==i, NULL ); - ASSERT( x.counter==T(i+1), NULL ); - - // Test fetch_and_decrement - actual = x.counter.fetch_and_decrement(); - ASSERT( actual==T(i+1), NULL ); - ASSERT( x.counter==i, NULL ); - x.counter = i; - ASSERT( x.counter==i, NULL ); - - // Check that atomic global variables work - TestStruct<T>::gCounter = i; - value = TestStruct<T>::gCounter.fetch_and_add( 42 ); - expected = i+42; - ASSERT( value==i, NULL ); - ASSERT( TestStruct<T>::gCounter==expected, "value not updated?" ); - TestFetchAndAddWithExplicitOrdering<T,tbb::full_fence>(i); - TestFetchAndAddWithExplicitOrdering<T,tbb::acquire>(i); - TestFetchAndAddWithExplicitOrdering<T,tbb::release>(i); - TestFetchAndAddWithExplicitOrdering<T,tbb::relaxed>(i); -} - -//! A type with unknown size. -class IncompleteType; - -void TestFetchAndAdd( IncompleteType* ) { - // There are no fetch-and-add operations on a IncompleteType*. -} -void TestFetchAndAdd( void* ) { - // There are no fetch-and-add operations on a void*. -} - -void TestFetchAndAdd( bool ) { - // There are no fetch-and-add operations on a bool. -} - -template<typename T> -void TestConst( T i ) { - // Try const - const TestStruct<T> x(i); - ASSERT( memcmp( &i, &x.counter, sizeof(T) )==0, "write to atomic<T> broken?" ); - ASSERT( x.counter==i, "read of atomic<T> broken?" ); - const TestStruct<T, UseExplicitRelaxed> y(i); - ASSERT( memcmp( &i, &y.counter, sizeof(T) )==0, "relaxed write to atomic<T> broken?" ); - ASSERT( tbb::load<tbb::relaxed>(y.counter) == i, "relaxed read of atomic<T> broken?" ); - const TestStruct<T, UseGlobalHelperFullyFenced> z(i); - ASSERT( memcmp( &i, &z.counter, sizeof(T) )==0, "sequentially consistent write to atomic<T> broken?" ); - ASSERT( z.counter.template load<tbb::full_fence>() == i, "sequentially consistent read of atomic<T> broken?" 
); -} - -#include "harness.h" - -#include <sstream> - -//TODO: consider moving it to separate file, and unify with one in examples command line interface -template<typename T> -std::string to_string(const T& a){ - std::stringstream str; str <<a; - return str.str(); -} -namespace initialization_tests { - template<typename T> - struct test_initialization_fixture{ - typedef tbb::atomic<T> atomic_t; - tbb::aligned_space<atomic_t> non_zeroed_storage; - enum {fill_value = 0xFF }; - test_initialization_fixture(){ - memset(non_zeroed_storage.begin(),fill_value,sizeof(non_zeroed_storage)); - ASSERT( char(fill_value)==*(reinterpret_cast<char*>(non_zeroed_storage.begin())) - ,"failed to fill the storage; memset error?"); - } - //TODO: consider move it to destructor, even in a price of UB - void tear_down(){ - non_zeroed_storage.begin()->~atomic_t(); - } - }; - - template<typename T> - struct TestValueInitialization : test_initialization_fixture<T>{ - void operator()(){ - typedef typename test_initialization_fixture<T>::atomic_t atomic_type; - //please note that explicit braces below are needed to get zero initialization. - //in C++11, 8.5 Initializers [dcl.init], see paragraphs 10,7,5 - new (this->non_zeroed_storage.begin()) atomic_type(); - //TODO: add use of KNOWN_ISSUE macro on SunCC 5.11 - #if !__SUNPRO_CC || __SUNPRO_CC > 0x5110 - //TODO: add printing of typename to the assertion - ASSERT(char(0)==*(reinterpret_cast<char*>(this->non_zeroed_storage.begin())) - ,("value initialization for tbb::atomic should do zero initialization; " - "actual value:"+to_string(this->non_zeroed_storage.begin()->load())).c_str()); - #endif - this->tear_down(); - }; - }; - - template<typename T> - struct TestDefaultInitialization : test_initialization_fixture<T>{ - void operator ()(){ - typedef typename test_initialization_fixture<T>::atomic_t atomic_type; - new (this->non_zeroed_storage.begin()) atomic_type; - ASSERT( char(this->fill_value)==*(reinterpret_cast<char*>(this->non_zeroed_storage.begin())) - ,"default initialization for atomic should do no initialization"); - this->tear_down(); - } - }; -# if __TBB_ATOMIC_CTORS - template<typename T> - struct TestDirectInitialization : test_initialization_fixture<T> { - void operator()(T i){ - typedef typename test_initialization_fixture<T>::atomic_t atomic_type; - new (this->non_zeroed_storage.begin()) atomic_type(i); - ASSERT(i == this->non_zeroed_storage.begin()->load() - ,("tbb::atomic initialization failed; " - "value:"+to_string(this->non_zeroed_storage.begin()->load())+ - "; expected:"+to_string(i)).c_str()); - this->tear_down(); - } - }; -# endif -} -template<typename T> -void TestValueInitialization(){ - initialization_tests::TestValueInitialization<T>()(); -} -template<typename T> -void TestDefaultInitialization(){ - initialization_tests::TestDefaultInitialization<T>()(); -} - -#if __TBB_ATOMIC_CTORS -template<typename T> -void TestDirectInitialization(T i){ - initialization_tests::TestDirectInitialization<T>()(i); -} -//TODO: it would be great to have constructor doing dynamic initialization of local atomic objects implicitly (with zero?), -// but do no dynamic initializations by default for static objects -namespace test_constexpr_initialization_helper { - struct white_box_ad_hoc_type { - int _int; - constexpr white_box_ad_hoc_type(int a =0) : _int(a) {}; - constexpr operator int() const { return _int;} - }; -} -//some white boxing -namespace tbb { namespace internal { - template<> - struct 
atomic_impl<test_constexpr_initialization_helper::white_box_ad_hoc_type>: atomic_impl<int> {
-        atomic_impl() = default;
-        constexpr atomic_impl(test_constexpr_initialization_helper::white_box_ad_hoc_type value):atomic_impl<int>(value){}
-        constexpr operator int(){ return this->my_storage.my_value;}
-    };
-}}
-
-//TODO: make this a parameterized macro
-void TestConstExprInitializationIsTranslationTime(){
-    const char* ct_init_failed_msg = "translation time init failed?";
-    typedef tbb::atomic<int> atomic_t;
-    constexpr atomic_t a(8);
-    ASSERT(a == 8,ct_init_failed_msg);
-
-    constexpr tbb::atomic<test_constexpr_initialization_helper::white_box_ad_hoc_type> ct_atomic(10);
-    //for some unknown reason clang does not handle the enum syntax
-#if __clang__
-    constexpr int ct_atomic_value_ten = (int)ct_atomic;
-#else
-    enum {ct_atomic_value_ten = (int)ct_atomic};
-#endif
-    __TBB_STATIC_ASSERT(ct_atomic_value_ten == 10, "translation time init failed?");
-    ASSERT(ct_atomic_value_ten == 10,ct_init_failed_msg);
-    int array[ct_atomic_value_ten];
-    ASSERT(Harness::array_length(array) == 10,ct_init_failed_msg);
-}
-
-#include <string>
-#include <vector>
-namespace TestConstExprInitializationOfGlobalObjectsHelper{
-    struct static_objects_dynamic_init_order_tester {
-        static int order_hash;
-        template<int N> struct nth {
-            nth(){ order_hash = (order_hash<<4)+N; }
-        };
-
-        static nth<2> second;
-        static nth<3> third;
-    };
-
-    int static_objects_dynamic_init_order_tester::order_hash=1;
-    static_objects_dynamic_init_order_tester::nth<2> static_objects_dynamic_init_order_tester::second;
-    static_objects_dynamic_init_order_tester::nth<3> static_objects_dynamic_init_order_tester::third;
-
-    void TestStaticsDynamicInitializationOrder(){
-        ASSERT(static_objects_dynamic_init_order_tester::order_hash==0x123,"Statics dynamic initialization order is broken? 
"); - } - - template<typename T> - void TestStaticInit(); - - namespace auto_registered_tests_helper { - template<typename T> - struct type_name ; - - #define REGISTER_TYPE_NAME(T) \ - namespace auto_registered_tests_helper{ \ - template<> \ - struct type_name<T> { \ - static const char* name; \ - }; \ - const char* type_name<T>::name = #T; \ - } \ - - typedef void (* p_test_function_type)(); - static std::vector<p_test_function_type> const_expr_tests; - - template <typename T> - struct registration{ - registration(){const_expr_tests.push_back(&TestStaticInit<T>);} - }; - } - //according to ISO C++11 [basic.start.init], static data fields of class template have unordered - //initialization unless it is an explicit specialization - template<typename T> - struct tester; - - #define TESTER_SPECIALIZATION(T,ct_value) \ - template<> \ - struct tester<T> { \ - struct static_before; \ - static bool result; \ - static static_before static_before_; \ - static tbb::atomic<T> static_atomic; \ - \ - static auto_registered_tests_helper::registration<T> registered; \ - }; \ - bool tester<T>::result = false; \ - \ - struct tester<T>::static_before { \ - static_before(){ result = (static_atomic==ct_value); } \ - } ; \ - \ - typename tester<T>::static_before tester<T>::static_before_; \ - tbb::atomic<T> tester<T>::static_atomic(ct_value); \ - \ - auto_registered_tests_helper::registration<T> tester<T>::registered; \ - REGISTER_TYPE_NAME(T) \ - - template<typename T> - void TestStaticInit(){ - //TODO: add printing of values to the assertion - std::string type_name = auto_registered_tests_helper::type_name<T>::name; - ASSERT(tester<T>::result,("Static initialization failed for atomic " + type_name).c_str()); - } - - void CallExprInitTests(){ - using namespace auto_registered_tests_helper; - for (size_t i =0; i<const_expr_tests.size(); ++i){ - (*const_expr_tests[i])(); - } - REMARK("ran %d consrexpr static init test \n",const_expr_tests.size()); - } - - //TODO: unify somehow list of tested types with one in TestMain - //TODO: add specializations for: - //T,T(-T(1) - //T,1 -# if __TBB_64BIT_ATOMICS - TESTER_SPECIALIZATION(long long,8LL) - TESTER_SPECIALIZATION(unsigned long long,8ULL) -# endif - TESTER_SPECIALIZATION(unsigned long,8UL) - TESTER_SPECIALIZATION(long,8L) - TESTER_SPECIALIZATION(unsigned int,8U) - TESTER_SPECIALIZATION(int,8) - TESTER_SPECIALIZATION(unsigned short,8) - TESTER_SPECIALIZATION(short,8) - TESTER_SPECIALIZATION(unsigned char,8) - TESTER_SPECIALIZATION(signed char,8) - TESTER_SPECIALIZATION(char,8) - TESTER_SPECIALIZATION(wchar_t,8) - - int dummy; - TESTER_SPECIALIZATION(void*,&dummy); - TESTER_SPECIALIZATION(bool,false); - //TODO: add test for constexpt initialization of floating types - //for some unknown reasons 0.1 becomes 0.10000001 and equality comparison fails - enum written_number_enum{one=2,two}; - TESTER_SPECIALIZATION(written_number_enum,one); - //TODO: add test for ArrayElement<> as in TestMain -} - -void TestConstExprInitializationOfGlobalObjects(){ - //first assert that assumption the test based on are correct - TestConstExprInitializationOfGlobalObjectsHelper::TestStaticsDynamicInitializationOrder(); - TestConstExprInitializationOfGlobalObjectsHelper::CallExprInitTests(); -} -#endif //__TBB_ATOMIC_CTORS -template<typename T> -void TestOperations( T i, T j, T k ) { - TestValueInitialization<T>(); - TestDefaultInitialization<T>(); -# if __TBB_ATOMIC_CTORS - TestConstExprInitializationIsTranslationTime(); - TestDirectInitialization<T>(i); - TestDirectInitialization<T>(j); - 
TestDirectInitialization<T>(k); -# endif - TestConst(i); - TestCompareAndSwap(i,j,k); - TestFetchAndStore(i,k); // Pass i,k instead of i,j, because callee requires two distinct values. -} - -template<typename T> -void TestParallel( const char* name ); - -bool ParallelError; - -template<typename T> -struct AlignmentChecker { - char c; - tbb::atomic<T> i; -}; - -//TODO: candidate for test_compiler? -template<typename T> -void TestAlignment( const char* name ) { - AlignmentChecker<T> ac; - tbb::atomic<T> x; - x = T(0); - bool is_stack_variable_aligned = tbb::internal::is_aligned(&x,sizeof(T)); - bool is_member_variable_aligned = tbb::internal::is_aligned(&ac.i,sizeof(T)); - bool is_struct_size_correct = (sizeof(AlignmentChecker<T>)==2*sizeof(tbb::atomic<T>)); - bool known_issue_condition = __TBB_FORCE_64BIT_ALIGNMENT_BROKEN && ( sizeof(T)==8); - //TODO: replace these ifs with KNOWN_ISSUE macro when it available - if (!is_stack_variable_aligned){ - std::string msg = "Compiler failed to properly align local atomic variable?; size:"+to_string(sizeof(T)) + " type: " - +to_string(name) + " location:" + to_string(&x) +"\n"; - if (known_issue_condition) { - REPORT(("Known issue: "+ msg).c_str()); - }else{ - ASSERT(false,msg.c_str()); - } - } - if (!is_member_variable_aligned){ - std::string msg = "Compiler failed to properly align atomic member variable?; size:"+to_string(sizeof(T)) + " type: " - +to_string(name) + " location:" + to_string(&ac.i) +"\n"; - if (known_issue_condition) { - REPORT(("Known issue: "+ msg).c_str()); - }else{ - ASSERT(false,msg.c_str()); - } - } - if (!is_struct_size_correct){ - std::string msg = "Compiler failed to properly add padding to structure with atomic member variable?; Structure size:"+to_string(sizeof(AlignmentChecker<T>)) - + " atomic size:"+to_string(sizeof(tbb::atomic<T>)) + " type: " + to_string(name) +"\n"; - if (known_issue_condition) { - REPORT(("Known issue: "+ msg).c_str()); - }else{ - ASSERT(false,msg.c_str()); - } - } - - AlignmentChecker<T> array[5]; - for( int k=0; k<5; ++k ) { - bool is_member_variable_in_array_aligned = tbb::internal::is_aligned(&array[k].i,sizeof(T)); - if (!is_member_variable_in_array_aligned) { - std::string msg = "Compiler failed to properly align atomic member variable inside an array?; size:"+to_string(sizeof(T)) + " type:"+to_string(name) - + " location:" + to_string(&array[k].i) + "\n"; - if (known_issue_condition){ - REPORT(("Known issue: "+ msg).c_str()); - }else{ - ASSERT(false,msg.c_str()); - } - } - } -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( disable: 4146 ) -#endif - -/** T is an integral type. */ -template<typename T> -void TestAtomicInteger( const char* name ) { - REMARK("testing atomic<%s> (size=%d)\n",name,sizeof(tbb::atomic<T>)); - TestAlignment<T>(name); - TestOperations<T>(0L,T(-T(1)),T(1)); - for( int k=0; k<int(sizeof(long))*8-1; ++k ) { - TestOperations<T>(T(1L<<k),T(~(1L<<k)),T(1-(1L<<k))); - TestOperations<T>(T(-1L<<k),T(~(-1L<<k)),T(1-(-1L<<k))); - TestFetchAndAdd<T>(T(-1L<<k)); - } - TestParallel<T>( name ); -} - -namespace test_indirection_helpers { - template<typename T> - struct Foo { - T x, y, z; - }; -} - -template<typename T> -void TestIndirection() { - using test_indirection_helpers::Foo; - Foo<T> item; - tbb::atomic<Foo<T>*> pointer; - pointer = &item; - for( int k=-10; k<=10; ++k ) { - // Test various syntaxes for indirection to fields with non-zero offset. 
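-        // ('pointer->y' and '(*pointer).z' below exercise the atomic's
-        // operator-> and operator*, which must behave exactly like the same
-        // expressions on a plain Foo<T>*.)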
- T value1=T(), value2=T(); - for( size_t j=0; j<sizeof(T); ++j ) { - ((char*)&value1)[j] = char(k^j); - ((char*)&value2)[j] = char(k^j*j); - } - pointer->y = value1; - (*pointer).z = value2; - T result1 = (*pointer).y; - T result2 = pointer->z; - ASSERT( memcmp(&value1,&result1,sizeof(T))==0, NULL ); - ASSERT( memcmp(&value2,&result2,sizeof(T))==0, NULL ); - } - #if __TBB_ICC_BUILTIN_ATOMICS_POINTER_ALIASING_BROKEN - //prevent ICC compiler from assuming 'item' is unused and reusing it's storage - item.x = item.y=item.z; - #endif -} - -//! Test atomic<T*> -template<typename T> -void TestAtomicPointer() { - REMARK("testing atomic pointer (%d)\n",int(sizeof(T))); - T array[1000]; - TestOperations<T*>(&array[500],&array[250],&array[750]); - TestFetchAndAdd<T*>(&array[500]); - TestIndirection<T>(); - TestParallel<T*>( "pointer" ); - -} - -//! Test atomic<Ptr> where Ptr is a pointer to a type of unknown size -template<typename Ptr> -void TestAtomicPointerToTypeOfUnknownSize( const char* name ) { - REMARK("testing atomic<%s>\n",name); - char array[1000]; - TestOperations<Ptr>((Ptr)(void*)&array[500],(Ptr)(void*)&array[250],(Ptr)(void*)&array[750]); - TestParallel<Ptr>( name ); -} - -void TestAtomicBool() { - REMARK("testing atomic<bool>\n"); - TestOperations<bool>(true,true,false); - TestOperations<bool>(false,false,true); - TestParallel<bool>( "bool" ); -} - -template<typename EnumType> -struct HasImplicitConversionToInt { - typedef bool yes; - typedef int no; - __TBB_STATIC_ASSERT( sizeof(yes) != sizeof(no), "The helper needs two types of different sizes to work." ); - - static yes detect( int ); - static no detect( ... ); - - enum { value = (sizeof(yes) == sizeof(detect( EnumType() ))) }; -}; - -enum Color {Red=0,Green=1,Blue=-1}; - -void TestAtomicEnum() { - REMARK("testing atomic<Color>\n"); - TestOperations<Color>(Red,Green,Blue); - TestParallel<Color>( "Color" ); - __TBB_STATIC_ASSERT( HasImplicitConversionToInt< tbb::atomic<Color> >::value, "The implicit conversion is expected." ); -} - -#if __TBB_SCOPED_ENUM_PRESENT -enum class ScopedColor1 {ScopedRed,ScopedGreen,ScopedBlue=-1}; -// TODO: extend the test to cover 2 byte scoped enum as well -#if __TBB_ICC_SCOPED_ENUM_WITH_UNDERLYING_TYPE_NEGATIVE_VALUE_BROKEN -enum class ScopedColor2 : char {ScopedZero, ScopedOne,ScopedRed=42,ScopedGreen=-1,ScopedBlue=127}; -#else -enum class ScopedColor2 : char {ScopedZero, ScopedOne,ScopedRed=-128,ScopedGreen=-1,ScopedBlue=127}; -#endif - -// TODO: replace the hack of getting symbolic enum name with a better implementation -std::string enum_strings[] = {"ScopedZero","ScopedOne","ScopedRed","ScopedGreen","ScopedBlue"}; -template<> -std::string to_string<ScopedColor1>(const ScopedColor1& a){ - return enum_strings[a==ScopedColor1::ScopedBlue? 4 : (int)a+2]; -} -template<> -std::string to_string<ScopedColor2>(const ScopedColor2& a){ - return enum_strings[a==ScopedColor2::ScopedRed? 2 : - a==ScopedColor2::ScopedGreen? 3 : a==ScopedColor2::ScopedBlue? 
4 : (int)a ]; -} - -void TestAtomicScopedEnum() { - REMARK("testing atomic<ScopedColor>\n"); - TestOperations<ScopedColor1>(ScopedColor1::ScopedRed,ScopedColor1::ScopedGreen,ScopedColor1::ScopedBlue); - TestParallel<ScopedColor1>( "ScopedColor1" ); -#if __TBB_ICC_SCOPED_ENUM_WITH_UNDERLYING_TYPE_ATOMIC_LOAD_BROKEN - REPORT("Known issue: the operation tests for a scoped enum with a specified underlying type are skipped.\n"); -#else - TestOperations<ScopedColor2>(ScopedColor2::ScopedRed,ScopedColor2::ScopedGreen,ScopedColor2::ScopedBlue); - TestParallel<ScopedColor2>( "ScopedColor2" ); -#endif - __TBB_STATIC_ASSERT( !HasImplicitConversionToInt< tbb::atomic<ScopedColor1> >::value, "The implicit conversion is not expected." ); - __TBB_STATIC_ASSERT( !HasImplicitConversionToInt< tbb::atomic<ScopedColor1> >::value, "The implicit conversion is not expected." ); - __TBB_STATIC_ASSERT( sizeof(tbb::atomic<ScopedColor1>) == sizeof(ScopedColor1), "tbb::atomic instantiated with scoped enum should have the same size as scoped enum." ); - __TBB_STATIC_ASSERT( sizeof(tbb::atomic<ScopedColor2>) == sizeof(ScopedColor2), "tbb::atomic instantiated with scoped enum should have the same size as scoped enum." ); -} -#endif /* __TBB_SCOPED_ENUM_PRESENT */ - -template<typename T> -void TestAtomicFloat( const char* name ) { - REMARK("testing atomic<%s>\n", name ); - TestAlignment<T>(name); - TestOperations<T>(0.5,3.25,10.75); - TestParallel<T>( name ); -} - -#define __TBB_TEST_GENERIC_PART_WORD_CAS (__TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED) -#if __TBB_TEST_GENERIC_PART_WORD_CAS -void TestEndianness() { - // Test for pure endianness (assumed by simpler probe in __TBB_MaskedCompareAndSwap()). - bool is_big_endian = true, is_little_endian = true; - const tbb::internal::uint32_t probe = 0x03020100; - ASSERT (tbb::internal::is_aligned(&probe,4), NULL); - for( const char *pc_begin = reinterpret_cast<const char*>(&probe) - , *pc = pc_begin, *pc_end = pc_begin + sizeof(probe) - ; pc != pc_end; ++pc) { - if (*pc != pc_end-1-pc) is_big_endian = false; - if (*pc != pc-pc_begin) is_little_endian = false; - } - ASSERT (!is_big_endian || !is_little_endian, NULL); - #if __TBB_ENDIANNESS==__TBB_ENDIAN_DETECT - ASSERT (is_big_endian || is_little_endian, "__TBB_ENDIANNESS should be set to __TBB_ENDIAN_UNSUPPORTED"); - #elif __TBB_ENDIANNESS==__TBB_ENDIAN_BIG - ASSERT (is_big_endian, "__TBB_ENDIANNESS should NOT be set to __TBB_ENDIAN_BIG"); - #elif __TBB_ENDIANNESS==__TBB_ENDIAN_LITTLE - ASSERT (is_little_endian, "__TBB_ENDIANNESS should NOT be set to __TBB_ENDIAN_LITTLE"); - #elif __TBB_ENDIANNESS==__TBB_ENDIAN_UNSUPPORTED - #error Generic implementation of part-word CAS may not be used: unsupported endianness - #else - #error Unexpected value of __TBB_ENDIANNESS - #endif -} - -namespace masked_cas_helpers { - const int numMaskedOperations = 100000; - const int testSpaceSize = 8; - int prime[testSpaceSize] = {3,5,7,11,13,17,19,23}; - - template<typename T> - class TestMaskedCAS_Body: NoAssign { - T* test_space_uncontended; - T* test_space_contended; - public: - TestMaskedCAS_Body( T* _space1, T* _space2 ) : test_space_uncontended(_space1), test_space_contended(_space2) {} - void operator()( int my_idx ) const { - using tbb::internal::__TBB_MaskedCompareAndSwap; - const volatile T my_prime = T(prime[my_idx]); // 'volatile' prevents erroneous optimizations by SunCC - T* const my_ptr = test_space_uncontended+my_idx; - T old_value=0; - for( int i=0; i<numMaskedOperations; ++i, old_value+=my_prime ){ - T result; - // Test uncontended 
case - T new_value = old_value + my_prime; - // The following CAS should always fail - result = __TBB_MaskedCompareAndSwap<T>(my_ptr,new_value,old_value-1); - ASSERT(result!=old_value-1, "masked CAS succeeded while it should fail"); - ASSERT(result==*my_ptr, "masked CAS result mismatch with real value"); - // The following one should succeed - result = __TBB_MaskedCompareAndSwap<T>(my_ptr,new_value,old_value); - ASSERT(result==old_value && *my_ptr==new_value, "masked CAS failed while it should succeed"); - // The following one should fail again - result = __TBB_MaskedCompareAndSwap<T>(my_ptr,new_value,old_value); - ASSERT(result!=old_value, "masked CAS succeeded while it should fail"); - ASSERT(result==*my_ptr, "masked CAS result mismatch with real value"); - // Test contended case - for( int j=0; j<testSpaceSize; ++j ){ - // try adding my_prime until success - T value; - do { - value = test_space_contended[j]; - result = __TBB_MaskedCompareAndSwap<T>(test_space_contended+j,value+my_prime,value); - } while( result!=value ); - } - } - } - }; - - template<typename T> - struct intptr_as_array_of - { - static const int how_many_Ts = sizeof(intptr_t)/sizeof(T); - union { - intptr_t result; - T space[ how_many_Ts ]; - }; - }; - - template<typename T> - intptr_t getCorrectUncontendedValue(int slot_idx) { - intptr_as_array_of<T> slot; - slot.result = 0; - for( int i=0; i<slot.how_many_Ts; ++i ) { - const T my_prime = T(prime[slot_idx*slot.how_many_Ts + i]); - for( int j=0; j<numMaskedOperations; ++j ) - slot.space[i] += my_prime; - } - return slot.result; - } - - template<typename T> - intptr_t getCorrectContendedValue() { - intptr_as_array_of<T> slot; - slot.result = 0; - for( int i=0; i<slot.how_many_Ts; ++i ) - for( int primes=0; primes<testSpaceSize; ++primes ) - for( int j=0; j<numMaskedOperations; ++j ) - slot.space[i] += prime[primes]; - return slot.result; - } -} // namespace masked_cas_helpers - -template<typename T> -void TestMaskedCAS() { - using namespace masked_cas_helpers; - REMARK("testing masked CAS<%d>\n",int(sizeof(T))); - - const int num_slots = sizeof(T)*testSpaceSize/sizeof(intptr_t); - intptr_t arr1[num_slots+2]; // two more "canary" slots at boundaries - intptr_t arr2[num_slots+2]; - for(int i=0; i<num_slots+2; ++i) - arr2[i] = arr1[i] = 0; - T* test_space_uncontended = (T*)(arr1+1); - T* test_space_contended = (T*)(arr2+1); - - NativeParallelFor( testSpaceSize, TestMaskedCAS_Body<T>(test_space_uncontended, test_space_contended) ); - - ASSERT( arr1[0]==0 && arr1[num_slots+1]==0 && arr2[0]==0 && arr2[num_slots+1]==0 , "adjacent memory was overwritten" ); - const intptr_t correctContendedValue = getCorrectContendedValue<T>(); - for(int i=0; i<num_slots; ++i) { - ASSERT( arr1[i+1]==getCorrectUncontendedValue<T>(i), "unexpected value in an uncontended slot" ); - ASSERT( arr2[i+1]==correctContendedValue, "unexpected value in a contended slot" ); - } -} -#endif // __TBB_TEST_GENERIC_PART_WORD_CAS - -template <typename T> -class TestRelaxedLoadStorePlainBody { - static T s_turn, - s_ready; - -public: - static unsigned s_count1, - s_count2; - - void operator() ( int id ) const { - using tbb::internal::__TBB_load_relaxed; - using tbb::internal::__TBB_store_relaxed; - - if ( id == 0 ) { - while ( !__TBB_load_relaxed(s_turn) ) { - ++s_count1; - __TBB_store_relaxed(s_ready, 1); - } - } - else { - while ( !__TBB_load_relaxed(s_ready) ) { - ++s_count2; - continue; - } - __TBB_store_relaxed(s_turn, 1); - } - } -}; // class TestRelaxedLoadStorePlainBody<T> - -template <typename T> T 
TestRelaxedLoadStorePlainBody<T>::s_turn = 0; -template <typename T> T TestRelaxedLoadStorePlainBody<T>::s_ready = 0; -template <typename T> unsigned TestRelaxedLoadStorePlainBody<T>::s_count1 = 0; -template <typename T> unsigned TestRelaxedLoadStorePlainBody<T>::s_count2 = 0; - -template <typename T> -class TestRelaxedLoadStoreAtomicBody { - static tbb::atomic<T> s_turn, - s_ready; - -public: - static unsigned s_count1, - s_count2; - - void operator() ( int id ) const { - if ( id == 0 ) { - while ( s_turn.template load<tbb::relaxed>() == 0 ) { - ++s_count1; - s_ready.template store<tbb::relaxed>(1); - } - } - else { - while ( s_ready.template load<tbb::relaxed>() == 0 ) { - ++s_count2; - continue; - } - s_turn.template store<tbb::relaxed>(1); - } - } -}; // class TestRelaxedLoadStoreAtomicBody<T> - -template <typename T> tbb::atomic<T> TestRelaxedLoadStoreAtomicBody<T>::s_turn; -template <typename T> tbb::atomic<T> TestRelaxedLoadStoreAtomicBody<T>::s_ready; -template <typename T> unsigned TestRelaxedLoadStoreAtomicBody<T>::s_count1 = 0; -template <typename T> unsigned TestRelaxedLoadStoreAtomicBody<T>::s_count2 = 0; - -template <typename T> -void TestRegisterPromotionSuppression () { - REMARK("testing register promotion suppression (size=%d)\n", (int)sizeof(T)); - NativeParallelFor( 2, TestRelaxedLoadStorePlainBody<T>() ); - NativeParallelFor( 2, TestRelaxedLoadStoreAtomicBody<T>() ); -} - -template<unsigned N> -class ArrayElement { - char item[N]; -}; - -#include "harness_barrier.h" -namespace bit_operation_test_suite{ - struct fixture : NoAssign{ - static const uintptr_t zero = 0; - const uintptr_t random_value ; - const uintptr_t inverted_random_value ; - fixture(): - random_value (tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value), - inverted_random_value ( ~random_value) - {} - }; - - struct TestAtomicORSerially : fixture { - void operator()(){ - //these additional variables are needed to get a more meaningful expression in the assert - uintptr_t initial_value = zero; - uintptr_t atomic_or_result = initial_value; - uintptr_t atomic_or_operand = random_value; - - __TBB_AtomicOR(&atomic_or_result,atomic_or_operand); - - ASSERT(atomic_or_result == (initial_value | atomic_or_operand),"AtomicOR should do the OR operation"); - } - }; - struct TestAtomicANDSerially : fixture { - void operator()(){ - //these additional variables are needed to get a more meaningful expression in the assert - uintptr_t initial_value = inverted_random_value; - uintptr_t atomic_and_result = initial_value; - uintptr_t atomic_and_operand = random_value; - - __TBB_AtomicAND(&atomic_and_result,atomic_and_operand); - - ASSERT(atomic_and_result == (initial_value & atomic_and_operand),"AtomicAND should do the AND operation"); - } - }; - - struct TestAtomicORandANDConcurrently : fixture { - static const uintptr_t bit_per_word = sizeof(uintptr_t) * 8; - static const uintptr_t threads_number = bit_per_word; - Harness::SpinBarrier m_barrier; - uintptr_t bitmap; - TestAtomicORandANDConcurrently():bitmap(zero) {} - - struct thread_body{ - TestAtomicORandANDConcurrently* test; - thread_body(TestAtomicORandANDConcurrently* the_test) : test(the_test) {} - void operator()(int thread_index)const{ - const uintptr_t single_bit_mask = ((uintptr_t)1u) << (thread_index % bit_per_word); - test->m_barrier.wait(); - static const char* error_msg = "AtomicOR and AtomicAND should be atomic"; - for (uintptr_t attempts=0; attempts<1000; attempts++ ){ - //Set and clear designated bits in a word.
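//If __TBB_AtomicOR or __TBB_AtomicAND were implemented as a plain
//(non-atomic) read-modify-write, two threads updating different bits of
//the shared word could overwrite each other's stores; because every
//thread owns a distinct bit here, the acquire-load checks below would
//then observe a bit value other than the one this thread just wrote.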
- __TBB_AtomicOR(&test->bitmap,single_bit_mask); - __TBB_Yield(); - bool the_bit_is_set_after_set_via_atomic_or = ((__TBB_load_with_acquire(test->bitmap) & single_bit_mask )== single_bit_mask); - ASSERT(the_bit_is_set_after_set_via_atomic_or,error_msg); - - __TBB_AtomicAND(&test->bitmap,~single_bit_mask); - __TBB_Yield(); - bool the_bit_is_clear_after_clear_via_atomic_and = ((__TBB_load_with_acquire(test->bitmap) & single_bit_mask )== zero); - ASSERT(the_bit_is_clear_after_clear_via_atomic_and,error_msg); - } - } - }; - void operator()(){ - m_barrier.initialize(threads_number); - NativeParallelFor(threads_number,thread_body(this)); - } - }; -} -void TestBitOperations(){ - using namespace bit_operation_test_suite; - TestAtomicORSerially()(); - TestAtomicANDSerially()(); - TestAtomicORandANDConcurrently()(); -} - -int TestMain () { -# if __TBB_ATOMIC_CTORS - TestConstExprInitializationOfGlobalObjects(); -# endif //__TBB_ATOMIC_CTORS -# if __TBB_64BIT_ATOMICS && !__TBB_CAS_8_CODEGEN_BROKEN - TestAtomicInteger<unsigned long long>("unsigned long long"); - TestAtomicInteger<long long>("long long"); -# elif __TBB_CAS_8_CODEGEN_BROKEN - REPORT("Known issue: compiler generates incorrect code for 64-bit atomics on this configuration\n"); -# else - REPORT("64-bit atomics not supported\n"); - ASSERT(sizeof(long long)==8, "type long long is not 64 bits"); -# endif - TestAtomicInteger<unsigned long>("unsigned long"); - TestAtomicInteger<long>("long"); - TestAtomicInteger<unsigned int>("unsigned int"); - TestAtomicInteger<int>("int"); - TestAtomicInteger<unsigned short>("unsigned short"); - TestAtomicInteger<short>("short"); - TestAtomicInteger<signed char>("signed char"); - TestAtomicInteger<unsigned char>("unsigned char"); - TestAtomicInteger<char>("char"); - TestAtomicInteger<wchar_t>("wchar_t"); - TestAtomicInteger<size_t>("size_t"); - TestAtomicInteger<ptrdiff_t>("ptrdiff_t"); - TestAtomicPointer<ArrayElement<1> >(); - TestAtomicPointer<ArrayElement<2> >(); - TestAtomicPointer<ArrayElement<3> >(); - TestAtomicPointer<ArrayElement<4> >(); - TestAtomicPointer<ArrayElement<5> >(); - TestAtomicPointer<ArrayElement<6> >(); - TestAtomicPointer<ArrayElement<7> >(); - TestAtomicPointer<ArrayElement<8> >(); - TestAtomicPointerToTypeOfUnknownSize<IncompleteType*>( "IncompleteType*" ); - TestAtomicPointerToTypeOfUnknownSize<void*>( "void*" ); - TestAtomicBool(); - TestAtomicEnum(); -# if __TBB_SCOPED_ENUM_PRESENT - TestAtomicScopedEnum(); -# endif - TestAtomicFloat<float>("float"); -# if __TBB_64BIT_ATOMICS && !__TBB_CAS_8_CODEGEN_BROKEN - TestAtomicFloat<double>("double"); -# else - ASSERT(sizeof(double)==8, "type double is not 64 bits"); -# endif - ASSERT( !ParallelError, NULL ); -# if __TBB_TEST_GENERIC_PART_WORD_CAS - TestEndianness(); - ASSERT (sizeof(short)==2, NULL); - TestMaskedCAS<unsigned short>(); - TestMaskedCAS<short>(); - TestMaskedCAS<unsigned char>(); - TestMaskedCAS<signed char>(); - TestMaskedCAS<char>(); -# elif __TBB_USE_GENERIC_PART_WORD_CAS -# error Generic part-word CAS is enabled, but not covered by the test -# else - REPORT("Skipping test for generic part-word CAS\n"); -# endif -# if __TBB_64BIT_ATOMICS && !__TBB_CAS_8_CODEGEN_BROKEN - TestRegisterPromotionSuppression<tbb::internal::int64_t>(); -# endif - TestRegisterPromotionSuppression<tbb::internal::int32_t>(); - TestRegisterPromotionSuppression<tbb::internal::int16_t>(); - TestRegisterPromotionSuppression<tbb::internal::int8_t>(); - TestBitOperations(); - - return Harness::Done; -} - -template<typename T, bool aligned> -class 
AlignedAtomic: NoAssign { - //tbb::aligned_space cannot be used here, because internally it utilizes an align pragma/attribute, - //which has bugs with 8-byte alignment on ia32 with some compilers (see the corresponding ****_BROKEN macro) - // Allocate space big enough to always contain sizeof(T)-byte locations that are aligned and misaligned. - char raw_space[2*sizeof(T) -1]; -public: - tbb::atomic<T>& construct_atomic(){ - std::memset(&raw_space[0],0, sizeof(raw_space)); - uintptr_t delta = aligned ? 0 : sizeof(T)/2; - size_t index=sizeof(T)-1; - tbb::atomic<T>* y = reinterpret_cast<tbb::atomic<T>*>((reinterpret_cast<uintptr_t>(&raw_space[index+delta])&~index) - delta); - // Assertion checks that y really did end up somewhere inside "raw_space". - ASSERT( raw_space<=reinterpret_cast<char*>(y), "y starts before raw_space" ); - ASSERT( reinterpret_cast<char*>(y+1) <= raw_space+sizeof(raw_space), "y starts after raw_space" ); - ASSERT( !(aligned ^ tbb::internal::is_aligned(y,sizeof(T))), "y is not aligned as required" ); - new (y) tbb::atomic<T> (); - return *y; - } -}; - -template<typename T, bool aligned> -struct FlagAndMessage: AlignedAtomic<T,aligned> { - //! 0 if message not set yet, 1 if message is set. - tbb::atomic<T>& flag; - /** Force flag and message to be on distinct cache lines for machines with cache line size <= 4096 bytes */ - char pad[4096/sizeof(T)]; - //! Non-zero if message is ready - T message; - FlagAndMessage(): flag(FlagAndMessage::construct_atomic()) { - std::memset(pad,0,sizeof(pad)); - } -}; - -// A special template function used for summation. -// Actually it is only necessary because of its specializations below. -template<typename T> -T special_sum(intptr_t arg1, intptr_t arg2) { - return (T)((T)arg1 + arg2); -} - -// The specialization for IncompleteType* is required -// because pointer arithmetic (+) is impossible with IncompleteType* -template<> -IncompleteType* special_sum<IncompleteType*>(intptr_t arg1, intptr_t arg2) { - return (IncompleteType*)(arg1 + arg2); -} - -// The specialization for void* is required -// because pointer arithmetic (+) is impossible with void* -template<> -void* special_sum<void*>(intptr_t arg1, intptr_t arg2) { - return (void*)(arg1 + arg2); -} - -// The specialization for bool is required to shut up gratuitous compiler warnings, -// because some compilers warn about casting int to bool.
-template<> -bool special_sum<bool>(intptr_t arg1, intptr_t arg2) { - return ((arg1!=0) + arg2)!=0; -} - -#if __TBB_SCOPED_ENUM_PRESENT -// The specialization for scoped enumerators is required -// because scoped enumerators prohibit implicit conversion to int -template<> -ScopedColor1 special_sum<ScopedColor1>(intptr_t arg1, intptr_t arg2) { - return (ScopedColor1)(arg1 + arg2); -} -template<> -ScopedColor2 special_sum<ScopedColor2>(intptr_t arg1, intptr_t arg2) { - return (ScopedColor2)(arg1 + arg2); -} -#endif - -volatile int One = 1; - -inline bool IsRelaxed ( LoadStoreExpression e ) { - return e == UseExplicitRelaxed || e == UseGlobalHelperRelaxed; -} - -template <typename T, LoadStoreExpression E> -struct LoadStoreTraits; - -template <typename T> -struct LoadStoreTraits<T, UseOperators> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = src; } - static void store ( tbb::atomic<T>& dst, const T& src ) { dst = src; } -}; - -template <typename T> -struct LoadStoreTraits<T, UseImplicitAcqRel> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = src.load(); } - static void store ( tbb::atomic<T>& dst, const T& src ) { dst.store(src); } -}; - -template <typename T> -struct LoadStoreTraits<T, UseExplicitFullyFenced> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = src.template load<tbb::full_fence>(); } - static void store ( tbb::atomic<T>& dst, const T& src ) { dst.template store<tbb::full_fence>(src); } -}; - -template <typename T> -struct LoadStoreTraits<T, UseExplicitAcqRel> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = src.template load<tbb::acquire>(); } - static void store ( tbb::atomic<T>& dst, const T& src ) { dst.template store<tbb::release>(src); } -}; - -template <typename T> -struct LoadStoreTraits<T, UseExplicitRelaxed> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = src.template load<tbb::relaxed>(); } - static void store ( tbb::atomic<T>& dst, const T& src ) { dst.template store<tbb::relaxed>(src); } -}; - -template <typename T> -struct LoadStoreTraits<T, UseGlobalHelperFullyFenced> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = tbb::load<tbb::full_fence>(src); } - static void store ( tbb::atomic<T>& dst, const T& src ) { tbb::store<tbb::full_fence>(dst, src); } -}; - -template <typename T> -struct LoadStoreTraits<T, UseGlobalHelperAcqRel> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = tbb::load<tbb::acquire>(src); } - static void store ( tbb::atomic<T>& dst, const T& src ) { tbb::store<tbb::release>(dst, src); } -}; - -template <typename T> -struct LoadStoreTraits<T, UseGlobalHelperRelaxed> { - static void load ( T& dst, const tbb::atomic<T>& src ) { dst = tbb::load<tbb::relaxed>(src); } - static void store ( tbb::atomic<T>& dst, const T& src ) { tbb::store<tbb::relaxed>(dst, src); } -}; - -template<typename T, bool aligned, LoadStoreExpression E> -struct HammerLoadAndStoreFence: NoAssign { - typedef FlagAndMessage<T,aligned> fam_type; -private: - typedef LoadStoreTraits<T, E> trait; - fam_type* fam; - const int n; - const int p; - const int trial; - const char* name; - mutable T accum; -public: - HammerLoadAndStoreFence( fam_type* fam_, int n_, int p_, const char* name_, int trial_ ) : fam(fam_), n(n_), p(p_), trial(trial_), name(name_) {} - void operator()( int k ) const { - int one = One; - fam_type* s = fam+k; - fam_type* s_next = fam + (k+1)%p; - for( int i=0; i<n; ++i ) { - // The inner for loop is a spin-wait loop, which is normally 
considered very bad style. - // But we must use it here because we are interested in examining subtle hardware effects. - for(unsigned short cnt=1; ; ++cnt) { - if( !(cnt%1024) ) // to help 1-core or oversubscribed systems complete the test, yield every 2^10 iterations - __TBB_Yield(); - // Compilers typically generate non-trivial sequence for division by a constant. - // The expression here is dependent on the loop index i, so it cannot be hoisted. - #define COMPLICATED_ZERO (i*(one-1)/100) - // Read flag and then the message - T flag, message; - if( trial&1 ) { - // COMPLICATED_ZERO here tempts compiler to hoist load of message above reading of flag. - trait::load( flag, (s+COMPLICATED_ZERO)->flag ); - message = s->message; - } else { - trait::load( flag, s->flag ); - message = s->message; - } - if ( flag != T(0) ) { - if( flag!=(T)-1 ) { - REPORT("ERROR: flag!=(T)-1 k=%d i=%d trial=%x type=%s (atomicity problem?)\n", k, i, trial, name ); - ParallelError = true; - } - if( !IsRelaxed(E) && message!=(T)-1 ) { - REPORT("ERROR: message!=(T)-1 k=%d i=%d trial=%x type=%s mode=%d (memory fence problem?)\n", k, i, trial, name, E ); - ParallelError = true; - } - s->message = T(0); - trait::store( s->flag, T(0) ); - // Prevent deadlock possible in relaxed mode because of store(0) - // to the first thread's flag being reordered after the last - // thread's store(-1) into it. - if ( IsRelaxed(E) ) { - while( s_next->flag.template load<tbb::relaxed>() != T(0) ) - __TBB_Yield(); - } - else - ASSERT( s_next->flag == T(0), NULL ); - // Set message and then the flag - if( trial&2 ) { - // COMPLICATED_ZERO here tempts compiler to sink store below setting of flag - s_next->message = special_sum<T>(-1, COMPLICATED_ZERO); - trait::store( s_next->flag, (T)-1 ); - } else { - s_next->message = (T)-1; - trait::store( s_next->flag, (T)-1 ); - } - break; - } else { - // Force compiler to use message anyway, so it cannot sink read of s->message below the if. - accum = message; - } - } - } - } -}; - -//! Test that atomic<T> has acquire semantics for loads and release semantics for stores. -/** Test performs round-robin passing of message among p processors, - where p goes from MinThread to MaxThread. */ -template<typename T, bool aligned, LoadStoreExpression E> -void TestLoadAndStoreFences( const char* name ) { - typedef HammerLoadAndStoreFence<T, aligned, E> hammer_load_store_type; - typedef typename hammer_load_store_type::fam_type fam_type; - for( int p=MinThread<2 ? 2 : MinThread; p<=MaxThread; ++p ) { - fam_type * fam = new fam_type[p]; - // Each of four trials exercise slightly different expression pattern within the test. - // See occurrences of COMPLICATED_ZERO for details. - for( int trial=0; trial<4; ++trial ) { - fam->message = (T)-1; - fam->flag = (T)-1; - NativeParallelFor( p, hammer_load_store_type( fam, 100, p, name, trial ) ); - if ( !IsRelaxed(E) ) { - for( int k=0; k<p; ++k ) { - ASSERT( fam[k].message==(k==0 ? (T)-1 : T(0)), "incomplete round-robin?" ); - ASSERT( fam[k].flag==(k==0 ? (T)-1 : T(0)), "incomplete round-robin?" ); - } - } - } - delete[] fam; - } -} - -//! Sparse set of values of integral type T. -/** Set is designed so that if a value is read or written non-atomically, - the resulting intermediate value is likely to not be a member of the set. */ -template<typename T> -class SparseValueSet { - T factor; -public: - SparseValueSet() { - // Compute factor such that: - // 1. It has at least one 1 in most of its bytes. - // 2. The bytes are typically different. - // 3. 
When multiplied by any value <=127, the product does not overflow. - factor = T(0); - for( unsigned i=0; i<sizeof(T)*8-7; i+=7 ) - factor = T(factor | T(1)<<i); - } - //! Get ith member of set - T get( int i ) const { - // Create multiple of factor. The & prevents overflow of the product. - return T((i&0x7F)*factor); - } - //! True if set contains x - bool contains( T x ) const { - // True if x is a multiple of factor. - return (x%factor)==0; - } -}; - -//! Specialization for pointer types. The pointers are random and should not be dereferenced. -template<typename T> -class SparseValueSet<T*> { - SparseValueSet<ptrdiff_t> my_set; -public: - T* get( int i ) const {return reinterpret_cast<T*>(my_set.get(i));} - bool contains( T* x ) const {return my_set.contains(reinterpret_cast<ptrdiff_t>(x));} -}; - -//! Specialization for bool. -/** Checking bool for atomic read/write is pointless in practice, because - there is no way to *not* atomically read or write a bool value. */ -template<> -class SparseValueSet<bool> { -public: - bool get( int i ) const {return i&1;} - bool contains( bool ) const {return true;} -}; - -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include <limits> /* Need std::numeric_limits */ -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif - -//! Commonality inherited by specializations for floating-point types. -template<typename T> -class SparseFloatSet: NoAssign { - const T epsilon; -public: - SparseFloatSet() : epsilon(std::numeric_limits<T>::epsilon()) {} - T get( int i ) const { - return i==0 ? T(0) : 1/T((i&0x7F)+1); - } - bool contains( T x ) const { - if( x==T(0) ) { - return true; - } else { - int j = int(1/x+T(0.5)); - if( 0<j && j<=128 ) { - T error = x*T(j)-T(1); - // In the calculation above, if x was indeed generated by method get, the error should be - // at most epsilon, because x is off by at most 1/2 ulp from its infinitely precise value, - // j is exact, and the multiplication incurs at most another 1/2 ulp of round-off error. - if( -epsilon<=error && error<=epsilon ) { - return true; - } else { - REPORT("Warning: excessive floating-point error encountered j=%d x=%.15g error=%.15g\n",j,x,error); - } - } - return false; - } - }; -}; - -template<> -class SparseValueSet<float>: public SparseFloatSet<float> {}; - -template<> -class SparseValueSet<double>: public SparseFloatSet<double> {}; - -#if __TBB_SCOPED_ENUM_PRESENT -//! Commonality inherited by specializations for scoped enumerator types. -template<typename EnumType> -class SparseEnumValueSet { -public: - EnumType get( int i ) const {return i%3==0 ? EnumType::ScopedRed : i%3==1 ?
EnumType::ScopedGreen : EnumType::ScopedBlue;} - bool contains( EnumType e ) const {return e==EnumType::ScopedRed || e==EnumType::ScopedGreen || e==EnumType::ScopedBlue;} -}; -template<> -class SparseValueSet<ScopedColor1> : public SparseEnumValueSet<ScopedColor1> {}; -template<> -class SparseValueSet<ScopedColor2> : public SparseEnumValueSet<ScopedColor2> {}; -#endif - -template<typename T, bool aligned> -class HammerAssignment: AlignedAtomic<T,aligned> { - tbb::atomic<T>& x; - const char* name; - SparseValueSet<T> set; -public: - HammerAssignment(const char* name_ ) : x(HammerAssignment::construct_atomic()), name(name_) { - x = set.get(0); - } - void operator()( int k ) const { - const int n = 1000000; - if( k ) { - tbb::atomic<T> z; - AssertSameType( z=x, z ); // Check that return type from assignment is correct - for( int i=0; i<n; ++i ) { - // Read x atomically into z. - z = x; - if( !set.contains(z) ) { - REPORT("ERROR: assignment of atomic<%s> is not atomic\n", name); - ParallelError = true; - return; - } - } - } else { - tbb::atomic<T> y; - for( int i=0; i<n; ++i ) { - // Get pseudo-random value. - y = set.get(i); - // Write y atomically into x. - x = y; - } - } - } -}; - -// Compile-time check that a class method has the required signature. -// Intended to check the assignment operator of tbb::atomic. -template<typename T> void TestAssignmentSignature( T& (T::*)(const T&) ) {} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( disable: 4355 4800 ) -#endif - -template<typename T, bool aligned> -void TestAssignment( const char* name ) { - TestAssignmentSignature( &tbb::atomic<T>::operator= ); - NativeParallelFor( 2, HammerAssignment<T,aligned>(name ) ); -} - -template <typename T, bool aligned, LoadStoreExpression E> -class DekkerArbitrationBody : NoAssign, Harness::NoAfterlife { - typedef LoadStoreTraits<T, E> trait; - - mutable Harness::FastRandom my_rand; - static const unsigned short c_rand_ceil = 10; - mutable AlignedAtomic<T,aligned> s_ready_storage[2]; - mutable AlignedAtomic<T,aligned> s_turn_storage; - mutable tbb::atomic<T>* s_ready[2]; - tbb::atomic<T>& s_turn; - mutable volatile bool s_inside; - -public: - void operator() ( int id ) const { - const int me = id; - const T other = (T)(uintptr_t)(1 - id), - cleared = T(0), - signaled = T(1); - for ( int i = 0; i < 100000; ++i ) { - trait::store( *s_ready[me], signaled ); - trait::store( s_turn, other ); - T r, t; - for ( int j = 0; ; ++j ) { - trait::load(r, *s_ready[(uintptr_t)other]); - trait::load(t, s_turn); - if ( r != signaled || t != other ) - break; - __TBB_Pause(1); - if ( j == 2<<12 ) { - j = 0; - __TBB_Yield(); - } - } - // Entered critical section - ASSERT( !s_inside, "Peterson lock is broken - some fences are missing" ); - s_inside = true; - unsigned short spin = my_rand.get() % c_rand_ceil; - for ( volatile int j = 0; j < spin; ++j ) - continue; - s_inside = false; - ASSERT( !s_inside, "Peterson lock is broken - some fences are missing" ); - // leaving critical section - trait::store( *s_ready[me], cleared ); - spin = my_rand.get() % c_rand_ceil; - for ( volatile int j = 0; j < spin; ++j ) - continue; - } - } - - DekkerArbitrationBody () - : my_rand((unsigned)(uintptr_t)this) - , s_turn(s_turn_storage.construct_atomic()) - , s_inside (false) - { - //atomics pointed to by s_ready and s_turn will be zeroed by the - //according construct_atomic() calls - s_ready[0] = &s_ready_storage[0].construct_atomic(); - s_ready[1] = &s_ready_storage[1].construct_atomic(); - } -}; - -template <typename T, bool 
aligned, LoadStoreExpression E> -void TestDekkerArbitration () { - NativeParallelFor( 2, DekkerArbitrationBody<T,aligned, E>() ); -} - -template<typename T> -void TestParallel( const char* name ) { - //TODO: looks like there are no tests for operations other than load/store ? -#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN - if (sizeof(T)==8){ - TestLoadAndStoreFences<T, false, UseOperators>(name); - TestLoadAndStoreFences<T, false, UseImplicitAcqRel>(name); - TestLoadAndStoreFences<T, false, UseExplicitFullyFenced>(name); - TestLoadAndStoreFences<T, false, UseExplicitAcqRel>(name); - TestLoadAndStoreFences<T, false, UseExplicitRelaxed>(name); - TestLoadAndStoreFences<T, false, UseGlobalHelperFullyFenced>(name); - TestLoadAndStoreFences<T, false, UseGlobalHelperAcqRel>(name); - TestLoadAndStoreFences<T, false, UseGlobalHelperRelaxed>(name); - TestAssignment<T,false>(name); - TestDekkerArbitration<T, false, UseExplicitFullyFenced>(); - TestDekkerArbitration<T, false, UseGlobalHelperFullyFenced>(); - } -#endif - - TestLoadAndStoreFences<T, true, UseOperators>(name); - TestLoadAndStoreFences<T, true, UseImplicitAcqRel>(name); - TestLoadAndStoreFences<T, true, UseExplicitFullyFenced>(name); - TestLoadAndStoreFences<T, true, UseExplicitAcqRel>(name); - TestLoadAndStoreFences<T, true, UseExplicitRelaxed>(name); - TestLoadAndStoreFences<T, true, UseGlobalHelperFullyFenced>(name); - TestLoadAndStoreFences<T, true, UseGlobalHelperAcqRel>(name); - TestLoadAndStoreFences<T, true, UseGlobalHelperRelaxed>(name); - TestAssignment<T,true>(name); - TestDekkerArbitration<T, true, UseExplicitFullyFenced>(); - TestDekkerArbitration<T, true, UseGlobalHelperFullyFenced>(); -} - -#endif // __TBB_TEST_SKIP_PIC_MODE || __TBB_TEST_SKIP_BUILTINS_MODE diff --git a/src/tbb/src/test/test_blocked_range.cpp b/src/tbb/src/test/test_blocked_range.cpp deleted file mode 100644 index dd45eccd9..000000000 --- a/src/tbb/src/test/test_blocked_range.cpp +++ /dev/null @@ -1,188 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/blocked_range.h" -#include "harness_assert.h" - -// First test as much as we can without including other headers. -// Doing so should catch problems arising from failing to include headers. 
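// For orientation: the AbstractValueType machinery below exercises the
// minimal "value" concept that tbb::blocked_range<Value> relies on. A
// sketch of that concept, inferred from the operators this test defines
// (illustrative only, not an authoritative statement of the library API):
//
//     class Value {
//     public:
//         Value( const Value& );                           // copy construction
//         bool operator<( const Value& rhs ) const;        // ordering; decides empty()
//         std::size_t operator-( const Value& rhs ) const; // distance; decides size()
//         Value operator+( std::size_t offset ) const;     // advance; used by splitting
//     };
//
// Integral types and random-access iterators model this trivially; the
// deliberately opaque AbstractValueType below ensures blocked_range compiles
// with nothing more than these operations.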
- -class AbstractValueType { - AbstractValueType() {} - int value; -public: - friend AbstractValueType MakeAbstractValueType( int i ); - friend int GetValueOf( const AbstractValueType& v ) {return v.value;} -}; - -AbstractValueType MakeAbstractValueType( int i ) { - AbstractValueType x; - x.value = i; - return x; -} - -std::size_t operator-( const AbstractValueType& u, const AbstractValueType& v ) { - return GetValueOf(u) - GetValueOf(v); -} - -bool operator<( const AbstractValueType& u, const AbstractValueType& v ) { - return GetValueOf(u) < GetValueOf(v); -} - -AbstractValueType operator+( const AbstractValueType& u, std::size_t offset ) { - return MakeAbstractValueType(GetValueOf(u) + int(offset)); -} - -static void SerialTest() { - for( int x=-10; x<10; ++x ) - for( int y=-10; y<10; ++y ) { - AbstractValueType i = MakeAbstractValueType(x); - AbstractValueType j = MakeAbstractValueType(y); - for( std::size_t k=1; k<10; ++k ) { - typedef tbb::blocked_range<AbstractValueType> range_type; - range_type r( i, j, k ); - AssertSameType( r.empty(), true ); - AssertSameType( range_type::size_type(), std::size_t() ); - AssertSameType( static_cast<range_type::const_iterator*>(0), static_cast<AbstractValueType*>(0) ); - AssertSameType( r.begin(), MakeAbstractValueType(0) ); - AssertSameType( r.end(), MakeAbstractValueType(0) ); - ASSERT( r.empty()==(y<=x), NULL ); - ASSERT( r.grainsize()==k, NULL ); - if( x<=y ) { - AssertSameType( r.is_divisible(), true ); - ASSERT( r.is_divisible()==(std::size_t(y-x)>k), NULL ); - ASSERT( r.size()==std::size_t(y-x), NULL ); - if( r.is_divisible() ) { - tbb::blocked_range<AbstractValueType> r2(r,tbb::split()); - ASSERT( GetValueOf(r.begin())==x, NULL ); - ASSERT( GetValueOf(r.end())==GetValueOf(r2.begin()), NULL ); - ASSERT( GetValueOf(r2.end())==y, NULL ); - ASSERT( r.grainsize()==k, NULL ); - ASSERT( r2.grainsize()==k, NULL ); - } - } - } - } -} - -#include "tbb/parallel_for.h" -#include "harness.h" - -const int N = 1<<22; - -unsigned char Array[N]; - -struct Striker { - // Note: we use <int> here instead of <long> in order to test for Quad 407676 - void operator()( const tbb::blocked_range<int>& r ) const { - for( tbb::blocked_range<int>::const_iterator i=r.begin(); i!=r.end(); ++i ) - ++Array[i]; - } -}; - -void ParallelTest() { - for( int i=0; i<N; i=i<3 ? 
i+1 : i*3 ) { - const tbb::blocked_range<int> r( 0, i, 10 ); - tbb::parallel_for( r, Striker() ); - for( int k=0; k<N; ++k ) { - ASSERT( Array[k]==(k<i), NULL ); - Array[k] = 0; - } - } -} - -#if __TBB_RANGE_BASED_FOR_PRESENT -#include "test_range_based_for.h" -#include <functional> -void TestRangeBasedFor() { - using namespace range_based_for_support_tests; - REMARK("testing range based for loop compatibility \n"); - - size_t int_array[100] = {0}; - const size_t sequence_length = Harness::array_length(int_array); - - for (size_t i = 0; i < sequence_length; ++i) { - int_array[i] = i + 1; - } - - const tbb::blocked_range<size_t*> r(int_array, Harness::end(int_array), 1); - - ASSERT(range_based_for_accumulate<size_t>(r, std::plus<size_t>(), size_t(0)) == gauss_summ_of_int_sequence(sequence_length), "incorrect accumulated value generated via range based for ?"); -} -#endif //if __TBB_RANGE_BASED_FOR_PRESENT - -#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES - -void TestProportionalSplitOverflow() -{ - REMARK("Testing overflow during proportional split - "); - using tbb::blocked_range; - using tbb::proportional_split; - - blocked_range<size_t> r1(0, size_t(-1) / 2); - size_t size = r1.size(); - size_t begin = r1.begin(); - size_t end = r1.end(); - - proportional_split p(1, 3); - blocked_range<size_t> r2(r1, p); - - // overflow-free computation - size_t parts = p.left() + p.right(); - size_t int_part = size / parts; - size_t fraction = size - int_part * parts; // fraction < parts - size_t right_idx = int_part * p.right() + fraction * p.right() / parts + 1; - size_t newRangeBegin = end - right_idx; - - // Division in 'right_idx' very likely is inexact also. - size_t tolerance = 1; - size_t diff = (r2.begin() < newRangeBegin) ? (newRangeBegin - r2.begin()) : (r2.begin() - newRangeBegin); - bool is_split_correct = diff <= tolerance; - bool test_passed = (r1.begin() == begin && r1.end() == r2.begin() && is_split_correct && - r2.end() == end); - if (!test_passed) { - REPORT("Incorrect split of blocked range[%lu, %lu) into r1[%lu, %lu) and r2[%lu, %lu), " - "must be r1[%lu, %lu) and r2[%lu, %lu)\n", begin, end, r1.begin(), r1.end(), r2.begin(), r2.end(), begin, newRangeBegin, newRangeBegin, end); - ASSERT(test_passed, NULL); - } - REMARK("OK\n"); -} -#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ -//------------------------------------------------------------------------ -// Test driver -#include "tbb/task_scheduler_init.h" - -int TestMain () { - SerialTest(); - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - ParallelTest(); - } - - #if __TBB_RANGE_BASED_FOR_PRESENT - TestRangeBasedFor(); - #endif //if __TBB_RANGE_BASED_FOR_PRESENT - - #if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES - TestProportionalSplitOverflow(); - #endif - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_blocked_range2d.cpp b/src/tbb/src/test/test_blocked_range2d.cpp deleted file mode 100644 index b6ff616dc..000000000 --- a/src/tbb/src/test/test_blocked_range2d.cpp +++ /dev/null @@ -1,148 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/blocked_range2d.h" -#include "harness_assert.h" - -// First test as much as we can without including other headers. -// Doing so should catch problems arising from failing to include headers. - -template<typename Tag> -class AbstractValueType { - AbstractValueType() {} - int value; -public: - template<typename OtherTag> - friend AbstractValueType<OtherTag> MakeAbstractValueType( int i ); - - template<typename OtherTag> - friend int GetValueOf( const AbstractValueType<OtherTag>& v ) ; -}; - -template<typename Tag> -AbstractValueType<Tag> MakeAbstractValueType( int i ) { - AbstractValueType<Tag> x; - x.value = i; - return x; -} - -template<typename Tag> -int GetValueOf( const AbstractValueType<Tag>& v ) {return v.value;} - -template<typename Tag> -bool operator<( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) { - return GetValueOf(u)<GetValueOf(v); -} - -template<typename Tag> -std::size_t operator-( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) { - return GetValueOf(u)-GetValueOf(v); -} - -template<typename Tag> -AbstractValueType<Tag> operator+( const AbstractValueType<Tag>& u, std::size_t offset ) { - return MakeAbstractValueType<Tag>(GetValueOf(u)+int(offset)); -} - -struct RowTag {}; -struct ColTag {}; - -static void SerialTest() { - typedef AbstractValueType<RowTag> row_type; - typedef AbstractValueType<ColTag> col_type; - typedef tbb::blocked_range2d<row_type,col_type> range_type; - for( int rowx=-10; rowx<10; ++rowx ) { - for( int rowy=rowx; rowy<10; ++rowy ) { - row_type rowi = MakeAbstractValueType<RowTag>(rowx); - row_type rowj = MakeAbstractValueType<RowTag>(rowy); - for( int rowg=1; rowg<10; ++rowg ) { - for( int colx=-10; colx<10; ++colx ) { - for( int coly=colx; coly<10; ++coly ) { - col_type coli = MakeAbstractValueType<ColTag>(colx); - col_type colj = MakeAbstractValueType<ColTag>(coly); - for( int colg=1; colg<10; ++colg ) { - range_type r( rowi, rowj, rowg, coli, colj, colg ); - AssertSameType( r.is_divisible(), true ); - AssertSameType( r.empty(), true ); - AssertSameType( static_cast<range_type::row_range_type::const_iterator*>(0), static_cast<row_type*>(0) ); - AssertSameType( static_cast<range_type::col_range_type::const_iterator*>(0), static_cast<col_type*>(0) ); - AssertSameType( r.rows(), tbb::blocked_range<row_type>( rowi, rowj, 1 )); - AssertSameType( r.cols(), tbb::blocked_range<col_type>( coli, colj, 1 )); - ASSERT( r.empty()==(rowx==rowy||colx==coly), NULL ); - ASSERT( 
r.is_divisible()==(rowy-rowx>rowg||coly-colx>colg), NULL ); - if( r.is_divisible() ) { - range_type r2(r,tbb::split()); - if( GetValueOf(r2.rows().begin())==GetValueOf(r.rows().begin()) ) { - ASSERT( GetValueOf(r2.rows().end())==GetValueOf(r.rows().end()), NULL ); - ASSERT( GetValueOf(r2.cols().begin())==GetValueOf(r.cols().end()), NULL ); - } else { - ASSERT( GetValueOf(r2.cols().end())==GetValueOf(r.cols().end()), NULL ); - ASSERT( GetValueOf(r2.rows().begin())==GetValueOf(r.rows().end()), NULL ); - } - } - } - } - } - } - } - } -} - -#include "tbb/parallel_for.h" -#include "harness.h" - -const int N = 1<<10; - -unsigned char Array[N][N]; - -struct Striker { - // Note: we use <int> here instead of <long> in order to test for problems similar to Quad 407676 - void operator()( const tbb::blocked_range2d<int>& r ) const { - for( tbb::blocked_range<int>::const_iterator i=r.rows().begin(); i!=r.rows().end(); ++i ) - for( tbb::blocked_range<int>::const_iterator j=r.cols().begin(); j!=r.cols().end(); ++j ) - ++Array[i][j]; - } -}; - -void ParallelTest() { - for( int i=0; i<N; i=i<3 ? i+1 : i*3 ) { - for( int j=0; j<N; j=j<3 ? j+1 : j*3 ) { - const tbb::blocked_range2d<int> r( 0, i, 7, 0, j, 5 ); - tbb::parallel_for( r, Striker() ); - for( int k=0; k<N; ++k ) { - for( int l=0; l<N; ++l ) { - ASSERT( Array[k][l]==(k<i && l<j), NULL ); - Array[k][l] = 0; - } - } - } - } -} - -#include "tbb/task_scheduler_init.h" - -int TestMain () { - SerialTest(); - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - ParallelTest(); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_blocked_range3d.cpp b/src/tbb/src/test/test_blocked_range3d.cpp deleted file mode 100644 index 08ea70ae2..000000000 --- a/src/tbb/src/test/test_blocked_range3d.cpp +++ /dev/null @@ -1,179 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/blocked_range3d.h" -#include "harness_assert.h" - -// First test as much as we can without including other headers. -// Doing so should catch problems arising from failing to include headers. 
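// For orientation: blocked_range3d composes three one-dimensional ranges
// (pages, rows, cols) and splits recursively along whichever dimension is
// proportionally largest relative to its grainsize, so a parallel_for body
// receives rectangular boxes. A minimal (hypothetical) body over such a box:
//
//     struct Body {
//         void operator()( const tbb::blocked_range3d<int>& r ) const {
//             for( int i = r.pages().begin(); i != r.pages().end(); ++i )
//                 for( int j = r.rows().begin(); j != r.rows().end(); ++j )
//                     for( int k = r.cols().begin(); k != r.cols().end(); ++k )
//                         ; // visit cell (i,j,k)
//         }
//     };
//
// The Striker functor later in this file is exactly such a body; the test
// then verifies that every cell of the box was visited exactly once.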
- -template<typename Tag> -class AbstractValueType { - AbstractValueType() {} - int value; -public: - template<typename OtherTag> - friend AbstractValueType<OtherTag> MakeAbstractValueType( int i ); - - template<typename OtherTag> - friend int GetValueOf( const AbstractValueType<OtherTag>& v ) ; -}; - -template<typename Tag> -AbstractValueType<Tag> MakeAbstractValueType( int i ) { - AbstractValueType<Tag> x; - x.value = i; - return x; -} - -template<typename Tag> -int GetValueOf( const AbstractValueType<Tag>& v ) {return v.value;} - -template<typename Tag> -bool operator<( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) { - return GetValueOf(u)<GetValueOf(v); -} - -template<typename Tag> -std::size_t operator-( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) { - return GetValueOf(u)-GetValueOf(v); -} - -template<typename Tag> -AbstractValueType<Tag> operator+( const AbstractValueType<Tag>& u, std::size_t offset ) { - return MakeAbstractValueType<Tag>(GetValueOf(u)+int(offset)); -} - -struct PageTag {}; -struct RowTag {}; -struct ColTag {}; - -static void SerialTest() { - typedef AbstractValueType<PageTag> page_type; - typedef AbstractValueType<RowTag> row_type; - typedef AbstractValueType<ColTag> col_type; - typedef tbb::blocked_range3d<page_type,row_type,col_type> range_type; - for( int pagex=-4; pagex<4; ++pagex ) { - for( int pagey=pagex; pagey<4; ++pagey ) { - page_type pagei = MakeAbstractValueType<PageTag>(pagex); - page_type pagej = MakeAbstractValueType<PageTag>(pagey); - for( int pageg=1; pageg<4; ++pageg ) { - for( int rowx=-4; rowx<4; ++rowx ) { - for( int rowy=rowx; rowy<4; ++rowy ) { - row_type rowi = MakeAbstractValueType<RowTag>(rowx); - row_type rowj = MakeAbstractValueType<RowTag>(rowy); - for( int rowg=1; rowg<4; ++rowg ) { - for( int colx=-4; colx<4; ++colx ) { - for( int coly=colx; coly<4; ++coly ) { - col_type coli = MakeAbstractValueType<ColTag>(colx); - col_type colj = MakeAbstractValueType<ColTag>(coly); - for( int colg=1; colg<4; ++colg ) { - range_type r( pagei, pagej, pageg, rowi, rowj, rowg, coli, colj, colg ); - AssertSameType( r.is_divisible(), true ); - - AssertSameType( r.empty(), true ); - - AssertSameType( static_cast<range_type::page_range_type::const_iterator*>(0), static_cast<page_type*>(0) ); - AssertSameType( static_cast<range_type::row_range_type::const_iterator*>(0), static_cast<row_type*>(0) ); - AssertSameType( static_cast<range_type::col_range_type::const_iterator*>(0), static_cast<col_type*>(0) ); - - AssertSameType( r.pages(), tbb::blocked_range<page_type>( pagei, pagej, 1 )); - AssertSameType( r.rows(), tbb::blocked_range<row_type>( rowi, rowj, 1 )); - AssertSameType( r.cols(), tbb::blocked_range<col_type>( coli, colj, 1 )); - - ASSERT( r.empty()==(pagex==pagey||rowx==rowy||colx==coly), NULL ); - - ASSERT( r.is_divisible()==(pagey-pagex>pageg||rowy-rowx>rowg||coly-colx>colg), NULL ); - - if( r.is_divisible() ) { - range_type r2(r,tbb::split()); - if( (GetValueOf(r2.pages().begin())==GetValueOf(r.pages().begin())) && (GetValueOf(r2.rows().begin())==GetValueOf(r.rows().begin())) ) { - ASSERT( GetValueOf(r2.pages().end())==GetValueOf(r.pages().end()), NULL ); - ASSERT( GetValueOf(r2.rows().end())==GetValueOf(r.rows().end()), NULL ); - ASSERT( GetValueOf(r2.cols().begin())==GetValueOf(r.cols().end()), NULL ); - } else { - if ( (GetValueOf(r2.pages().begin())==GetValueOf(r.pages().begin())) && (GetValueOf(r2.cols().begin())==GetValueOf(r.cols().begin())) ) { - ASSERT( 
GetValueOf(r2.pages().end())==GetValueOf(r.pages().end()), NULL ); - ASSERT( GetValueOf(r2.cols().end())==GetValueOf(r.cols().end()), NULL ); - ASSERT( GetValueOf(r2.rows().begin())==GetValueOf(r.rows().end()), NULL ); - } else { - ASSERT( GetValueOf(r2.rows().end())==GetValueOf(r.rows().end()), NULL ); - ASSERT( GetValueOf(r2.cols().end())==GetValueOf(r.cols().end()), NULL ); - ASSERT( GetValueOf(r2.pages().begin())==GetValueOf(r.pages().end()), NULL ); - } - } - } - } - } - } - } - } - } - } - } - } -} - -#include "tbb/parallel_for.h" -#include "harness.h" - -const int N = 1<<5; - -unsigned char Array[N][N][N]; - -struct Striker { - // Note: we use <int> here instead of <long> in order to test for problems similar to Quad 407676 - void operator()( const tbb::blocked_range3d<int>& r ) const { - for( tbb::blocked_range<int>::const_iterator i=r.pages().begin(); i!=r.pages().end(); ++i ) - for( tbb::blocked_range<int>::const_iterator j=r.rows().begin(); j!=r.rows().end(); ++j ) - for( tbb::blocked_range<int>::const_iterator k=r.cols().begin(); k!=r.cols().end(); ++k ) - ++Array[i][j][k]; - } -}; - -void ParallelTest() { - for( int i=0; i<N; i=i<3 ? i+1 : i*3 ) { - for( int j=0; j<N; j=j<3 ? j+1 : j*3 ) { - for( int k=0; k<N; k=k<3 ? k+1 : k*3 ) { - const tbb::blocked_range3d<int> r( 0, i, 5, 0, j, 3, 0, k, 1 ); - tbb::parallel_for( r, Striker() ); - for( int l=0; l<N; ++l ) { - for( int m=0; m<N; ++m ) { - for( int n=0; n<N; ++n ) { - ASSERT( Array[l][m][n]==(l<i && m<j && n<k), NULL ); - Array[l][m][n] = 0; - } - } - } - } - } - } -} - -#include "tbb/task_scheduler_init.h" - -int TestMain () { - SerialTest(); - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - ParallelTest(); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_broadcast_node.cpp b/src/tbb/src/test/test_broadcast_node.cpp deleted file mode 100644 index fae0c39b4..000000000 --- a/src/tbb/src/test/test_broadcast_node.cpp +++ /dev/null @@ -1,340 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "harness.h" -#include "tbb/flow_graph.h" -#include "tbb/task.h" - -#include "tbb/atomic.h" - -const int N = 1000; -const int R = 4; - -class int_convertable_type : private NoAssign { - - int my_value; - -public: - - int_convertable_type( int v ) : my_value(v) {} - operator int() const { return my_value; } - -}; - - -template< typename T > -class counting_array_receiver : public tbb::flow::receiver<T> { - - tbb::atomic<size_t> my_counters[N]; - -public: - - counting_array_receiver() { - for (int i = 0; i < N; ++i ) - my_counters[i] = 0; - } - - size_t operator[]( int i ) { - size_t v = my_counters[i]; - return v; - } - - /* override */ tbb::task * try_put_task( const T &v ) { - ++my_counters[(int)v]; - return const_cast<tbb::task *>(tbb::flow::interface7::SUCCESSFULLY_ENQUEUED); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_predecessor(tbb::flow::sender<T> &) {} - /*override*/void internal_delete_built_predecessor(tbb::flow::sender<T> &) {} - /*override*/void copy_predecessors(std::vector<tbb::flow::sender<T>*> &) {} - /*override*/size_t predecessor_count() { return 0; } - /*override*/void reset_receiver(tbb::flow::reset_flags /*f*/) { } -#else - /*override*/void reset_receiver() { } -#endif - -}; - -template< typename T > -void test_serial_broadcasts() { - - tbb::flow::graph g; - tbb::flow::broadcast_node<T> b(g); - - for ( int num_receivers = 1; num_receivers < R; ++num_receivers ) { - counting_array_receiver<T> *receivers = new counting_array_receiver<T>[num_receivers]; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(b.successor_count() == 0, NULL); - ASSERT(b.predecessor_count() == 0, NULL); - typename tbb::flow::broadcast_node<T>::successor_vector_type my_succs; - b.copy_successors(my_succs); - ASSERT(my_succs.size() == 0, NULL); - typename tbb::flow::broadcast_node<T>::predecessor_vector_type my_preds; - b.copy_predecessors(my_preds); - ASSERT(my_preds.size() == 0, NULL); -#endif - - for ( int r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( b, receivers[r] ); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT( b.successor_count() == (size_t)num_receivers, NULL); -#endif - - for (int n = 0; n < N; ++n ) { - ASSERT( b.try_put( (T)n ), NULL ); - } - - for ( int r = 0; r < num_receivers; ++r ) { - for (int n = 0; n < N; ++n ) { - ASSERT( receivers[r][n] == 1, NULL ); - } - tbb::flow::remove_edge( b, receivers[r] ); - } - ASSERT( b.try_put( (T)0 ), NULL ); - for ( int r = 0; r < num_receivers; ++r ) - ASSERT( receivers[0][0] == 1, NULL ) ; - - delete [] receivers; - - } - -} - -template< typename T > -class native_body : private NoAssign { - - tbb::flow::broadcast_node<T> &my_b; - -public: - - native_body( tbb::flow::broadcast_node<T> &b ) : my_b(b) {} - - void operator()(int) const { - for (int n = 0; n < N; ++n ) { - ASSERT( my_b.try_put( (T)n ), NULL ); - } - } - -}; - -template< typename T > -void run_parallel_broadcasts(int p, tbb::flow::broadcast_node<T>& b) { - for ( int num_receivers = 1; num_receivers < R; ++num_receivers ) { - counting_array_receiver<T> *receivers = new counting_array_receiver<T>[num_receivers]; - - for ( int r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( b, receivers[r] ); - } - - NativeParallelFor( p, native_body<T>( b ) ); - - for ( int r = 0; r < num_receivers; ++r ) { - for (int n = 0; n < N; ++n ) { - ASSERT( (int)receivers[r][n] == p, NULL ); - } - tbb::flow::remove_edge( b, receivers[r] ); - } - ASSERT( b.try_put( (T)0 ), NULL ); - for ( int r = 0; r < num_receivers; ++r ) - ASSERT( 
(int)receivers[r][0] == p, NULL ) ; - - delete [] receivers; - - } -} - -template< typename T > -void test_parallel_broadcasts(int p) { - - tbb::flow::graph g; - tbb::flow::broadcast_node<T> b(g); - run_parallel_broadcasts(p, b); - - // test copy constructor - tbb::flow::broadcast_node<T> b_copy(b); - run_parallel_broadcasts(p, b_copy); -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -// broadcast_node does not allow successors to try_get from it (it does not allow -// the flow edge to switch) so we only need test the forward direction. -template<typename T> -void test_resets() { - tbb::flow::graph g; - tbb::flow::broadcast_node<T> b0(g); - tbb::flow::broadcast_node<T> b1(g); - tbb::flow::queue_node<T> q0(g); - tbb::flow::make_edge(b0,b1); - tbb::flow::make_edge(b1,q0); - T j; - - // test standard reset - for(int testNo = 0; testNo < 2; ++testNo) { - for(T i= 0; i <= 3; i += 1) { - b0.try_put(i); - } - g.wait_for_all(); - for(T i= 0; i <= 3; i += 1) { - ASSERT(q0.try_get(j) && j == i, "Bad value in queue"); - } - ASSERT(!q0.try_get(j), "extra value in queue"); - - // reset the graph. It should work as before. - if (testNo == 0) g.reset(); - } - - g.reset(tbb::flow::rf_extract); - for(T i= 0; i <= 3; i += 1) { - b0.try_put(i); - } - g.wait_for_all(); - ASSERT(!q0.try_get(j), "edge between nodes not removed"); - for(T i= 0; i <= 3; i += 1) { - b1.try_put(i); - } - g.wait_for_all(); - ASSERT(!q0.try_get(j), "edge between nodes not removed"); -} - -void test_extract() { - int dont_care; - tbb::flow::graph g; - tbb::flow::broadcast_node<int> b0(g); - tbb::flow::broadcast_node<int> b1(g); - tbb::flow::broadcast_node<int> b2(g); - tbb::flow::broadcast_node<int> b3(g); - tbb::flow::broadcast_node<int> b4(g); - tbb::flow::broadcast_node<int> b5(g); - tbb::flow::queue_node<int> q0(g); - tbb::flow::make_edge(b0,b1); - tbb::flow::make_edge(b0,b2); - tbb::flow::make_edge(b1,b3); - tbb::flow::make_edge(b1,b4); - tbb::flow::make_edge(b2,b4); - tbb::flow::make_edge(b2,b5); - tbb::flow::make_edge(b3,q0); - tbb::flow::make_edge(b4,q0); - tbb::flow::make_edge(b5,q0); - - /* b3 */ - /* / \ */ - /* b1 \ */ - /* / \ \ */ - /* b0 b4---q0 */ - /* \ / / */ - /* b2 / */ - /* \ / */ - /* b5 */ - - g.wait_for_all(); - b0.try_put(1); - g.wait_for_all(); - for( int i = 0; i < 4; ++i ) { - int j; - ASSERT(q0.try_get(j) && j == 1, "missing or incorrect message"); - } - ASSERT(!q0.try_get(dont_care), "extra message in queue"); - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 2, "improper count for b0"); - ASSERT(b1.predecessor_count() == 1 && b1.successor_count() == 2, "improper count for b1"); - ASSERT(b2.predecessor_count() == 1 && b2.successor_count() == 2, "improper count for b2"); - ASSERT(b3.predecessor_count() == 1 && b3.successor_count() == 1, "improper count for b3"); - ASSERT(b4.predecessor_count() == 2 && b4.successor_count() == 1, "improper count before extract of b4"); - ASSERT(b5.predecessor_count() == 1 && b5.successor_count() == 1, "improper count for b5"); - b4.extract(); // remove from tree of nodes. 
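// extract() (a preview feature) is expected to disconnect b4 from all of
// its predecessors and successors at once: b4's own edge counts drop to
// zero, while each former neighbour (b1, b2, q0) loses exactly one edge.
// The assertions below check precisely that, against the second diagram.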
- ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 2, "improper count for b0 after"); - ASSERT(b1.predecessor_count() == 1 && b1.successor_count() == 1, "improper succ count for b1 after"); - ASSERT(b2.predecessor_count() == 1 && b2.successor_count() == 1, "improper succ count for b2 after"); - ASSERT(b3.predecessor_count() == 1 && b3.successor_count() == 1, "improper succ count for b3 after"); - ASSERT(b4.predecessor_count() == 0 && b4.successor_count() == 0, "improper succ count after extract"); - ASSERT(b5.predecessor_count() == 1 && b5.successor_count() == 1, "improper succ count for b5 after"); - - /* b3 */ - /* / \ */ - /* b1 \ */ - /* / \ */ - /* b0 q0 */ - /* \ / */ - /* b2 / */ - /* \ / */ - /* b5 */ - - b0.try_put(1); - g.wait_for_all(); - for( int i = 0; i < 2; ++i ) { - int j; - ASSERT(q0.try_get(j) && j == 1, "missing or incorrect message"); - } - ASSERT(!q0.try_get(dont_care), "extra message in queue"); - tbb::flow::make_edge(b0,b4); - tbb::flow::make_edge(b4,q0); - g.wait_for_all(); - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 3, "improper count for b0 after"); - ASSERT(b1.predecessor_count() == 1 && b1.successor_count() == 1, "improper succ count for b1 after"); - ASSERT(b2.predecessor_count() == 1 && b2.successor_count() == 1, "improper succ count for b2 after"); - ASSERT(b3.predecessor_count() == 1 && b3.successor_count() == 1, "improper succ count for b3 after"); - ASSERT(b4.predecessor_count() == 1 && b4.successor_count() == 1, "improper succ count after extract"); - ASSERT(b5.predecessor_count() == 1 && b5.successor_count() == 1, "improper succ count for b5 after"); - - /* b3 */ - /* / \ */ - /* b1 \ */ - /* / \ */ - /* b0---b4---q0 */ - /* \ / */ - /* b2 / */ - /* \ / */ - /* b5 */ - - b0.try_put(1); - g.wait_for_all(); - for( int i = 0; i < 3; ++i ) { - int j; - ASSERT(q0.try_get(j) && j == 1, "missing or incorrect message"); - } - ASSERT(!q0.try_get(dont_care), "extra message in queue"); -} -#endif // TBB_PREVIEW_FLOW_GRAPH_FEATURES - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - - test_serial_broadcasts<int>(); - test_serial_broadcasts<float>(); - test_serial_broadcasts<int_convertable_type>(); - - for( int p=MinThread; p<=MaxThread; ++p ) { - test_parallel_broadcasts<int>(p); - test_parallel_broadcasts<float>(p); - test_parallel_broadcasts<int_convertable_type>(p); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_resets<int>(); - test_resets<float>(); - test_extract(); -#endif - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_buffer_node.cpp b/src/tbb/src/test/test_buffer_node.cpp deleted file mode 100644 index 5e526c4f1..000000000 --- a/src/tbb/src/test/test_buffer_node.cpp +++ /dev/null @@ -1,444 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" -#include "tbb/flow_graph.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -#include "harness_graph.h" -#include <vector> -#include <algorithm> -#endif - -#define N 1000 -#define C 10 - -template< typename T > -void spin_try_get( tbb::flow::buffer_node<T> &b, T &value ) { - while ( b.try_get(value) != true ) {} -} - -template< typename T > -void check_item( T* count_value, T &value ) { - count_value[value / N] += value % N; -} - -template< typename T > -struct parallel_puts : NoAssign { - - tbb::flow::buffer_node<T> &my_b; - - parallel_puts( tbb::flow::buffer_node<T> &b ) : my_b(b) {} - - void operator()(int i) const { - for (int j = 0; j < N; ++j) { - bool msg = my_b.try_put( T(N*i + j) ); - ASSERT( msg == true, NULL ); - } - } -}; - -template< typename T > -struct touches { - - bool **my_touches; - int my_num_threads; - - touches( int num_threads ) : my_num_threads(num_threads) { - my_touches = new bool* [my_num_threads]; - for ( int p = 0; p < my_num_threads; ++p) { - my_touches[p] = new bool[N]; - for ( int n = 0; n < N; ++n) - my_touches[p][n] = false; - } - } - - ~touches() { - for ( int p = 0; p < my_num_threads; ++p) { - delete [] my_touches[p]; - } - delete [] my_touches; - } - - bool check( T v ) { - ASSERT ( my_touches[v/N][v%N] == false, NULL); - my_touches[v/N][v%N] = true; - return true; - } - - bool validate_touches() { - for ( int p = 0; p < my_num_threads; ++p) { - for ( int n = 0; n < N; ++n) { - ASSERT ( my_touches[p][n] == true, NULL); - } - } - return true; - } -}; - -template< typename T > -struct parallel_gets : NoAssign { - - tbb::flow::buffer_node<T> &my_b; - touches<T> &my_touches; - - parallel_gets( tbb::flow::buffer_node<T> &b, touches<T> &t) : my_b(b), my_touches(t) {} - - void operator()(int) const { - for (int j = 0; j < N; ++j) { - T v; - spin_try_get( my_b, v ); - my_touches.check( v ); - } - } - -}; - -template< typename T > -struct parallel_put_get : NoAssign { - - tbb::flow::buffer_node<T> &my_b; - touches<T> &my_touches; - - parallel_put_get( tbb::flow::buffer_node<T> &b, touches<T> &t ) : my_b(b), my_touches(t) {} - - void operator()(int tid) const { - - for ( int i = 0; i < N; i+=C ) { - int j_end = ( N < i + C ) ? 
N : i + C; - // dump about C values into the buffer - for ( int j = i; j < j_end; ++j ) { - ASSERT( my_b.try_put( T (N*tid + j ) ) == true, NULL ); - } - // receiver about C values from the buffer - for ( int j = i; j < j_end; ++j ) { - T v; - spin_try_get( my_b, v ); - my_touches.check( v ); - } - } - } - -}; - -// -// Tests -// -// Item can be reserved, released, consumed ( single serial receiver ) -// -template< typename T > -int test_reservation() { - tbb::flow::graph g; - T bogus_value(-1); - - // Simple tests - tbb::flow::buffer_node<T> b(g); - - b.try_put(T(1)); - b.try_put(T(2)); - b.try_put(T(3)); - - T v, vsum; - ASSERT( b.try_reserve(v) == true, NULL ); - ASSERT( b.try_release() == true, NULL ); - v = bogus_value; - g.wait_for_all(); - ASSERT( b.try_reserve(v) == true, NULL ); - ASSERT( b.try_consume() == true, NULL ); - vsum += v; - v = bogus_value; - g.wait_for_all(); - - ASSERT( b.try_get(v) == true, NULL ); - vsum += v; - v = bogus_value; - g.wait_for_all(); - - ASSERT( b.try_reserve(v) == true, NULL ); - ASSERT( b.try_release() == true, NULL ); - v = bogus_value; - g.wait_for_all(); - ASSERT( b.try_reserve(v) == true, NULL ); - ASSERT( b.try_consume() == true, NULL ); - vsum += v; - ASSERT( vsum == T(6), NULL); - v = bogus_value; - g.wait_for_all(); - - return 0; -} - -// -// Tests -// -// multilpe parallel senders, items in arbitrary order -// multilpe parallel senders, multiple parallel receivers, items in arbitrary order and all items received -// * overlapped puts / gets -// * all puts finished before any getS -// -template< typename T > -int test_parallel(int num_threads) { - tbb::flow::graph g; - tbb::flow::buffer_node<T> b(g); - tbb::flow::buffer_node<T> b2(g); - tbb::flow::buffer_node<T> b3(g); - T bogus_value(-1); - T j = bogus_value; - - NativeParallelFor( num_threads, parallel_puts<T>(b) ); - - T *next_value = new T[num_threads]; - for (int tid = 0; tid < num_threads; ++tid) next_value[tid] = T(0); - - for (int i = 0; i < num_threads * N; ++i ) { - spin_try_get( b, j ); - check_item( next_value, j ); - j = bogus_value; - } - for (int tid = 0; tid < num_threads; ++tid) { - ASSERT( next_value[tid] == T((N*(N-1))/2), NULL ); - } - - j = bogus_value; - g.wait_for_all(); - ASSERT( b.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - NativeParallelFor( num_threads, parallel_puts<T>(b) ); - - { - touches< T > t( num_threads ); - NativeParallelFor( num_threads, parallel_gets<T>(b, t) ); - g.wait_for_all(); - ASSERT( t.validate_touches(), NULL ); - } - j = bogus_value; - ASSERT( b.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - g.wait_for_all(); - { - touches< T > t( num_threads ); - NativeParallelFor( num_threads, parallel_put_get<T>(b, t) ); - g.wait_for_all(); - ASSERT( t.validate_touches(), NULL ); - } - j = bogus_value; - ASSERT( b.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::make_edge( b, b2 ); - tbb::flow::make_edge( b2, b3 ); - - NativeParallelFor( num_threads, parallel_puts<T>(b) ); - { - touches< T > t( num_threads ); - NativeParallelFor( num_threads, parallel_gets<T>(b3, t) ); - g.wait_for_all(); - ASSERT( t.validate_touches(), NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( b.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( b2.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( b3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // test copy constructor - ASSERT( b.remove_successor( b2 ), NULL ); - // fill up b: - 
NativeParallelFor( num_threads, parallel_puts<T>(b) ); - // copy b: - tbb::flow::buffer_node<T> b_copy(b); - - // b_copy should be empty - j = bogus_value; - g.wait_for_all(); - ASSERT( b_copy.try_get( j ) == false, NULL ); - - // hook them together: - ASSERT( b.register_successor(b_copy) == true, NULL ); - // try to get content from b_copy - { - touches< T > t( num_threads ); - NativeParallelFor( num_threads, parallel_gets<T>(b_copy, t) ); - g.wait_for_all(); - ASSERT( t.validate_touches(), NULL ); - } - // now both should be empty - j = bogus_value; - g.wait_for_all(); - ASSERT( b.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( b_copy.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - return 0; -} - -// -// Tests -// -// Predecessors cannot be registered -// Empty buffer rejects item requests -// Single serial sender, items in arbitrary order -// Chained buffers ( 2 & 3 ), single sender, items at last buffer in arbitrary order -// - -template< typename T > -int test_serial() { - tbb::flow::graph g; - T bogus_value(-1); - - tbb::flow::buffer_node<T> b(g); - tbb::flow::buffer_node<T> b2(g); - T j = bogus_value; - - // - // Rejects attempts to add / remove predecessor - // Rejects request from empty buffer - // - ASSERT( b.register_predecessor( b2 ) == false, NULL ); - ASSERT( b.remove_predecessor( b2 ) == false, NULL ); - ASSERT( b.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // - // Simple puts and gets - // - - for (int i = 0; i < N; ++i) { - bool msg = b.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - T vsum = T(0); - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( b, j ); - vsum += j; - } - ASSERT( vsum == (N*(N-1))/2, NULL); - j = bogus_value; - g.wait_for_all(); - ASSERT( b.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::make_edge(b, b2); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT( b.successor_count() == 1, NULL); - ASSERT( b.predecessor_count() == 0, NULL); - ASSERT( b2.successor_count() == 0, NULL); - ASSERT( b2.predecessor_count() == 1, NULL); - typename tbb::flow::buffer_node<T>::successor_vector_type my_succs; - b.copy_successors(my_succs); - ASSERT(my_succs.size() == 1, NULL); - typename tbb::flow::buffer_node<T>::predecessor_vector_type my_preds; - b.copy_predecessors(my_preds); - ASSERT(my_preds.size() == 0, NULL); -#endif - - vsum = T(0); - for (int i = 0; i < N; ++i) { - bool msg = b.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( b2, j ); - vsum += j; - } - ASSERT( vsum == (N*(N-1))/2, NULL); - j = bogus_value; - g.wait_for_all(); - ASSERT( b.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( b2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::remove_edge(b, b2); - ASSERT( b.try_put( 1 ) == true, NULL ); - g.wait_for_all(); - ASSERT( b2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( b.try_get( j ) == true, NULL ); - ASSERT( j == 1, NULL ); - - tbb::flow::buffer_node<T> b3(g); - tbb::flow::make_edge( b, b2 ); - tbb::flow::make_edge( b2, b3 ); - - vsum = T(0); - for (int i = 0; i < N; ++i) { - bool msg = b.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( b3, j ); - vsum += j; - } - ASSERT( vsum == (N*(N-1))/2, NULL); - j = bogus_value; - g.wait_for_all(); - ASSERT( b.try_get( j ) == false, NULL ); - g.wait_for_all(); - 
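A buffer_node, by contrast, hands each item to exactly one consumer and makes no ordering promise, which is why test_buffer_node.cpp sums the retrieved values instead of comparing sequences. A minimal sketch against the same classic TBB 4.x API (names illustrative):

    #include "tbb/flow_graph.h"
    #include <cassert>

    int main() {
        tbb::flow::graph g;
        tbb::flow::buffer_node<int> b(g);
        b.try_put(1);
        b.try_put(2);
        g.wait_for_all();
        int v = 0, sum = 0;
        while (b.try_get(v))    // items come back in arbitrary order
            sum += v;
        assert(sum == 3);
        return 0;
    }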
ASSERT( b2.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( b3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::remove_edge(b, b2); - ASSERT( b.try_put( 1 ) == true, NULL ); - g.wait_for_all(); - ASSERT( b2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( b3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( b.try_get( j ) == true, NULL ); - ASSERT( j == 1, NULL ); - - return 0; -} - -int TestMain() { - tbb::tick_count start = tbb::tick_count::now(), stop; - for (int p = 2; p <= 4; ++p) { - tbb::task_scheduler_init init(p); - test_serial<int>(); - test_parallel<int>(p); - } - stop = tbb::tick_count::now(); - REMARK("Buffer_Node Time=%6.6f\n", (stop-start).seconds()); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_resets<int,tbb::flow::buffer_node<int> >(); - test_resets<float,tbb::flow::buffer_node<float> >(); - test_buffer_extract<tbb::flow::buffer_node<int> >().run_tests(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_cache_aligned_allocator.cpp b/src/tbb/src/test/test_cache_aligned_allocator.cpp deleted file mode 100644 index 6cecffb09..000000000 --- a/src/tbb/src/test/test_cache_aligned_allocator.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test whether cache_aligned_allocator works with some of the host's STL containers. - -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -// the real body of the test is there: -#include "test_allocator.h" - -template<> -struct is_zero_filling<tbb::zero_allocator<void> > { - static const bool value = true; -}; - -// Test that NFS_Allocate() throws bad_alloc if cannot allocate memory. -void Test_NFS_Allocate_Throws() { -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - using namespace tbb::internal; - - // First, allocate a reasonably big amount of memory, big enough - // to not cause warp around in system allocator after adding object header - // during address2 allocation. 
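Test_NFS_Allocate_Throws() probes an internal entry point, but the same out-of-memory contract is visible through the public allocator: allocation failure is reported as std::bad_alloc. A sketch under that assumption (the exact failure threshold is platform-dependent, and this is not one of the deleted test cases):

    #include "tbb/cache_aligned_allocator.h"
    #include <cstddef>
    #include <cstdio>
    #include <new>

    int main() {
        tbb::cache_aligned_allocator<char> a;
        try {
            // Requesting half the address space should be unsatisfiable.
            char* p = a.allocate(~std::size_t(0) / 2);
            a.deallocate(p, ~std::size_t(0) / 2);    // not expected to be reached
        } catch (const std::bad_alloc&) {
            std::printf("allocation failed with bad_alloc, as the test expects\n");
        }
        return 0;
    }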
- const size_t itemsize = 1024; - const size_t nitems = 1024; - void *address1 = NULL; - try { - address1 = NFS_Allocate( nitems, itemsize, NULL ); - } catch( ... ) { - // intentionally empty - } - ASSERT( address1, "NFS_Allocate unable to obtain 1024*1024 bytes" ); - - bool exception_caught = false; - try { - // Try allocating more memory than left in the address space; should cause std::bad_alloc - (void) NFS_Allocate( 1, ~size_t(0) - itemsize*nitems + NFS_GetLineSize(), NULL); - } catch( std::bad_alloc ) { - exception_caught = true; - } catch( ... ) { - ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unexpected exception type (std::bad_alloc was expected)" ); - exception_caught = true; - } - ASSERT( exception_caught, "NFS_Allocate did not throw bad_alloc" ); - - try { - NFS_Free( address1 ); - } catch( ... ) { - ASSERT( false, "NFS_Free did not accept the address obtained with NFS_Allocate" ); - } -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ -} - -int TestMain () { - int result = TestMain<tbb::cache_aligned_allocator<void> >(); - result += TestMain<tbb::tbb_allocator<void> >(); - result += TestMain<tbb::zero_allocator<void> >(); - ASSERT( !result, NULL ); - Test_NFS_Allocate_Throws(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_cache_aligned_allocator_STL.cpp b/src/tbb/src/test/test_cache_aligned_allocator_STL.cpp deleted file mode 100644 index 7942c33e7..000000000 --- a/src/tbb/src/test/test_cache_aligned_allocator_STL.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test whether cache_aligned_allocator works with some of the host's STL containers. 
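The STL-container variant of the test boils down to passing the allocator as a standard allocator argument; each allocated block is then padded and aligned to a cache-line boundary, which keeps separately allocated blocks from sharing a line between threads. A minimal sketch:

    #include "tbb/cache_aligned_allocator.h"
    #include <cassert>
    #include <vector>

    int main() {
        std::vector<double, tbb::cache_aligned_allocator<double> > v;
        for (int i = 0; i < 100; ++i)
            v.push_back(i);    // storage comes from cache-aligned blocks
        assert(v.size() == 100);
        return 0;
    }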
- -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "test_allocator_STL.h" - -int TestMain () { - TestAllocatorWithSTL<tbb::cache_aligned_allocator<void> >(); - TestAllocatorWithSTL<tbb::tbb_allocator<void> >(); - TestAllocatorWithSTL<tbb::zero_allocator<void> >(); - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_cilk_common.h b/src/tbb/src/test/test_cilk_common.h deleted file mode 100644 index ce580d9aa..000000000 --- a/src/tbb/src/test/test_cilk_common.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// This file is a common part of test_cilk_interop and test_cilk_dynamic_load tests - -int TBB_Fib( int n ); - -class FibCilkSubtask: public tbb::task { - int n; - int& result; - /*override*/ task* execute() { - if( n<2 ) { - result = n; - } else { - int x, y; - x = cilk_spawn TBB_Fib(n-2); - y = cilk_spawn TBB_Fib(n-1); - cilk_sync; - result = x+y; - } - return NULL; - } -public: - FibCilkSubtask( int& result_, int n_ ) : result(result_), n(n_) {} -}; - -class FibTask: public tbb::task { - int n; - int& result; - /*override*/ task* execute() { - if( !g_sandwich && n<2 ) { - result = n; - } else { - int x,y; - tbb::task_scheduler_init init(P_nested); - task* self0 = &task::self(); - set_ref_count( 3 ); - if ( g_sandwich ) { - spawn (*new( allocate_child() ) FibCilkSubtask(x,n-1)); - spawn (*new( allocate_child() ) FibCilkSubtask(y,n-2)); - } - else { - spawn (*new( allocate_child() ) FibTask(x,n-1)); - spawn (*new( allocate_child() ) FibTask(y,n-2)); - } - wait_for_all(); - task* self1 = &task::self(); - ASSERT( self0 == self1, "failed to preserve TBB TLS" ); - result = x+y; - } - return NULL; - } -public: - FibTask( int& result_, int n_ ) : result(result_), n(n_) {} -}; - -int TBB_Fib( int n ) { - if( n<2 ) { - return n; - } else { - int result; - tbb::task_scheduler_init init(P_nested); - tbb::task::spawn_root_and_wait(*new( tbb::task::allocate_root()) FibTask(result,n) ); - return result; - } -} diff --git a/src/tbb/src/test/test_cilk_dynamic_load.cpp b/src/tbb/src/test/test_cilk_dynamic_load.cpp deleted file mode 100644 index eb845c827..000000000 --- a/src/tbb/src/test/test_cilk_dynamic_load.cpp +++ /dev/null @@ -1,156 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_config.h" - -// Skip the test if no interoperability with cilkrts -#define __TBB_CILK_INTEROP (__TBB_SURVIVE_THREAD_SWITCH && __INTEL_COMPILER>=1200) -// Skip the test when cilkrts did not have dlopen()/dlclose() start up feature -#define CILK_SYMBOLS_VISIBLE (_WIN32||_WIN64) -// The compiler does not add "-lcilkrts" linker option on some linux systems -#define CILK_LINKAGE_BROKEN (__linux__ && __GNUC__<4 && __INTEL_COMPILER_BUILD_DATE <= 20110427) -// Currently, the interop doesn't support the situation: -//1) Intel(R) Threading Building Blocks (Intel(R) TBB) is outermost; -//2) Intel(R) Cilk(TM) Plus, and it should be dynamically loaded with dlopen/LoadLibrary (possibly via a 3rd party module); -//3) Intel(R) TBB again; -//4) Intel(R) Cilk(TM) Plus again. -#define HEAVY_NESTED_INTEROP_SUPPORT ( __INTEL_COMPILER_BUILD_DATE < 20110427 ) - -#if __TBB_CILK_INTEROP && CILK_SYMBOLS_VISIBLE && !CILK_LINKAGE_BROKEN && HEAVY_NESTED_INTEROP_SUPPORT - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" - -static const int N = 25; -static const int P_outer = 4; -static const int P_nested = 2; - -#ifdef _USRDLL - -#include <cilk/cilk.h> -#define HARNESS_CUSTOM_MAIN 1 -#include "harness.h" -#undef HARNESS_CUSTOM_MAIN - -#if _WIN32 || _WIN64 -#define CILK_TEST_EXPORT extern "C" __declspec(dllexport) -#else -#define CILK_TEST_EXPORT extern "C" -#endif /* _WIN32 || _WIN64 */ - -bool g_sandwich = true; // have to be declare before #include "test_cilk_common.h" -#include "test_cilk_common.h" - -CILK_TEST_EXPORT int CilkFib( int n ) -{ - return TBB_Fib(n); -} - -CILK_TEST_EXPORT void CilkShutdown() -{ - __cilkrts_end_cilk(); -} - -#else /* _USRDLL undefined */ - -#include "harness.h" -#include "harness_dynamic_libs.h" - -int SerialFib( int n ) { - int a=0, b=1; - for( int i=0; i<n; ++i ) { - b += a; - a = b-a; - } - return a; -} - -int F = SerialFib(N); - -typedef int (*CILK_CALL)(int); -CILK_CALL CilkFib = 0; - -typedef void (*CILK_SHUTDOWN)(); -CILK_SHUTDOWN CilkShutdown = 0; - -class FibTask: public tbb::task { - int n; - int& result; - /*override*/ task* execute() { - if( n<2 ) { - result = n; - } else { - - // TODO: why RTLD_LAZY was used here? - Harness::LIBRARY_HANDLE hLib = - Harness::OpenLibrary(TEST_LIBRARY_NAME("test_cilk_dynamic_load_dll")); - CilkFib = (CILK_CALL)Harness::GetAddress(hLib, "CilkFib"); - CilkShutdown = (CILK_SHUTDOWN)Harness::GetAddress(hLib, "CilkShutdown"); - - int x, y; - x = CilkFib(n-2); - y = CilkFib(n-1); - result = x+y; - - CilkShutdown(); - - Harness::CloseLibrary(hLib); - } - return NULL; - } -public: - FibTask( int& result_, int n_ ) : result(result_), n(n_) {} -}; - - -int TBB_Fib( int n ) { - if( n<2 ) { - return n; - } else { - int result; - tbb::task_scheduler_init init(P_nested); - tbb::task::spawn_root_and_wait(*new( tbb::task::allocate_root()) FibTask(result,n) ); - return result; - } -} - -void RunSandwich() { - tbb::task_scheduler_init init(P_outer); - int m = TBB_Fib(N); - ASSERT( m == F, NULL ); -} - -int TestMain () { - for ( int i = 0; i < 20; ++i ) - RunSandwich(); - return Harness::Done; -} - -#endif /* _USRDLL */ - -#else /* !__TBB_CILK_INTEROP */ - -#include "harness.h" - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__TBB_CILK_INTEROP */ diff --git a/src/tbb/src/test/test_cilk_interop.cpp b/src/tbb/src/test/test_cilk_interop.cpp deleted file mode 100644 index b5cdbebca..000000000 --- a/src/tbb/src/test/test_cilk_interop.cpp +++ /dev/null @@ -1,155 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. 
All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#include "harness.h" - -// Skip the test if no interoperability with cilkrts -#define __TBB_CILK_INTEROP (__TBB_SURVIVE_THREAD_SWITCH && __INTEL_COMPILER>=1200) -// The compiler does not add "-lcilkrts" linker option on some linux systems -#define CILK_LINKAGE_BROKEN (__linux__ && __GNUC__<4 && __INTEL_COMPILER_BUILD_DATE <= 20110427) -// In U4, cilkrts incorrectly sends the interop notifications to TBB -#define CILK_NOTIFICATIONS_BROKEN ( __INTEL_COMPILER_BUILD_DATE == 20110427 ) - -#if __TBB_CILK_INTEROP && !CILK_LINKAGE_BROKEN && !CILK_NOTIFICATIONS_BROKEN - -static const int N = 14; -static const int P_outer = 4; -static const int P_nested = 2; - -#include <cilk/cilk.h> -#include <cilk/cilk_api.h> -#define private public -#include "tbb/task.h" -#undef private -#include "tbb/task_scheduler_init.h" -#include <cstdio> -#include <cassert> - -enum tbb_sched_injection_mode_t { - tbbsched_none = 0, - tbbsched_explicit_only = 1, - tbbsched_auto_only = 2, - tbbsched_mixed = 3 -}; - -tbb_sched_injection_mode_t g_sim = tbbsched_none; - -bool g_sandwich = false; // have to be declare before #include "test_cilk_common.h" -#include "test_cilk_common.h" - -// A time delay routine -void Delay( int n ) { - static volatile int Global; - for( int k=0; k<10000; ++k ) - for( int i=0; i<n; ++i ) - ++Global; -} - -int SerialFib( int n ) { - int a=0, b=1; - for( int i=0; i<n; ++i ) { - b += a; - a = b-a; - } - return a; -} - -int F = SerialFib(N); - -int Fib ( int n ) { - if( n < 2 ) { - if ( g_sim ) { - tbb::task_scheduler_init tsi(P_nested); - } - return n; - } else { - tbb::task_scheduler_init *tsi = NULL; - tbb::task *cur = NULL; - if ( g_sim ) { - if ( n % 2 == 0 ) { - if ( g_sim == tbbsched_auto_only || (g_sim == tbbsched_mixed && n % 4 == 0) ) { - // Trigger TBB scheduler auto-initialization - cur = &tbb::task::self(); - } - else { - ASSERT ( g_sim == tbbsched_explicit_only || (g_sim == tbbsched_mixed && n % 4 != 0), NULL ); - // Initialize TBB scheduler explicitly - tsi = new tbb::task_scheduler_init(P_nested); - } - } - } - int x, y; - x = cilk_spawn Fib(n-2); - y = cilk_spawn Fib(n-1); - cilk_sync; - if ( tsi ) - delete tsi; - return x+y; - } -} - -void RunCilkOnly ( tbb_sched_injection_mode_t sim ) { - g_sim 
= sim; - int m = Fib(N); - ASSERT( m == F, NULL ); -} - -struct FibBody : NoAssign, Harness::NoAfterlife { - void operator() ( int ) const { - int m = Fib(N); - ASSERT( m == F, NULL ); - } -}; - -void RunCilkOnlyConcurrently ( tbb_sched_injection_mode_t sim ) { - g_sim = sim; - NativeParallelFor( P_outer, FibBody() ); -} - -void RunSandwich( bool sandwich ) { - g_sandwich = sandwich; - tbb::task_scheduler_init init(P_outer); - int m = TBB_Fib(N); - ASSERT( g_sandwich == sandwich, "Memory corruption detected" ); - ASSERT( m == F, NULL ); -} - -int TestMain () { - for ( int i = 0; i < 100; ++i ) - RunCilkOnlyConcurrently( tbbsched_none ); - RunCilkOnly( tbbsched_none ); - RunCilkOnly( tbbsched_explicit_only ); - RunCilkOnly( tbbsched_auto_only ); - RunCilkOnly( tbbsched_mixed ); - RunSandwich( false ); - for ( int i = 0; i < 10; ++i ) - RunSandwich( true ); - __cilkrts_end_cilk(); - return Harness::Done; -} - -#else /* !__TBB_CILK_INTEROP */ - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__TBB_CILK_INTEROP */ diff --git a/src/tbb/src/test/test_combinable.cpp b/src/tbb/src/test/test_combinable.cpp deleted file mode 100644 index 8418787a4..000000000 --- a/src/tbb/src/test/test_combinable.cpp +++ /dev/null @@ -1,450 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
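The Cilk interop scenarios above all pivot on tbb::task_scheduler_init, the classic mechanism for pinning the scheduler's thread count; oneTBB later removed it in favour of task_arena and global_control, which is part of why these tests are being dropped here. A minimal sketch of the classic usage, with an illustrative body type:

    #include "tbb/task_scheduler_init.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    struct Work {
        void operator()(const tbb::blocked_range<int>& r) const {
            for (int i = r.begin(); i != r.end(); ++i)
                ;    // ... per-element work ...
        }
    };

    int main() {
        tbb::task_scheduler_init init(2);    // at most two worker threads
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000), Work());
        return 0;
    }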
-*/ - -#define __TBB_EXTRA_DEBUG 1 // for concurrent_hash_map -#include "tbb/combinable.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "tbb/tick_count.h" -#include "tbb/tbb_allocator.h" -#include "tbb/tbb_thread.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> -#include <vector> -#include <utility> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "harness_assert.h" -#include "harness.h" - -#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif - -static tbb::atomic<int> construction_counter; -static tbb::atomic<int> destruction_counter; - -const int REPETITIONS = 10; -const int N = 100000; -const double EXPECTED_SUM = (REPETITIONS + 1) * N; - -// -// A minimal class -// Define: default and copy constructor, and allow implicit operator& -// also operator= -// - -class minimal { -private: - int my_value; -public: - minimal(int val=0) : my_value(val) { ++construction_counter; } - minimal( const minimal &m ) : my_value(m.my_value) { ++construction_counter; } - minimal& operator=(const minimal& other) { my_value = other.my_value; return *this; } - minimal& operator+=(const minimal& other) { my_value += other.my_value; return *this; } - operator int() const { return my_value; } - ~minimal() { ++destruction_counter; } - void set_value( const int i ) { my_value = i; } - int value( ) const { return my_value; } -}; - -//// functors for initialization and combine - -// Addition -template <typename T> -struct FunctorAddFinit { - T operator()() { return 0; } -}; - -template <typename T> -struct FunctorAddFinit7 { - T operator()() { return 7; } -}; - -template <typename T> -struct FunctorAddCombine { - T operator()(T left, T right ) const { - return left + right; - } -}; - -template <typename T> -struct FunctorAddCombineRef { - T operator()(const T& left, const T& right ) const { - return left + right; - } -}; - -template <typename T> -T my_finit( ) { return 0; } - -template <typename T> -T my_combine( T left, T right) { return left + right; } - -template <typename T> -T my_combine_ref( const T &left, const T &right) { return left + right; } - -template <typename T> -class CombineEachHelper { -public: - CombineEachHelper(T& _result) : my_result(_result) {} - void operator()(const T& new_bit) { my_result += new_bit; } - CombineEachHelper& operator=(const CombineEachHelper& other) { - my_result = other; - return *this; - } -private: - T& my_result; -}; - -template <typename T> -class CombineEachHelperCnt { -public: - CombineEachHelperCnt(T& _result, int& _nbuckets) : my_result(_result), nBuckets(_nbuckets) {} - void operator()(const T& new_bit) { my_result += new_bit; ++nBuckets; } - CombineEachHelperCnt& operator=(const CombineEachHelperCnt& other) { - my_result = other.my_result; - nBuckets = other.nBuckets; - return *this; - } -private: - T& my_result; - int& nBuckets; -}; - -template <typename T> -class CombineEachVectorHelper { -public: - typedef std::vector<T, tbb::tbb_allocator<T> > ContainerType; - CombineEachVectorHelper(T& _result) : my_result(_result) { } - void operator()(const ContainerType& new_bit) { - for(typename ContainerType::const_iterator ci = new_bit.begin(); ci != new_bit.end(); ++ci) { - my_result += *ci; - } - } - CombineEachVectorHelper& operator=(const 
CombineEachVectorHelper& other) { my_result=other.my_result; return *this;} -private: - T& my_result; -}; - - - -//// end functors - -template< typename T > -void run_serial_scalar_tests(const char *test_name) { - tbb::tick_count t0; - T sum = 0; - - REMARK("Testing serial %s... ", test_name); - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - for (int i = 0; i < N; ++i) { - sum += 1; - } - } - - double ResultValue = sum; - ASSERT( EXPECTED_SUM == ResultValue, NULL); - REMARK("done\nserial %s, 0, %g, %g\n", test_name, ResultValue, ( tbb::tick_count::now() - t0).seconds()); -} - - -template <typename T> -class ParallelScalarBody: NoAssign { - - tbb::combinable<T> &sums; - -public: - - ParallelScalarBody ( tbb::combinable<T> &_sums ) : sums(_sums) { } - - void operator()( const tbb::blocked_range<int> &r ) const { - for (int i = r.begin(); i != r.end(); ++i) { - bool was_there; - T& my_local = sums.local(was_there); - if(!was_there) my_local = 0; - my_local += 1 ; - } - } - -}; - -// parallel body with no test for first access. -template <typename T> -class ParallelScalarBodyNoInit: NoAssign { - - tbb::combinable<T> &sums; - -public: - - ParallelScalarBodyNoInit ( tbb::combinable<T> &_sums ) : sums(_sums) { } - - void operator()( const tbb::blocked_range<int> &r ) const { - for (int i = r.begin(); i != r.end(); ++i) { - sums.local() += 1 ; - } - } - -}; - -template< typename T > -void RunParallelScalarTests(const char *test_name) { - - tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); - - for (int p = MinThread; p <= MaxThread; ++p) { - - - if (p == 0) continue; - - REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); - init.initialize(p); - - tbb::tick_count t0; - - T assign_sum(0); - - T combine_sum(0); - - T combine_ref_sum(0); - - T combine_each_sum(0); - - T combine_finit_sum(0); - - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - - tbb::combinable<T> sums; - FunctorAddFinit<T> my_finit_decl; - tbb::combinable<T> finit_combinable(my_finit_decl); - - - tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBodyNoInit<T>( finit_combinable ) ); - tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBody<T>( sums ) ); - - // Use combine - combine_sum += sums.combine(my_combine<T>); - combine_ref_sum += sums.combine(my_combine_ref<T>); - - CombineEachHelper<T> my_helper(combine_each_sum); - sums.combine_each(my_helper); - - // test assignment - tbb::combinable<T> assigned; - assigned = sums; - - assign_sum += assigned.combine(my_combine<T>); - - combine_finit_sum += finit_combinable.combine(my_combine<T>); - } - - ASSERT( EXPECTED_SUM == combine_sum, NULL); - ASSERT( EXPECTED_SUM == combine_ref_sum, NULL); - ASSERT( EXPECTED_SUM == assign_sum, NULL); - ASSERT( EXPECTED_SUM == combine_finit_sum, NULL); - - REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, static_cast<double>(combine_sum), - ( tbb::tick_count::now() - t0).seconds()); - init.terminate(); - } -} - - -template <typename T> -class ParallelVectorForBody: NoAssign { - - tbb::combinable< std::vector<T, tbb::tbb_allocator<T> > > &locals; - -public: - - ParallelVectorForBody ( tbb::combinable< std::vector<T, tbb::tbb_allocator<T> > > &_locals ) : locals(_locals) { } - - void operator()( const tbb::blocked_range<int> &r ) const { - T one = 1; - - for (int i = r.begin(); i < r.end(); ++i) { - locals.local().push_back( one ); - } - } - -}; - -template< typename T > -void 
RunParallelVectorTests(const char *test_name) { - tbb::tick_count t0; - tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); - typedef std::vector<T, tbb::tbb_allocator<T> > ContainerType; - - for (int p = MinThread; p <= MaxThread; ++p) { - - if (p == 0) continue; - REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); - init.initialize(p); - - T sum = 0; - T sum2 = 0; - T sum3 = 0; - - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - typedef typename tbb::combinable< ContainerType > CombinableType; - CombinableType vs; - - tbb::parallel_for ( tbb::blocked_range<int> (0, N, 10000), ParallelVectorForBody<T>( vs ) ); - - // copy construct - CombinableType vs2(vs); // this causes an assertion failure, related to allocators... - - // assign - CombinableType vs3; - vs3 = vs; - - CombineEachVectorHelper<T> MyCombineEach(sum); - vs.combine_each(MyCombineEach); - - CombineEachVectorHelper<T> MyCombineEach2(sum2); - vs2.combine_each(MyCombineEach2); - - CombineEachVectorHelper<T> MyCombineEach3(sum3); - vs2.combine_each(MyCombineEach3); - // combine_each sums all elements of each vector into the result. - } - - double ResultValue = sum; - ASSERT( EXPECTED_SUM == ResultValue, NULL); - ResultValue = sum2; - ASSERT( EXPECTED_SUM == ResultValue, NULL); - ResultValue = sum3; - ASSERT( EXPECTED_SUM == ResultValue, NULL); - REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, ResultValue, ( tbb::tick_count::now() - t0).seconds()); - init.terminate(); - } -} - -#include "harness_barrier.h" - -Harness::SpinBarrier sBarrier; - -struct Body : NoAssign { - tbb::combinable<int>* locals; - const int nthread; - const int nIters; - Body( int nthread_, int niters_ ) : nthread(nthread_), nIters(niters_) { sBarrier.initialize(nthread_); } - - - void operator()(int thread_id ) const { - bool existed; - sBarrier.wait(); - for(int i = 0; i < nIters; ++i ) { - existed = thread_id & 1; - int oldval = locals->local(existed); - ASSERT(existed == (i > 0), "Error on first reference"); - ASSERT(!existed || (oldval == thread_id), "Error on fetched value"); - existed = thread_id & 1; - locals->local(existed) = thread_id; - ASSERT(existed, "Error on assignment"); - } - } -}; - -void -TestLocalAllocations( int nthread ) { - ASSERT(nthread > 0, "nthread must be positive"); -#define NITERATIONS 1000 - Body myBody(nthread, NITERATIONS); - tbb::combinable<int> myCombinable; - myBody.locals = &myCombinable; - - NativeParallelFor( nthread, myBody ); - - int mySum = 0; - int mySlots = 0; - CombineEachHelperCnt<int> myCountCombine(mySum, mySlots); - myCombinable.combine_each(myCountCombine); - - ASSERT(nthread == mySlots, "Incorrect number of slots"); - ASSERT(mySum == (nthread - 1) * nthread / 2, "Incorrect values in result"); -} - - -void -RunParallelTests() { - RunParallelScalarTests<int>("int"); - RunParallelScalarTests<double>("double"); - RunParallelScalarTests<minimal>("minimal"); - RunParallelVectorTests<int>("std::vector<int, tbb::tbb_allocator<int> >"); - RunParallelVectorTests<double>("std::vector<double, tbb::tbb_allocator<double> >"); -} - -template <typename T> -void -RunAssignmentAndCopyConstructorTest(const char *test_name) { - REMARK("Testing assignment and copy construction for %s\n", test_name); - - // test creation with finit function (combine returns finit return value if no threads have created locals) - FunctorAddFinit7<T> my_finit7_decl; - tbb::combinable<T> create2(my_finit7_decl); - ASSERT(7 == create2.combine(my_combine<T>), NULL); - - 
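The pattern under test: tbb::combinable keeps one lazily created slot per thread, and combine() folds the slots with a user-supplied binary function after the parallel phase. A minimal sketch using a zero-returning initializer, as the finit variants above do (Tally, zero, and plus are illustrative names, not part of the deleted tests):

    #include "tbb/combinable.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <cassert>

    static int zero() { return 0; }
    static int plus(int a, int b) { return a + b; }

    struct Tally {
        tbb::combinable<int>& sums;
        explicit Tally(tbb::combinable<int>& s) : sums(s) {}
        void operator()(const tbb::blocked_range<int>& r) const {
            for (int i = r.begin(); i != r.end(); ++i)
                sums.local() += 1;    // first touch per thread runs zero()
        }
    };

    int main() {
        tbb::combinable<int> sums(zero);
        tbb::parallel_for(tbb::blocked_range<int>(0, 100000), Tally(sums));
        assert(sums.combine(plus) == 100000);
        return 0;
    }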
// test copy construction with function initializer - tbb::combinable<T> copy2(create2); - ASSERT(7 == copy2.combine(my_combine<T>), NULL); - - // test copy assignment with function initializer - FunctorAddFinit<T> my_finit_decl; - tbb::combinable<T> assign2(my_finit_decl); - assign2 = create2; - ASSERT(7 == assign2.combine(my_combine<T>), NULL); -} - -void -RunAssignmentAndCopyConstructorTests() { - REMARK("Running assignment and copy constructor tests\n"); - RunAssignmentAndCopyConstructorTest<int>("int"); - RunAssignmentAndCopyConstructorTest<double>("double"); - RunAssignmentAndCopyConstructorTest<minimal>("minimal"); -} - -int TestMain () { - if (MaxThread > 0) { - RunParallelTests(); - } - RunAssignmentAndCopyConstructorTests(); - for(int i = 1 <= MinThread ? MinThread : 1; i <= MaxThread; ++i) { - REMARK("Testing local() allocation with nthreads=%d\n", i); - for(int j = 0; j < 100; ++j) { - TestLocalAllocations(i); - } - } - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_concurrent_hash_map.cpp b/src/tbb/src/test/test_concurrent_hash_map.cpp deleted file mode 100644 index df16f40ac..000000000 --- a/src/tbb/src/test/test_concurrent_hash_map.cpp +++ /dev/null @@ -1,1377 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef TBB_USE_PERFORMANCE_WARNINGS -#define TBB_USE_PERFORMANCE_WARNINGS 1 -#endif - -// Our tests usually include the header under test first. But this test needs -// to use the preprocessor to edit the identifier runtime_warning in concurrent_hash_map.h. -// Hence we include a few other headers before doing the abusive edit. -#include "tbb/tbb_stddef.h" /* Defines runtime_warning */ -#include "harness_assert.h" /* Prerequisite for defining hooked_warning */ - -// The symbol internal::runtime_warning is normally an entry point into the TBB library. -// Here for sake of testing, we define it to be hooked_warning, a routine peculiar to this unit test. -#define runtime_warning hooked_warning - -static bool bad_hashing = false; - -namespace tbb { - namespace internal { - static void hooked_warning( const char* /*format*/, ... 
) { - ASSERT(bad_hashing, "unexpected runtime_warning: bad hashing"); - } - } // namespace internal -} // namespace tbb -#define __TBB_EXTRA_DEBUG 1 // enables additional checks -#include "tbb/concurrent_hash_map.h" - -// Restore runtime_warning as an entry point into the TBB library. -#undef runtime_warning - -namespace Jungle { - struct Tiger {}; - size_t tbb_hasher( const Tiger& ) {return 0;} -} - -#if !defined(_MSC_VER) || _MSC_VER>=1400 || __INTEL_COMPILER -void test_ADL() { - tbb::tbb_hash_compare<Jungle::Tiger>::hash(Jungle::Tiger()); // Instantiation chain finds tbb_hasher via Argument Dependent Lookup -} -#endif - -struct UserDefinedKeyType { -}; - -namespace tbb { - // Test whether tbb_hash_compare can be partially specialized as stated in Reference manual. - template<> struct tbb_hash_compare<UserDefinedKeyType> { - size_t hash( UserDefinedKeyType ) const {return 0;} - bool equal( UserDefinedKeyType /*x*/, UserDefinedKeyType /*y*/ ) {return true;} - }; -} - -#include "harness_runtime_loader.h" - -tbb::concurrent_hash_map<UserDefinedKeyType,int> TestInstantiationWithUserDefinedKeyType; - -// Test whether a sufficient set of headers were included to instantiate a concurrent_hash_map. OSS Bug #120 (& #130): -// http://www.threadingbuildingblocks.org/bug_desc.php?id=120 -tbb::concurrent_hash_map<std::pair<std::pair<int,std::string>,const char*>,int> TestInstantiation; - -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "tbb/atomic.h" -#include "tbb/tick_count.h" -#include "harness.h" -#include "harness_allocator.h" - -class MyException : public std::bad_alloc { -public: - virtual const char *what() const throw() { return "out of items limit"; } - virtual ~MyException() throw() {} -}; - -/** Has tightly controlled interface so that we can verify - that concurrent_hash_map uses only the required interface. 
*/ -class MyKey { -private: - void operator=( const MyKey& ); // Deny access - int key; - friend class MyHashCompare; - friend class YourHashCompare; -public: - static MyKey make( int i ) { - MyKey result; - result.key = i; - return result; - } - int value_of() const {return key;} -}; - -tbb::atomic<long> MyDataCount; -long MyDataCountLimit = 0; - -class MyData { -protected: - friend class MyData2; - int data; - enum state_t { - LIVE=0x1234, - DEAD=0x5678 - } my_state; - void operator=( const MyData& ); // Deny access -public: - MyData(int i = 0) { - my_state = LIVE; - data = i; - if(MyDataCountLimit && MyDataCount + 1 >= MyDataCountLimit) - __TBB_THROW( MyException() ); - ++MyDataCount; - } - MyData( const MyData& other ) { - ASSERT( other.my_state==LIVE, NULL ); - my_state = LIVE; - data = other.data; - if(MyDataCountLimit && MyDataCount + 1 >= MyDataCountLimit) - __TBB_THROW( MyException() ); - ++MyDataCount; - } - ~MyData() { - --MyDataCount; - my_state = DEAD; - } - static MyData make( int i ) { - MyData result; - result.data = i; - return result; - } - int value_of() const { - ASSERT( my_state==LIVE, NULL ); - return data; - } - void set_value( int i ) { - ASSERT( my_state==LIVE, NULL ); - data = i; - } - bool operator==( const MyData& other ) const { - ASSERT( other.my_state==LIVE, NULL ); - ASSERT( my_state==LIVE, NULL ); - return data == other.data; - } -}; - -class MyData2 : public MyData { -public: - MyData2( ) {} - MyData2( const MyData& other ) { - ASSERT( other.my_state==LIVE, NULL ); - ASSERT( my_state==LIVE, NULL ); - data = other.data; - } - void operator=( const MyData& other ) { - ASSERT( other.my_state==LIVE, NULL ); - ASSERT( my_state==LIVE, NULL ); - data = other.data; - } - void operator=( const MyData2& other ) { - ASSERT( other.my_state==LIVE, NULL ); - ASSERT( my_state==LIVE, NULL ); - data = other.data; - } - bool operator==( const MyData2& other ) const { - ASSERT( other.my_state==LIVE, NULL ); - ASSERT( my_state==LIVE, NULL ); - return data == other.data; - } -}; - -class MyHashCompare { -public: - bool equal( const MyKey& j, const MyKey& k ) const { - return j.key==k.key; - } - unsigned long hash( const MyKey& k ) const { - return k.key; - } -}; - -class YourHashCompare { -public: - bool equal( const MyKey& j, const MyKey& k ) const { - return j.key==k.key; - } - unsigned long hash( const MyKey& ) const { - return 1; - } -}; - -typedef local_counting_allocator<std::allocator<MyData> > MyAllocator; -typedef tbb::concurrent_hash_map<MyKey,MyData,MyHashCompare,MyAllocator> MyTable; -typedef tbb::concurrent_hash_map<MyKey,MyData2,MyHashCompare> MyTable2; -typedef tbb::concurrent_hash_map<MyKey,MyData,YourHashCompare> YourTable; - -template<typename MyTable> -inline void CheckAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact = true) { - size_t items_allocated = table.get_allocator().items_allocated, items_freed = table.get_allocator().items_freed; - size_t allocations = table.get_allocator().allocations, frees = table.get_allocator().frees; - REMARK("checking allocators: items %u/%u, allocs %u/%u\n", - unsigned(items_allocated), unsigned(items_freed), unsigned(allocations), unsigned(frees) ); - ASSERT( items_allocated == allocations, NULL); ASSERT( items_freed == frees, NULL); - if(exact) { - ASSERT( allocations == expected_allocs, NULL); ASSERT( frees == expected_frees, NULL); - } else { - ASSERT( allocations >= expected_allocs, NULL); ASSERT( frees >= expected_frees, NULL); - ASSERT( allocations - frees == expected_allocs - 
expected_frees, NULL ); - } -} - -inline bool UseKey( size_t i ) { - return (i&3)!=3; -} - -struct Insert { - static void apply( MyTable& table, int i ) { - if( UseKey(i) ) { - if( i&4 ) { - MyTable::accessor a; - table.insert( a, MyKey::make(i) ); - if( i&1 ) - (*a).second.set_value(i*i); - else - a->second.set_value(i*i); - } else - if( i&1 ) { - MyTable::accessor a; - table.insert( a, std::make_pair(MyKey::make(i), MyData(i*i)) ); - ASSERT( (*a).second.value_of()==i*i, NULL ); - } else { - MyTable::const_accessor ca; - table.insert( ca, std::make_pair(MyKey::make(i), MyData(i*i)) ); - ASSERT( ca->second.value_of()==i*i, NULL ); - } - } - } -}; - -#if __TBB_INITIALIZER_LISTS_PRESENT -struct InsertInitList { - static void apply( MyTable& table, int i ) { - if ( UseKey( i ) ) { - // TODO: investigate why the following sequence causes an additional allocation sometimes: - // table.insert( MyTable::value_type( MyKey::make( i ), i*i ) ); - // table.insert( MyTable::value_type( MyKey::make( i ), i*i+1 ) ); - std::initializer_list<MyTable::value_type> il = { MyTable::value_type( MyKey::make( i ), i*i )/*, MyTable::value_type( MyKey::make( i ), i*i+1 ) */ }; - table.insert( il ); - } - } -}; -#endif /* __TBB_INITIALIZER_LISTS_PRESENT */ - -struct Find { - static void apply( MyTable& table, int i ) { - MyTable::accessor a; - const MyTable::accessor& ca = a; - bool b = table.find( a, MyKey::make(i) ); - ASSERT( b==!a.empty(), NULL ); - if( b ) { - if( !UseKey(i) ) - REPORT("Line %d: unexpected key %d present\n",__LINE__,i); - AssertSameType( &*a, static_cast<MyTable::value_type*>(0) ); - ASSERT( ca->second.value_of()==i*i, NULL ); - ASSERT( (*ca).second.value_of()==i*i, NULL ); - if( i&1 ) - ca->second.set_value( ~ca->second.value_of() ); - else - (*ca).second.set_value( ~ca->second.value_of() ); - } else { - if( UseKey(i) ) - REPORT("Line %d: key %d missing\n",__LINE__,i); - } - } -}; - -struct FindConst { - static void apply( const MyTable& table, int i ) { - MyTable::const_accessor a; - const MyTable::const_accessor& ca = a; - bool b = table.find( a, MyKey::make(i) ); - ASSERT( b==(table.count(MyKey::make(i))>0), NULL ); - ASSERT( b==!a.empty(), NULL ); - ASSERT( b==UseKey(i), NULL ); - if( b ) { - AssertSameType( &*ca, static_cast<const MyTable::value_type*>(0) ); - ASSERT( ca->second.value_of()==~(i*i), NULL ); - ASSERT( (*ca).second.value_of()==~(i*i), NULL ); - } - } -}; - -tbb::atomic<int> EraseCount; - -struct Erase { - static void apply( MyTable& table, int i ) { - bool b; - if(i&4) { - if(i&8) { - MyTable::const_accessor a; - b = table.find( a, MyKey::make(i) ) && table.erase( a ); - } else { - MyTable::accessor a; - b = table.find( a, MyKey::make(i) ) && table.erase( a ); - } - } else - b = table.erase( MyKey::make(i) ); - if( b ) ++EraseCount; - ASSERT( table.count(MyKey::make(i)) == 0, NULL ); - } -}; - -static const int IE_SIZE = 2; -tbb::atomic<YourTable::size_type> InsertEraseCount[IE_SIZE]; - -struct InsertErase { - static void apply( YourTable& table, int i ) { - if ( i%3 ) { - int key = i%IE_SIZE; - if ( table.insert( std::make_pair(MyKey::make(key), MyData2()) ) ) - ++InsertEraseCount[key]; - } else { - int key = i%IE_SIZE; - if( i&1 ) { - YourTable::accessor res; - if(table.find( res, MyKey::make(key) ) && table.erase( res ) ) - --InsertEraseCount[key]; - } else { - YourTable::const_accessor res; - if(table.find( res, MyKey::make(key) ) && table.erase( res ) ) - --InsertEraseCount[key]; - } - } - } -}; - -// Test for the deadlock discussed at: -// 
http://softwarecommunity.intel.com/isn/Community/en-US/forums/permalink/30253302/30253302/ShowThread.aspx#30253302 -struct InnerInsert { - static void apply( YourTable& table, int i ) { - YourTable::accessor a1, a2; - if(i&1) __TBB_Yield(); - table.insert( a1, MyKey::make(1) ); - __TBB_Yield(); - table.insert( a2, MyKey::make(1 + (1<<30)) ); // the same chain - table.erase( a2 ); // if erase by key it would lead to deadlock for single thread - } -}; - -#include "harness_barrier.h" -// Test for the misuse of constness -struct FakeExclusive : NoAssign { - Harness::SpinBarrier& barrier; - YourTable& table; - FakeExclusive(Harness::SpinBarrier& b, YourTable&t) : barrier(b), table(t) {} - void operator()( int i ) const { - if(i) { - YourTable::const_accessor real_ca; - // const accessor on non-const table aquired as reader (shared) - ASSERT( table.find(real_ca,MyKey::make(1)), NULL ); - barrier.wait(); // item can be erased - Harness::Sleep(10); // let it enter the erase - real_ca->second.value_of(); // check the state while holding accessor - } else { - YourTable::accessor fake_ca; - const YourTable &const_table = table; - // non-const accessor on const table aquired as reader (shared) - ASSERT( const_table.find(fake_ca,MyKey::make(1)), NULL ); - barrier.wait(); // readers aquired - // can mistakenly remove the item while other readers still refers to it - table.erase( fake_ca ); - } - } -}; - -template<typename Op, typename MyTable> -class TableOperation: NoAssign { - MyTable& my_table; -public: - void operator()( const tbb::blocked_range<int>& range ) const { - for( int i=range.begin(); i!=range.end(); ++i ) - Op::apply(my_table,i); - } - TableOperation( MyTable& table ) : my_table(table) {} -}; - -template<typename Op, typename TableType> -void DoConcurrentOperations( TableType& table, int n, const char* what, int nthread ) { - REMARK("testing %s with %d threads\n",what,nthread); - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for( tbb::blocked_range<int>(0,n,100), TableOperation<Op,TableType>(table) ); - tbb::tick_count t1 = tbb::tick_count::now(); - REMARK("time for %s = %g with %d threads\n",what,(t1-t0).seconds(),nthread); -} - -//! Test traversing the table with an iterator. 
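Everything in this file revolves around concurrent_hash_map's two accessor types: accessor takes a per-element write lock, const_accessor a shared read lock, and the lock is held for exactly the accessor's lifetime, which is what makes the erase-through-const-accessor misuse above dangerous. A minimal sketch with the default hash policy (not a verbatim excerpt of the tests):

    #include "tbb/concurrent_hash_map.h"
    #include <cassert>
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> Table;

    int main() {
        Table table;
        {
            Table::accessor a;                     // exclusive (write) lock
            table.insert(a, std::string("apples"));
            a->second = 5;
        }                                          // lock released here
        {
            Table::const_accessor ca;              // shared (read) lock
            assert(table.find(ca, std::string("apples")));
            assert(ca->second == 5);
        }
        return 0;
    }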
-void TraverseTable( MyTable& table, size_t n, size_t expected_size ) { - REMARK("testing traversal\n"); - size_t actual_size = table.size(); - ASSERT( actual_size==expected_size, NULL ); - size_t count = 0; - bool* array = new bool[n]; - memset( array, 0, n*sizeof(bool) ); - const MyTable& const_table = table; - MyTable::const_iterator ci = const_table.begin(); - for( MyTable::iterator i = table.begin(); i!=table.end(); ++i ) { - // Check iterator - int k = i->first.value_of(); - ASSERT( UseKey(k), NULL ); - ASSERT( (*i).first.value_of()==k, NULL ); - ASSERT( 0<=k && size_t(k)<n, "out of bounds key" ); - ASSERT( !array[k], "duplicate key" ); - array[k] = true; - ++count; - - // Check lower/upper bounds - std::pair<MyTable::iterator, MyTable::iterator> er = table.equal_range(i->first); - std::pair<MyTable::const_iterator, MyTable::const_iterator> cer = const_table.equal_range(i->first); - ASSERT(cer.first == er.first && cer.second == er.second, NULL); - ASSERT(cer.first == i, NULL); - ASSERT(std::distance(cer.first, cer.second) == 1, NULL); - - // Check const_iterator - MyTable::const_iterator cic = ci++; - ASSERT( cic->first.value_of()==k, NULL ); - ASSERT( (*cic).first.value_of()==k, NULL ); - } - ASSERT( ci==const_table.end(), NULL ); - delete[] array; - if( count!=expected_size ) { - REPORT("Line %d: count=%ld but should be %ld\n",__LINE__,long(count),long(expected_size)); - } -} - -typedef tbb::atomic<unsigned char> AtomicByte; - -template<typename RangeType> -struct ParallelTraverseBody: NoAssign { - const size_t n; - AtomicByte* const array; - ParallelTraverseBody( AtomicByte array_[], size_t n_ ) : - n(n_), - array(array_) - {} - void operator()( const RangeType& range ) const { - for( typename RangeType::iterator i = range.begin(); i!=range.end(); ++i ) { - int k = i->first.value_of(); - ASSERT( 0<=k && size_t(k)<n, NULL ); - ++array[k]; - } - } -}; - -void Check( AtomicByte array[], size_t n, size_t expected_size ) { - if( expected_size ) - for( size_t k=0; k<n; ++k ) { - if( array[k] != int(UseKey(k)) ) { - REPORT("array[%d]=%d != %d=UseKey(%d)\n", - int(k), int(array[k]), int(UseKey(k)), int(k)); - ASSERT(false,NULL); - } - } -} - -//! 
Test traversing the table with a parallel range
-void ParallelTraverseTable( MyTable& table, size_t n, size_t expected_size ) {
-    REMARK("testing parallel traversal\n");
-    ASSERT( table.size()==expected_size, NULL );
-    AtomicByte* array = new AtomicByte[n];
-
-    memset( array, 0, n*sizeof(AtomicByte) );
-    MyTable::range_type r = table.range(10);
-    tbb::parallel_for( r, ParallelTraverseBody<MyTable::range_type>( array, n ));
-    Check( array, n, expected_size );
-
-    const MyTable& const_table = table;
-    memset( array, 0, n*sizeof(AtomicByte) );
-    MyTable::const_range_type cr = const_table.range(10);
-    tbb::parallel_for( cr, ParallelTraverseBody<MyTable::const_range_type>( array, n ));
-    Check( array, n, expected_size );
-
-    delete[] array;
-}
-
-void TestInsertFindErase( int nthread ) {
-    int n=250000;
-
-    // compute m = number of unique keys
-    int m = 0;
-    for( int i=0; i<n; ++i )
-        m += UseKey(i);
-
-    MyAllocator a; a.items_freed = a.frees = 100;
-    ASSERT( MyDataCount==0, NULL );
-    MyTable table(a);
-    TraverseTable(table,n,0);
-    ParallelTraverseTable(table,n,0);
-    CheckAllocator(table, 0, 100);
-
-    int expected_allocs = 0, expected_frees = 100;
-#if __TBB_INITIALIZER_LISTS_PRESENT
-    for ( int i = 0; i < 2; ++i ) {
-        if ( i==0 )
-            DoConcurrentOperations<InsertInitList, MyTable>( table, n, "insert(std::initializer_list)", nthread );
-        else
-#endif
-            DoConcurrentOperations<Insert, MyTable>( table, n, "insert", nthread );
-        ASSERT( MyDataCount == m, NULL );
-        TraverseTable( table, n, m );
-        ParallelTraverseTable( table, n, m );
-        expected_allocs += m;
-        CheckAllocator( table, expected_allocs, expected_frees );
-
-        DoConcurrentOperations<Find, MyTable>( table, n, "find", nthread );
-        ASSERT( MyDataCount == m, NULL );
-        CheckAllocator( table, expected_allocs, expected_frees );
-
-        DoConcurrentOperations<FindConst, MyTable>( table, n, "find(const)", nthread );
-        ASSERT( MyDataCount == m, NULL );
-        CheckAllocator( table, expected_allocs, expected_frees );
-
-        EraseCount = 0;
-        DoConcurrentOperations<Erase, MyTable>( table, n, "erase", nthread );
-        ASSERT( EraseCount == m, NULL );
-        ASSERT( MyDataCount == 0, NULL );
-        TraverseTable( table, n, 0 );
-        expected_frees += m;
-        CheckAllocator( table, expected_allocs, expected_frees );
-
-        bad_hashing = true;
-        table.clear();
-        bad_hashing = false;
-#if __TBB_INITIALIZER_LISTS_PRESENT
-    }
-#endif
-
-    if(nthread > 1) {
-        YourTable ie_table;
-        for( int i=0; i<IE_SIZE; ++i )
-            InsertEraseCount[i] = 0;
-        DoConcurrentOperations<InsertErase,YourTable>(ie_table,n/2,"insert_erase",nthread);
-        for( int i=0; i<IE_SIZE; ++i )
-            ASSERT( InsertEraseCount[i]==ie_table.count(MyKey::make(i)), NULL );
-
-        DoConcurrentOperations<InnerInsert,YourTable>(ie_table,2000,"inner insert",nthread);
-        Harness::SpinBarrier barrier(nthread);
-        REMARK("testing erase on fake exclusive accessor\n");
-        NativeParallelFor( nthread, FakeExclusive(barrier, ie_table));
-    }
-}
-
-volatile int Counter;
-
-class AddToTable: NoAssign {
-    MyTable& my_table;
-    const int my_nthread;
-    const int my_m;
-public:
-    AddToTable( MyTable& table, int nthread, int m ) : my_table(table), my_nthread(nthread), my_m(m) {}
-    void operator()( int ) const {
-        for( int i=0; i<my_m; ++i ) {
-            // Busy wait to synchronize threads
-            int j = 0;
-            while( Counter<i ) {
-                if( ++j==1000000 ) {
-                    // If Counter<i after a million iterations, then we almost surely have
-                    // more logical threads than physical threads, and should yield in
-                    // order to let suspended logical threads make progress.
- j = 0; - __TBB_Yield(); - } - } - // Now all threads attempt to simultaneously insert a key. - int k; - { - MyTable::accessor a; - MyKey key = MyKey::make(i); - if( my_table.insert( a, key ) ) - a->second.set_value( 1 ); - else - a->second.set_value( a->second.value_of()+1 ); - k = a->second.value_of(); - } - if( k==my_nthread ) - Counter=i+1; - } - } -}; - -class RemoveFromTable: NoAssign { - MyTable& my_table; - const int my_nthread; - const int my_m; -public: - RemoveFromTable( MyTable& table, int nthread, int m ) : my_table(table), my_nthread(nthread), my_m(m) {} - void operator()(int) const { - for( int i=0; i<my_m; ++i ) { - bool b; - if(i&4) { - if(i&8) { - MyTable::const_accessor a; - b = my_table.find( a, MyKey::make(i) ) && my_table.erase( a ); - } else { - MyTable::accessor a; - b = my_table.find( a, MyKey::make(i) ) && my_table.erase( a ); - } - } else - b = my_table.erase( MyKey::make(i) ); - if( b ) ++EraseCount; - } - } -}; - -//! Test for memory leak in concurrent_hash_map (TR #153). -void TestConcurrency( int nthread ) { - REMARK("testing multiple insertions/deletions of same key with %d threads\n", nthread); - { - ASSERT( MyDataCount==0, NULL ); - MyTable table; - const int m = 1000; - Counter = 0; - tbb::tick_count t0 = tbb::tick_count::now(); - NativeParallelFor( nthread, AddToTable(table,nthread,m) ); - tbb::tick_count t1 = tbb::tick_count::now(); - REMARK("time for %u insertions = %g with %d threads\n",unsigned(MyDataCount),(t1-t0).seconds(),nthread); - ASSERT( MyDataCount==m, "memory leak detected" ); - - EraseCount = 0; - t0 = tbb::tick_count::now(); - NativeParallelFor( nthread, RemoveFromTable(table,nthread,m) ); - t1 = tbb::tick_count::now(); - REMARK("time for %u deletions = %g with %d threads\n",unsigned(EraseCount),(t1-t0).seconds(),nthread); - ASSERT( MyDataCount==0, "memory leak detected" ); - ASSERT( EraseCount==m, "return value of erase() is broken" ); - - CheckAllocator(table, m, m, /*exact*/nthread <= 1); - } - ASSERT( MyDataCount==0, "memory leak detected" ); -} - -void TestTypes() { - AssertSameType( static_cast<MyTable::key_type*>(0), static_cast<MyKey*>(0) ); - AssertSameType( static_cast<MyTable::mapped_type*>(0), static_cast<MyData*>(0) ); - AssertSameType( static_cast<MyTable::value_type*>(0), static_cast<std::pair<const MyKey,MyData>*>(0) ); - AssertSameType( static_cast<MyTable::accessor::value_type*>(0), static_cast<MyTable::value_type*>(0) ); - AssertSameType( static_cast<MyTable::const_accessor::value_type*>(0), static_cast<const MyTable::value_type*>(0) ); - AssertSameType( static_cast<MyTable::size_type*>(0), static_cast<size_t*>(0) ); - AssertSameType( static_cast<MyTable::difference_type*>(0), static_cast<ptrdiff_t*>(0) ); -} - -template<typename Iterator, typename T> -void TestIteratorTraits() { - AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) ); - AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) ); - AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) ); - AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::forward_iterator_tag*>(0) ); - T x; - typename Iterator::reference xr = x; - typename Iterator::pointer xp = &x; - ASSERT( &xr==xp, NULL ); -} - -template<typename Iterator1, typename Iterator2> -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j), k; - ASSERT( i==j, NULL ); ASSERT( !(i!=j), NULL ); - k = j; - ASSERT( k==j, NULL ); ASSERT( !(k!=j), NULL ); -} - 
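// The deleted tests around this point exercise tbb::concurrent_hash_map through
// its accessor API: an accessor holds an exclusive (write) lock on one element
// for as long as it is alive, while a const_accessor holds a shared (read) lock;
// the FakeExclusive case above deliberately violates that invariant. A minimal
// sketch of the pattern in application code -- IntTable, increment() and read()
// are illustrative names, not part of these tests:

#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<int, int> IntTable;

void increment(IntTable& table, int key) {
    IntTable::accessor a;           // exclusive lock held while 'a' is alive
    if (table.insert(a, key))       // true => (key, int()) was newly inserted
        a->second = 1;
    else
        a->second += 1;
}                                   // lock released when 'a' is destroyed

int read(const IntTable& table, int key) {
    IntTable::const_accessor ca;    // shared lock: many readers may hold one
    return table.find(ca, key) ? ca->second : 0;   // 0 if the key is absent
}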
-template<typename Range1, typename Range2> -void TestRangeAssignment( Range2 r2 ) { - Range1 r1(r2); r1 = r2; -} -//------------------------------------------------------------------------ -// Test for copy constructor and assignment -//------------------------------------------------------------------------ - -template<typename MyTable> -static void FillTable( MyTable& x, int n ) { - for( int i=1; i<=n; ++i ) { - MyKey key( MyKey::make(-i) ); // hash values must not be specified in direct order - typename MyTable::accessor a; - bool b = x.insert(a,key); - ASSERT(b, NULL); - a->second.set_value( i*i ); - } -} - -template<typename MyTable> -static void CheckTable( const MyTable& x, int n ) { - ASSERT( x.size()==size_t(n), "table is different size than expected" ); - ASSERT( x.empty()==(n==0), NULL ); - ASSERT( x.size()<=x.max_size(), NULL ); - for( int i=1; i<=n; ++i ) { - MyKey key( MyKey::make(-i) ); - typename MyTable::const_accessor a; - bool b = x.find(a,key); - ASSERT( b, NULL ); - ASSERT( a->second.value_of()==i*i, NULL ); - } - int count = 0; - int key_sum = 0; - for( typename MyTable::const_iterator i(x.begin()); i!=x.end(); ++i ) { - ++count; - key_sum += -i->first.value_of(); - } - ASSERT( count==n, NULL ); - ASSERT( key_sum==n*(n+1)/2, NULL ); -} - -static void TestCopy() { - REMARK("testing copy\n"); - MyTable t1; - for( int i=0; i<10000; i=(i<100 ? i+1 : i*3) ) { - MyDataCount = 0; - - FillTable(t1,i); - // Do not call CheckTable(t1,i) before copying, it enforces rehashing - - MyTable t2(t1); - // Check that copy constructor did not mangle source table. - CheckTable(t1,i); - swap(t1, t2); - CheckTable(t1,i); - ASSERT( !(t1 != t2), NULL ); - - // Clear original table - t2.clear(); - swap(t2, t1); - CheckTable(t1,0); - - // Verify that copy of t1 is correct, even after t1 is cleared. - CheckTable(t2,i); - t2.clear(); - t1.swap( t2 ); - CheckTable(t1,0); - CheckTable(t2,0); - ASSERT( MyDataCount==0, "data leak?" ); - } -} - -void TestAssignment() { - REMARK("testing assignment\n"); - for( int i=0; i<1000; i=(i<30 ? i+1 : i*5) ) { - for( int j=0; j<1000; j=(j<30 ? j+1 : j*7) ) { - MyTable t1; - MyTable t2; - FillTable(t1,i); - FillTable(t2,j); - ASSERT( (t1 == t2) == (i == j), NULL ); - CheckTable(t2,j); - - MyTable& tref = t2=t1; - ASSERT( &tref==&t2, NULL ); - ASSERT( t1 == t2, NULL ); - CheckTable(t1,i); - CheckTable(t2,i); - - t1.clear(); - CheckTable(t1,0); - CheckTable(t2,i); - ASSERT( MyDataCount==i, "data leak?" ); - - t2.clear(); - CheckTable(t1,0); - CheckTable(t2,0); - ASSERT( MyDataCount==0, "data leak?" 
); - } - } -} - -void TestIteratorsAndRanges() { - REMARK("testing iterators compliance\n"); - TestIteratorTraits<MyTable::iterator,MyTable::value_type>(); - TestIteratorTraits<MyTable::const_iterator,const MyTable::value_type>(); - - MyTable v; - MyTable const &u = v; - - TestIteratorAssignment<MyTable::const_iterator>( u.begin() ); - TestIteratorAssignment<MyTable::const_iterator>( v.begin() ); - TestIteratorAssignment<MyTable::iterator>( v.begin() ); - // doesn't compile as expected: TestIteratorAssignment<typename V::iterator>( u.begin() ); - - // check for non-existing - ASSERT(v.equal_range(MyKey::make(-1)) == std::make_pair(v.end(), v.end()), NULL); - ASSERT(u.equal_range(MyKey::make(-1)) == std::make_pair(u.end(), u.end()), NULL); - - REMARK("testing ranges compliance\n"); - TestRangeAssignment<MyTable::const_range_type>( u.range() ); - TestRangeAssignment<MyTable::const_range_type>( v.range() ); - TestRangeAssignment<MyTable::range_type>( v.range() ); - // doesn't compile as expected: TestRangeAssignment<typename V::range_type>( u.range() ); - - REMARK("testing construction and insertion from iterators range\n"); - FillTable( v, 1000 ); - MyTable2 t(v.begin(), v.end()); - v.rehash(); - CheckTable(t, 1000); - t.insert(v.begin(), v.end()); // do nothing - CheckTable(t, 1000); - t.clear(); - t.insert(v.begin(), v.end()); // restore - CheckTable(t, 1000); - - REMARK("testing comparison\n"); - typedef tbb::concurrent_hash_map<MyKey,MyData2,YourHashCompare,MyAllocator> YourTable1; - typedef tbb::concurrent_hash_map<MyKey,MyData2,YourHashCompare> YourTable2; - YourTable1 t1; - FillTable( t1, 10 ); - CheckTable(t1, 10 ); - YourTable2 t2(t1.begin(), t1.end()); - MyKey key( MyKey::make(-5) ); MyData2 data; - ASSERT(t2.erase(key), NULL); - YourTable2::accessor a; - ASSERT(t2.insert(a, key), NULL); - data.set_value(0); a->second = data; - ASSERT( t1 != t2, NULL); - data.set_value(5*5); a->second = data; - ASSERT( t1 == t2, NULL); -} - -void TestRehash() { - REMARK("testing rehashing\n"); - MyTable w; - w.insert( std::make_pair(MyKey::make(-5), MyData()) ); - w.rehash(); // without this, assertion will fail - MyTable::iterator it = w.begin(); - int i = 0; // check for non-rehashed buckets - for( ; it != w.end(); i++ ) - w.count( (it++)->first ); - ASSERT( i == 1, NULL ); - for( i=0; i<1000; i=(i<29 ? 
i+1 : i*2) ) { - for( int j=max(256+i, i*2); j<10000; j*=3 ) { - MyTable v; - FillTable( v, i ); - ASSERT(int(v.size()) == i, NULL); - ASSERT(int(v.bucket_count()) <= j, NULL); - v.rehash( j ); - ASSERT(int(v.bucket_count()) >= j, NULL); - CheckTable( v, i ); - } - } -} - -#if TBB_USE_EXCEPTIONS -void TestExceptions() { - typedef local_counting_allocator<tbb::tbb_allocator<MyData2> > allocator_t; - typedef tbb::concurrent_hash_map<MyKey,MyData2,MyHashCompare,allocator_t> ThrowingTable; - enum methods { - zero_method = 0, - ctor_copy, op_assign, op_insert, - all_methods - }; - REMARK("testing exception-safety guarantees\n"); - ThrowingTable src; - FillTable( src, 1000 ); - ASSERT( MyDataCount==1000, NULL ); - - try { - for(int t = 0; t < 2; t++) // exception type - for(int m = zero_method+1; m < all_methods; m++) - { - allocator_t a; - if(t) MyDataCountLimit = 101; - else a.set_limits(101); - ThrowingTable victim(a); - MyDataCount = 0; - - try { - switch(m) { - case ctor_copy: { - ThrowingTable acopy(src, a); - } break; - case op_assign: { - victim = src; - } break; - case op_insert: { - FillTable( victim, 1000 ); - } break; - default:; - } - ASSERT(false, "should throw an exception"); - } catch(std::bad_alloc &e) { - MyDataCountLimit = 0; - size_t size = victim.size(); - switch(m) { - case op_assign: - ASSERT( MyDataCount==100, "data leak?" ); - ASSERT( size>=100, NULL ); - CheckAllocator(victim, 100+t, t); - case ctor_copy: - CheckTable(src, 1000); - break; - case op_insert: - ASSERT( size==size_t(100-t), NULL ); - ASSERT( MyDataCount==100-t, "data leak?" ); - CheckTable(victim, 100-t); - CheckAllocator(victim, 100, t); - break; - - default:; // nothing to check here - } - REMARK("Exception %d: %s\t- ok ()\n", m, e.what()); - } - catch ( ... ) { - ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception" ); - } - } - } catch(...) 
{ - ASSERT(false, "unexpected exception"); - } - src.clear(); MyDataCount = 0; -} -#endif /* TBB_USE_EXCEPTIONS */ - - -#if __TBB_INITIALIZER_LISTS_PRESENT -#include "test_initializer_list.h" - -struct test_insert { - template<typename container_type, typename element_type> - static void do_test( std::initializer_list<element_type> il, container_type const& expected ) { - container_type vd; - vd.insert( il ); - ASSERT( vd == expected, "inserting with an initializer list failed" ); - } -}; - -void TestInitList(){ - using namespace initializer_list_support_tests; - REMARK("testing initializer_list methods \n"); - - typedef tbb::concurrent_hash_map<int,int> ch_map_type; - std::initializer_list<ch_map_type::value_type> pairs_il = {{1,1},{2,2},{3,3},{4,4},{5,5}}; - - TestInitListSupportWithoutAssign<ch_map_type, test_insert>( pairs_il ); - TestInitListSupportWithoutAssign<ch_map_type, test_insert>( {} ); -} -#endif //if __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_RANGE_BASED_FOR_PRESENT -#include "test_range_based_for.h" - -void TestRangeBasedFor(){ - using namespace range_based_for_support_tests; - - REMARK("testing range based for loop compatibility \n"); - typedef tbb::concurrent_hash_map<int,int> ch_map; - ch_map a_ch_map; - - const int sequence_length = 100; - for (int i = 1; i <= sequence_length; ++i){ - a_ch_map.insert(ch_map::value_type(i,i)); - } - - ASSERT( range_based_for_accumulate(a_ch_map, pair_second_summer(), 0) == gauss_summ_of_int_sequence(sequence_length), "incorrect accumulated value generated via range based for ?"); -} -#endif //if __TBB_RANGE_BASED_FOR_PRESENT - -#include "harness_defs.h" - -// The helper to run a test only when a default construction is present. -template <bool default_construction_present> struct do_default_construction_test { - template<typename FuncType> void operator() ( FuncType func ) const { func(); } -}; -template <> struct do_default_construction_test<false> { - template<typename FuncType> void operator()( FuncType ) const {} -}; - -template <typename Table> -class test_insert_by_key : NoAssign { - typedef typename Table::value_type value_type; - Table &my_c; - const value_type &my_value; -public: - test_insert_by_key( Table &c, const value_type &value ) : my_c(c), my_value(value) {} - void operator()() const { - { - typename Table::accessor a; - ASSERT( my_c.insert( a, my_value.first ), NULL ); - ASSERT( Harness::IsEqual()(a->first, my_value.first), NULL ); - a->second = my_value.second; - } { - typename Table::const_accessor ca; - ASSERT( !my_c.insert( ca, my_value.first ), NULL ); - ASSERT( Harness::IsEqual()(ca->first, my_value.first), NULL); - ASSERT( Harness::IsEqual()(ca->second, my_value.second), NULL); - } - } -}; - -#include <vector> -#include <list> -#include <algorithm> - -template <typename Table, typename Iterator, typename Range = typename Table::range_type> -class test_range : NoAssign { - typedef typename Table::value_type value_type; - Table &my_c; - const std::list<value_type> &my_lst; - std::vector< tbb::atomic<bool> >& my_marks; -public: - test_range( Table &c, const std::list<value_type> &lst, std::vector< tbb::atomic<bool> > &marks ) : my_c(c), my_lst(lst), my_marks(marks) { - std::fill( my_marks.begin(), my_marks.end(), false ); - } - void operator()( const Range &r ) const { do_test_range( r.begin(), r.end() ); } - void do_test_range( Iterator i, Iterator j ) const { - for ( Iterator it = i; it != j; ) { - Iterator it_prev = it++; - typename std::list<value_type>::const_iterator it2 = std::search( my_lst.begin(), 
my_lst.end(), it_prev, it, Harness::IsEqual() ); - ASSERT( it2 != my_lst.end(), NULL ); - typename std::list<value_type>::difference_type dist = std::distance( my_lst.begin(), it2 ); - ASSERT( !my_marks[dist], NULL ); - my_marks[dist] = true; - } - } -}; - -template <bool default_construction_present, typename Table> -class check_value : NoAssign { - typedef typename Table::const_iterator const_iterator; - typedef typename Table::iterator iterator; - typedef typename Table::size_type size_type; - Table &my_c; -public: - check_value( Table &c ) : my_c(c) {} - void operator()(const typename Table::value_type &value ) { - const Table &const_c = my_c; - ASSERT( my_c.count( value.first ) == 1, NULL ); - { // tests with a const accessor. - typename Table::const_accessor ca; - // find - ASSERT( my_c.find( ca, value.first ), NULL); - ASSERT( !ca.empty() , NULL); - ASSERT( Harness::IsEqual()(ca->first, value.first), NULL ); - ASSERT( Harness::IsEqual()(ca->second, value.second), NULL ); - // erase - ASSERT( my_c.erase( ca ), NULL ); - ASSERT( my_c.count( value.first ) == 0, NULL ); - // insert (pair) - ASSERT( my_c.insert( ca, value ), NULL); - ASSERT( Harness::IsEqual()(ca->first, value.first), NULL ); - ASSERT( Harness::IsEqual()(ca->second, value.second), NULL ); - } { // tests with a non-const accessor. - typename Table::accessor a; - // find - ASSERT( my_c.find( a, value.first ), NULL); - ASSERT( !a.empty() , NULL); - ASSERT( Harness::IsEqual()(a->first, value.first), NULL ); - ASSERT( Harness::IsEqual()(a->second, value.second), NULL ); - // erase - ASSERT( my_c.erase( a ), NULL ); - ASSERT( my_c.count( value.first ) == 0, NULL ); - // insert - ASSERT( my_c.insert( a, value ), NULL); - ASSERT( Harness::IsEqual()(a->first, value.first), NULL ); - ASSERT( Harness::IsEqual()(a->second, value.second), NULL ); - } - // erase by key - ASSERT( my_c.erase( value.first ), NULL ); - ASSERT( my_c.count( value.first ) == 0, NULL ); - do_default_construction_test<default_construction_present>()(test_insert_by_key<Table>( my_c, value )); - // insert by value - ASSERT( my_c.insert( value ) != default_construction_present, NULL ); - // equal_range - std::pair<iterator,iterator> r1 = my_c.equal_range( value.first ); - iterator r1_first_prev = r1.first++; - ASSERT( Harness::IsEqual()( *r1_first_prev, value ) && Harness::IsEqual()( r1.first, r1.second ), NULL ); - std::pair<const_iterator,const_iterator> r2 = const_c.equal_range( value.first ); - const_iterator r2_first_prev = r2.first++; - ASSERT( Harness::IsEqual()( *r2_first_prev, value ) && Harness::IsEqual()( r2.first, r2.second ), NULL ); - } -}; - -#include "tbb/task_scheduler_init.h" - -template <typename Value, typename U = Value> -struct CompareTables { - template <typename T> - static bool IsEqual( const T& t1, const T& t2 ) { - return (t1 == t2) && !(t1 != t2); - } -}; - -#if __TBB_CPP11_SMART_POINTERS_PRESENT -template <typename U> -struct CompareTables< std::pair<const std::weak_ptr<U>, std::weak_ptr<U> > > { - template <typename T> - static bool IsEqual( const T&, const T& ) { - /* do nothing for std::weak_ptr */ - return true; - } -}; -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ - -template <bool default_construction_present, typename Table> -void Examine( Table c, const std::list<typename Table::value_type> &lst) { - typedef const Table const_table; - typedef typename Table::const_iterator const_iterator; - typedef typename Table::iterator iterator; - typedef typename Table::value_type value_type; - typedef typename Table::size_type 
size_type; - - ASSERT( !c.empty(), NULL ); - ASSERT( c.size() == lst.size(), NULL ); - ASSERT( c.max_size() >= c.size(), NULL ); - - const check_value<default_construction_present,Table> cv(c); - std::for_each( lst.begin(), lst.end(), cv ); - - std::vector< tbb::atomic<bool> > marks( lst.size() ); - - test_range<Table,iterator>( c, lst, marks ).do_test_range( c.begin(), c.end() ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - test_range<const_table,const_iterator>( c, lst, marks ).do_test_range( c.begin(), c.end() ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - tbb::task_scheduler_init init; - - typedef typename Table::range_type range_type; - tbb::parallel_for( c.range(), test_range<Table,typename range_type::iterator,range_type>( c, lst, marks ) ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - const_table const_c = c; - ASSERT( CompareTables<value_type>::IsEqual( c, const_c ), NULL ); - - typedef typename const_table::const_range_type const_range_type; - tbb::parallel_for( c.range(), test_range<const_table,typename const_range_type::iterator,const_range_type>( const_c, lst, marks ) ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - const size_type new_bucket_count = 2*c.bucket_count(); - c.rehash( new_bucket_count ); - ASSERT( c.bucket_count() >= new_bucket_count, NULL ); - - Table c2; - typename std::list<value_type>::const_iterator begin5 = lst.begin(); - std::advance( begin5, 5 ); - c2.insert( lst.begin(), begin5 ); - std::for_each( lst.begin(), begin5, check_value<default_construction_present, Table>( c2 ) ); - - c2.swap( c ); - ASSERT( CompareTables<value_type>::IsEqual( c2, const_c ), NULL ); - ASSERT( c.size() == 5, NULL ); - std::for_each( lst.begin(), lst.end(), check_value<default_construction_present,Table>(c2) ); - - tbb::swap( c, c2 ); - ASSERT( CompareTables<value_type>::IsEqual( c, const_c ), NULL ); - ASSERT( c2.size() == 5, NULL ); - - c2.clear(); - ASSERT( CompareTables<value_type>::IsEqual( c2, Table() ), NULL ); - - typename Table::allocator_type a = c.get_allocator(); - value_type *ptr = a.allocate(1); - ASSERT( ptr, NULL ); - a.deallocate( ptr, 1 ); -} - -template <bool default_construction_present, typename Value> -void TypeTester( const std::list<Value> &lst ) { - __TBB_ASSERT( lst.size() >= 5, "Array should have at least 5 elements" ); - typedef typename Value::first_type first_type; - typedef typename Value::second_type second_type; - typedef tbb::concurrent_hash_map<first_type,second_type> ch_map; - // Construct an empty hash map. - ch_map c1; - c1.insert( lst.begin(), lst.end() ); - Examine<default_construction_present>( c1, lst ); -#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN - // Constructor from initializer_list. - typename std::list<Value>::const_iterator it = lst.begin(); - ch_map c2( {*it++, *it++, *it++} ); - c2.insert( it, lst.end() ); - Examine<default_construction_present>( c2, lst ); -#endif - // Copying constructor. - ch_map c3(c1); - Examine<default_construction_present>( c3, lst ); - // Construct with non-default allocator - typedef tbb::concurrent_hash_map< first_type,second_type,tbb::tbb_hash_compare<first_type>,debug_allocator<Value> > ch_map_debug_alloc; - ch_map_debug_alloc c4; - c4.insert( lst.begin(), lst.end() ); - Examine<default_construction_present>( c4, lst ); - // Copying constructor for vector with different allocator type. 
- ch_map_debug_alloc c5(c4); - Examine<default_construction_present>( c5, lst ); - // Construction empty table with n preallocated buckets. - ch_map c6( lst.size() ); - c6.insert( lst.begin(), lst.end() ); - Examine<default_construction_present>( c6, lst ); - ch_map_debug_alloc c7( lst.size() ); - c7.insert( lst.begin(), lst.end() ); - Examine<default_construction_present>( c7, lst ); - // Construction with copying iteration range and given allocator instance. - ch_map c8( c1.begin(), c1.end() ); - Examine<default_construction_present>( c8, lst ); - debug_allocator<Value> allocator; - ch_map_debug_alloc c9( lst.begin(), lst.end(), allocator ); - Examine<default_construction_present>( c9, lst ); -} - -#if __TBB_CPP11_SMART_POINTERS_PRESENT -namespace tbb { - template<> struct tbb_hash_compare< const std::shared_ptr<int> > { - static size_t hash( const std::shared_ptr<int>& ptr ) { return static_cast<size_t>( *ptr ) * interface5::internal::hash_multiplier; } - static bool equal( const std::shared_ptr<int>& ptr1, const std::shared_ptr<int>& ptr2 ) { return ptr1 == ptr2; } - }; - template<> struct tbb_hash_compare< const std::weak_ptr<int> > { - static size_t hash( const std::weak_ptr<int>& ptr ) { return static_cast<size_t>( *ptr.lock() ) * interface5::internal::hash_multiplier; } - static bool equal( const std::weak_ptr<int>& ptr1, const std::weak_ptr<int>& ptr2 ) { return ptr1.lock() == ptr2.lock(); } - }; -} -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ - -void TestCPP11Types() { - const int NUMBER = 10; - - typedef std::pair<const int, int> int_int_t; - std::list<int_int_t> arrIntInt; - for ( int i=0; i<NUMBER; ++i ) arrIntInt.push_back( int_int_t(i, NUMBER-i) ); - TypeTester</*default_construction_present = */true>( arrIntInt ); - -#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT - typedef std::pair<const std::reference_wrapper<const int>, int> ref_int_t; - std::list<ref_int_t> arrRefInt; - for ( std::list<int_int_t>::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it ) - arrRefInt.push_back( ref_int_t( it->first, it->second ) ); - TypeTester</*default_construction_present = */true>( arrRefInt ); - - typedef std::pair< const int, std::reference_wrapper<int> > int_ref_t; - std::list<int_ref_t> arrIntRef; - for ( std::list<int_int_t>::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it ) - arrIntRef.push_back( int_ref_t( it->first, it->second ) ); - TypeTester</*default_construction_present = */false>( arrIntRef ); -#else - REPORT("Known issue: C++11 reference wrapper tests are skipped.\n"); -#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT */ - - typedef std::pair< const int, tbb::atomic<int> > int_tbb_t; - std::list<int_tbb_t> arrIntTbb; - for ( int i=0; i<NUMBER; ++i ) { - tbb::atomic<int> b; - b = NUMBER-i; - arrIntTbb.push_back( int_tbb_t(i, b) ); - } - TypeTester</*default_construction_present = */true>( arrIntTbb ); - -#if __TBB_CPP11_SMART_POINTERS_PRESENT - typedef std::pair< const std::shared_ptr<int>, std::shared_ptr<int> > shr_shr_t; - std::list<shr_shr_t> arrShrShr; - for ( int i=0; i<NUMBER; ++i ) arrShrShr.push_back( shr_shr_t( std::make_shared<int>(i), std::make_shared<int>(NUMBER-i) ) ); - TypeTester< /*default_construction_present = */true>( arrShrShr ); - - typedef std::pair< const std::weak_ptr<int>, std::weak_ptr<int> > wk_wk_t; - std::list< wk_wk_t > arrWkWk; - std::copy( arrShrShr.begin(), arrShrShr.end(), std::back_inserter(arrWkWk) ); - TypeTester< /*default_construction_present = */true>( arrWkWk ); -#else - REPORT("Known issue: C++11 smart pointer 
tests are skipped.\n"); -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ -} - -#if __TBB_CPP11_RVALUE_REF_PRESENT -#include "test_container_move_support.h" - -struct hash_map_move_traits : default_container_traits { - enum{ expected_number_of_items_to_allocate_for_steal_move = 0 }; - - template<typename T> - struct hash_compare { - bool equal( const T& lhs, const T& rhs ) const { - return lhs==rhs; - } - size_t hash( const T& k ) const { - return tbb::tbb_hasher(k); - } - }; - template<typename element_type, typename allocator_type> - struct apply { - typedef tbb::concurrent_hash_map<element_type, element_type, hash_compare<element_type>, allocator_type > type; - }; - - typedef FooPairIterator init_iterator_type; - template<typename hash_map_type, typename iterator> - static bool equal(hash_map_type const& c, iterator begin, iterator end){ - bool equal_sizes = ( static_cast<size_t>(std::distance(begin, end)) == c.size() ); - if (!equal_sizes) - return false; - - for (iterator it = begin; it != end; ++it ){ - if (c.count( (*it).first) == 0){ - return false; - } - } - return true; - } -}; - -void TestMoveSupport(){ - TestMoveConstructor<hash_map_move_traits>(); - TestConstructorWithMoveIterators<hash_map_move_traits>(); - TestMoveAssignOperator<hash_map_move_traits>(); -#if TBB_USE_EXCEPTIONS - TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure<hash_map_move_traits>(); - TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor<hash_map_move_traits>(); -#else - REPORT("Known issue: exception safety tests for C++11 move semantics support are skipped.\n"); -#endif //TBB_USE_EXCEPTIONS -} -#else -void TestMoveSupport(){ - REPORT("Known issue: tests for C++11 move semantics support are skipped.\n"); -} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT -//------------------------------------------------------------------------ -// Test driver -//------------------------------------------------------------------------ -int TestMain () { - if( MinThread<0 ) { - REPORT("ERROR: must use at least one thread\n"); - exit(1); - } - if( MaxThread<2 ) MaxThread=2; - - // Do serial tests - TestTypes(); - TestCopy(); - TestRehash(); - TestAssignment(); - TestIteratorsAndRanges(); -#if __TBB_INITIALIZER_LISTS_PRESENT - TestInitList(); -#endif //__TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_RANGE_BASED_FOR_PRESENT - TestRangeBasedFor(); -#endif //#if __TBB_RANGE_BASED_FOR_PRESENT - -#if TBB_USE_EXCEPTIONS - TestExceptions(); -#endif /* TBB_USE_EXCEPTIONS */ - - TestMoveSupport(); - - // Do concurrency tests. - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - tbb::task_scheduler_init init( nthread ); - TestInsertFindErase( nthread ); - TestConcurrency( nthread ); - } - // check linking - if(bad_hashing) { //should be false - tbb::internal::runtime_warning("none\nERROR: it must not be executed"); - } - - TestCPP11Types(); - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_concurrent_lru_cache.cpp b/src/tbb/src/test/test_concurrent_lru_cache.cpp deleted file mode 100644 index 9a2ff96fc..000000000 --- a/src/tbb/src/test/test_concurrent_lru_cache.cpp +++ /dev/null @@ -1,404 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if _MSC_VER - #pragma warning (disable: 4503) // Suppress "decorated name length exceeded, name was truncated" warning -#endif - -#include "harness_test_cases_framework.h" -#include "harness.h" -#include "harness_barrier.h" - -#include <utility> - -#ifdef TEST_COARSE_GRAINED_LOCK_IMPLEMENTATION - #include "../perf/coarse_grained_raii_lru_cache.h" - #define selected_raii_lru_cache_impl coarse_grained_raii_lru_cache -#else - #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1 - #include "tbb/concurrent_lru_cache.h" - #define selected_raii_lru_cache_impl tbb::concurrent_lru_cache -#endif - -#include "tbb/task_scheduler_init.h" - -namespace helpers{ - // Busy work and calibration helpers - unsigned int one_us_iters = 345; // default value - - // if user wants to calibrate to microseconds on particular machine, call - // this at beginning of program; sets one_us_iters to number of iters to - // busy_wait for approx. 
1 us
-//    void calibrate_busy_wait() {
-//        tbb::tick_count t0, t1;
-//
-//        t0 = tbb::tick_count::now();
-//        for (volatile unsigned int i=0; i<1000000; ++i) continue;
-//        t1 = tbb::tick_count::now();
-//
-//        one_us_iters = (unsigned int)((1000000.0/(t1-t0).seconds())*0.000001);
-//        printf("one_us_iters: %d\n", one_us_iters);
-//    }
-    void busy_wait(int us)
-    {
-        unsigned int iter = us*one_us_iters;
-        for (volatile unsigned int i=0; i<iter; ++i) continue;
-    }
-}
-namespace helpers{
-    template<class T> void ignore( const T& ) { }
-    //TODO: add test cases for prevent_optimizing_out function
-    template<typename type>
-    void prevent_optimizing_out(type volatile const& s){
-        volatile const type* dummy = &s;
-        ignore(dummy);
-    }
-
-    struct empty_fixture{};
-
-    template<typename argument_type>
-    struct native_for_concurrent_op_repeated:NoAssign{
-        typedef void (*test_function_pointer_type)(argument_type&);
-
-        argument_type& m_counter_ref;
-        test_function_pointer_type m_test_function_pointer_type;
-        std::size_t m_repeat_number;
-        native_for_concurrent_op_repeated(argument_type& counter_ref, test_function_pointer_type action, std::size_t repeat_number)
-            :m_counter_ref(counter_ref), m_test_function_pointer_type(action), m_repeat_number(repeat_number)
-        {}
-        template <typename ignored_parameter_type>
-        void operator()(ignored_parameter_type const&)const{
-            for (size_t i=0; i<m_repeat_number;++i){
-                m_test_function_pointer_type(m_counter_ref);
-            }
-        }
-
-    };
-
-    template <typename counter_type = size_t>
-    struct object_instances_counting_type{
-        counter_type * m_p_count;
-        object_instances_counting_type(): m_p_count (new counter_type){*m_p_count =1; } // to overcome the absence of a constructor in tbb::atomic
-        ~object_instances_counting_type(){ if (! --(*m_p_count)){delete(m_p_count);}}
-        object_instances_counting_type(object_instances_counting_type const& other): m_p_count(other.m_p_count){
-            ++(*m_p_count);
-        }
-        object_instances_counting_type& operator=(object_instances_counting_type other){
-            std::swap(this->m_p_count,other.m_p_count);
-            return *this;
-        }
-        size_t instances_count()const {return *m_p_count;}
-    };
-    typedef object_instances_counting_type<> object_instances_counting_serial_type;
-    typedef object_instances_counting_type<tbb::atomic<std::size_t> > object_instances_counting_concurrent_type;
-
-    namespace object_instances_counting_type_test_cases{
-        namespace serial_tests{
-            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_creation,empty_fixture){
-                ASSERT(object_instances_counting_serial_type().instances_count()==1,"newly created instance by definition has instances_count equal to 1");
-            }
-            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_copy,empty_fixture){
-                object_instances_counting_serial_type source;
-                ASSERT(object_instances_counting_serial_type(source).instances_count()==2,"copy should increase ref count");
-            }
-            TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_assignment,empty_fixture){
-                object_instances_counting_serial_type source;
-                object_instances_counting_serial_type assigned;
-                assigned = source;
-                ASSERT(source.instances_count()==2,"assign should increase ref count");
-                ASSERT(assigned.instances_count()==2,"assign should increase ref count");
-            }
-        }
-        namespace concurrent_tests{
-            typedef native_for_concurrent_op_repeated<object_instances_counting_concurrent_type> native_for_concurrent_op;
-
-            struct native_for_single_op_repeated_fixture{
-                object_instances_counting_concurrent_type source;
-                void
run_native_for_and_assert_source_is_unique(native_for_concurrent_op::test_function_pointer_type operation,const char* msg){ - //TODO: refactor number of threads into separate fixture - const size_t number_of_threads = min(4,tbb::task_scheduler_init::default_num_threads()); - const size_t repeats_per_thread = 1000000; - - NativeParallelFor(number_of_threads , native_for_concurrent_op(source,operation,repeats_per_thread)); - ASSERT(source.instances_count()==1,msg); - } - - }; - TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_copy,native_for_single_op_repeated_fixture){ - struct _{ static void copy(object_instances_counting_concurrent_type& source){ - object_instances_counting_concurrent_type copy(source); - helpers::prevent_optimizing_out(copy); - }}; - run_native_for_and_assert_source_is_unique(&_::copy,"reference counting during copy construction/destruction is not thread safe ?"); - } - TEST_CASE_WITH_FIXTURE(test_object_instances_counting_type_assignment,native_for_single_op_repeated_fixture){ - struct _{ static void assign(object_instances_counting_concurrent_type& source){ - object_instances_counting_concurrent_type assigned; - assigned = source; - helpers::prevent_optimizing_out(assigned); - }}; - run_native_for_and_assert_source_is_unique(&_::assign,"reference counting during assigning/destruction is not thread safe ?"); - } - - } -} -} - -struct get_lru_cache_type{ - - template< typename parameter1, typename parameter2, typename parameter3=void> - struct apply{ - typedef selected_raii_lru_cache_impl<parameter1,parameter2,parameter3> type; - }; - template< typename parameter1, typename parameter2> - struct apply<parameter1,parameter2,void>{ - typedef selected_raii_lru_cache_impl<parameter1,parameter2> type; - }; - -}; - -namespace serial_tests{ - using namespace helpers; - namespace usability{ - namespace compilation_only{ - TEST_CASE_WITH_FIXTURE(test_creation_and_use_interface,empty_fixture){ - struct dummy_function{static int _(int key){return key;}}; - typedef get_lru_cache_type::apply<int,int>::type cache_type; - size_t number_of_lru_history_items = 8; - cache_type cache((&dummy_function::_),(number_of_lru_history_items)); - int dummy_key=0; - cache_type::handle h = cache[dummy_key]; - int value = h.value(); - (void)value; - } - } - namespace behaviour{ - namespace helpers{ - template <size_t id> struct tag{}; - template< typename tag, typename value_and_key_type> - struct call_counting_function{ - static int calls_count; - static value_and_key_type _(value_and_key_type key){ - ++calls_count; - return key; - } - }; - template< typename tag, typename value_and_key_type> - int call_counting_function<tag,value_and_key_type>::calls_count = 0; - - - } - TEST_CASE_WITH_FIXTURE(test_cache_returns_only_values_from_value_function,empty_fixture){ - struct dummy_function{static int _(int /*key*/){return 0xDEADBEEF;}}; - typedef get_lru_cache_type::apply<int,int>::type cache_type; - size_t number_of_lru_history_items = 8; - int dummy_key=1; - cache_type cache((&dummy_function::_),(number_of_lru_history_items)); - ASSERT(dummy_function::_(dummy_key)==cache[dummy_key].value(),"cache operator() must return only values obtained from value_function "); - } - - TEST_CASE_WITH_FIXTURE(test_value_function_called_only_on_cache_miss,empty_fixture){ - typedef helpers::tag<__LINE__> tag; - typedef helpers::call_counting_function<tag,int> function; - typedef get_lru_cache_type::apply<int,int>::type cache_type; - size_t number_of_lru_history_items = 8; - cache_type 
cache((&function::_),(number_of_lru_history_items)); - - int dummy_key=0; - cache[dummy_key]; - cache[dummy_key]; - ASSERT(function::calls_count==1,"value function should be called only on a cache miss"); - } - } - namespace helpers{ - using ::helpers::object_instances_counting_serial_type; - } - namespace helpers{ - template<typename value_type> - struct clonning_function:NoAssign{ - value_type& m_ref_original; - clonning_function(value_type& ref_original):m_ref_original(ref_original){} - template<typename key_type> - value_type operator()(key_type)const{ return m_ref_original;} - }; - } - struct instance_counting_fixture{ - static const size_t number_of_lru_history_items = 8; - - typedef helpers::clonning_function<helpers::object_instances_counting_serial_type> cloner_type; - typedef get_lru_cache_type::apply<size_t,helpers::object_instances_counting_serial_type,cloner_type>::type cache_type; - helpers::object_instances_counting_serial_type source; - cloner_type cloner; - cache_type cache; - - instance_counting_fixture():cloner((source)),cache(cloner,number_of_lru_history_items){} - }; - - TEST_CASE_WITH_FIXTURE(test_cache_stores_unused_objects,instance_counting_fixture){ - for (size_t i=0;i<number_of_lru_history_items;++i){ - cache[i]; - } - ASSERT(source.instances_count()> 1,"cache should store some unused objects "); - } - - TEST_CASE_WITH_FIXTURE(test_cache_stores_no_more_then_X_number_of_unused_objects,instance_counting_fixture){ - for (size_t i=0;i<number_of_lru_history_items+1;++i){ - cache[i]; - } - ASSERT(source.instances_count()== number_of_lru_history_items+1,"cache should respect number of stored unused objects to number passed in constructor"); - } - - namespace helpers{ - template< typename key_type, typename value_type> - struct map_searcher:NoAssign{ - typedef std::map<key_type,value_type> map_type; - map_type & m_map_ref; - map_searcher(map_type & map_ref): m_map_ref(map_ref) {} - value_type& operator()(key_type k){ - typename map_type::iterator it =m_map_ref.find(k); - if (it==m_map_ref.end()){ - it = m_map_ref.insert(it,std::make_pair(k,value_type())); - } - return it->second; - } - }; - } - - struct filled_instance_counting_fixture_with_external_map{ - static const size_t number_of_lru_history_items = 8; - - typedef helpers::map_searcher<size_t,helpers::object_instances_counting_serial_type> map_searcher_type; - typedef map_searcher_type::map_type objects_map_type; - typedef get_lru_cache_type::apply<size_t,helpers::object_instances_counting_serial_type,map_searcher_type>::type cache_type; - map_searcher_type::map_type objects_map; - cache_type cache; - filled_instance_counting_fixture_with_external_map():cache(map_searcher_type(objects_map),number_of_lru_history_items){} - bool is_evicted(size_t k){ - objects_map_type::iterator it =objects_map.find(k); - ASSERT(it!=objects_map.end(),"no value for key - error in test logic ?"); - return it->second.instances_count()==1; - } - void fill_up_cache(size_t lower_bound, size_t upper_bound){ - for (size_t i=lower_bound;i<upper_bound;++i){ - cache[i]; - } - } - }; - - TEST_CASE_WITH_FIXTURE(test_cache_should_evict_unused_objects_lru_order,filled_instance_counting_fixture_with_external_map){ - ASSERT(number_of_lru_history_items > 2,"incorrect test setup"); - fill_up_cache(0,number_of_lru_history_items); - //heat up first element - cache[0]; - //cause eviction - cache[number_of_lru_history_items]; - ASSERT(is_evicted(1) && !is_evicted(0),"cache should evict items in lru order"); - } - - 
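// The fixtures above drive tbb::concurrent_lru_cache, a preview feature enabled
// via TBB_PREVIEW_CONCURRENT_LRU_CACHE: the cache wraps a value function that is
// invoked only on a miss, retains a bounded number of unused values, and hands
// out handles that pin a value against eviction while they are alive. A minimal
// usage sketch -- the load() function and the capacity of 8 are illustrative
// assumptions, not taken from these tests:

#define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
#include "tbb/concurrent_lru_cache.h"

static int load(int key) { return key * key; }  // called only on a cache miss

int main() {
    typedef tbb::concurrent_lru_cache<int, int> cache_type;
    cache_type cache(&load, 8);         // keep at most 8 unused values alive

    cache_type::handle h = cache[42];   // miss: calls load(42); handle pins the value
    int v = h.value();                  // a pinned value cannot be evicted
    return v == 42 * 42 ? 0 : 1;
}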
TEST_CASE_WITH_FIXTURE(test_live_handler_object_prevents_item_from_eviction,filled_instance_counting_fixture_with_external_map){ - cache_type::handle h = cache[0]; - //cause eviction - fill_up_cache(1,number_of_lru_history_items+2); - ASSERT(is_evicted(1) && !is_evicted(0),"cache should not evict items in use"); - } - TEST_CASE_WITH_FIXTURE(test_live_handler_object_is_ref_counted,filled_instance_counting_fixture_with_external_map){ - cache_type::handle h = cache[0]; - { - cache_type::handle h1 = cache[0]; - } - //cause eviction - fill_up_cache(1,number_of_lru_history_items+2); - ASSERT(is_evicted(1) && !is_evicted(0),"cache should not evict items in use"); - } - } -} - - -namespace concurrency_tests{ - namespace helpers{ - using namespace ::helpers; - } - namespace helpers{ - //key_type must be convertible to array index - template< typename key_type, typename value_type, size_t array_size> - struct array_searcher:NoAssign{ - typedef value_type array_type[array_size]; - array_type const& m_array_ref; - array_searcher(array_type const& array_ref): m_array_ref(array_ref) {} - const value_type& operator()(key_type k)const{ - size_t index = k; - ASSERT(k < array_size,"incorrect test setup"); - return m_array_ref[index]; - } - }; - } - - struct filled_instance_counting_fixture_with_external_array{ - static const size_t number_of_lru_history_items = 8; - static const size_t array_size = 16*number_of_lru_history_items; - - typedef helpers::array_searcher<size_t,helpers::object_instances_counting_concurrent_type,array_size> array_searcher_type; - typedef array_searcher_type::array_type objects_array_type; - typedef get_lru_cache_type::apply<size_t,helpers::object_instances_counting_concurrent_type,array_searcher_type>::type cache_type; - array_searcher_type::array_type objects_array; - cache_type cache; - filled_instance_counting_fixture_with_external_array():cache(array_searcher_type(objects_array),number_of_lru_history_items){} - bool is_evicted(size_t k)const{ - return array_searcher_type(objects_array)(k).instances_count()==1; - } - void fill_up_cache(size_t lower_bound, size_t upper_bound){ - for (size_t i=lower_bound;i<upper_bound;++i){ - cache[i]; - } - } - size_t number_of_non_evicted_from_cache()const{ - size_t result=0; - for (size_t i=0; i<array_size; ++i){ - if (!this->is_evicted(i)){ - ++result; - } - } - return result; - } - }; - - - //TODO: make this more reproducible - //TODO: split this test case in two parts - TEST_CASE_WITH_FIXTURE(correctness_of_braces_and_handle_destructor,filled_instance_counting_fixture_with_external_array){ - typedef correctness_of_braces_and_handle_destructor self_type; - struct _{static void use_cache(self_type& tc){ - for (size_t i=0;i<array_size;++i){ - cache_type::handle h=tc.cache[i]; - helpers::prevent_optimizing_out(h.value()); - } - - }}; - static const size_t repeat_number = 2; - static const size_t number_of_threads = 4 * tbb::task_scheduler_init::default_num_threads(); //have 4x over subscription - static const size_t repeats_per_thread = 4; - - for (size_t i=0; i < repeat_number; i++){ - NativeParallelFor(number_of_threads,helpers::native_for_concurrent_op_repeated<self_type>(*this,&_::use_cache,repeats_per_thread)); - fill_up_cache(0,array_size); - ASSERT(number_of_non_evicted_from_cache()==number_of_lru_history_items,"thread safety is broken for cache "); - } - } -} diff --git a/src/tbb/src/test/test_concurrent_monitor.cpp b/src/tbb/src/test/test_concurrent_monitor.cpp deleted file mode 100644 index b248e5688..000000000 --- 
a/src/tbb/src/test/test_concurrent_monitor.cpp +++ /dev/null @@ -1,366 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/concurrent_monitor.h" -#include "tbb/atomic.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "harness.h" -#if _WIN32||_WIN64 -#include "tbb/dynamic_link.cpp" -#endif - -#include "tbb/semaphore.cpp" -#include "tbb/concurrent_monitor.cpp" - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - // Suppress compiler warning about constant conditional expression - #pragma warning (disable: 4127) -#endif - -using namespace tbb; - -//! Queuing lock with concurrent_monitor; to test concurrent_monitor::notify( Predicate p ) -class QueuingMutex { -public: - //! Construct unacquired mutex. - QueuingMutex() { q_tail = NULL; } - - //! The scoped locking pattern - class ScopedLock: internal::no_copy { - void Initialize() { mutex = NULL; } - public: - ScopedLock() {Initialize();} - ScopedLock( QueuingMutex& m, size_t test_mode ) { Initialize(); Acquire(m,test_mode); } - ~ScopedLock() { if( mutex ) Release(); } - void Acquire( QueuingMutex& m, size_t test_mode ); - void Release(); - void SleepPerhaps(); - - private: - QueuingMutex* mutex; - ScopedLock* next; - uintptr_t going; - internal::concurrent_monitor::thread_context thr_ctx; - }; - - friend class ScopedLock; -private: - //! The last competitor requesting the lock - atomic<ScopedLock*> q_tail; - internal::concurrent_monitor waitq; -}; - -struct PredicateEq { - uintptr_t p; - PredicateEq( uintptr_t p_ ) : p(p_) {} - bool operator() ( uintptr_t v ) const {return p==v;} -}; - -struct QueuingMutex_Context { - const QueuingMutex::ScopedLock* lck; - QueuingMutex_Context( QueuingMutex::ScopedLock* l_ ) : lck(l_) {} - uintptr_t operator()() { return uintptr_t(lck); } -}; - -struct QueuingMutex_Until : NoAssign { - uintptr_t& flag; - QueuingMutex_Until( uintptr_t& f_ ) : flag(f_) {} - bool operator()() { return flag!=0ul; } -}; - -//! 
A method to acquire QueuingMutex lock -void QueuingMutex::ScopedLock::Acquire( QueuingMutex& m, size_t test_mode ) -{ - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - mutex = &m; - next = NULL; - going = 0; - - // The fetch_and_store must have release semantics, because we are - // "sending" the fields initialized above to other processors. - ScopedLock* pred = m.q_tail.fetch_and_store<tbb::release>(this); - if( pred ) { -#if TBB_USE_ASSERT - __TBB_control_consistency_helper(); // on "m.q_tail" - ASSERT( !pred->next, "the predecessor has another successor!"); -#endif - pred->next = this; - for( int i=0; i<16; ++i ) { - if( going!=0ul ) break; - __TBB_Yield(); - } - int x = int( test_mode%3 ); - switch( x ) { - case 0: - mutex->waitq.wait( QueuingMutex_Until(going), QueuingMutex_Context(this) ); - break; -#if __TBB_LAMBDAS_PRESENT - case 1: - mutex->waitq.wait( [&](){ return going!=0ul; }, [=]() { return (uintptr_t)this; } ); - break; -#endif - default: - SleepPerhaps(); - break; - } - } - - // Acquire critical section indirectly from previous owner or directly from predecessor. - __TBB_control_consistency_helper(); // on either "m.q_tail" or "going" -} - -//! A method to release QueuingMutex lock -void QueuingMutex::ScopedLock::Release( ) -{ - if( !next ) { - if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - // this was the only item in the queue, and the queue is now empty. - goto done; - } - // Someone in the queue - spin_wait_while_eq( next, (ScopedLock*)0 ); - } - __TBB_store_with_release(next->going, 1); - mutex->waitq.notify( PredicateEq(uintptr_t(next)) ); -done: - Initialize(); -} - -//! Yield and block; go to sleep -void QueuingMutex::ScopedLock::SleepPerhaps() -{ - bool slept = false; - internal::concurrent_monitor& mq = mutex->waitq; - mq.prepare_wait( thr_ctx, uintptr_t(this) ); - while( going==0ul ) { - if( (slept=mq.commit_wait( thr_ctx ))==true && going!=0ul ) - break; - slept = false; - mq.prepare_wait( thr_ctx, uintptr_t(this) ); - } - if( !slept ) - mq.cancel_wait( thr_ctx ); -} - -// Spin lock with concurrent_monitor; to test concurrent_monitor::notify_all() and concurrent_monitor::notify() -class SpinMutex { -public: - //! Construct unacquired mutex. - SpinMutex() : toggle(false) { flag = 0; } - - //! The scoped locking pattern - class ScopedLock: internal::no_copy { - void Initialize() { mutex = NULL; } - public: - ScopedLock() {Initialize();} - ScopedLock( SpinMutex& m, size_t test_mode ) { Initialize(); Acquire(m,test_mode); } - ~ScopedLock() { if( mutex ) Release(); } - void Acquire( SpinMutex& m, size_t test_mode ); - void Release(); - void SleepPerhaps(); - - private: - SpinMutex* mutex; - internal::concurrent_monitor::thread_context thr_ctx; - }; - - friend class ScopedLock; - friend struct SpinMutex_Until; -private: - tbb::atomic<unsigned> flag; - bool toggle; - internal::concurrent_monitor waitq; -}; - -struct SpinMutex_Context { - const SpinMutex::ScopedLock* lck; - SpinMutex_Context( SpinMutex::ScopedLock* l_ ) : lck(l_) {} - uintptr_t operator()() { return uintptr_t(lck); } -}; - -struct SpinMutex_Until { - const SpinMutex* mtx; - SpinMutex_Until( SpinMutex* m_ ) : mtx(m_) {} - bool operator()() { return mtx->flag==0; } -}; - -//! 
A method to acquire SpinMutex lock -void SpinMutex::ScopedLock::Acquire( SpinMutex& m, size_t test_mode ) -{ - mutex = &m; -retry: - if( m.flag.compare_and_swap( 1, 0 )!=0 ) { - int x = int( test_mode%3 ); - switch( x ) { - case 0: - mutex->waitq.wait( SpinMutex_Until(mutex), SpinMutex_Context(this) ); - break; -#if __TBB_LAMBDAS_PRESENT - case 1: - mutex->waitq.wait( [&](){ return mutex->flag==0; }, [=]() { return (uintptr_t)this; } ); - break; -#endif - default: - SleepPerhaps(); - break; - } - goto retry; - } -} - -//! A method to release SpinMutex lock -void SpinMutex::ScopedLock::Release() -{ - bool old_toggle = mutex->toggle; - mutex->toggle = !mutex->toggle; - mutex->flag = 0; - if( old_toggle ) - mutex->waitq.notify_one(); - else - mutex->waitq.notify_all(); -} - -//! Yield and block; go to sleep -void SpinMutex::ScopedLock::SleepPerhaps() -{ - bool slept = false; - internal::concurrent_monitor& mq = mutex->waitq; - mq.prepare_wait( thr_ctx, uintptr_t(this) ); - while( mutex->flag ) { - if( (slept=mq.commit_wait( thr_ctx ))==true ) - break; - mq.prepare_wait( thr_ctx, uintptr_t(this) ); - } - if( !slept ) - mq.cancel_wait( thr_ctx ); -} - -//! A value protected by a mutex. -template<typename M> -struct Counter { - typedef M mutex_type; - M mutex; - long value; -}; - -//! Function object for use with parallel_for.h. -template<typename C, int D> -struct AddOne: NoAssign { - C& counter; - /** Increments counter once for each iteration in the iteration space. */ - void operator()( tbb::blocked_range<size_t>& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - typename C::mutex_type::ScopedLock lock(counter.mutex, i); - counter.value = counter.value+1; - if( D>0 ) - for( int j=0; j<D; ++j ) __TBB_Yield(); - } - } - AddOne( C& counter_ ) : counter(counter_) {} -}; - -//! Generic test with TBB mutex type M, max range R, and delay D. 
-template<typename M,int R, int D> -void Test( int p ) { - Counter<M> counter; - counter.value = 0; - const int n = R; - tbb::task_scheduler_init init(p); - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/10),AddOne<Counter<M>,D>(counter)); - if( counter.value!=n ) - REPORT("ERROR : counter.value=%ld (instead of %ld)\n",counter.value,n); -} - -#if TBB_USE_EXCEPTIONS -#define NTHRS_USED_IN_DESTRUCTOR_TEST 8 - -atomic<size_t> n_sleepers; - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4244 4267) -#endif - -struct AllButOneSleep : NoAssign { - internal::concurrent_monitor*& mon; - static const size_t VLN = 1024*1024; - void operator()( int i ) const { - internal::concurrent_monitor::thread_context thr_ctx; - - if( i==0 ) { - size_t n_expected_sleepers = NTHRS_USED_IN_DESTRUCTOR_TEST-1; - while( n_sleepers<n_expected_sleepers ) - __TBB_Yield(); - while( n_sleepers.compare_and_swap( VLN+NTHRS_USED_IN_DESTRUCTOR_TEST, n_expected_sleepers )!=n_expected_sleepers ) - __TBB_Yield(); - - for( int j=0; j<100; ++j ) - Harness::Sleep( 1 ); - delete mon; - mon = NULL; - } else { - mon->prepare_wait( thr_ctx, uintptr_t(this) ); - while( n_sleepers<VLN ) { - try { - ++n_sleepers; - mon->commit_wait( thr_ctx ); - if( --n_sleepers>VLN ) - break; - } catch( tbb::user_abort& ) { - // can no longer access 'mon' - break; - } - mon->prepare_wait( thr_ctx, uintptr_t(this) ); - } - } - } - AllButOneSleep( internal::concurrent_monitor*& m_ ) : mon(m_) {} -}; -#endif /* TBB_USE_EXCEPTIONS */ - -void TestDestructor() { -#if TBB_USE_EXCEPTIONS - tbb::task_scheduler_init init(NTHRS_USED_IN_DESTRUCTOR_TEST); - internal::concurrent_monitor* my_mon = new internal::concurrent_monitor; - REMARK( "testing the destructor\n" ); - n_sleepers = 0; - NativeParallelFor(NTHRS_USED_IN_DESTRUCTOR_TEST,AllButOneSleep(my_mon)); - ASSERT( my_mon==NULL, "" ); -#endif /* TBB_USE_EXCEPTIONS */ -} - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK( "testing with %d workers\n", static_cast<int>(p) ); - // test the predicated notify - Test<QueuingMutex,100000,0>( p ); - Test<QueuingMutex,1000,10000>( p ); - // test the notify_all method - Test<SpinMutex,100000,0>( p ); - Test<SpinMutex,1000,10000>( p ); - REMARK( "calling destructor for task_scheduler_init\n" ); - } - TestDestructor(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_concurrent_priority_queue.cpp b/src/tbb/src/test/test_concurrent_priority_queue.cpp deleted file mode 100644 index abafe73af..000000000 --- a/src/tbb/src/test/test_concurrent_priority_queue.cpp +++ /dev/null @@ -1,992 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" -#include "tbb/concurrent_priority_queue.h" -#include "tbb/atomic.h" -#include "tbb/blocked_range.h" -#include "harness.h" -#include <functional> -#include <algorithm> -#include "harness_allocator.h" -#include <vector> -#include "test_container_move_support.h" - -#if _MSC_VER==1500 && !__INTEL_COMPILER - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include <climits> -#if _MSC_VER==1500 && !__INTEL_COMPILER - #pragma warning( pop ) -#endif - -#if __INTEL_COMPILER && (_WIN32 || _WIN64) && TBB_USE_DEBUG && _CPPLIB_VER<520 -// The Intel Compiler has an issue that causes the Microsoft Iterator -// Debugging code to crash in vector::pop_back when it is called after a -// vector::push_back throws an exception. -// #define _HAS_ITERATOR_DEBUGGING 0 // Setting this to 0 doesn't solve the problem - // and also provokes a redefinition warning -#define __TBB_ITERATOR_DEBUGGING_EXCEPTIONS_BROKEN -#endif - -using namespace tbb; - -const size_t MAX_ITER = 10000; -const int MAX_PRIO = 100000000; - -tbb::atomic<unsigned int> counter; - -class my_data_type { -public: - int priority; - char padding[tbb::internal::NFS_MaxLineSize - sizeof(int) % tbb::internal::NFS_MaxLineSize]; - my_data_type() {} - my_data_type(int init_val) : priority(init_val) {} - const my_data_type operator+(const my_data_type& other) const { - return my_data_type(priority+other.priority); - } - bool operator==(const my_data_type& other) const { - return this->priority == other.priority; - } -}; - -const my_data_type DATA_MIN(INT_MIN); -const my_data_type DATA_MAX(INT_MAX); - -class my_less { -public: - bool operator()(const my_data_type d1, const my_data_type d2) const { - return d1.priority<d2.priority; - } -}; - -class my_throwing_type : public my_data_type { -public: - static int throw_flag; - my_throwing_type() : my_data_type() {} - my_throwing_type(const my_throwing_type& src) : my_data_type(src) { - if (my_throwing_type::throw_flag) throw 42; - priority = src.priority; - } -}; - -int my_throwing_type::throw_flag = 0; - -typedef concurrent_priority_queue<my_throwing_type, my_less > cpq_ex_test_type; - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT -const size_t push_selector_variants = 3; -#elif __TBB_CPP11_RVALUE_REF_PRESENT -const size_t push_selector_variants = 2; -#else -const size_t push_selector_variants = 1; -#endif - -template <typename Q, typename E> -void push_selector(Q& q, E e, size_t i) { - switch (i%push_selector_variants) { - case 0: q->push(e); break; -#if __TBB_CPP11_RVALUE_REF_PRESENT - case 1: q->push(tbb::internal::move(e)); break; -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - case 2: q->emplace(e); break; -#endif 
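// Context for the deleted helper above: push_selector() rotates through the
// queue's available insertion forms (copy-push, move-push, emplace) so that
// every test exercises each variant the build supports. Reduced to its core
// as an illustrative sketch -- not part of the deleted file; the move and
// emplace forms assume a C++11 build:
//
//     #include "tbb/concurrent_priority_queue.h"
//     #include <string>
//     #include <utility>
//
//     int main() {
//         tbb::concurrent_priority_queue<std::string> q;
//         std::string s("x");
//         q.push(s);             // variant 0: copy-push
//         q.push(std::move(s));  // variant 1: move-push (C++11)
//         q.emplace(3, 'y');     // variant 2: construct "yyy" in place (C++11)
//         return 0;
//     }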
-#endif - } -} - -template<typename T, typename C> -class FillBody : NoAssign { - int nThread; - T my_max, my_min; - concurrent_priority_queue<T, C> *q; - C less_than; -public: - FillBody(int nThread_, T max_, T min_, concurrent_priority_queue<T, C> *q_) : nThread(nThread_), my_max(max_), my_min(min_), q(q_) {} - void operator()(const int threadID) const { - T elem = my_min + T(threadID); - for (size_t i=0; i<MAX_ITER; ++i) { - // do some pushes - push_selector(q, elem, i); - if (elem == my_max) elem = my_min; - elem = elem + T(nThread); - } - } -}; - -template<typename T, typename C> -struct EmptyBody : NoAssign { - int nThread; - T my_max; - concurrent_priority_queue<T, C> *q; - C less_than; -public: - EmptyBody(int nThread_, T max_, concurrent_priority_queue<T, C> *q_) : nThread(nThread_), my_max(max_), q(q_) {} - void operator()(const int /*threadID*/) const { - T elem(my_max), last; - if (q->try_pop(last)) { - ++counter; - while(q->try_pop(elem)) { - ASSERT(!less_than(last, elem), "FAILED pop/priority test in EmptyBody."); - last = elem; - elem = my_max; - ++counter; - } - } - } -}; - -template <typename T, typename C> -class FloggerBody : NoAssign { - int nThread; - concurrent_priority_queue<T, C> *q; -public: - FloggerBody(int nThread_, concurrent_priority_queue<T, C> *q_) : - nThread(nThread_), q(q_) {} - void operator()(const int threadID) const { - T elem = T(threadID+1); - for (size_t i=0; i<MAX_ITER; ++i) { - push_selector(q, elem, i); - (void) q->try_pop(elem); - } - } -}; - -namespace equality_comparison_helpers { - struct to_vector{ - template <typename element_type, typename compare_t, typename allocator_t> - std::vector<element_type> operator()(tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> const& source) const{ - tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> cpq((source)); - std::vector<element_type> v; v.reserve(cpq.size()); - element_type element; - while (cpq.try_pop(element)){ v.push_back(element);} - std::reverse(v.begin(),v.end()); - return v; - } - }; -} -//TODO: make CPQ more testable instead of hacking ad-hoc operator == -//operator == is required for __TBB_TEST_INIT_LIST_SUITE -template <typename element_type, typename compare_t, typename allocator_t> -bool operator==(tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> const& lhs, tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> const& rhs){ - using equality_comparison_helpers::to_vector; - return to_vector()(lhs) == to_vector()(rhs); -} - -template <typename range, typename element_type, typename compare_t, typename allocator_t> -bool operator==(tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> const& lhs, range const & rhs ){ - using equality_comparison_helpers::to_vector; - return to_vector()(lhs) == std::vector<element_type>(rhs.begin(),rhs.end()); -} - -void TestToVector(){ - using equality_comparison_helpers::to_vector; - int array[] = {1,5,6,8,4,7}; - tbb::blocked_range<int *> range = Harness::make_blocked_range(array); - std::vector<int> source(range.begin(),range.end()); - tbb::concurrent_priority_queue<int> q(source.begin(),source.end()); - std::vector<int> from_cpq = to_vector()(q); - std::sort(source.begin(),source.end()); - ASSERT(source == from_cpq,"equality_comparison_helpers::to_vector incorrectly copied items from CPQ?"); -} - -void TestHelpers(){ - TestToVector(); -} - -void TestConstructorsDestructorsAccessors() { - std::vector<int> v; - std::allocator<int> a; - concurrent_priority_queue<int, 
std::less<int> > *q, *qo; - concurrent_priority_queue<int, std::less<int>, std::allocator<int> > *qi; - - // Test constructors/destructors - REMARK("Testing default constructor.\n"); - q = new concurrent_priority_queue<int, std::less<int> >(); - REMARK("Default constructor complete.\n"); - ASSERT(q->size()==0, "FAILED size test."); - ASSERT(q->empty(), "FAILED empty test."); - REMARK("Testing destructor.\n"); - delete q; - REMARK("Destruction complete.\n"); - - REMARK("Testing capacity constructor.\n"); - q = new concurrent_priority_queue<int, std::less<int> >(42); - REMARK("Capacity constructor complete.\n"); - ASSERT(q->size()==0, "FAILED size test."); - ASSERT(q->empty(), "FAILED empty test."); - REMARK("Testing destructor.\n"); - delete q; - REMARK("Destruction complete.\n"); - - REMARK("Testing allocator constructor.\n"); - qi = new concurrent_priority_queue<int, std::less<int>, std::allocator<int> >(a); - REMARK("Allocator constructor complete.\n"); - ASSERT(qi->size()==0, "FAILED size test."); - ASSERT(qi->empty(), "FAILED empty test."); - REMARK("Testing destructor.\n"); - delete qi; - REMARK("Destruction complete.\n"); - - REMARK("Testing capacity+allocator constructor.\n"); - qi = new concurrent_priority_queue<int, std::less<int>, std::allocator<int> >(42, a); - REMARK("Capacity+allocator constructor complete.\n"); - ASSERT(qi->size()==0, "FAILED size test."); - ASSERT(qi->empty(), "FAILED empty test."); - REMARK("Testing destructor.\n"); - delete qi; - REMARK("Destruction complete.\n"); - - REMARK("Testing iterator filler constructor.\n"); - for (int i=0; i<42; ++i) - v.push_back(i); - q = new concurrent_priority_queue<int, std::less<int> >(v.begin(), v.end()); - REMARK("Iterator filler constructor complete.\n"); - ASSERT(q->size()==42, "FAILED vector/size test."); - ASSERT(!q->empty(), "FAILED vector/empty test."); - ASSERT(*q == v, "FAILED vector/equality test."); - - REMARK("Testing copy constructor.\n"); - qo = new concurrent_priority_queue<int, std::less<int> >(*q); - REMARK("Copy constructor complete.\n"); - ASSERT(qo->size()==42, "FAILED cpq/size test."); - ASSERT(!qo->empty(), "FAILED cpq/empty test."); - ASSERT(*q == *qo, "FAILED cpq/equality test."); - - REMARK("Testing destructor.\n"); - delete q; - delete qo; - REMARK("Destruction complete.\n"); -} - -void TestAssignmentClearSwap() { - typedef concurrent_priority_queue<int, std::less<int> > cpq_type; - std::vector<int> v; - cpq_type *q, *qo; - int e; - - for (int i=0; i<42; ++i) - v.push_back(i); - q = new cpq_type(v.begin(), v.end()); - qo = new cpq_type(); - - REMARK("Testing assignment (1).\n"); - *qo = *q; - REMARK("Assignment complete.\n"); - ASSERT(qo->size()==42, "FAILED assignment/size test."); - ASSERT(!qo->empty(), "FAILED assignment/empty test."); - ASSERT(*qo == v,"FAILED assignment/equality test"); - - cpq_type assigned_q; - REMARK("Testing assign(begin,end) (2).\n"); - assigned_q.assign(v.begin(), v.end()); - REMARK("Assignment complete.\n"); - ASSERT(assigned_q.size()==42, "FAILED assignment/size test."); - ASSERT(!assigned_q.empty(), "FAILED assignment/empty test."); - ASSERT(assigned_q == v,"FAILED assignment/equality test"); - - REMARK("Testing clear.\n"); - q->clear(); - REMARK("Clear complete.\n"); - ASSERT(q->size()==0, "FAILED clear/size test."); - ASSERT(q->empty(), "FAILED clear/empty test."); - - for (size_t i=0; i<5; ++i) - (void) qo->try_pop(e); - - REMARK("Testing assignment (3).\n"); - *q = *qo; - REMARK("Assignment complete.\n"); - ASSERT(q->size()==37, "FAILED assignment/size test."); - 
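// The assignment/clear/swap assertions in this deleted test all pin down the
// same container contract. A minimal standalone sketch of that contract --
// added for illustration, not part of the deleted file; it assumes only the
// classic bundled TBB headers:
//
//     #include "tbb/concurrent_priority_queue.h"
//     #include <cassert>
//
//     int main() {
//         tbb::concurrent_priority_queue<int> q;
//         q.push(1); q.push(3); q.push(2);
//         int top = 0;
//         assert(q.try_pop(top) && top == 3); // std::less => largest first
//         tbb::concurrent_priority_queue<int> q2;
//         q2 = q;                             // copy-assign: q2 now holds {2, 1}
//         q.clear();                          // clear() empties; size() == 0
//         assert(q.empty() && !q2.empty() && q2.size() == 2);
//         q.swap(q2);                         // swap() exchanges contents
//         assert(q.size() == 2 && q2.empty());
//         return 0;
//     }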
ASSERT(!q->empty(), "FAILED assignment/empty test."); - - for (size_t i=0; i<5; ++i) - (void) qo->try_pop(e); - - REMARK("Testing swap.\n"); - q->swap(*qo); - REMARK("Swap complete.\n"); - ASSERT(q->size()==32, "FAILED swap/size test."); - ASSERT(!q->empty(), "FAILED swap/empty test."); - ASSERT(qo->size()==37, "FAILED swap_operand/size test."); - ASSERT(!qo->empty(), "FAILED swap_operand/empty test."); - delete q; - delete qo; -} - -void TestSerialPushPop() { - concurrent_priority_queue<int, std::less<int> > *q; - int e=42, prev=INT_MAX; - size_t count=0; - - q = new concurrent_priority_queue<int, std::less<int> >(MAX_ITER); - REMARK("Testing serial push.\n"); - for (size_t i=0; i<MAX_ITER; ++i) { - push_selector(q, e, i); - e = e*-1 + int(i); - } - REMARK("Pushing complete.\n"); - ASSERT(q->size()==MAX_ITER, "FAILED push/size test."); - ASSERT(!q->empty(), "FAILED push/empty test."); - - REMARK("Testing serial pop.\n"); - while (!q->empty()) { - ASSERT(q->try_pop(e), "FAILED pop test."); - ASSERT(prev>=e, "FAILED pop/priority test."); - prev = e; - ++count; - ASSERT(q->size()==MAX_ITER-count, "FAILED swap/size test."); - ASSERT(!q->empty() || count==MAX_ITER, "FAILED swap/empty test."); - } - ASSERT(!q->try_pop(e), "FAILED: successful pop from the empty queue."); - REMARK("Popping complete.\n"); - delete q; -} - -template <typename T, typename C> -void TestParallelPushPop(int nThreads, T t_max, T t_min, C /*compare*/) { - size_t qsize; - - concurrent_priority_queue<T, C> *q = new concurrent_priority_queue<T, C>(0); - FillBody<T, C> filler(nThreads, t_max, t_min, q); - EmptyBody<T, C> emptier(nThreads, t_max, q); - counter = 0; - REMARK("Testing parallel push.\n"); - NativeParallelFor(nThreads, filler); - REMARK("Pushing complete.\n"); - qsize = q->size(); - ASSERT(q->size()==nThreads*MAX_ITER, "FAILED push/size test."); - ASSERT(!q->empty(), "FAILED push/empty test."); - - REMARK("Testing parallel pop.\n"); - NativeParallelFor(nThreads, emptier); - REMARK("Popping complete.\n"); - ASSERT(counter==qsize, "FAILED pop/size test."); - ASSERT(q->size()==0, "FAILED pop/empty test."); - - q->clear(); - delete(q); -} - -void TestExceptions() { - const size_t TOO_LARGE_SZ = 1000000000; - my_throwing_type elem; - - REMARK("Testing basic constructor exceptions.\n"); - // Allocate empty queue should not throw no matter the type - try { - my_throwing_type::throw_flag = 1; - cpq_ex_test_type q; - } catch(...) { - ASSERT(false, "FAILED: allocating empty queue should not throw exception.\n"); - } - // Allocate small queue should not throw for reasonably sized type - try { - my_throwing_type::throw_flag = 1; - cpq_ex_test_type q(42); - } catch(...) { - ASSERT(false, "FAILED: allocating small queue should not throw exception.\n"); - } - // Allocate a queue with too large initial size - try { - my_throwing_type::throw_flag = 0; - cpq_ex_test_type q(TOO_LARGE_SZ); - REMARK("FAILED: Huge queue did not throw exception.\n"); - } catch(...) {} - - cpq_ex_test_type *pq; - try { - my_throwing_type::throw_flag = 0; - pq = NULL; - pq = new cpq_ex_test_type(TOO_LARGE_SZ); - REMARK("FAILED: Huge queue did not throw exception.\n"); - delete pq; - } catch(...) 
{ - ASSERT(!pq, "FAILED: pq should not be touched when constructor throws.\n"); - } - REMARK("Basic constructor exceptions testing complete.\n"); - REMARK("Testing copy constructor exceptions.\n"); - my_throwing_type::throw_flag = 0; - cpq_ex_test_type src_q(42); - elem.priority = 42; - for (size_t i=0; i<42; ++i) src_q.push(elem); - try { - my_throwing_type::throw_flag = 1; - cpq_ex_test_type q(src_q); - REMARK("FAILED: Copy construct did not throw exception.\n"); - } catch(...) {} - try { - my_throwing_type::throw_flag = 1; - pq = NULL; - pq = new concurrent_priority_queue<my_throwing_type, my_less >(src_q); - REMARK("FAILED: Copy construct did not throw exception.\n"); - delete pq; - } catch(...) { - ASSERT(!pq, "FAILED: pq should not be touched when constructor throws.\n"); - } - REMARK("Copy constructor exceptions testing complete.\n"); - REMARK("Testing assignment exceptions.\n"); - // Assignment is copy-swap, so it should be exception safe - my_throwing_type::throw_flag = 0; - cpq_ex_test_type assign_q(24); - try { - my_throwing_type::throw_flag = 1; - assign_q = src_q; - REMARK("FAILED: Assign did not throw exception.\n"); - } catch(...) { - ASSERT(assign_q.empty(), "FAILED: assign_q should be empty.\n"); - } - REMARK("Assignment exceptions testing complete.\n"); -#ifndef __TBB_ITERATOR_DEBUGGING_EXCEPTIONS_BROKEN - REMARK("Testing push exceptions.\n"); - for (size_t i=0; i<push_selector_variants; ++i) { - my_throwing_type::throw_flag = 0; - pq = new cpq_ex_test_type(3); - try { - push_selector(pq, elem, i); - push_selector(pq, elem, i); - push_selector(pq, elem, i); - } catch(...) { - ASSERT(false, "FAILED: Push should not throw exception... yet.\n"); - } - try { // should crash on copy during expansion of vector - my_throwing_type::throw_flag = 1; - push_selector(pq, elem, i); - REMARK("FAILED: Push did not throw exception.\n"); - } catch(...) { - ASSERT(!pq->empty(), "FAILED: pq should not be empty.\n"); - ASSERT(pq->size()==3, "FAILED: pq should be only three elements.\n"); - ASSERT(pq->try_pop(elem), "FAILED: pq is not functional.\n"); - } - delete pq; - - my_throwing_type::throw_flag = 0; - pq = new cpq_ex_test_type(3); - try { - push_selector(pq, elem, i); - push_selector(pq, elem, i); - } catch(...) { - ASSERT(false, "FAILED: Push should not throw exception... yet.\n"); - } - try { // should crash on push copy of element - my_throwing_type::throw_flag = 1; - push_selector(pq, elem, i); - REMARK("FAILED: Push did not throw exception.\n"); - } catch(...) 
{ - ASSERT(!pq->empty(), "FAILED: pq should not be empty.\n"); - ASSERT(pq->size()==2, "FAILED: pq should be only two elements.\n"); - ASSERT(pq->try_pop(elem), "FAILED: pq is not functional.\n"); - } - delete pq; - } - REMARK("Push exceptions testing complete.\n"); -#endif -} - -template <typename T, typename C> -void TestFlogger(int nThreads, T /*max*/, C /*compare*/) { - REMARK("Testing queue flogger.\n"); - concurrent_priority_queue<T, C> *q = new concurrent_priority_queue<T, C> (0); - NativeParallelFor(nThreads, FloggerBody<T, C >(nThreads, q)); - ASSERT(q->empty(), "FAILED flogger/empty test."); - ASSERT(!q->size(), "FAILED flogger/size test."); - REMARK("Flogging complete.\n"); - delete q; -} - -#if __TBB_INITIALIZER_LISTS_PRESENT -#include "test_initializer_list.h" - -void TestInitList(){ - REMARK("testing initializer_list methods \n"); - using namespace initializer_list_support_tests; - TestInitListSupport<tbb::concurrent_priority_queue<char> >({1,2,3,4,5}); - TestInitListSupport<tbb::concurrent_priority_queue<int> >({}); -} -#endif //if __TBB_INITIALIZER_LISTS_PRESENT - -struct special_member_calls_t { - size_t copy_constructor_called_times; - size_t move_constructor_called_times; - size_t copy_assignment_called_times; - size_t move_assignment_called_times; - - bool friend operator==(special_member_calls_t const& lhs, special_member_calls_t const& rhs){ - return - lhs.copy_constructor_called_times == rhs.copy_constructor_called_times - && lhs.move_constructor_called_times == rhs.move_constructor_called_times - && lhs.copy_assignment_called_times == rhs.copy_assignment_called_times - && lhs.move_assignment_called_times == rhs.move_assignment_called_times; - } - -}; -#if __TBB_CPP11_RVALUE_REF_PRESENT -struct MoveOperationTracker { - static size_t copy_constructor_called_times; - static size_t move_constructor_called_times; - static size_t copy_assignment_called_times; - static size_t move_assignment_called_times; - - static special_member_calls_t special_member_calls(){ - special_member_calls_t calls = {copy_constructor_called_times, move_constructor_called_times, copy_assignment_called_times, move_assignment_called_times}; - return calls; - } - static size_t value_counter; - - size_t value; - - MoveOperationTracker() : value(++value_counter) {} - MoveOperationTracker( const int value_ ) : value( value_ ) {} - ~MoveOperationTracker() __TBB_NOEXCEPT( true ) { - value = 0; - } - MoveOperationTracker(const MoveOperationTracker& m) : value(m.value) { - ASSERT(m.value, "The object has been moved or destroyed"); - ++copy_constructor_called_times; - } - MoveOperationTracker(MoveOperationTracker&& m) __TBB_NOEXCEPT(true) : value(m.value) { - ASSERT(m.value, "The object has been moved or destroyed"); - m.value = 0; - ++move_constructor_called_times; - } - MoveOperationTracker& operator=(MoveOperationTracker const& m) { - ASSERT(m.value, "The object has been moved or destroyed"); - value = m.value; - ++copy_assignment_called_times; - return *this; - } - MoveOperationTracker& operator=(MoveOperationTracker&& m) __TBB_NOEXCEPT(true) { - ASSERT(m.value, "The object has been moved or destroyed"); - value = m.value; - m.value = 0; - ++move_assignment_called_times; - return *this; - } - - bool operator<(MoveOperationTracker const &m) const { - ASSERT(value, "The object has been moved or destroyed"); - ASSERT(m.value, "The object has been moved or destroyed"); - return value < m.value; - } - - friend bool operator==(MoveOperationTracker const &lhs, MoveOperationTracker const &rhs){ - return !(lhs 
< rhs) && !(rhs <lhs); - } -}; -size_t MoveOperationTracker::copy_constructor_called_times = 0; -size_t MoveOperationTracker::move_constructor_called_times = 0; -size_t MoveOperationTracker::copy_assignment_called_times = 0; -size_t MoveOperationTracker::move_assignment_called_times = 0; -size_t MoveOperationTracker::value_counter = 0; - -template<typename allocator = tbb::cache_aligned_allocator<MoveOperationTracker> > -struct cpq_src_fixture { - enum {default_container_size = 100}; - typedef concurrent_priority_queue<MoveOperationTracker, std::less<MoveOperationTracker>, typename allocator:: template rebind<MoveOperationTracker>::other > cpq_t; - - cpq_t cpq_src; - const size_t container_size; - - void init(){ - size_t &mcct = MoveOperationTracker::move_constructor_called_times; - size_t &ccct = MoveOperationTracker::copy_constructor_called_times; - size_t &cact = MoveOperationTracker::copy_assignment_called_times; - size_t &mact = MoveOperationTracker::move_assignment_called_times; - mcct = ccct = cact = mact = 0; - - for (size_t i=1; i <= container_size; ++i){ - cpq_src.push(MoveOperationTracker(i)); - } - ASSERT(cpq_src.size() == container_size, "error in test setup ?" ); - } - - cpq_src_fixture(size_t size = default_container_size) : container_size(size){ - init(); - } - - cpq_src_fixture(typename cpq_t::allocator_type const& a, size_t size = default_container_size) : cpq_src(a), container_size(size){ - init(); - } - -}; - - -void TestStealingMoveConstructor(){ - typedef cpq_src_fixture<> fixture_t; - fixture_t fixture; - fixture_t::cpq_t src_copy(fixture.cpq_src); - - special_member_calls_t previous = MoveOperationTracker::special_member_calls(); - fixture_t::cpq_t dst(std::move(fixture.cpq_src)); - ASSERT(previous == MoveOperationTracker::special_member_calls(), "stealing move constructor should not create any new elements"); - - ASSERT(dst == src_copy, "cpq content changed during stealing move ?"); -} - -void TestStealingMoveConstructorOtherAllocatorInstance(){ - typedef two_memory_arenas_fixture<MoveOperationTracker> arena_fixture_t; - typedef cpq_src_fixture<arena_fixture_t::allocator_t > fixture_t; - - arena_fixture_t arena_fixture(8 * fixture_t::default_container_size, "TestStealingMoveConstructorOtherAllocatorInstance"); - fixture_t fixture(arena_fixture.source_allocator); - fixture_t::cpq_t src_copy(fixture.cpq_src); - - special_member_calls_t previous = MoveOperationTracker::special_member_calls(); - fixture_t::cpq_t dst(std::move(fixture.cpq_src), arena_fixture.source_allocator); - ASSERT(previous == MoveOperationTracker::special_member_calls(), "stealing move constructor should not create any new elements"); - - ASSERT(dst == src_copy, "cpq content changed during stealing move ?"); -} - -void TestPerElementMoveConstructorOtherAllocatorInstance(){ - typedef two_memory_arenas_fixture<MoveOperationTracker> arena_fixture_t; - typedef cpq_src_fixture<arena_fixture_t::allocator_t > fixture_t; - - arena_fixture_t arena_fixture(8 * fixture_t::default_container_size, "TestPerElementMoveConstructorOtherAllocatorInstance"); - fixture_t fixture(arena_fixture.source_allocator); - fixture_t::cpq_t src_copy(fixture.cpq_src); - - special_member_calls_t move_ctor_called_cpq_size_times = MoveOperationTracker::special_member_calls(); - move_ctor_called_cpq_size_times.move_constructor_called_times += fixture.container_size; - - fixture_t::cpq_t dst(std::move(fixture.cpq_src), arena_fixture.dst_allocator); - ASSERT(move_ctor_called_cpq_size_times == 
MoveOperationTracker::special_member_calls(), "Per element move constructor should move initialize all new elements"); - ASSERT(dst == src_copy, "cpq content changed during move ?"); -} - -void TestgMoveConstructor(){ - TestStealingMoveConstructor(); - TestStealingMoveConstructorOtherAllocatorInstance(); - TestPerElementMoveConstructorOtherAllocatorInstance(); -} - -void TestStealingMoveAssignOperator(){ - typedef cpq_src_fixture<> fixture_t; - fixture_t fixture; - fixture_t::cpq_t src_copy(fixture.cpq_src); - - fixture_t::cpq_t dst; - special_member_calls_t previous = MoveOperationTracker::special_member_calls(); - dst = std::move(fixture.cpq_src); - ASSERT(previous == MoveOperationTracker::special_member_calls(), "stealing move assign operator should not create any new elements"); - - ASSERT(dst == src_copy, "cpq content changed during stealing move ?"); -} - -void TestStealingMoveAssignOperatorWithStatefulAllocator(){ - //Use stateful allocator which is propagated on assignment , i.e. POCMA = true - typedef two_memory_arenas_fixture<MoveOperationTracker, /*pocma =*/Harness::true_type> arena_fixture_t; - typedef cpq_src_fixture<arena_fixture_t::allocator_t > fixture_t; - - arena_fixture_t arena_fixture(8 * fixture_t::default_container_size, "TestStealingMoveAssignOperatorWithStatefullAllocator"); - fixture_t fixture(arena_fixture.source_allocator); - fixture_t::cpq_t src_copy(fixture.cpq_src); - fixture_t::cpq_t dst(arena_fixture.dst_allocator); - - special_member_calls_t previous = MoveOperationTracker::special_member_calls(); - dst = std::move(fixture.cpq_src); - ASSERT(previous == MoveOperationTracker::special_member_calls(), "stealing move assignment operator should not create any new elements"); - - ASSERT(dst == src_copy, "cpq content changed during stealing move ?"); -} - -void TestPerElementMoveAssignOperator(){ - //use stateful allocator which is not propagate on assignment , i.e. 
POCMA = false - typedef two_memory_arenas_fixture<MoveOperationTracker, /*pocma =*/Harness::false_type> arena_fixture_t; - typedef cpq_src_fixture<arena_fixture_t::allocator_t > fixture_t; - - arena_fixture_t arena_fixture(8 * fixture_t::default_container_size, "TestPerElementMoveAssignOperator"); - fixture_t fixture(arena_fixture.source_allocator); - fixture_t::cpq_t src_copy(fixture.cpq_src); - fixture_t::cpq_t dst(arena_fixture.dst_allocator); - - special_member_calls_t move_ctor_called_cpq_size_times = MoveOperationTracker::special_member_calls(); - move_ctor_called_cpq_size_times.move_constructor_called_times += fixture.container_size; - dst = std::move(fixture.cpq_src); - ASSERT(move_ctor_called_cpq_size_times == MoveOperationTracker::special_member_calls(), "per element move assignment should move initialize new elements"); - - ASSERT(dst == src_copy, "cpq content changed during per element move ?"); -} - -void TestgMoveAssignOperator(){ - TestStealingMoveAssignOperator(); -#if __TBB_ALLOCATOR_TRAITS_PRESENT - TestStealingMoveAssignOperatorWithStatefulAllocator(); -#endif //__TBB_ALLOCATOR_TRAITS_PRESENT - TestPerElementMoveAssignOperator(); -} - -struct ForwardInEmplaceTester { - int a; - static bool moveCtorCalled; - ForwardInEmplaceTester( int a_val ) : a( a_val ) {} - ForwardInEmplaceTester( ForwardInEmplaceTester&& obj, int a_val ) : a( obj.a ) { - moveCtorCalled = true; - obj.a = a_val; - } - bool operator<( ForwardInEmplaceTester const& ) const { return true; } -}; -bool ForwardInEmplaceTester::moveCtorCalled = false; - -struct NoDefaultCtorType { - size_t value1, value2; - NoDefaultCtorType( size_t value1_, size_t value2_ ) : value1( value1_ ), value2( value2_ ) {} - bool operator<(NoDefaultCtorType const &m) const { - return value1+value2 < m.value1+m.value2; - } -}; - -void TestMoveSupportInPushPop() { - REMARK("Testing Move Support in Push/Pop..."); - size_t &mcct = MoveOperationTracker::move_constructor_called_times; - size_t &ccct = MoveOperationTracker::copy_constructor_called_times; - size_t &cact = MoveOperationTracker::copy_assignment_called_times; - size_t &mact = MoveOperationTracker::move_assignment_called_times; - mcct = ccct = cact = mact = 0; - - concurrent_priority_queue<MoveOperationTracker> q1; - - ASSERT(mcct == 0, "Value must be zero-initialized"); - ASSERT(ccct == 0, "Value must be zero-initialized"); - - q1.push(MoveOperationTracker()); - ASSERT(mcct > 0, "Not working push(T&&)?"); - ASSERT(ccct == 0, "Copying of arg occurred during push(T&&)"); - - MoveOperationTracker ob; - const size_t prev_mcct = mcct; - q1.push(std::move(ob)); - ASSERT(mcct > prev_mcct, "Not working push(T&&)?"); - ASSERT(ccct == 0, "Copying of arg occurred during push(T&&)"); - - ASSERT(cact == 0, "Copy assignment called during push(T&&)"); - const size_t prev_mact = mact; - q1.try_pop(ob); - ASSERT(cact == 0, "Copy assignment called during try_pop(T&)"); - ASSERT(mact > prev_mact, "Move assignment was not called during try_pop(T&)"); - - REMARK(" works.\n"); - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - REMARK("Testing Emplace..."); - - concurrent_priority_queue<NoDefaultCtorType> q2; - q2.emplace(15, 3); - q2.emplace(2, 35); - q2.emplace(8, 8); - - NoDefaultCtorType o(0, 0); - q2.try_pop(o); - ASSERT(o.value1 == 2 && o.value2 == 35, "Unexpected data popped; possible emplace() failure."); - q2.try_pop(o); - ASSERT(o.value1 == 15 && o.value2 == 3, "Unexpected data popped; possible emplace() failure."); - q2.try_pop(o); - ASSERT(o.value1 == 8 && o.value2 == 8, "Unexpected data 
popped; possible emplace() failure."); - ASSERT(!q2.try_pop(o), "The queue should be empty."); - - concurrent_priority_queue<ForwardInEmplaceTester> q3; - ASSERT( ForwardInEmplaceTester::moveCtorCalled == false, NULL ); - q3.emplace( tbb::internal::move( ForwardInEmplaceTester(5) ), 2 ); - ASSERT( ForwardInEmplaceTester::moveCtorCalled == true, "Not used std::forward in emplace()?" ); - ForwardInEmplaceTester obj( 0 ); - q3.try_pop( obj ); - ASSERT( obj.a == 5, "Not used std::forward in emplace()?" ); - ASSERT(!q3.try_pop( obj ), "The queue should be empty."); - - REMARK(" works.\n"); -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -} -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - -void TestCpqOnNThreads( int nThreads ) { - std::less<int> int_compare; - my_less data_compare; - - TestConstructorsDestructorsAccessors(); - TestAssignmentClearSwap(); - TestSerialPushPop(); - - TestParallelPushPop( nThreads, INT_MAX, INT_MIN, int_compare ); - TestParallelPushPop( nThreads, (unsigned char)CHAR_MAX, (unsigned char)CHAR_MIN, int_compare ); - TestParallelPushPop( nThreads, DATA_MAX, DATA_MIN, data_compare ); - - TestFlogger( nThreads, INT_MAX, int_compare ); - TestFlogger( nThreads, (unsigned char)CHAR_MAX, int_compare ); - TestFlogger( nThreads, DATA_MAX, data_compare ); -#if __TBB_CPP11_RVALUE_REF_PRESENT - MoveOperationTracker::copy_assignment_called_times = 0; - TestFlogger( nThreads, MoveOperationTracker(), std::less<MoveOperationTracker>() ); - ASSERT( MoveOperationTracker::copy_assignment_called_times == 0, "Copy assignment called during try_pop(T&)?" ); -#endif - -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - TestExceptions(); -#else - REPORT( "Known issue: exception handling tests are skipped.\n" ); -#endif -} - -#if __TBB_CPP11_SMART_POINTERS_PRESENT -struct SmartPointersCompare { - template <typename Type> bool operator() (const std::shared_ptr<Type> &t1, const std::shared_ptr<Type> &t2) { - return *t1 < *t2; - } - template <typename Type> bool operator() (const std::weak_ptr<Type> &t1, const std::weak_ptr<Type> &t2) { - return *t1.lock().get() < *t2.lock().get(); - } -}; -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ - -template <typename Queue, typename Compare> -void Examine(const Queue &q, const std::vector<typename Queue::value_type> &vec, Compare comp = Compare()) { - typedef typename Queue::value_type value_type; - - Queue q1(q); - ASSERT(!q1.empty() && q1.size() == vec.size(), NULL); - - value_type elem; - - Queue q2(q); - q2.clear(); - ASSERT(q2.empty() && !q2.size() && !q2.try_pop(elem), NULL); - - std::vector<typename Queue::value_type> vec_sorted(vec); - std::sort(vec_sorted.begin(), vec_sorted.end(), comp); - - typename std::vector<value_type>::reverse_iterator it1; - for (it1 = vec_sorted.rbegin(); q1.try_pop(elem); it1++) { - ASSERT( Harness::IsEqual()(elem, *it1), NULL ); - q2.push(elem); - } - ASSERT(it1 == vec_sorted.rend(), NULL); - ASSERT(q1.empty() && !q1.size(), NULL); - ASSERT(!q2.empty() && q2.size() == vec.size(), NULL); - - q1.swap(q2); - ASSERT(q2.empty() && !q2.size(), NULL); - ASSERT(!q1.empty() && q1.size() == vec.size(), NULL); - for (it1 = vec_sorted.rbegin(); q1.try_pop(elem); it1++) ASSERT(Harness::IsEqual()(elem, *it1), NULL); - ASSERT(it1 == vec_sorted.rend(), NULL); - - typename Queue::allocator_type a = q1.get_allocator(); - value_type *ptr = a.allocate(1); - ASSERT(ptr, NULL); - a.deallocate(ptr, 1); -} - -template <typename ValueType, typename Compare> -void TypeTester(const std::vector<ValueType> &vec, Compare comp) 
{ - typedef tbb::concurrent_priority_queue<ValueType, Compare> Queue; - typedef tbb::concurrent_priority_queue< ValueType, Compare, debug_allocator<ValueType> > QueueDebugAlloc; - __TBB_ASSERT(vec.size() >= 5, "Array should have at least 5 elements"); - // Construct an empty queue. - Queue q1; - q1.assign(vec.begin(), vec.end()); - Examine(q1, vec, comp); -#if __TBB_INITIALIZER_LISTS_PRESENT - // Constructor from initializer_list. - Queue q2({ vec[0], vec[1], vec[2] }); - for (typename std::vector<ValueType>::const_iterator it = vec.begin() + 3; it != vec.end(); ++it) q2.push(*it); - Examine(q2, vec, comp); - Queue q3; - q3 = { vec[0], vec[1], vec[2] }; - for (typename std::vector<ValueType>::const_iterator it = vec.begin() + 3; it != vec.end(); ++it) q3.push(*it); - Examine(q3, vec, comp); -#endif - // Copying constructor. - Queue q4(q1); - Examine(q4, vec, comp); - // Construct with non-default allocator. - QueueDebugAlloc q5; - q5.assign(vec.begin(), vec.end()); - Examine(q5, vec, comp); - // Copying constructor for vector with different allocator type. - QueueDebugAlloc q6(q5); - Examine(q6, vec, comp); - // Construction with copying iteration range and given allocator instance. - Queue q7(vec.begin(), vec.end()); - Examine(q7, vec, comp); - typename QueueDebugAlloc::allocator_type a; - QueueDebugAlloc q8(a); - q8.assign(vec.begin(), vec.end()); - Examine(q8, vec, comp); -} - -template <typename ValueType> -void TypeTester(const std::vector<ValueType> &vec) { TypeTester(vec, std::less<ValueType>()); } - -void TestTypes() { - const int NUMBER = 10; - - Harness::FastRandom rnd(1234); - - std::vector<int> arrInt; - for (int i = 0; i<NUMBER; ++i) arrInt.push_back(rnd.get()); - std::vector< tbb::atomic<int> > arrTbb; - for (int i = 0; i<NUMBER; ++i) { - tbb::atomic<int> a; - a = rnd.get(); - arrTbb.push_back(a); - } - - TypeTester(arrInt); - TypeTester(arrTbb); - -#if __TBB_CPP11_SMART_POINTERS_PRESENT - std::vector< std::shared_ptr<int> > arrShr; - for (int i = 0; i<NUMBER; ++i) arrShr.push_back(std::make_shared<int>(rnd.get())); - std::vector< std::weak_ptr<int> > arrWk; - std::copy(arrShr.begin(), arrShr.end(), std::back_inserter(arrWk)); - TypeTester(arrShr, SmartPointersCompare()); - TypeTester(arrWk, SmartPointersCompare()); -#else - REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" ); -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ -} - -int TestMain() { - if (MinThread < 1) - MinThread = 1; - - TestHelpers(); -#if __TBB_INITIALIZER_LISTS_PRESENT - TestInitList(); -#else - REPORT("Known issue: initializer list tests are skipped.\n"); -#endif - - TestTypes(); - -#if __TBB_CPP11_RVALUE_REF_PRESENT - TestgMoveConstructor(); - TestgMoveAssignOperator(); - TestMoveSupportInPushPop(); -#else - REPORT("Known issue: move support tests are skipped.\n"); -#endif - - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing on %d threads.\n", p); - TestCpqOnNThreads(p); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_concurrent_queue.cpp b/src/tbb/src/test/test_concurrent_queue.cpp deleted file mode 100644 index 4393b125d..000000000 --- a/src/tbb/src/test/test_concurrent_queue.cpp +++ /dev/null @@ -1,1707 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define NOMINMAX -#include "harness_defs.h" -#include "tbb/concurrent_queue.h" -#include "tbb/tick_count.h" -#include "harness.h" -#include "harness_allocator.h" - -#include <vector> - -static tbb::atomic<long> FooConstructed; -static tbb::atomic<long> FooDestroyed; - -enum state_t{ - LIVE=0x1234, - DEAD=0xDEAD -}; - -class Foo { - state_t state; -public: - int thread_id; - int serial; - Foo() : state(LIVE), thread_id(0), serial(0) { - ++FooConstructed; - } - Foo( const Foo& item ) : state(LIVE) { - ASSERT( item.state==LIVE, NULL ); - ++FooConstructed; - thread_id = item.thread_id; - serial = item.serial; - } - ~Foo() { - ASSERT( state==LIVE, NULL ); - ++FooDestroyed; - state=DEAD; - thread_id=DEAD; - serial=DEAD; - } - void operator=( const Foo& item ) { - ASSERT( item.state==LIVE, NULL ); - ASSERT( state==LIVE, NULL ); - thread_id = item.thread_id; - serial = item.serial; - } - bool is_const() {return false;} - bool is_const() const {return true;} - static void clear_counters() { FooConstructed = 0; FooDestroyed = 0; } - static long get_n_constructed() { return FooConstructed; } - static long get_n_destroyed() { return FooDestroyed; } -}; - -// problem size -static const int N = 50000; // # of bytes - -#if TBB_USE_EXCEPTIONS -//! 
Exception for concurrent_queue -class Foo_exception : public std::bad_alloc { -public: - virtual const char *what() const throw() { return "out of Foo limit"; } - virtual ~Foo_exception() throw() {} -}; - -static tbb::atomic<long> FooExConstructed; -static tbb::atomic<long> FooExDestroyed; -static tbb::atomic<long> serial_source; -static long MaxFooCount = 0; -static const long Threshold = 400; - -class FooEx { - state_t state; -public: - int serial; - FooEx() : state(LIVE) { - ++FooExConstructed; - serial = serial_source++; - } - FooEx( const FooEx& item ) : state(LIVE) { - ASSERT( item.state == LIVE, NULL ); - ++FooExConstructed; - if( MaxFooCount && (FooExConstructed-FooExDestroyed) >= MaxFooCount ) // in push() - throw Foo_exception(); - serial = item.serial; - } - ~FooEx() { - ASSERT( state==LIVE, NULL ); - ++FooExDestroyed; - state=DEAD; - serial=DEAD; - } - void operator=( FooEx& item ) { - ASSERT( item.state==LIVE, NULL ); - ASSERT( state==LIVE, NULL ); - serial = item.serial; - if( MaxFooCount==2*Threshold && (FooExConstructed-FooExDestroyed) <= MaxFooCount/4 ) // in pop() - throw Foo_exception(); - } -#if __TBB_CPP11_RVALUE_REF_PRESENT - void operator=( FooEx&& item ) { - operator=( item ); - item.serial = 0; - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -} ; -#endif /* TBB_USE_EXCEPTIONS */ - -const size_t MAXTHREAD = 256; - -static int Sum[MAXTHREAD]; - -//! Count of various pop operations -/** [0] = pop_if_present that failed - [1] = pop_if_present that succeeded - [2] = pop */ -static tbb::atomic<long> PopKind[3]; - -const int M = 10000; - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT -const size_t push_selector_variants = 3; -#elif __TBB_CPP11_RVALUE_REF_PRESENT -const size_t push_selector_variants = 2; -#else -const size_t push_selector_variants = 1; -#endif - -template<typename CQ, typename ValueType, typename CounterType> -void push( CQ& q, ValueType v, CounterType i ) { - switch( i % push_selector_variants ) { - case 0: q.push( v ); break; -#if __TBB_CPP11_RVALUE_REF_PRESENT - case 1: q.push( std::move(v) ); break; -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - case 2: q.emplace( v ); break; -#endif -#endif - default: ASSERT( false, NULL ); break; - } -} - -template<typename CQ,typename T> -struct Body: NoAssign { - CQ* queue; - const int nthread; - Body( int nthread_ ) : nthread(nthread_) {} - void operator()( int thread_id ) const { - long pop_kind[3] = {0,0,0}; - int serial[MAXTHREAD+1]; - memset( serial, 0, nthread*sizeof(int) ); - ASSERT( thread_id<nthread, NULL ); - - long sum = 0; - for( long j=0; j<M; ++j ) { - T f; - f.thread_id = DEAD; - f.serial = DEAD; - bool prepopped = false; - if( j&1 ) { - prepopped = queue->try_pop( f ); - ++pop_kind[prepopped]; - } - T g; - g.thread_id = thread_id; - g.serial = j+1; - push( *queue, g, j ); - if( !prepopped ) { - while( !(queue)->try_pop(f) ) __TBB_Yield(); - ++pop_kind[2]; - } - ASSERT( f.thread_id<=nthread, NULL ); - ASSERT( f.thread_id==nthread || serial[f.thread_id]<f.serial, "partial order violation" ); - serial[f.thread_id] = f.serial; - sum += f.serial-1; - } - Sum[thread_id] = sum; - for( int k=0; k<3; ++k ) - PopKind[k] += pop_kind[k]; - } -}; - -// Define wrapper classes to test tbb::concurrent_queue<T> -template<typename T, typename A = tbb::cache_aligned_allocator<T> > -class ConcQWithSizeWrapper : public tbb::concurrent_queue<T, A> { -public: - ConcQWithSizeWrapper() {} - ConcQWithSizeWrapper( const ConcQWithSizeWrapper& q ) : tbb::concurrent_queue<T, A>( q ) {} - 
ConcQWithSizeWrapper(const A& a) : tbb::concurrent_queue<T, A>( a ) {} -#if __TBB_CPP11_RVALUE_REF_PRESENT - ConcQWithSizeWrapper(ConcQWithSizeWrapper&& q) : tbb::concurrent_queue<T>( std::move(q) ) {} - ConcQWithSizeWrapper(ConcQWithSizeWrapper&& q, const A& a) - : tbb::concurrent_queue<T, A>( std::move(q), a ) { } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - template<typename InputIterator> - ConcQWithSizeWrapper( InputIterator begin, InputIterator end, const A& a = A()) - : tbb::concurrent_queue<T, A>(begin,end,a) {} - size_t size() const { return this->unsafe_size(); } -}; - -template<typename T> -class ConcQPushPopWrapper : public tbb::concurrent_queue<T> { -public: - ConcQPushPopWrapper() : my_capacity( size_t(-1)/(sizeof(void*)+sizeof(T)) ) {} - size_t size() const { return this->unsafe_size(); } - void set_capacity( const ptrdiff_t n ) { my_capacity = n; } - bool try_push( const T& source ) { return this->push( source ); } - bool try_pop( T& dest ) { return this->tbb::concurrent_queue<T>::try_pop( dest ); } - size_t my_capacity; -}; - -template<typename T> -class ConcQWithCapacity : public tbb::concurrent_queue<T> { -public: - ConcQWithCapacity() : my_capacity( size_t(-1)/(sizeof(void*)+sizeof(T)) ) {} - size_t size() const { return this->unsafe_size(); } - size_t capacity() const { return my_capacity; } - void set_capacity( const int n ) { my_capacity = n; } - bool try_push( const T& source ) { this->push( source ); return (size_t)source.serial<my_capacity; } - bool try_pop( T& dest ) { this->tbb::concurrent_queue<T>::try_pop( dest ); return (size_t)dest.serial<my_capacity; } - size_t my_capacity; -}; - -template <typename Queue> -void AssertEquality(Queue &q, const std::vector<typename Queue::value_type> &vec) { - ASSERT(q.size() == typename Queue::size_type(vec.size()), NULL); - ASSERT(std::equal(q.unsafe_begin(), q.unsafe_end(), vec.begin(), Harness::IsEqual()), NULL); -} - -template <typename Queue> -void AssertEmptiness(Queue &q) { - ASSERT(q.empty(), NULL); - ASSERT(!q.size(), NULL); - typename Queue::value_type elem; - ASSERT(!q.try_pop(elem), NULL); -} - -enum push_t { push_op, try_push_op }; - -template<push_t push_op> -struct pusher { -#if __TBB_CPP11_RVALUE_REF_PRESENT - template<typename CQ, typename VType> - static bool push( CQ& queue, VType&& val ) { - queue.push( std::forward<VType>( val ) ); - return true; - } -#else - template<typename CQ, typename VType> - static bool push( CQ& queue, const VType& val ) { - queue.push( val ); - return true; - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -}; - -template<> -struct pusher< try_push_op > { -#if __TBB_CPP11_RVALUE_REF_PRESENT - template<typename CQ, typename VType> - static bool push( CQ& queue, VType&& val ) { - return queue.try_push( std::forward<VType>( val ) ); - } -#else - template<typename CQ, typename VType> - static bool push( CQ& queue, const VType& val ) { - return queue.try_push( val ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -}; - -enum pop_t { pop_op, try_pop_op }; - -template<pop_t pop_op> -struct popper { -#if __TBB_CPP11_RVALUE_REF_PRESENT - template<typename CQ, typename VType> - static bool pop( CQ& queue, VType&& val ) { - if( queue.empty() ) return false; - queue.pop( std::forward<VType>( val ) ); - return true; - } -#else - template<typename CQ, typename VType> - static bool pop( CQ& queue, VType& val ) { - if( queue.empty() ) return false; - queue.pop( val ); - return true; - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -}; - -template<> -struct popper< try_pop_op > { -#if 
__TBB_CPP11_RVALUE_REF_PRESENT - template<typename CQ, typename VType> - static bool pop( CQ& queue, VType&& val ) { - return queue.try_pop( std::forward<VType>( val ) ); - } -#else - template<typename CQ, typename VType> - static bool pop( CQ& queue, VType& val ) { - return queue.try_pop( val ); - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -}; - -template <push_t push_op, typename Queue> -void FillTest(Queue &q, const std::vector<typename Queue::value_type> &vec) { - for (typename std::vector<typename Queue::value_type>::const_iterator it = vec.begin(); it != vec.end(); ++it) - ASSERT(pusher<push_op>::push(q, *it), NULL); - AssertEquality(q, vec); -} - -template <pop_t pop_op, typename Queue> -void EmptyTest(Queue &q, const std::vector<typename Queue::value_type> &vec) { - typedef typename Queue::value_type value_type; - - value_type elem; - typename std::vector<value_type>::const_iterator it = vec.begin(); - while (popper<pop_op>::pop(q, elem)) { - ASSERT(Harness::IsEqual()(elem, *it), NULL); - ++it; - } - ASSERT(it == vec.end(), NULL); - AssertEmptiness(q); -} - -template <typename T, typename A> -void bounded_queue_specific_test(tbb::concurrent_queue<T, A> &, const std::vector<T> &) { /* do nothing */ } - -template <typename T, typename A> -void bounded_queue_specific_test(tbb::concurrent_bounded_queue<T, A> &q, const std::vector<T> &vec) { - typedef typename tbb::concurrent_bounded_queue<T, A>::size_type size_type; - - FillTest<try_push_op>(q, vec); - tbb::concurrent_bounded_queue<T, A> q2 = q; - EmptyTest<pop_op>(q, vec); - - // capacity - q2.set_capacity(size_type(vec.size())); - ASSERT(q2.capacity() == size_type(vec.size()), NULL); - ASSERT(q2.size() == size_type(vec.size()), NULL); - ASSERT(!q2.try_push(vec[0]), NULL); - -#if TBB_USE_EXCEPTIONS - q.abort(); -#endif -} - -template<typename CQ, typename T> -void TestPushPop( size_t prefill, ptrdiff_t capacity, int nthread ) { - ASSERT( nthread>0, "nthread must be positive" ); - ptrdiff_t signed_prefill = ptrdiff_t(prefill); - if( signed_prefill+1>=capacity ) - return; - bool success = false; - for( int k=0; k<3; ++k ) - PopKind[k] = 0; - for( int trial=0; !success; ++trial ) { - T::clear_counters(); - Body<CQ,T> body(nthread); - CQ queue; - queue.set_capacity( capacity ); - body.queue = &queue; - for( size_t i=0; i<prefill; ++i ) { - T f; - f.thread_id = nthread; - f.serial = 1+int(i); - push(queue, f, i); - ASSERT( unsigned(queue.size())==i+1, NULL ); - ASSERT( !queue.empty(), NULL ); - } - tbb::tick_count t0 = tbb::tick_count::now(); - NativeParallelFor( nthread, body ); - tbb::tick_count t1 = tbb::tick_count::now(); - double timing = (t1-t0).seconds(); - REMARK("prefill=%d capacity=%d threads=%d time = %g = %g nsec/operation\n", int(prefill), int(capacity), nthread, timing, timing/(2*M*nthread)*1.E9); - int sum = 0; - for( int k=0; k<nthread; ++k ) - sum += Sum[k]; - int expected = int(nthread*((M-1)*M/2) + ((prefill-1)*prefill)/2); - for( int i=int(prefill); --i>=0; ) { - ASSERT( !queue.empty(), NULL ); - T f; - bool result = queue.try_pop(f); - ASSERT( result, NULL ); - ASSERT( int(queue.size())==i, NULL ); - sum += f.serial-1; - } - ASSERT( queue.empty(), "The queue should be empty" ); - ASSERT( queue.size()==0, "The queue should have zero size" ); - if( sum!=expected ) - REPORT("sum=%d expected=%d\n",sum,expected); - ASSERT( T::get_n_constructed()==T::get_n_destroyed(), NULL ); - // TODO: checks by counting allocators - - success = true; - if( nthread>1 && prefill==0 ) { - // Check that pop_if_present got sufficient 
exercise - for( int k=0; k<2; ++k ) { -#if (_WIN32||_WIN64) - // The TBB library on Windows seems to have a tough time generating - // the desired interleavings for pop_if_present, so the code tries longer, and settles - // for fewer desired interleavings. - const int max_trial = 100; - const int min_requirement = 20; -#else - const int min_requirement = 100; - const int max_trial = 20; -#endif /* _WIN32||_WIN64 */ - if( PopKind[k]<min_requirement ) { - if( trial>=max_trial ) { - if( Verbose ) - REPORT("Warning: %d threads had only %ld pop_if_present operations %s after %d trials (expected at least %d). " - "This problem may merely be unlucky scheduling. " - "Investigate only if it happens repeatedly.\n", - nthread, long(PopKind[k]), k==0?"failed":"succeeded", max_trial, min_requirement); - else - REPORT("Warning: the number of %s pop_if_present operations is less than expected for %d threads. Investigate if it happens repeatedly.\n", - k==0?"failed":"succeeded", nthread ); - - } else { - success = false; - } - } - } - } - } -} - -class Bar { - state_t state; -public: - static size_t construction_num, destruction_num; - ptrdiff_t my_id; - Bar() : state(LIVE), my_id(-1) {} - Bar(size_t _i) : state(LIVE), my_id(_i) { construction_num++; } - Bar( const Bar& a_bar ) : state(LIVE) { - ASSERT( a_bar.state==LIVE, NULL ); - my_id = a_bar.my_id; - construction_num++; - } - ~Bar() { - ASSERT( state==LIVE, NULL ); - state = DEAD; - my_id = DEAD; - destruction_num++; - } - void operator=( const Bar& a_bar ) { - ASSERT( a_bar.state==LIVE, NULL ); - ASSERT( state==LIVE, NULL ); - my_id = a_bar.my_id; - } - friend bool operator==(const Bar& bar1, const Bar& bar2 ) ; -} ; - -size_t Bar::construction_num = 0; -size_t Bar::destruction_num = 0; - -bool operator==(const Bar& bar1, const Bar& bar2) { - ASSERT( bar1.state==LIVE, NULL ); - ASSERT( bar2.state==LIVE, NULL ); - return bar1.my_id == bar2.my_id; -} - -class BarIterator -{ - Bar* bar_ptr; - BarIterator(Bar* bp_) : bar_ptr(bp_) {} -public: - ~BarIterator() {} - BarIterator& operator=( const BarIterator& other ) { - bar_ptr = other.bar_ptr; - return *this; - } - Bar& operator*() const { - return *bar_ptr; - } - BarIterator& operator++() { - ++bar_ptr; - return *this; - } - Bar* operator++(int) { - Bar* result = &operator*(); - operator++(); - return result; - } - friend bool operator==(const BarIterator& bia, const BarIterator& bib) ; - friend bool operator!=(const BarIterator& bia, const BarIterator& bib) ; - template<typename CQ, typename T, typename TIter, typename CQ_EX, typename T_EX> - friend void TestConstructors (); -} ; - -bool operator==(const BarIterator& bia, const BarIterator& bib) { - return bia.bar_ptr==bib.bar_ptr; -} - -bool operator!=(const BarIterator& bia, const BarIterator& bib) { - return bia.bar_ptr!=bib.bar_ptr; -} - -#if TBB_USE_EXCEPTIONS -class Bar_exception : public std::bad_alloc { -public: - virtual const char *what() const throw() { return "making the entry invalid"; } - virtual ~Bar_exception() throw() {} -}; - -class BarEx { - static int count; -public: - state_t state; - typedef enum { - PREPARATION, - COPY_CONSTRUCT - } mode_t; - static mode_t mode; - ptrdiff_t my_id; - ptrdiff_t my_tilda_id; - static int button; - BarEx() : state(LIVE), my_id(-1), my_tilda_id(-1) {} - BarEx(size_t _i) : state(LIVE), my_id(_i), my_tilda_id(my_id^(-1)) {} - BarEx( const BarEx& a_bar ) : state(LIVE) { - ASSERT( a_bar.state==LIVE, NULL ); - my_id = a_bar.my_id; - if( mode==PREPARATION ) - if( !( ++count % 100 ) ) - throw Bar_exception(); 
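// The conditional throw above is the heart of these exception-safety tests:
// every 100th copy made while mode==PREPARATION fails, and TestConstructors()
// then checks that a queue whose push or copy was interrupted this way stays
// consistent. The pattern, reduced to an illustrative sketch -- not part of
// the deleted file; `Thrower` and `should_throw` are made-up names:
//
//     #include "tbb/concurrent_queue.h"
//     #include <new>
//
//     static bool should_throw = false;
//     struct Thrower {
//         Thrower() {}
//         Thrower(const Thrower&) { if (should_throw) throw std::bad_alloc(); }
//     };
//
//     int main() {
//         tbb::concurrent_queue<Thrower> q;
//         q.push(Thrower());           // copies the argument into the queue
//         should_throw = true;
//         try { q.push(Thrower()); } catch (...) { /* queue must stay usable */ }
//         should_throw = false;
//         Thrower t;
//         return q.try_pop(t) ? 0 : 1; // the first element is still there
//     }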
- my_tilda_id = a_bar.my_tilda_id; - } - ~BarEx() { - ASSERT( state==LIVE, NULL ); - state = DEAD; - my_id = DEAD; - } - static void set_mode( mode_t m ) { mode = m; } - void operator=( const BarEx& a_bar ) { - ASSERT( a_bar.state==LIVE, NULL ); - ASSERT( state==LIVE, NULL ); - my_id = a_bar.my_id; - my_tilda_id = a_bar.my_tilda_id; - } - friend bool operator==(const BarEx& bar1, const BarEx& bar2 ) ; -} ; - -int BarEx::count = 0; -BarEx::mode_t BarEx::mode = BarEx::PREPARATION; - -bool operator==(const BarEx& bar1, const BarEx& bar2) { - ASSERT( bar1.state==LIVE, NULL ); - ASSERT( bar2.state==LIVE, NULL ); - ASSERT( (bar1.my_id ^ bar1.my_tilda_id) == -1, NULL ); - ASSERT( (bar2.my_id ^ bar2.my_tilda_id) == -1, NULL ); - return bar1.my_id==bar2.my_id && bar1.my_tilda_id==bar2.my_tilda_id; -} -#endif /* TBB_USE_EXCEPTIONS */ - -template<typename CQ, typename T, typename TIter, typename CQ_EX, typename T_EX> -void TestConstructors () -{ - CQ src_queue; - typename CQ::const_iterator dqb; - typename CQ::const_iterator dqe; - typename CQ::const_iterator iter; - - for( size_t size=0; size<1001; ++size ) { - for( size_t i=0; i<size; ++i ) - src_queue.push(T(i+(i^size))); - typename CQ::const_iterator sqb( src_queue.unsafe_begin() ); - typename CQ::const_iterator sqe( src_queue.unsafe_end() ); - - CQ dst_queue(sqb, sqe); - - ASSERT(src_queue.size()==dst_queue.size(), "different size"); - - src_queue.clear(); - } - - T bar_array[1001]; - for( size_t size=0; size<1001; ++size ) { - for( size_t i=0; i<size; ++i ) - bar_array[i] = T(i+(i^size)); - - const TIter sab(bar_array+0); - const TIter sae(bar_array+size); - - CQ dst_queue2(sab, sae); - - ASSERT( size==unsigned(dst_queue2.size()), NULL ); - ASSERT( sab==TIter(bar_array+0), NULL ); - ASSERT( sae==TIter(bar_array+size), NULL ); - - dqb = dst_queue2.unsafe_begin(); - dqe = dst_queue2.unsafe_end(); - TIter v_iter(sab); - for( ; dqb != dqe; ++dqb, ++v_iter ) - ASSERT( *dqb == *v_iter, "unexpected element" ); - ASSERT( v_iter==sae, "different size?" ); - } - - src_queue.clear(); - - CQ dst_queue3( src_queue ); - ASSERT( src_queue.size()==dst_queue3.size(), NULL ); - ASSERT( 0==dst_queue3.size(), NULL ); - - int k=0; - for( size_t i=0; i<1001; ++i ) { - T tmp_bar; - src_queue.push(T(++k)); - src_queue.push(T(++k)); - src_queue.try_pop(tmp_bar); - - CQ dst_queue4( src_queue ); - - ASSERT( src_queue.size()==dst_queue4.size(), NULL ); - - dqb = dst_queue4.unsafe_begin(); - dqe = dst_queue4.unsafe_end(); - iter = src_queue.unsafe_begin(); - - for( ; dqb != dqe; ++dqb, ++iter ) - ASSERT( *dqb == *iter, "unexpected element" ); - - ASSERT( iter==src_queue.unsafe_end(), "different size?" ); - } - - CQ dst_queue5( src_queue ); - - ASSERT( src_queue.size()==dst_queue5.size(), NULL ); - dqb = dst_queue5.unsafe_begin(); - dqe = dst_queue5.unsafe_end(); - iter = src_queue.unsafe_begin(); - for( ; dqb != dqe; ++dqb, ++iter ) - ASSERT( *dqb == *iter, "unexpected element" ); - - for( size_t i=0; i<100; ++i) { - T tmp_bar; - src_queue.push(T(i+1000)); - src_queue.push(T(i+1000)); - src_queue.try_pop(tmp_bar); - - dst_queue5.push(T(i+1000)); - dst_queue5.push(T(i+1000)); - dst_queue5.try_pop(tmp_bar); - } - - ASSERT( src_queue.size()==dst_queue5.size(), NULL ); - dqb = dst_queue5.unsafe_begin(); - dqe = dst_queue5.unsafe_end(); - iter = src_queue.unsafe_begin(); - for( ; dqb != dqe; ++dqb, ++iter ) - ASSERT( *dqb == *iter, "unexpected element" ); - ASSERT( iter==src_queue.unsafe_end(), "different size?" 
); - -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN || __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN - REPORT("Known issue: part of the constructor test is skipped.\n"); -#elif TBB_USE_EXCEPTIONS - k = 0; - typename CQ_EX::size_type n_elements=0; - CQ_EX src_queue_ex; - for( size_t size=0; size<1001; ++size ) { - T_EX tmp_bar_ex; - typename CQ_EX::size_type n_successful_pushes=0; - T_EX::set_mode( T_EX::PREPARATION ); - try { - src_queue_ex.push(T_EX(k+(k^size))); - ++n_successful_pushes; - } catch (...) { - } - ++k; - try { - src_queue_ex.push(T_EX(k+(k^size))); - ++n_successful_pushes; - } catch (...) { - } - ++k; - src_queue_ex.try_pop(tmp_bar_ex); - n_elements += (n_successful_pushes - 1); - ASSERT( src_queue_ex.size()==n_elements, NULL); - - T_EX::set_mode( T_EX::COPY_CONSTRUCT ); - CQ_EX dst_queue_ex( src_queue_ex ); - - ASSERT( src_queue_ex.size()==dst_queue_ex.size(), NULL ); - - typename CQ_EX::const_iterator dqb_ex = dst_queue_ex.unsafe_begin(); - typename CQ_EX::const_iterator dqe_ex = dst_queue_ex.unsafe_end(); - typename CQ_EX::const_iterator iter_ex = src_queue_ex.unsafe_begin(); - - for( ; dqb_ex != dqe_ex; ++dqb_ex, ++iter_ex ) - ASSERT( *dqb_ex == *iter_ex, "unexpected element" ); - ASSERT( iter_ex==src_queue_ex.unsafe_end(), "different size?" ); - } -#endif /* TBB_USE_EXCEPTIONS */ - -#if __TBB_CPP11_RVALUE_REF_PRESENT - // Testing work of move constructors - src_queue.clear(); - - typedef typename CQ::size_type qsize_t; - for( qsize_t size = 0; size < 1001; ++size ) { - for( qsize_t i = 0; i < size; ++i ) - src_queue.push( T(i + (i ^ size)) ); - std::vector<const T*> locations(size); - typename CQ::const_iterator qit = src_queue.unsafe_begin(); - for( qsize_t i = 0; i < size; ++i, ++qit ) - locations[i] = &(*qit); - - qsize_t size_of_queue = src_queue.size(); - CQ dst_queue( std::move(src_queue) ); - - ASSERT( src_queue.empty() && src_queue.size() == 0, "not working move constructor?" ); - ASSERT( size == size_of_queue && size_of_queue == dst_queue.size(), - "not working move constructor?" 
); - - qit = dst_queue.unsafe_begin(); - for( qsize_t i = 0; i < size; ++i, ++qit ) - ASSERT( locations[i] == &(*qit), "there was data movement during move constructor" ); - - for( qsize_t i = 0; i < size; ++i ) { - T test(i + (i ^ size)); - T popped; - bool pop_result = dst_queue.try_pop( popped ); - - ASSERT( pop_result, NULL ); - ASSERT( test == popped, NULL ); - } - } -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ -} - -#if __TBB_CPP11_RVALUE_REF_PRESENT -template<class T> -class allocator: public tbb::cache_aligned_allocator<T> { -public: - size_t m_unique_id; - - allocator() : m_unique_id( 0 ) {} - - allocator(size_t unique_id) { m_unique_id = unique_id; } - - template<typename U> - allocator(const allocator<U>& a) throw() { m_unique_id = a.m_unique_id; } - - template<typename U> - struct rebind { typedef allocator<U> other; }; - - friend bool operator==(const allocator& lhs, const allocator& rhs) { - return lhs.m_unique_id == rhs.m_unique_id; - } -}; - -// Checks operability of the queue the data was moved from -template<typename T, typename CQ> -void TestQueueOperabilityAfterDataMove( CQ& queue ) { - const size_t size = 10; - std::vector<T> v(size); - for( size_t i = 0; i < size; ++i ) v[i] = T( i * i + i ); - - FillTest<push_op>(queue, v); - EmptyTest<try_pop_op>(queue, v); - bounded_queue_specific_test(queue, v); -} - -template<class CQ, class T> -void TestMoveConstructors() { - T::construction_num = T::destruction_num = 0; - CQ src_queue( allocator<T>(0) ); - const size_t size = 10; - for( size_t i = 0; i < size; ++i ) - src_queue.push( T(i + (i ^ size)) ); - - ASSERT( T::construction_num == 2 * size, NULL ); - ASSERT( T::destruction_num == size, NULL ); - const T* locations[size]; - typename CQ::const_iterator qit = src_queue.unsafe_begin(); - for( size_t i = 0; i < size; ++i, ++qit ) - locations[i] = &(*qit); - - // Ensuring allocation operation takes place during move when allocators are different - CQ dst_queue( std::move(src_queue), allocator<T>(1) ); - ASSERT( T::construction_num == 2 * size + size, NULL ); - ASSERT( T::destruction_num == 2 * size + size, NULL ); - - TestQueueOperabilityAfterDataMove<T>( src_queue ); - - qit = dst_queue.unsafe_begin(); - for( size_t i = 0; i < size; ++i, ++qit ) { - ASSERT( locations[i] != &(*qit), "item was not moved" ); - locations[i] = &(*qit); - } - - T::construction_num = T::destruction_num = 0; - // Ensuring there is no allocation operation during move with equal allocators - CQ dst_queue2( std::move(dst_queue), allocator<T>(1) ); - ASSERT( T::construction_num == 0, NULL ); - ASSERT( T::destruction_num == 0, NULL ); - - TestQueueOperabilityAfterDataMove<T>( dst_queue ); - - qit = dst_queue2.unsafe_begin(); - for( size_t i = 0; i < size; ++i, ++qit ) { - ASSERT( locations[i] == &(*qit), "item was moved" ); - } - - for( size_t i = 0; i < size; ++i) { - T test(i + (i ^ size)); - T popped; - bool pop_result = dst_queue2.try_pop( popped ); - ASSERT( pop_result, NULL ); - ASSERT( test == popped, NULL ); - } - ASSERT( dst_queue2.empty(), NULL ); - ASSERT( dst_queue2.size() == 0, NULL ); -} - -void TestMoveConstruction() { - REMARK("Testing move constructors with specified allocators..."); - TestMoveConstructors< ConcQWithSizeWrapper< Bar, allocator<Bar> >, Bar >(); - TestMoveConstructors< tbb::concurrent_bounded_queue< Bar, allocator<Bar> >, Bar >(); - REMARK(" work\n"); -} -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - -template<typename Iterator1, typename Iterator2> -void TestIteratorAux( Iterator1 i, Iterator2 j, int size ) { - // Now 
test iteration - Iterator1 old_i; - for( int k=0; k<size; ++k ) { - ASSERT( i!=j, NULL ); - ASSERT( !(i==j), NULL ); - Foo f; - if( k&1 ) { - // Test pre-increment - f = *old_i++; - // Test assignment - i = old_i; - } else { - // Test post-increment - f=*i++; - if( k<size-1 ) { - // Test "->" - ASSERT( k+2==i->serial, NULL ); - } - // Test assignment - old_i = i; - } - ASSERT( k+1==f.serial, NULL ); - } - ASSERT( !(i!=j), NULL ); - ASSERT( i==j, NULL ); -} - -template<typename Iterator1, typename Iterator2> -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j); - ASSERT( i==j, NULL ); - ASSERT( !(i!=j), NULL ); - Iterator1 k; - k = j; - ASSERT( k==j, NULL ); - ASSERT( !(k!=j), NULL ); -} - -template<typename Iterator, typename T> -void TestIteratorTraits() { - AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) ); - AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) ); - AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) ); - AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::forward_iterator_tag*>(0) ); - T x; - typename Iterator::reference xr = x; - typename Iterator::pointer xp = &x; - ASSERT( &xr==xp, NULL ); -} - -//! Test the iterators for concurrent_queue -template<typename CQ> -void TestIterator() { - CQ queue; - const CQ& const_queue = queue; - for( int j=0; j<500; ++j ) { - TestIteratorAux( queue.unsafe_begin() , queue.unsafe_end() , j ); - TestIteratorAux( const_queue.unsafe_begin(), const_queue.unsafe_end(), j ); - TestIteratorAux( const_queue.unsafe_begin(), queue.unsafe_end() , j ); - TestIteratorAux( queue.unsafe_begin() , const_queue.unsafe_end(), j ); - Foo f; - f.serial = j+1; - queue.push(f); - } - TestIteratorAssignment<typename CQ::const_iterator>( const_queue.unsafe_begin() ); - TestIteratorAssignment<typename CQ::const_iterator>( queue.unsafe_begin() ); - TestIteratorAssignment<typename CQ::iterator>( queue.unsafe_begin() ); - TestIteratorTraits<typename CQ::const_iterator, const Foo>(); - TestIteratorTraits<typename CQ::iterator, Foo>(); -} - -template<typename CQ> -void TestConcurrentQueueType() { - AssertSameType( typename CQ::value_type(), Foo() ); - Foo f; - const Foo g; - typename CQ::reference r = f; - ASSERT( &r==&f, NULL ); - ASSERT( !r.is_const(), NULL ); - typename CQ::const_reference cr = g; - ASSERT( &cr==&g, NULL ); - ASSERT( cr.is_const(), NULL ); -} - -template<typename CQ, typename T> -void TestEmptyQueue() { - const CQ queue; - ASSERT( queue.size()==0, NULL ); - ASSERT( queue.capacity()>0, NULL ); - ASSERT( size_t(queue.capacity())>=size_t(-1)/(sizeof(void*)+sizeof(T)), NULL ); -} - -template<typename CQ,typename T> -void TestFullQueue() { - for( int n=0; n<10; ++n ) { - T::clear_counters(); - CQ queue; - queue.set_capacity(n); - for( int i=0; i<=n; ++i ) { - T f; - f.serial = i; - bool result = queue.try_push( f ); - ASSERT( result==(i<n), NULL ); - } - for( int i=0; i<=n; ++i ) { - T f; - bool result = queue.try_pop( f ); - ASSERT( result==(i<n), NULL ); - ASSERT( !result || f.serial==i, NULL ); - } - ASSERT( T::get_n_constructed()==T::get_n_destroyed(), NULL ); - } -} - -template<typename CQ> -void TestClear() { - FooConstructed = 0; - FooDestroyed = 0; - const unsigned int n=5; - - CQ queue; - const int q_capacity=10; - queue.set_capacity(q_capacity); - for( size_t i=0; i<n; ++i ) { - Foo f; - f.serial = int(i); - queue.push( f ); - } - ASSERT( unsigned(queue.size())==n, NULL ); - queue.clear(); 
- ASSERT( queue.size()==0, NULL ); - for( size_t i=0; i<n; ++i ) { - Foo f; - f.serial = int(i); - queue.push( f ); - } - ASSERT( unsigned(queue.size())==n, NULL ); - queue.clear(); - ASSERT( queue.size()==0, NULL ); - for( size_t i=0; i<n; ++i ) { - Foo f; - f.serial = int(i); - queue.push( f ); - } - ASSERT( unsigned(queue.size())==n, NULL ); -} - -template<typename T> -struct TestNegativeQueueBody: NoAssign { - tbb::concurrent_bounded_queue<T>& queue; - const int nthread; - TestNegativeQueueBody( tbb::concurrent_bounded_queue<T>& q, int n ) : queue(q), nthread(n) {} - void operator()( int k ) const { - if( k==0 ) { - int number_of_pops = nthread-1; - // Wait for all pops to pend. - while( queue.size()>-number_of_pops ) { - __TBB_Yield(); - } - for( int i=0; ; ++i ) { - ASSERT( queue.size()==i-number_of_pops, NULL ); - ASSERT( queue.empty()==(queue.size()<=0), NULL ); - if( i==number_of_pops ) break; - // Satisfy another pop - queue.push( T() ); - } - } else { - // Pop item from queue - T item; - queue.pop(item); - } - } -}; - -//! Test a queue with a negative size. -template<typename T> -void TestNegativeQueue( int nthread ) { - tbb::concurrent_bounded_queue<T> queue; - NativeParallelFor( nthread, TestNegativeQueueBody<T>(queue,nthread) ); -} - -#if TBB_USE_EXCEPTIONS -template<typename CQ,typename A1,typename A2,typename T> -void TestExceptionBody() { - enum methods { - m_push = 0, - m_pop - }; - - REMARK("Testing exception safety\n"); - MaxFooCount = 5; - // verify 'clear()' on exception; queue's destructor calls its clear() - // Do test on queues of two different types at the same time to - // catch problem with incorrect sharing between templates. - { - CQ queue0; - tbb::concurrent_queue<int,A1> queue1; - for( int i=0; i<2; ++i ) { - bool caught = false; - try { - A2::init_counters(); - A2::set_limits(N/2); - for( int k=0; k<N; k++ ) { - if( i==0 ) - push(queue0, T(), i); - else - queue1.push( k ); - } - } catch (...) { - caught = true; - } - ASSERT( caught, "call to push should have thrown exception" ); - } - } - REMARK("... 
queue destruction test passed\n");
-
- try {
- int n_pushed=0, n_popped=0;
- for(int t = 0; t <= 1; t++)// exception type -- 0 : from allocator(), 1 : from Foo's constructor
- {
- CQ queue_test;
- for( int m=m_push; m<=m_pop; m++ ) {
- // concurrent_queue internally rebinds the allocator to one with 'char'
- A2::init_counters();
-
- if(t) MaxFooCount = MaxFooCount + 400;
- else A2::set_limits(N/2);
-
- try {
- switch(m) {
- case m_push:
- for( int k=0; k<N; k++ ) {
- push( queue_test, T(), k );
- n_pushed++;
- }
- break;
- case m_pop:
- n_popped=0;
- for( int k=0; k<n_pushed; k++ ) {
- T elt;
- queue_test.try_pop( elt );
- n_popped++;
- }
- n_pushed = 0;
- A2::set_limits();
- break;
- }
- if( !t && m==m_push ) ASSERT(false, "should throw an exception");
- } catch ( Foo_exception & ) {
- switch(m) {
- case m_push: {
- ASSERT( ptrdiff_t(queue_test.size())==n_pushed, "incorrect queue size" );
- long tc = MaxFooCount;
- MaxFooCount = 0;
- for( int k=0; k<(int)tc; k++ ) {
- push( queue_test, T(), k );
- n_pushed++;
- }
- MaxFooCount = tc;
- }
- break;
- case m_pop:
- MaxFooCount = 0; // disable exception
- n_pushed -= (n_popped+1); // including one that threw an exception
- ASSERT( n_pushed>=0, "n_pushed cannot be less than 0" );
- for( int k=0; k<1000; k++ ) {
- push( queue_test, T(), k );
- n_pushed++;
- }
- ASSERT( !queue_test.empty(), "queue must not be empty" );
- ASSERT( ptrdiff_t(queue_test.size())==n_pushed, "queue size must be equal to n pushed" );
- for( int k=0; k<n_pushed; k++ ) {
- T elt;
- queue_test.try_pop( elt );
- }
- ASSERT( queue_test.empty(), "queue must be empty" );
- ASSERT( queue_test.size()==0, "queue must be empty" );
- break;
- }
- } catch ( std::bad_alloc & ) {
- A2::set_limits(); // disable exception from allocator
- size_t size = queue_test.size();
- switch(m) {
- case m_push:
- ASSERT( size>0, "incorrect queue size");
- break;
- case m_pop:
- if( !t ) ASSERT( false, "should not throw an exception" );
- break;
- }
- }
- REMARK("... for t=%d and m=%d, exception test passed\n", t, m);
- }
- }
- } catch(...) {
- ASSERT(false, "unexpected exception");
- }
-}
-#endif /* TBB_USE_EXCEPTIONS */
-
-void TestExceptions() {
-#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
- REPORT("Known issue: exception safety test is skipped.\n");
-#elif TBB_USE_EXCEPTIONS
- typedef static_counting_allocator<std::allocator<FooEx>, size_t> allocator_t;
- typedef static_counting_allocator<std::allocator<char>, size_t> allocator_char_t;
- TestExceptionBody<ConcQWithSizeWrapper<FooEx, allocator_t>,allocator_t,allocator_char_t,FooEx>();
- TestExceptionBody<tbb::concurrent_bounded_queue<FooEx, allocator_t>,allocator_t,allocator_char_t,FooEx>();
-#endif /* TBB_USE_EXCEPTIONS */
-}
-
-template<typename CQ, typename T>
-struct TestQueueElements: NoAssign {
- CQ& queue;
- const int nthread;
- TestQueueElements( CQ& q, int n ) : queue(q), nthread(n) {}
- void operator()( int k ) const {
- for( int i=0; i<1000; ++i ) {
- if( (i&0x1)==0 ) {
- ASSERT( T(k)<T(nthread), NULL );
- queue.push( T(k) );
- } else {
- // Pop item from queue
- T item = 0;
- queue.try_pop(item);
- ASSERT( item<=T(nthread), NULL );
- }
- }
- }
-};
-
-//! Test concurrent queue with primitive data type
-template<typename CQ, typename T>
-void TestPrimitiveTypes( int nthread, T exemplar )
-{
- CQ queue;
- for( int i=0; i<100; ++i )
- queue.push( exemplar );
- NativeParallelFor( nthread, TestQueueElements<CQ,T>(queue,nthread) );
-}
-
-#include "harness_m128.h"
-
-#if HAVE_m128 || HAVE_m256
-
-//! 
Test concurrent queue with vector types -/** Type Queue should be a queue of ClassWithSSE/ClassWithAVX. */ -template<typename ClassWithVectorType, typename Queue> -void TestVectorTypes() { - Queue q1; - for( int i=0; i<100; ++i ) { - // VC8 does not properly align a temporary value; to work around, use explicit variable - ClassWithVectorType bar(i); - q1.push(bar); - } - - // Copy the queue - Queue q2 = q1; - // Check that elements of the copy are correct - typename Queue::const_iterator ci = q2.unsafe_begin(); - for( int i=0; i<100; ++i ) { - ClassWithVectorType foo = *ci; - ClassWithVectorType bar(i); - ASSERT( *ci==bar, NULL ); - ++ci; - } - - for( int i=0; i<101; ++i ) { - ClassWithVectorType tmp; - bool b = q1.try_pop( tmp ); - ASSERT( b==(i<100), NULL ); - ClassWithVectorType bar(i); - ASSERT( !b || tmp==bar, NULL ); - } -} -#endif /* HAVE_m128 || HAVE_m256 */ - -void TestEmptiness() -{ - REMARK(" Test Emptiness\n"); - TestEmptyQueue<ConcQWithCapacity<char>, char>(); - TestEmptyQueue<ConcQWithCapacity<Foo>, Foo>(); - TestEmptyQueue<tbb::concurrent_bounded_queue<char>, char>(); - TestEmptyQueue<tbb::concurrent_bounded_queue<Foo>, Foo>(); -} - -void TestFullness() -{ - REMARK(" Test Fullness\n"); - TestFullQueue<ConcQWithCapacity<Foo>,Foo>(); - TestFullQueue<tbb::concurrent_bounded_queue<Foo>,Foo>(); -} - -void TestClearWorks() -{ - REMARK(" Test concurrent_queue::clear() works\n"); - TestClear<ConcQWithCapacity<Foo> >(); - TestClear<tbb::concurrent_bounded_queue<Foo> >(); -} - -void TestQueueTypeDeclaration() -{ - REMARK(" Test concurrent_queue's types work\n"); - TestConcurrentQueueType<tbb::concurrent_queue<Foo> >(); - TestConcurrentQueueType<tbb::concurrent_bounded_queue<Foo> >(); -} - -void TestQueueIteratorWorks() -{ - REMARK(" Test concurrent_queue's iterators work\n"); - TestIterator<tbb::concurrent_queue<Foo> >(); - TestIterator<tbb::concurrent_bounded_queue<Foo> >(); -} - -#if TBB_USE_EXCEPTIONS -#define BAR_EX BarEx -#else -#define BAR_EX Empty /* passed as template arg but should not be used */ -#endif -class Empty; - -void TestQueueConstructors() -{ - REMARK(" Test concurrent_queue's constructors work\n"); - TestConstructors<ConcQWithSizeWrapper<Bar>,Bar,BarIterator,ConcQWithSizeWrapper<BAR_EX>,BAR_EX>(); - TestConstructors<tbb::concurrent_bounded_queue<Bar>,Bar,BarIterator,tbb::concurrent_bounded_queue<BAR_EX>,BAR_EX>(); -} - -void TestQueueWorksWithPrimitiveTypes() -{ - REMARK(" Test concurrent_queue works with primitive types\n"); - TestPrimitiveTypes<tbb::concurrent_queue<char>, char>( MaxThread, (char)1 ); - TestPrimitiveTypes<tbb::concurrent_queue<int>, int>( MaxThread, (int)-12 ); - TestPrimitiveTypes<tbb::concurrent_queue<float>, float>( MaxThread, (float)-1.2f ); - TestPrimitiveTypes<tbb::concurrent_queue<double>, double>( MaxThread, (double)-4.3 ); - TestPrimitiveTypes<tbb::concurrent_bounded_queue<char>, char>( MaxThread, (char)1 ); - TestPrimitiveTypes<tbb::concurrent_bounded_queue<int>, int>( MaxThread, (int)-12 ); - TestPrimitiveTypes<tbb::concurrent_bounded_queue<float>, float>( MaxThread, (float)-1.2f ); - TestPrimitiveTypes<tbb::concurrent_bounded_queue<double>, double>( MaxThread, (double)-4.3 ); -} - -void TestQueueWorksWithSSE() -{ - REMARK(" Test concurrent_queue works with SSE data\n"); -#if HAVE_m128 - TestVectorTypes<ClassWithSSE, tbb::concurrent_queue<ClassWithSSE> >(); - TestVectorTypes<ClassWithSSE, tbb::concurrent_bounded_queue<ClassWithSSE> >(); -#endif /* HAVE_m128 */ -#if HAVE_m256 - if( have_AVX() ) { - TestVectorTypes<ClassWithAVX, 
tbb::concurrent_queue<ClassWithAVX> >(); - TestVectorTypes<ClassWithAVX, tbb::concurrent_bounded_queue<ClassWithAVX> >(); - } -#endif /* HAVE_m256 */ -} - -void TestConcurrentPushPop() -{ - REMARK(" Test concurrent_queue's concurrent push and pop\n"); - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - REMARK(" Testing with %d thread(s)\n", nthread ); - TestNegativeQueue<Foo>(nthread); - for( size_t prefill=0; prefill<64; prefill+=(1+prefill/3) ) { - TestPushPop<ConcQPushPopWrapper<Foo>,Foo>(prefill,ptrdiff_t(-1),nthread); - TestPushPop<ConcQPushPopWrapper<Foo>,Foo>(prefill,ptrdiff_t(1),nthread); - TestPushPop<ConcQPushPopWrapper<Foo>,Foo>(prefill,ptrdiff_t(2),nthread); - TestPushPop<ConcQPushPopWrapper<Foo>,Foo>(prefill,ptrdiff_t(10),nthread); - TestPushPop<ConcQPushPopWrapper<Foo>,Foo>(prefill,ptrdiff_t(100),nthread); - } - for( size_t prefill=0; prefill<64; prefill+=(1+prefill/3) ) { - TestPushPop<tbb::concurrent_bounded_queue<Foo>,Foo>(prefill,ptrdiff_t(-1),nthread); - TestPushPop<tbb::concurrent_bounded_queue<Foo>,Foo>(prefill,ptrdiff_t(1),nthread); - TestPushPop<tbb::concurrent_bounded_queue<Foo>,Foo>(prefill,ptrdiff_t(2),nthread); - TestPushPop<tbb::concurrent_bounded_queue<Foo>,Foo>(prefill,ptrdiff_t(10),nthread); - TestPushPop<tbb::concurrent_bounded_queue<Foo>,Foo>(prefill,ptrdiff_t(100),nthread); - } - } -} - -#if TBB_USE_EXCEPTIONS -tbb::atomic<size_t> num_pushed; -tbb::atomic<size_t> num_popped; -tbb::atomic<size_t> failed_pushes; -tbb::atomic<size_t> failed_pops; - -class SimplePushBody { - tbb::concurrent_bounded_queue<int>* q; - int max; -public: - SimplePushBody(tbb::concurrent_bounded_queue<int>* _q, int hi_thr) : q(_q), max(hi_thr) {} - void operator()(int thread_id) const { - if (thread_id == max) { - Harness::Sleep(50); - q->abort(); - return; - } - try { - q->push(42); - ++num_pushed; - } catch ( tbb::user_abort& ) { - ++failed_pushes; - } - } -}; - -class SimplePopBody { - tbb::concurrent_bounded_queue<int>* q; - int max; -public: - SimplePopBody(tbb::concurrent_bounded_queue<int>* _q, int hi_thr) : q(_q), max(hi_thr) {} - void operator()(int thread_id) const { - int e; - if (thread_id == max) { - Harness::Sleep(50); - q->abort(); - return; - } - try { - q->pop(e); - ++num_popped; - } catch ( tbb::user_abort& ) { - ++failed_pops; - } - } -}; -#endif /* TBB_USE_EXCEPTIONS */ - -void TestAbort() { -#if TBB_USE_EXCEPTIONS - for (int nthreads=MinThread; nthreads<=MaxThread; ++nthreads) { - REMARK("Testing Abort on %d thread(s).\n", nthreads); - - REMARK("...testing pushing to zero-sized queue\n"); - tbb::concurrent_bounded_queue<int> iq1; - iq1.set_capacity(0); - for (int i=0; i<10; ++i) { - num_pushed = num_popped = failed_pushes = failed_pops = 0; - SimplePushBody my_push_body1(&iq1, nthreads); - NativeParallelFor( nthreads+1, my_push_body1 ); - ASSERT(num_pushed == 0, "no elements should have been pushed to zero-sized queue"); - ASSERT((int)failed_pushes == nthreads, "All threads should have failed to push an element to zero-sized queue"); - } - - REMARK("...testing pushing to small-sized queue\n"); - tbb::concurrent_bounded_queue<int> iq2; - iq2.set_capacity(2); - for (int i=0; i<10; ++i) { - num_pushed = num_popped = failed_pushes = failed_pops = 0; - SimplePushBody my_push_body2(&iq2, nthreads); - NativeParallelFor( nthreads+1, my_push_body2 ); - ASSERT(num_pushed <= 2, "at most 2 elements should have been pushed to queue of size 2"); - if (nthreads >= 2) - ASSERT((int)failed_pushes == nthreads-2, "nthreads-2 threads should have failed to push an 
element to queue of size 2"); - int e; - while (iq2.try_pop(e)) ; - } - - REMARK("...testing popping from small-sized queue\n"); - tbb::concurrent_bounded_queue<int> iq3; - iq3.set_capacity(2); - for (int i=0; i<10; ++i) { - num_pushed = num_popped = failed_pushes = failed_pops = 0; - iq3.push(42); - iq3.push(42); - SimplePopBody my_pop_body(&iq3, nthreads); - NativeParallelFor( nthreads+1, my_pop_body); - ASSERT(num_popped <= 2, "at most 2 elements should have been popped from queue of size 2"); - if (nthreads >= 2) - ASSERT((int)failed_pops == nthreads-2, "nthreads-2 threads should have failed to pop an element from queue of size 2"); - else { - int e; - iq3.pop(e); - } - } - - REMARK("...testing pushing and popping from small-sized queue\n"); - tbb::concurrent_bounded_queue<int> iq4; - int cap = nthreads/2; - if (!cap) cap=1; - iq4.set_capacity(cap); - for (int i=0; i<10; ++i) { - num_pushed = num_popped = failed_pushes = failed_pops = 0; - SimplePushBody my_push_body2(&iq4, nthreads); - NativeParallelFor( nthreads+1, my_push_body2 ); - ASSERT((int)num_pushed <= cap, "at most cap elements should have been pushed to queue of size cap"); - if (nthreads >= cap) - ASSERT((int)failed_pushes == nthreads-cap, "nthreads-cap threads should have failed to push an element to queue of size cap"); - SimplePopBody my_pop_body(&iq4, nthreads); - NativeParallelFor( nthreads+1, my_pop_body); - ASSERT((int)num_popped <= cap, "at most cap elements should have been popped from queue of size cap"); - if (nthreads >= cap) - ASSERT((int)failed_pops == nthreads-cap, "nthreads-cap threads should have failed to pop an element from queue of size cap"); - else { - int e; - while (iq4.try_pop(e)) ; - } - } - } -#endif -} - -#if __TBB_CPP11_RVALUE_REF_PRESENT -struct MoveOperationTracker { - static size_t copy_constructor_called_times; - static size_t move_constructor_called_times; - static size_t copy_assignment_called_times; - static size_t move_assignment_called_times; - - MoveOperationTracker() {} - MoveOperationTracker(const MoveOperationTracker&) { - ++copy_constructor_called_times; - } - MoveOperationTracker(MoveOperationTracker&&) { - ++move_constructor_called_times; - } - MoveOperationTracker& operator=(MoveOperationTracker const&) { - ++copy_assignment_called_times; - return *this; - } - MoveOperationTracker& operator=(MoveOperationTracker&&) { - ++move_assignment_called_times; - return *this; - } -}; -size_t MoveOperationTracker::copy_constructor_called_times = 0; -size_t MoveOperationTracker::move_constructor_called_times = 0; -size_t MoveOperationTracker::copy_assignment_called_times = 0; -size_t MoveOperationTracker::move_assignment_called_times = 0; - -template <class CQ, push_t push_op, pop_t pop_op> -void TestMoveSupport() { - size_t &mcct = MoveOperationTracker::move_constructor_called_times; - size_t &ccct = MoveOperationTracker::copy_constructor_called_times; - size_t &cact = MoveOperationTracker::copy_assignment_called_times; - size_t &mact = MoveOperationTracker::move_assignment_called_times; - mcct = ccct = cact = mact = 0; - - CQ q; - - ASSERT(mcct == 0, "Value must be zero-initialized"); - ASSERT(ccct == 0, "Value must be zero-initialized"); - ASSERT(pusher<push_op>::push( q, MoveOperationTracker() ), NULL); - ASSERT(mcct == 1, "Not working push(T&&) or try_push(T&&)?"); - ASSERT(ccct == 0, "Copying of arg occurred during push(T&&) or try_push(T&&)"); - - MoveOperationTracker ob; - ASSERT(pusher<push_op>::push( q, std::move(ob) ), NULL); - ASSERT(mcct == 2, "Not working push(T&&) or 
try_push(T&&)?"); - ASSERT(ccct == 0, "Copying of arg occurred during push(T&&) or try_push(T&&)"); - - ASSERT(cact == 0, "Copy assignment called during push(T&&) or try_push(T&&)"); - ASSERT(mact == 0, "Move assignment called during push(T&&) or try_push(T&&)"); - - bool result = popper<pop_op>::pop( q, ob ); - ASSERT(result, NULL); - ASSERT(cact == 0, "Copy assignment called during try_pop(T&&)"); - ASSERT(mact == 1, "Move assignment was not called during try_pop(T&&)"); -} - -void TestMoveSupportInPushPop() { - REMARK("Testing Move Support in Push/Pop..."); - TestMoveSupport< tbb::concurrent_queue<MoveOperationTracker>, push_op, try_pop_op >(); - TestMoveSupport< tbb::concurrent_bounded_queue<MoveOperationTracker>, push_op, pop_op >(); - TestMoveSupport< tbb::concurrent_bounded_queue<MoveOperationTracker>, try_push_op, try_pop_op >(); - REMARK(" works.\n"); -} - -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -class NonTrivialConstructorType { -public: - NonTrivialConstructorType( int a = 0 ) : m_a( a ), m_str( "" ) {} - NonTrivialConstructorType( const std::string& str ) : m_a( 0 ), m_str( str ) {} - NonTrivialConstructorType( int a, const std::string& str ) : m_a( a ), m_str( str ) {} - int get_a() const { return m_a; } - std::string get_str() const { return m_str; } -private: - int m_a; - std::string m_str; -}; - -enum emplace_t { emplace_op, try_emplace_op }; - -template< emplace_t emplace_op > -struct emplacer { - template< typename CQ, typename... Args> - static void emplace( CQ& queue, Args&&... val ) { queue.emplace( std::forward<Args>( val )... ); } -}; - -template<> -struct emplacer< try_emplace_op > { - template<typename CQ, typename... Args> - static void emplace( CQ& queue, Args&&... val ) { - bool result = queue.try_emplace( std::forward<Args>( val )... 
); - ASSERT( result, "try_emplace error\n" ); - } -}; - -template<typename CQ, emplace_t emplace_op> -void TestEmplaceInQueue() { - CQ cq; - std::string test_str = "I'm being emplaced!"; - { - emplacer<emplace_op>::emplace( cq, 5 ); - ASSERT( cq.size() == 1, NULL ); - NonTrivialConstructorType popped( -1 ); - bool result = cq.try_pop( popped ); - ASSERT( result, NULL ); - ASSERT( popped.get_a() == 5, NULL ); - ASSERT( popped.get_str() == std::string( "" ), NULL ); - } - - ASSERT( cq.empty(), NULL ); - - { - NonTrivialConstructorType popped( -1 ); - emplacer<emplace_op>::emplace( cq, std::string(test_str) ); - bool result = cq.try_pop( popped ); - ASSERT( result, NULL ); - ASSERT( popped.get_a() == 0, NULL ); - ASSERT( popped.get_str() == test_str, NULL ); - } - - ASSERT( cq.empty(), NULL ); - - { - NonTrivialConstructorType popped( -1, "" ); - emplacer<emplace_op>::emplace( cq, 5, std::string(test_str) ); - bool result = cq.try_pop( popped ); - ASSERT( result, NULL ); - ASSERT( popped.get_a() == 5, NULL ); - ASSERT( popped.get_str() == test_str, NULL ); - } -} -void TestEmplace() { - REMARK("Testing support for 'emplace' method..."); - TestEmplaceInQueue< ConcQWithSizeWrapper<NonTrivialConstructorType>, emplace_op >(); - TestEmplaceInQueue< tbb::concurrent_bounded_queue<NonTrivialConstructorType>, emplace_op >(); - TestEmplaceInQueue< tbb::concurrent_bounded_queue<NonTrivialConstructorType>, try_emplace_op >(); - REMARK(" works.\n"); -} -#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - -template <typename Queue> -void Examine(Queue q, const std::vector<typename Queue::value_type> &vec) { - typedef typename Queue::value_type value_type; - - AssertEquality(q, vec); - - const Queue cq = q; - AssertEquality(cq, vec); - - q.clear(); - AssertEmptiness(q); - - FillTest<push_op>(q, vec); - EmptyTest<try_pop_op>(q, vec); - - bounded_queue_specific_test(q, vec); - - typename Queue::allocator_type a = q.get_allocator(); - value_type *ptr = a.allocate(1); - ASSERT(ptr, NULL); - a.deallocate(ptr, 1); -} - -template <typename Queue, typename QueueDebugAlloc> -void TypeTester(const std::vector<typename Queue::value_type> &vec) { - typedef typename std::vector<typename Queue::value_type>::const_iterator iterator; - ASSERT(vec.size() >= 5, "Array should have at least 5 elements"); - // Construct an empty queue. - Queue q1; - for (iterator it = vec.begin(); it != vec.end(); ++it) q1.push(*it); - Examine(q1, vec); - // Copying constructor. - Queue q3(q1); - Examine(q3, vec); - // Construct with non-default allocator. - QueueDebugAlloc q4; - for (iterator it = vec.begin(); it != vec.end(); ++it) q4.push(*it); - Examine(q4, vec); - // Copying constructor with the same allocator type. - QueueDebugAlloc q5(q4); - Examine(q5, vec); - // Construction with given allocator instance. - typename QueueDebugAlloc::allocator_type a; - QueueDebugAlloc q6(a); - for (iterator it = vec.begin(); it != vec.end(); ++it) q6.push(*it); - Examine(q6, vec); - // Construction with copying iteration range and given allocator instance. 
- QueueDebugAlloc q7(q1.unsafe_begin(), q1.unsafe_end(), a);
- Examine<QueueDebugAlloc>(q7, vec);
-}
-
-template <typename value_type>
-void TestTypes(const std::vector<value_type> &vec) {
- TypeTester< ConcQWithSizeWrapper<value_type>, ConcQWithSizeWrapper<value_type, debug_allocator<value_type> > >(vec);
- TypeTester< tbb::concurrent_bounded_queue<value_type>, tbb::concurrent_bounded_queue<value_type, debug_allocator<value_type> > >(vec);
-}
-
-void TestTypes() {
- const int NUMBER = 10;
-
- std::vector<int> arrInt;
- for (int i = 0; i < NUMBER; ++i) arrInt.push_back(i);
- std::vector< tbb::atomic<int> > arrTbb;
- for (int i = 0; i < NUMBER; ++i) {
- tbb::atomic<int> a;
- a = i;
- arrTbb.push_back(a);
- }
- TestTypes(arrInt);
- TestTypes(arrTbb);
-
-#if __TBB_CPP11_SMART_POINTERS_PRESENT
- std::vector< std::shared_ptr<int> > arrShr;
- for (int i = 0; i < NUMBER; ++i) arrShr.push_back(std::make_shared<int>(i));
- std::vector< std::weak_ptr<int> > arrWk;
- std::copy(arrShr.begin(), arrShr.end(), std::back_inserter(arrWk));
- TestTypes(arrShr);
- TestTypes(arrWk);
-#else
- REPORT("Known issue: C++11 smart pointer tests are skipped.\n");
-#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
-}
-
-int TestMain () {
- TestEmptiness();
-
- TestFullness();
-
- TestClearWorks();
-
- TestQueueTypeDeclaration();
-
- TestQueueIteratorWorks();
-
- TestQueueConstructors();
-
- TestQueueWorksWithPrimitiveTypes();
-
- TestQueueWorksWithSSE();
-
- // Test concurrent operations
- TestConcurrentPushPop();
-
- TestExceptions();
-
- TestAbort();
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- TestMoveSupportInPushPop();
- TestMoveConstruction();
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- TestEmplace();
-#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */
-#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
-
- TestTypes();
-
- return Harness::Done;
-}
diff --git a/src/tbb/src/test/test_concurrent_queue.h b/src/tbb/src/test/test_concurrent_queue.h
deleted file mode 100644
index 6e4bc4e1f..000000000
--- a/src/tbb/src/test/test_concurrent_queue.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
- This file is part of Threading Building Blocks. Threading Building Blocks is free software;
- you can redistribute it and/or modify it under the terms of the GNU General Public License
- version 2 as published by the Free Software Foundation. Threading Building Blocks is
- distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details. You should have received a copy of
- the GNU General Public License along with Threading Building Blocks; if not, write to the
- Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
- As a special exception, you may use this file as part of a free software library without
- restriction. Specifically, if other files instantiate templates or use macros or inline
- functions from this file, or you compile this file and link it with other files to produce
- an executable, this file does not by itself cause the resulting executable to be covered
- by the GNU General Public License. This exception does not however invalidate any other
- reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/atomic.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/spin_mutex.h" -#include "tbb/tbb_machine.h" - -#include "tbb/concurrent_monitor.h" - -struct hacked_micro_queue { - tbb::atomic<uintptr_t> head_page; - tbb::atomic<size_t> head_counter; - - tbb::atomic<uintptr_t> tail_page; - tbb::atomic<size_t> tail_counter; - - tbb::spin_mutex page_mutex; - }; - -// hacks for strict_ppl::concurrent_queue -struct hacked_concurrent_queue_rep { - static const size_t phi = 3; - static const size_t n_queue = 8; - - tbb::atomic<size_t> head_counter; - char pad1[tbb::internal::NFS_MaxLineSize-sizeof(tbb::atomic<size_t>)]; - tbb::atomic<size_t> tail_counter; - char pad2[tbb::internal::NFS_MaxLineSize-sizeof(tbb::atomic<size_t>)]; - - size_t items_per_page; - size_t item_size; - tbb::atomic<size_t> n_invalid_entries; - char pad3[tbb::internal::NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(tbb::atomic<size_t>)]; - - hacked_micro_queue array[n_queue]; -}; - -struct hacked_concurrent_queue_page_allocator { - size_t foo; -}; - -template <typename T> -struct hacked_concurrent_queue : public hacked_concurrent_queue_page_allocator { - hacked_concurrent_queue_rep* my_rep; - typename tbb::cache_aligned_allocator<T>::template rebind<char>::other my_allocator; -}; - -// hacks for concurrent_bounded_queue and deprecated::concurrent_queue -struct hacked_bounded_concurrent_queue_rep { - static const size_t phi = 3; - static const size_t n_queue = 8; - - tbb::atomic<size_t> head_counter; - char cmon_items_avail[ sizeof(tbb::internal::concurrent_monitor) ]; - tbb::atomic<size_t> n_invalid_entries; - char pad1[tbb::internal::NFS_MaxLineSize-((sizeof(tbb::atomic<size_t>)+sizeof(tbb::internal::concurrent_monitor)+sizeof(tbb::atomic<size_t>))&(tbb::internal::NFS_MaxLineSize-1))]; - - tbb::atomic<size_t> tail_counter; - char cmon_slots_avail[ sizeof(tbb::internal::concurrent_monitor) ]; - char pad2[tbb::internal::NFS_MaxLineSize-((sizeof(tbb::atomic<size_t>)+sizeof(tbb::internal::concurrent_monitor))&(tbb::internal::NFS_MaxLineSize-1))]; - hacked_micro_queue array[n_queue]; - - static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2); -}; - -struct hacked_bounded_concurrent_queue { - size_t foo; - hacked_bounded_concurrent_queue_rep* my_rep; - ptrdiff_t my_capacity; - size_t items_per_page; - size_t item_size; -}; diff --git a/src/tbb/src/test/test_concurrent_queue_whitebox.cpp b/src/tbb/src/test/test_concurrent_queue_whitebox.cpp deleted file mode 100644 index af167b984..000000000 --- a/src/tbb/src/test/test_concurrent_queue_whitebox.cpp +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define NOMINMAX -#include "harness_defs.h" -#include "test_concurrent_queue.h" -#include "tbb/concurrent_queue.h" -#include "tbb/concurrent_monitor.cpp" -#include "harness.h" -#include "harness_allocator.h" - -#if _MSC_VER==1500 && !__INTEL_COMPILER - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include <limits> -#if _MSC_VER==1500 && !__INTEL_COMPILER - #pragma warning( pop ) -#endif - -template <typename Q> -class FloggerBody : NoAssign { - Q *q; -public: - FloggerBody(Q *q_) : q(q_) {} - void operator()(const int threadID) const { - typedef typename Q::value_type value_type; - value_type elem = value_type(threadID); - for (size_t i=0; i<275; ++i) { - q->push(elem); - (void) q->try_pop(elem); - } - } -}; - -template <typename HackedQRep, typename Q> -void TestFloggerHelp(HackedQRep* hack_rep, Q* q, size_t items_per_page) { - size_t nq = HackedQRep::n_queue; - size_t hack_val = std::numeric_limits<std::size_t>::max() & ~( nq * items_per_page - 1 ); - hack_rep->head_counter = hack_val; - hack_rep->tail_counter = hack_val; - size_t k = hack_rep->tail_counter & -(ptrdiff_t)nq; - - for (size_t i=0; i<nq; ++i) { - hack_rep->array[i].head_counter = k; - hack_rep->array[i].tail_counter = k; - } - NativeParallelFor(MaxThread, FloggerBody<Q>(q)); - ASSERT(q->empty(), "FAILED flogger/empty test."); - delete q; -} - -template <typename T> -void TestFlogger(T /*max*/) { - { - tbb::concurrent_queue<T>* q = new tbb::concurrent_queue<T>; - REMARK("Wraparound on strict_ppl::concurrent_queue..."); - hacked_concurrent_queue_rep* hack_rep = ((hacked_concurrent_queue<T>*)(void*)q)->my_rep; - TestFloggerHelp(hack_rep, q, hack_rep->items_per_page); - REMARK(" works.\n"); - } - { - tbb::concurrent_bounded_queue<T>* q = new tbb::concurrent_bounded_queue<T>; - REMARK("Wraparound on tbb::concurrent_bounded_queue..."); - hacked_bounded_concurrent_queue* hack_q = (hacked_bounded_concurrent_queue*)(void*)q; - hacked_bounded_concurrent_queue_rep* hack_rep = hack_q->my_rep; - TestFloggerHelp(hack_rep, q, hack_q->items_per_page); - REMARK(" works.\n"); - } -} - -void TestWraparound() { - REMARK("Testing Wraparound...\n"); - TestFlogger(std::numeric_limits<int>::max()); - TestFlogger(std::numeric_limits<unsigned char>::max()); - REMARK("Done Testing Wraparound.\n"); -} - -int TestMain () { - TestWraparound(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_concurrent_unordered_common.h b/src/tbb/src/test/test_concurrent_unordered_common.h deleted file mode 100644 index 8befd6a5f..000000000 --- a/src/tbb/src/test/test_concurrent_unordered_common.h +++ /dev/null @@ -1,837 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* Some tests in this source file are based on PPL tests provided by Microsoft. */ -#include "tbb/parallel_for.h" -#include "tbb/tick_count.h" -#include "harness.h" -// Test that unordered containers do not require keys have default constructors. -#define __HARNESS_CHECKTYPE_DEFAULT_CTOR 0 -#include "harness_checktype.h" -#undef __HARNESS_CHECKTYPE_DEFAULT_CTOR -#include "harness_allocator.h" - -// TestInitListSupportWithoutAssign with an empty initializer list causes internal error in Intel Compiler. -#define __TBB_ICC_EMPTY_INIT_LIST_TESTS_BROKEN (__INTEL_COMPILER && __INTEL_COMPILER <= 1500) - -typedef local_counting_allocator<debug_allocator<std::pair<const int,int>,std::allocator> > MyAllocator; - -#define CheckAllocatorE(t,a,f) CheckAllocator(t,a,f,true,__LINE__) -#define CheckAllocatorA(t,a,f) CheckAllocator(t,a,f,false,__LINE__) -template<typename MyTable> -inline void CheckAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact = true, int line = 0) { - typename MyTable::allocator_type a = table.get_allocator(); - REMARK("#%d checking allocators: items %u/%u, allocs %u/%u\n", line, - unsigned(a.items_allocated), unsigned(a.items_freed), unsigned(a.allocations), unsigned(a.frees) ); - ASSERT( a.items_allocated == a.allocations, NULL); ASSERT( a.items_freed == a.frees, NULL); - if(exact) { - ASSERT( a.allocations == expected_allocs, NULL); ASSERT( a.frees == expected_frees, NULL); - } else { - ASSERT( a.allocations >= expected_allocs, NULL); ASSERT( a.frees >= expected_frees, NULL); - ASSERT( a.allocations - a.frees == expected_allocs - expected_frees, NULL ); - } -} - -// value generator for cumap -template <typename K, typename V = std::pair<const K, K> > -struct ValueFactory { - static V make(const K &value) { return V(value, value); } - static K key(const V &value) { return value.first; } - static K get(const V& value) { return value.second; } -}; - -// generator for cuset -template <typename T> -struct ValueFactory<T, T> { - static T make(const T &value) { return value; } - static T key(const T &value) { return value; } - static T get(const T &value) { return value; } -}; - -template <typename T> -struct Value : ValueFactory<typename T::key_type, typename T::value_type> {}; - -#if _MSC_VER -#pragma warning(disable: 4189) // warning 4189 -- local variable is initialized but not referenced -#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it -#endif - -template<typename ContainerType, typename Iterator, typename 
RangeType>
-std::pair<int,int> CheckRecursiveRange(RangeType range) {
- std::pair<int,int> sum(0, 0); // count, sum
- for( Iterator i = range.begin(), e = range.end(); i != e; ++i ) {
- ++sum.first; sum.second += Value<ContainerType>::get(*i);
- }
- if( range.is_divisible() ) {
- RangeType range2( range, tbb::split() );
- std::pair<int,int> sum1 = CheckRecursiveRange<ContainerType,Iterator, RangeType>( range );
- std::pair<int,int> sum2 = CheckRecursiveRange<ContainerType,Iterator, RangeType>( range2 );
- sum1.first += sum2.first; sum1.second += sum2.second;
- ASSERT( sum == sum1, "Mismatched ranges after division");
- }
- return sum;
-}
-
-template <typename T>
-struct SpecialTests {
- static void Test(const char *str) {REMARK("skipped -- specialized %s tests\n", str);}
-};
-
-#if __TBB_INITIALIZER_LISTS_PRESENT
-template<typename container_type>
-bool equal_containers( container_type const& lhs, container_type const& rhs ) {
- if ( lhs.size() != rhs.size() ) {
- return false;
- }
- return std::equal( lhs.begin(), lhs.end(), rhs.begin(), Harness::IsEqual() );
-}
-
-#include "test_initializer_list.h"
-
-template <typename Table, typename MultiTable>
-void TestInitList( std::initializer_list<typename Table::value_type> il ) {
- using namespace initializer_list_support_tests;
- REMARK("testing initializer_list methods \n");
-
- TestInitListSupportWithoutAssign<Table,test_special_insert>(il);
- TestInitListSupportWithoutAssign<MultiTable, test_special_insert>( il );
-
-#if __TBB_ICC_EMPTY_INIT_LIST_TESTS_BROKEN
- REPORT( "Known issue: TestInitListSupportWithoutAssign with an empty initializer list is skipped.\n");
-#else
- TestInitListSupportWithoutAssign<Table, test_special_insert>( {} );
- TestInitListSupportWithoutAssign<MultiTable, test_special_insert>( {} );
-#endif
-}
-#endif //if __TBB_INITIALIZER_LISTS_PRESENT
-
-template<typename T>
-void test_basic(const char * str)
-{
- T cont;
- const T &ccont(cont);
-
- // bool empty() const;
- ASSERT(ccont.empty(), "Concurrent container is not empty after construction");
-
- // size_type size() const;
- ASSERT(ccont.size() == 0, "Concurrent container is not empty after construction");
-
- // size_type max_size() const;
- ASSERT(ccont.max_size() > 0, "Concurrent container max size is invalid");
-
- //iterator begin();
- //iterator end();
- ASSERT(cont.begin() == cont.end(), "Concurrent container iterators are invalid after construction");
- ASSERT(ccont.begin() == ccont.end(), "Concurrent container iterators are invalid after construction");
- ASSERT(cont.cbegin() == cont.cend(), "Concurrent container iterators are invalid after construction");
-
- //std::pair<iterator, bool> insert(const value_type& obj);
- std::pair<typename T::iterator, bool> ins = cont.insert(Value<T>::make(1));
- ASSERT(ins.second == true && Value<T>::get(*(ins.first)) == 1, "Element 1 has not been inserted properly");
-
- // bool empty() const;
- ASSERT(!ccont.empty(), "Concurrent container is empty after adding an element");
-
- // size_type size() const;
- ASSERT(ccont.size() == 1, "Concurrent container size is incorrect");
-
- std::pair<typename T::iterator, bool> ins2 = cont.insert(Value<T>::make(1));
-
- if (T::allow_multimapping)
- {
- // std::pair<iterator, bool> insert(const value_type& obj);
- ASSERT(ins2.second == true && Value<T>::get(*(ins2.first)) == 1, "Element 1 has not been inserted properly");
-
- // size_type size() const;
- ASSERT(ccont.size() == 2, "Concurrent container size is incorrect");
-
- // size_type count(const key_type& k) const;
- 
ASSERT(ccont.count(1) == 2, "Concurrent container count(1) is incorrect"); - - // std::pair<iterator, iterator> equal_range(const key_type& k); - std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1); - typename T::iterator it = range.first; - ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly"); - unsigned int count = 0; - for (; it != range.second; it++) - { - count++; - ASSERT(Value<T>::get(*it) == 1, "Element 1 has not been found properly"); - } - - ASSERT(count == 2, "Range doesn't have the right number of elements"); - } - else - { - // std::pair<iterator, bool> insert(const value_type& obj); - ASSERT(ins2.second == false && ins2.first == ins.first, "Element 1 should not be re-inserted"); - - // size_type size() const; - ASSERT(ccont.size() == 1, "Concurrent container size is incorrect"); - - // size_type count(const key_type& k) const; - ASSERT(ccont.count(1) == 1, "Concurrent container count(1) is incorrect"); - - // std::pair<const_iterator, const_iterator> equal_range(const key_type& k) const; - // std::pair<iterator, iterator> equal_range(const key_type& k); - std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1); - typename T::iterator it = range.first; - ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly"); - ASSERT(++it == range.second, "Range doesn't have the right number of elements"); - } - - // const_iterator find(const key_type& k) const; - // iterator find(const key_type& k); - typename T::iterator it = cont.find(1); - ASSERT(it != cont.end() && Value<T>::get(*(it)) == 1, "Element 1 has not been found properly"); - ASSERT(ccont.find(1) == it, "Element 1 has not been found properly"); - - // iterator insert(const_iterator hint, const value_type& obj); - typename T::iterator it2 = cont.insert(ins.first, Value<T>::make(2)); - ASSERT(Value<T>::get(*it2) == 2, "Element 2 has not been inserted properly"); - - // T(const T& _Umap) - T newcont = ccont; - ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Copy construction has not copied the elements properly"); - - // size_type unsafe_erase(const key_type& k); - typename T::size_type size = cont.unsafe_erase(1); - ASSERT(T::allow_multimapping ? (size == 2) : (size == 1), "Erase has not removed the right number of elements"); - - // iterator unsafe_erase(const_iterator position); - typename T::iterator it4 = cont.unsafe_erase(cont.find(2)); - ASSERT(it4 == cont.end() && cont.size() == 0, "Erase has not removed the last element properly"); - - // template<class InputIterator> void insert(InputIterator first, InputIterator last); - cont.insert(newcont.begin(), newcont.end()); - ASSERT(T::allow_multimapping ? 
(cont.size() == 3) : (cont.size() == 2), "Range insert has not copied the elements properly");
-
- // iterator unsafe_erase(const_iterator first, const_iterator last);
- std::pair<typename T::iterator, typename T::iterator> range2 = newcont.equal_range(1);
- newcont.unsafe_erase(range2.first, range2.second);
- ASSERT(newcont.size() == 1, "Range erase has not erased the elements properly");
-
- // void clear();
- newcont.clear();
- ASSERT(newcont.begin() == newcont.end() && newcont.size() == 0, "Clear has not cleared the container");
-
-#if __TBB_INITIALIZER_LISTS_PRESENT
-#if __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
- REPORT("Known issue: the test for insert with initializer_list is skipped.\n");
-#else
- // void insert(const std::initializer_list<value_type> &il);
- newcont.insert( { Value<T>::make( 1 ), Value<T>::make( 2 ), Value<T>::make( 1 ) } );
- if (T::allow_multimapping) {
- ASSERT(newcont.size() == 3, "Concurrent container size is incorrect");
- ASSERT(newcont.count(1) == 2, "Concurrent container count(1) is incorrect");
- ASSERT(newcont.count(2) == 1, "Concurrent container count(2) is incorrect");
- std::pair<typename T::iterator, typename T::iterator> range = newcont.equal_range(1);
- it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- unsigned int count = 0;
- for (; it != range.second; it++) {
- count++;
- ASSERT(Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- }
- ASSERT(count == 2, "Range doesn't have the right number of elements");
- range = newcont.equal_range(2); it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 2, "Element 2 has not been found properly");
- count = 0;
- for (; it != range.second; it++) {
- count++;
- ASSERT(Value<T>::get(*it) == 2, "Element 2 has not been found properly");
- }
- ASSERT(count == 1, "Range doesn't have the right number of elements");
- } else {
- ASSERT(newcont.size() == 2, "Concurrent container size is incorrect");
- ASSERT(newcont.count(1) == 1, "Concurrent container count(1) is incorrect");
- ASSERT(newcont.count(2) == 1, "Concurrent container count(2) is incorrect");
- std::pair<typename T::iterator, typename T::iterator> range = newcont.equal_range(1);
- it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- ASSERT(++it == range.second, "Range doesn't have the right number of elements");
- range = newcont.equal_range(2); it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 2, "Element 2 has not been found properly");
- ASSERT(++it == range.second, "Range doesn't have the right number of elements");
- }
-#endif /* __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN */
-#endif /* __TBB_INITIALIZER_LISTS_PRESENT */
-
- // T& operator=(const T& _Umap)
- newcont = ccont;
- ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Assignment operator has not copied the elements properly");
-
- // void rehash(size_type n);
- newcont.rehash(16);
- ASSERT(T::allow_multimapping ? 
(newcont.size() == 3) : (newcont.size() == 2), "Rehash should not affect the container elements"); - - // float load_factor() const; - // float max_load_factor() const; - ASSERT(ccont.load_factor() <= ccont.max_load_factor(), "Load factor is invalid"); - - // void max_load_factor(float z); - cont.max_load_factor(16.0f); - ASSERT(ccont.max_load_factor() == 16.0f, "Max load factor has not been changed properly"); - - // hasher hash_function() const; - ccont.hash_function(); - - // key_equal key_eq() const; - ccont.key_eq(); - - cont.clear(); - CheckAllocatorA(cont, 1, 0); // one dummy is always allocated - for (int i = 0; i < 256; i++) - { - std::pair<typename T::iterator, bool> ins3 = cont.insert(Value<T>::make(i)); - ASSERT(ins3.second == true && Value<T>::get(*(ins3.first)) == i, "Element 1 has not been inserted properly"); - } - ASSERT(cont.size() == 256, "Wrong number of elements have been inserted"); - ASSERT((256 == CheckRecursiveRange<T,typename T::iterator>(cont.range()).first), NULL); - ASSERT((256 == CheckRecursiveRange<T,typename T::const_iterator>(ccont.range()).first), NULL); - - // size_type unsafe_bucket_count() const; - ASSERT(ccont.unsafe_bucket_count() == 16, "Wrong number of buckets"); - - // size_type unsafe_max_bucket_count() const; - ASSERT(ccont.unsafe_max_bucket_count() > 65536, "Wrong max number of buckets"); - - for (unsigned int i = 0; i < 256; i++) - { - typename T::size_type buck = ccont.unsafe_bucket(i); - - // size_type unsafe_bucket(const key_type& k) const; - ASSERT(buck < 16, "Wrong bucket mapping"); - } - - for (unsigned int i = 0; i < 16; i++) - { - // size_type unsafe_bucket_size(size_type n); - ASSERT(cont.unsafe_bucket_size(i) == 16, "Wrong number of elements are in a bucket"); - - // local_iterator unsafe_begin(size_type n); - // const_local_iterator unsafe_begin(size_type n) const; - // local_iterator unsafe_end(size_type n); - // const_local_iterator unsafe_end(size_type n) const; - // const_local_iterator unsafe_cbegin(size_type n) const; - // const_local_iterator unsafe_cend(size_type n) const; - unsigned int count = 0; - for (typename T::iterator bit = cont.unsafe_begin(i); bit != cont.unsafe_end(i); bit++) - { - count++; - } - ASSERT(count == 16, "Bucket iterators are invalid"); - } - - // void swap(T&); - cont.swap(newcont); - ASSERT(newcont.size() == 256, "Wrong number of elements after swap"); - ASSERT(newcont.count(200) == 1, "Element with key 200 is not present after swap"); - ASSERT(newcont.count(16) == 1, "Element with key 16 is not present after swap"); - ASSERT(newcont.count(99) == 1, "Element with key 99 is not present after swap"); - ASSERT(T::allow_multimapping ? 
(cont.size() == 3) : (cont.size() == 2), "Wrong number of elements after swap"); - - REMARK("passed -- basic %s tests\n", str); - -#if defined (VERBOSE) - REMARK("container dump debug:\n"); - cont._Dump(); - REMARK("container dump release:\n"); - cont.dump(); - REMARK("\n"); -#endif - - SpecialTests<T>::Test(str); -} - -void test_machine() { - ASSERT(__TBB_ReverseByte(0)==0, NULL ); - ASSERT(__TBB_ReverseByte(1)==0x80, NULL ); - ASSERT(__TBB_ReverseByte(0xFE)==0x7F, NULL ); - ASSERT(__TBB_ReverseByte(0xFF)==0xFF, NULL ); -} - -template<typename T> -class FillTable: NoAssign { - T &table; - const int items; - bool my_asymptotic; - typedef std::pair<typename T::iterator, bool> pairIB; -public: - FillTable(T &t, int i, bool asymptotic) : table(t), items(i), my_asymptotic(asymptotic) { - ASSERT( !(items&1) && items > 100, NULL); - } - void operator()(int threadn) const { - if( threadn == 0 ) { // Fill even keys forward (single thread) - bool last_inserted = true; - for( int i = 0; i < items; i+=2 ) { - pairIB pib = table.insert(Value<T>::make(my_asymptotic?1:i)); - ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic?1:i), "Element not properly inserted"); - ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this is inserted" ); - last_inserted = pib.second; - } - } else if( threadn == 1 ) { // Fill even keys backward (single thread) - bool last_inserted = true; - for( int i = items-2; i >= 0; i-=2 ) { - pairIB pib = table.insert(Value<T>::make(my_asymptotic?1:i)); - ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic?1:i), "Element not properly inserted"); - ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this is inserted" ); - last_inserted = pib.second; - } - } else if( !(threadn&1) ) { // Fill odd keys forward (multiple threads) - for( int i = 1; i < items; i+=2 ) -#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN - if ( i % 32 == 1 && i + 6 < items ) { - if (my_asymptotic) { - table.insert({ Value<T>::make(1), Value<T>::make(1), Value<T>::make(1) }); - ASSERT(Value<T>::get(*table.find(1)) == 1, "Element not properly inserted"); - } - else { - table.insert({ Value<T>::make(i), Value<T>::make(i + 2), Value<T>::make(i + 4) }); - ASSERT(Value<T>::get(*table.find(i)) == i, "Element not properly inserted"); - ASSERT(Value<T>::get(*table.find(i + 2)) == i + 2, "Element not properly inserted"); - ASSERT(Value<T>::get(*table.find(i + 4)) == i + 4, "Element not properly inserted"); - } - i += 4; - } else -#endif - { - pairIB pib = table.insert(Value<T>::make(my_asymptotic ? 1 : i)); - ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic ? 
1 : i), "Element not properly inserted"); - } - } else { // Check odd keys backward (multiple threads) - if (!my_asymptotic) { - bool last_found = false; - for( int i = items-1; i >= 0; i-=2 ) { - typename T::iterator it = table.find(i); - if( it != table.end() ) { // found - ASSERT(Value<T>::get(*it) == i, "Element not properly inserted"); - last_found = true; - } else ASSERT( !last_found, "Previous key was found but this is not" ); - } - } - } - } -}; - -typedef tbb::atomic<unsigned char> AtomicByte; - -template<typename ContainerType, typename RangeType> -struct ParallelTraverseBody: NoAssign { - const int n; - AtomicByte* const array; - ParallelTraverseBody( AtomicByte an_array[], int a_n ) : - n(a_n), array(an_array) - {} - void operator()( const RangeType& range ) const { - for( typename RangeType::iterator i = range.begin(); i!=range.end(); ++i ) { - int k = Value<ContainerType>::key(*i); - ASSERT( k == Value<ContainerType>::get(*i), NULL ); - ASSERT( 0<=k && k<n, NULL ); - array[k]++; - } - } -}; - -// if multimapping, oddCount is the value that each odd-indexed array element should have. -// not meaningful for non-multimapped case. -void CheckRange( AtomicByte array[], int n, bool allowMultiMapping, int oddCount ) { - if(allowMultiMapping) { - for( int k = 0; k<n; ++k) { - if(k%2) { - if( array[k] != oddCount ) { - REPORT("array[%d]=%d (should be %d)\n", k, int(array[k]), oddCount); - ASSERT(false,NULL); - } - } - else { - if(array[k] != 2) { - REPORT("array[%d]=%d\n", k, int(array[k])); - ASSERT(false,NULL); - } - } - } - } - else { - for( int k=0; k<n; ++k ) { - if( array[k] != 1 ) { - REPORT("array[%d]=%d\n", k, int(array[k])); - ASSERT(false,NULL); - } - } - } -} - -template<typename T> -class CheckTable: NoAssign { - T &table; -public: - CheckTable(T &t) : NoAssign(), table(t) {} - void operator()(int i) const { - int c = (int)table.count( i ); - ASSERT( c, "must exist" ); - } -}; - -template<typename T> -class AssignBody: NoAssign { - T &table; -public: - AssignBody(T &t) : NoAssign(), table(t) {} - void operator()(int i) const { - table[i] = i; - } -}; - -template<typename T> -void test_concurrent(const char *tablename, bool asymptotic = false) { -#if TBB_USE_ASSERT - int items = 2000; -#else - int items = 20000; -#endif - int nItemsInserted = 0; - int nThreads = 0; - T table(items/1000); - #if __bgp__ - nThreads = 6; - #else - nThreads = 16; - #endif - if(T::allow_multimapping) { - // even passes (threads 0 & 1) put N/2 items each - // odd passes (threads > 1) put N/2 if thread is odd, else checks if even. - items = 4*items / (nThreads + 2); // approximately same number of items inserted. 
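// To spell out the accounting above: threads 0 and 1 both insert the
// items/2 even keys, and with multimapping both copies are kept, giving
// 2 * (items/2) == items elements. Of threads 2..nThreads-1, the
// (nThreads-2)/2 even-numbered ones each insert the items/2 odd keys,
// giving (nThreads-2) * items / 4 more, while the odd-numbered ones only
// verify. For nThreads == 16 that is items + 3.5 * items in total, which
// is exactly the nItemsInserted formula below.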
- nItemsInserted = items + (nThreads-2) * items / 4; - } - else { - nItemsInserted = items; - } - REMARK("%s items == %d\n", tablename, items); - tbb::tick_count t0 = tbb::tick_count::now(); - NativeParallelFor( nThreads, FillTable<T>(table, items, asymptotic) ); - tbb::tick_count t1 = tbb::tick_count::now(); - REMARK( "time for filling '%s' by %d items = %g\n", tablename, table.size(), (t1-t0).seconds() ); - ASSERT( int(table.size()) == nItemsInserted, NULL); - - if(!asymptotic) { - AtomicByte* array = new AtomicByte[items]; - memset( array, 0, items*sizeof(AtomicByte) ); - - typename T::range_type r = table.range(); - std::pair<int,int> p = CheckRecursiveRange<T,typename T::iterator>(r); - ASSERT((nItemsInserted == p.first), NULL); - tbb::parallel_for( r, ParallelTraverseBody<T, typename T::const_range_type>( array, items )); - CheckRange( array, items, T::allow_multimapping, (nThreads - 1)/2 ); - - const T &const_table = table; - memset( array, 0, items*sizeof(AtomicByte) ); - typename T::const_range_type cr = const_table.range(); - ASSERT((nItemsInserted == CheckRecursiveRange<T,typename T::const_iterator>(cr).first), NULL); - tbb::parallel_for( cr, ParallelTraverseBody<T, typename T::const_range_type>( array, items )); - CheckRange( array, items, T::allow_multimapping, (nThreads - 1) / 2 ); - delete[] array; - - tbb::parallel_for( 0, items, CheckTable<T>( table ) ); - } - - table.clear(); - CheckAllocatorA(table, items+1, items); // one dummy is always allocated - - for(int i=0; i<1000; ++i) { - tbb::parallel_for( 0, 8, AssignBody<T>( table ) ); - table.clear(); - } -} - -// The helper to call a function only when a doCall == true. -template <bool doCall> struct CallIf { - template<typename FuncType> void operator() ( FuncType func ) const { func(); } -}; -template <> struct CallIf<false> { - template<typename FuncType> void operator()( FuncType ) const {} -}; - -#include <vector> -#include <list> -#include <algorithm> - -template <typename ValueType> -class TestRange : NoAssign { - const std::list<ValueType> &my_lst; - std::vector< tbb::atomic<bool> > &my_marks; -public: - TestRange( const std::list<ValueType> &lst, std::vector< tbb::atomic<bool> > &marks ) : my_lst( lst ), my_marks( marks ) { - std::fill( my_marks.begin(), my_marks.end(), false ); - } - template <typename Range> - void operator()( const Range &r ) const { doTestRange( r.begin(), r.end() ); } - template<typename Iterator> - void doTestRange( Iterator i, Iterator j ) const { - for ( Iterator it = i; it != j; ) { - Iterator prev_it = it++; - typename std::list<ValueType>::const_iterator it2 = std::search( my_lst.begin(), my_lst.end(), prev_it, it, Harness::IsEqual() ); - ASSERT( it2 != my_lst.end(), NULL ); - typename std::list<ValueType>::difference_type dist = std::distance( my_lst.begin( ), it2 ); - ASSERT( !my_marks[dist], NULL ); - my_marks[dist] = true; - } - } -}; - -#if __TBB_CPP11_SMART_POINTERS_PRESENT -namespace tbb { - template<> class tbb_hash< std::shared_ptr<int> > { - public: - size_t operator()( const std::shared_ptr<int>& key ) const { return tbb_hasher( *key ); } - }; - template<> class tbb_hash< const std::shared_ptr<int> > { - public: - size_t operator()( const std::shared_ptr<int>& key ) const { return tbb_hasher( *key ); } - }; - template<> class tbb_hash< std::weak_ptr<int> > { - public: - size_t operator()( const std::weak_ptr<int>& key ) const { return tbb_hasher( *key.lock( ) ); } - }; - template<> class tbb_hash< const std::weak_ptr<int> > { - public: - size_t operator()( const 
std::weak_ptr<int>& key ) const { return tbb_hasher( *key.lock( ) ); } - }; -} -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ - -template <bool, typename Table> -void TestMapSpecificMethods( Table &, const typename Table::value_type & ) { /* do nothing for a common case */ } - -template <bool defCtorPresent, typename Table> -class CheckValue : NoAssign { - Table &my_c; -public: - CheckValue( Table &c ) : my_c( c ) {} - void operator()( const typename Table::value_type &value ) { - typedef typename Table::iterator Iterator; - typedef typename Table::const_iterator ConstIterator; - const Table &constC = my_c; - ASSERT( my_c.count( Value<Table>::key( value ) ) == 1, NULL ); - // find - ASSERT( Harness::IsEqual()(*my_c.find( Value<Table>::key( value ) ), value), NULL ); - ASSERT( Harness::IsEqual()(*constC.find( Value<Table>::key( value ) ), value), NULL ); - // erase - ASSERT( my_c.unsafe_erase( Value<Table>::key( value ) ), NULL ); - ASSERT( my_c.count( Value<Table>::key( value ) ) == 0, NULL ); - // insert - std::pair<Iterator, bool> res = my_c.insert( value ); - ASSERT( Harness::IsEqual()(*res.first, value), NULL ); - ASSERT( res.second, NULL); - // erase - Iterator it = res.first; - it++; - ASSERT( my_c.unsafe_erase( res.first ) == it, NULL ); - // insert - ASSERT( Harness::IsEqual()(*my_c.insert( my_c.begin(), value ), value), NULL ); - // equal_range - std::pair<Iterator, Iterator> r1 = my_c.equal_range( Value<Table>::key( value ) ); - ASSERT( Harness::IsEqual()(*r1.first, value) && ++r1.first == r1.second, NULL ); - std::pair<ConstIterator, ConstIterator> r2 = constC.equal_range( Value<Table>::key( value ) ); - ASSERT( Harness::IsEqual()(*r2.first, value) && ++r2.first == r2.second, NULL ); - TestMapSpecificMethods<defCtorPresent>( my_c, value ); - } -}; - -#include "tbb/task_scheduler_init.h" - -#if __TBB_CPP11_RVALUE_REF_PRESENT -#include "test_container_move_support.h" - -struct unordered_move_traits_base { - enum{ expected_number_of_items_to_allocate_for_steal_move = 3 }; - - template <typename unordered_type, typename iterator_type> - static unordered_type& construct_container(tbb::aligned_space<unordered_type> & storage, iterator_type begin, iterator_type end){ - new (storage.begin()) unordered_type(begin, end); - return * storage.begin(); - } - - template <typename unordered_type, typename iterator_type, typename allocator_type> - static unordered_type& construct_container(tbb::aligned_space<unordered_type> & storage, iterator_type begin, iterator_type end, allocator_type const& a ){ - size_t deault_n_of_buckets = 8; //can not use concurrent_unordered_base::n_of_buckets as it is inaccessible - new (storage.begin()) unordered_type(begin, end, deault_n_of_buckets, typename unordered_type::hasher(), typename unordered_type::key_equal(), a); - return * storage.begin(); - } - - template<typename unordered_type, typename iterator> - static bool equal(unordered_type const& c, iterator begin, iterator end){ - bool equal_sizes = ( static_cast<size_t>(std::distance(begin, end)) == c.size() ); - if (!equal_sizes) - return false; - - for (iterator it = begin; it != end; ++it ){ - if (c.find( Value<unordered_type>::key(*it)) == c.end()){ - return false; - } - } - return true; - } -}; - -template<typename container_traits> -void test_rvalue_ref_support(const char* /*container_name*/){ - TestMoveConstructor<container_traits>(); - TestMoveAssignOperator<container_traits>(); -#if TBB_USE_EXCEPTIONS - 
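// As their names spell out, the two checks below exercise the move
// constructor with unequal allocators, where elements cannot be stolen
// wholesale and must be constructed one-by-one: first under a simulated
// memory-allocation failure, then with an element constructor that throws.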
TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure<container_traits>(); - TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor<container_traits>(); -#endif //TBB_USE_EXCEPTIONS -} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -template <bool defCtorPresent, typename Table> -void Examine( Table c, const std::list<typename Table::value_type> &lst ) { - typedef typename Table::size_type SizeType; - typedef typename Table::value_type ValueType; - - ASSERT( !c.empty() && c.size() == lst.size() && c.max_size() >= c.size(), NULL ); - - std::for_each( lst.begin(), lst.end(), CheckValue<defCtorPresent, Table>( c ) ); - - std::vector< tbb::atomic<bool> > marks( lst.size() ); - - TestRange<ValueType>( lst, marks ).doTestRange( c.begin(), c.end() ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - TestRange<ValueType>( lst, marks ).doTestRange( c.begin(), c.end() ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - const Table constC = c; - ASSERT( c.size() == constC.size(), NULL ); - - TestRange<ValueType>( lst, marks ).doTestRange( constC.cbegin(), constC.cend() ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - tbb::task_scheduler_init init; - - tbb::parallel_for( c.range(), TestRange<ValueType>( lst, marks ) ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - tbb::parallel_for( constC.range( ), TestRange<ValueType>( lst, marks ) ); - ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL ); - - const SizeType bucket_count = c.unsafe_bucket_count(); - ASSERT( c.unsafe_max_bucket_count() >= bucket_count, NULL ); - SizeType counter = SizeType( 0 ); - for ( SizeType i = 0; i < bucket_count; ++i ) { - const SizeType size = c.unsafe_bucket_size( i ); - typedef typename Table::difference_type diff_type; - ASSERT( std::distance( c.unsafe_begin( i ), c.unsafe_end( i ) ) == diff_type( size ), NULL ); - ASSERT( std::distance( c.unsafe_cbegin( i ), c.unsafe_cend( i ) ) == diff_type( size ), NULL ); - ASSERT( std::distance( constC.unsafe_begin( i ), constC.unsafe_end( i ) ) == diff_type( size ), NULL ); - ASSERT( std::distance( constC.unsafe_cbegin( i ), constC.unsafe_cend( i ) ) == diff_type( size ), NULL ); - counter += size; - } - ASSERT( counter == lst.size(), NULL ); - - typedef typename Table::value_type value_type; - for ( typename std::list<value_type>::const_iterator it = lst.begin(); it != lst.end(); ) { - const SizeType index = c.unsafe_bucket( Value<Table>::key( *it ) ); - typename std::list<value_type>::const_iterator prev_it = it++; - ASSERT( std::search( c.unsafe_begin( index ), c.unsafe_end( index ), prev_it, it, Harness::IsEqual() ) != c.unsafe_end( index ), NULL ); - } - - c.rehash( 2 * bucket_count ); - ASSERT( c.unsafe_bucket_count() > bucket_count, NULL ); - - ASSERT( c.load_factor() <= c.max_load_factor(), NULL ); - c.max_load_factor( 1.0f ); - - Table c2; - typename std::list<value_type>::const_iterator begin5 = lst.begin(); - std::advance( begin5, 5 ); - c2.insert( lst.begin(), begin5 ); - std::for_each( lst.begin(), begin5, CheckValue<defCtorPresent, Table>( c2 ) ); - - c2.swap( c ); - ASSERT( c2.size() == lst.size(), NULL ); - ASSERT( c.size() == 5, NULL ); - std::for_each( lst.begin(), lst.end(), CheckValue<defCtorPresent, Table>( c2 ) ); - - c2.clear(); - ASSERT( c2.size() == 0, NULL ); - - typename Table::allocator_type a = c.get_allocator(); - value_type *ptr = 
a.allocate( 1 ); - ASSERT( ptr, NULL ); - a.deallocate( ptr, 1 ); - - c.hash_function(); - c.key_eq(); -} - -template <bool defCtorPresent, typename Table, typename TableDebugAlloc> -void TypeTester( const std::list<typename Table::value_type> &lst ) { - ASSERT( lst.size() >= 5, "Array should have at least 5 elements" ); - ASSERT( lst.size() <= 100, "The test has O(n^2) complexity, so a large number of elements can lead to long execution times" ); - // Construct an empty table. - Table c1; - c1.insert( lst.begin(), lst.end() ); - Examine<defCtorPresent>( c1, lst ); -#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN - // Constructor from an initializer_list. - typename std::list<typename Table::value_type>::const_iterator it = lst.begin(); - Table c2( { *it++, *it++, *it++ } ); - c2.insert( it, lst.end( ) ); - Examine<defCtorPresent>( c2, lst ); -#endif - // Copy constructor. - Table c3( c1 ); - Examine<defCtorPresent>( c3, lst ); - // Construct with a non-default allocator. - TableDebugAlloc c4; - c4.insert( lst.begin(), lst.end() ); - Examine<defCtorPresent>( c4, lst ); - // Copy constructor for a container with a different allocator type. - TableDebugAlloc c5( c4 ); - Examine<defCtorPresent>( c5, lst ); - // Construct an empty table with n preallocated buckets. - Table c6( lst.size() ); - c6.insert( lst.begin(), lst.end() ); - Examine<defCtorPresent>( c6, lst ); - TableDebugAlloc c7( lst.size( ) ); - c7.insert( lst.begin(), lst.end() ); - Examine<defCtorPresent>( c7, lst ); - // Construct from an iteration range, and with a given allocator instance. - Table c8( c1.begin(), c1.end() ); - Examine<defCtorPresent>( c8, lst ); - typename TableDebugAlloc::allocator_type a; - TableDebugAlloc c9( a ); - c9.insert( c7.begin(), c7.end() ); - Examine<defCtorPresent>( c9, lst ); -} - -namespace test_select_size_t_constant{ - __TBB_STATIC_ASSERT((tbb::internal::select_size_t_constant<1234,1234>::value == 1234),"select_size_t_constant::value is not compile time constant"); -// Two constants are used in the test: a 32-bit one and a 64-bit one. -// The 64-bit constant should be chosen so that its 32-bit halves add up to the 32-bit one (the first constant used in the test). -// % ~0U is used to sum up the 32-bit halves of the 64-bit constant. ("% ~0U" essentially adds the 32-bit "digits", like "%9" adds -// the digits (modulo 9) of a number in base 10). -// So iff select_size_t_constant is correct, the result of the calculation below will be the same on both 32-bit and 64-bit platforms. - __TBB_STATIC_ASSERT((tbb::internal::select_size_t_constant<0x12345678U,0x091A2B3C091A2B3CULL>::value % ~0U == 0x12345678U), - "select_size_t_constant has chosen the wrong constant"); -} diff --git a/src/tbb/src/test/test_concurrent_unordered_map.cpp b/src/tbb/src/test/test_concurrent_unordered_map.cpp deleted file mode 100644 index 742175746..000000000 --- a/src/tbb/src/test/test_concurrent_unordered_map.cpp +++ /dev/null @@ -1,311 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details.
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define __TBB_EXTRA_DEBUG 1 -#include "tbb/concurrent_unordered_map.h" -#if __TBB_INITIALIZER_LISTS_PRESENT -// These operator== are used implicitly in test_initializer_list.h. -// For some unknown reason clang is not able to find the if they a declared after the -// inclusion of test_initializer_list.h. -template<typename container_type> -bool equal_containers( container_type const& lhs, container_type const& rhs ); -template<typename Key, typename Value> -bool operator==( tbb::concurrent_unordered_map<Key, Value> const& lhs, tbb::concurrent_unordered_map<Key, Value> const& rhs ) { - return equal_containers( lhs, rhs ); -} -template<typename Key, typename Value> -bool operator==( tbb::concurrent_unordered_multimap<Key, Value> const& lhs, tbb::concurrent_unordered_multimap<Key, Value> const& rhs ) { - return equal_containers( lhs, rhs ); -} -#endif /* __TBB_INITIALIZER_LISTS_PRESENT */ -#include "test_concurrent_unordered_common.h" - -typedef tbb::concurrent_unordered_map<int, int, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> MyMap; -typedef tbb::concurrent_unordered_map<int, check_type<int>, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> MyCheckedMap; -typedef tbb::concurrent_unordered_multimap<int, int, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> MyMultiMap; -typedef tbb::concurrent_unordered_multimap<int, check_type<int>, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> MyCheckedMultiMap; - -template <> -struct SpecialTests <MyMap> { - static void Test( const char *str ) { - MyMap cont( 0 ); - const MyMap &ccont( cont ); - - // mapped_type& operator[](const key_type& k); - cont[1] = 2; - - // bool empty() const; - ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" ); - - // size_type size() const; - ASSERT( ccont.size( ) == 1, "Concurrent container size incorrect" ); - - ASSERT( cont[1] == 2, "Concurrent container value incorrect" ); - - // mapped_type& at( const key_type& k ); - // const mapped_type& at(const key_type& k) const; - ASSERT( cont.at( 1 ) == 2, "Concurrent container value incorrect" ); - ASSERT( ccont.at( 1 ) == 2, "Concurrent container value incorrect" ); - - // iterator find(const key_type& k); - MyMap::const_iterator it = cont.find( 1 ); - ASSERT( it != cont.end( ) && Value<MyMap>::get( *(it) ) == 2, "Element with key 1 not properly found" ); - cont.unsafe_erase( it ); - it = cont.find( 1 ); - ASSERT( it == cont.end( ), "Element with key 1 not properly erased" ); - - REMARK( "passed -- specialized %s tests\n", str ); - } -}; - -template<> -class AssignBody<MyCheckedMap>: NoAssign{ - MyCheckedMap &table; -public: - AssignBody( MyCheckedMap &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - table.insert( MyCheckedMap::value_type( i, 
check_type<int>( i ) ) ); - } -}; - -// for multimap insert (i%3)+1 items [i,3*i], [i,3*i+1] .. -template<> -class AssignBody<MyMultiMap>: NoAssign{ - MyMultiMap &table; -public: - AssignBody( MyMultiMap &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - for ( int j = 0; j < (i % 3) + 1; ++j ) { - table.insert( std::pair<int, int>( i, 3 * i + j - 1 ) ); - } - } -}; - -// for multimap insert (i%3)+1 items [i,3*i], [i,3*i+1] .. -template<> -class AssignBody<MyCheckedMultiMap>: NoAssign{ - MyCheckedMultiMap &table; -public: - AssignBody( MyCheckedMultiMap &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - for ( int j = 0; j < (i % 3) + 1; ++j ) { - table.insert( std::pair<int, int>( i, 3 * i + j - 1 ) ); - } - } -}; - -// test assumes the unordered multimap puts items in ascending order, and the insertions -// occur at the end of a range. This assumption may not always be valid. -template <> -struct SpecialTests <MyMultiMap> { -#define VALUE1 7 -#define VALUE2 2 - static void Test( const char *str ) { - MyMultiMap cont( 0 ); - const MyMultiMap &ccont( cont ); - // mapped_type& operator[](const key_type& k); - cont.insert( std::make_pair( 1, VALUE1 ) ); - - // bool empty() const; - ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" ); - - // size_type size() const; - ASSERT( ccont.size( ) == 1, "Concurrent container size incorrect" ); - ASSERT( (*(cont.begin( ))).second == VALUE1, "Concurrent container value incorrect" ); - ASSERT( (*(cont.equal_range( 1 )).first).second == VALUE1, "Improper value from equal_range" ); - ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" ); - - cont.insert( std::make_pair( 1, VALUE2 ) ); - - // bool empty() const; - ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" ); - - // size_type size() const; - ASSERT( ccont.size( ) == 2, "Concurrent container size incorrect" ); - ASSERT( (*(cont.begin( ))).second == VALUE1, "Concurrent container value incorrect" ); - ASSERT( (*(cont.equal_range( 1 )).first).second == VALUE1, "Improper value from equal_range" ); - ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" ); - - // check that the second value is part of the range. - // though I am not sure there are guarantees what order the insertions appear in the range - // if the order differs the ASSERT above will fail already. - std::pair<MyMultiMap::iterator, MyMultiMap::iterator> range = cont.equal_range( 1 ); - MyMultiMap::iterator ii = range.first; - ++ii; - ASSERT( (*ii).second == VALUE2, "Improper value for second insertion" ); - - cont.insert( std::make_pair( 0, 4 ) ); - - // bool empty() const; - ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" ); - - // size_type size() const; - ASSERT( ccont.size( ) == 3, "Concurrent container size incorrect" ); - ASSERT( (*(cont.begin( ))).second == 4, "Concurrent container value incorrect" ); - ASSERT( (*(cont.equal_range( 1 )).first).second == VALUE1, "Improper value from equal_range" ); - ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" ); - - REMARK( "passed -- specialized %s tests\n", str ); - } -}; - -#if __TBB_RANGE_BASED_FOR_PRESENT -#include "test_range_based_for.h" -// Add the similar test for concurrent_unordered_set. 
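// For orientation, a minimal sketch of the property exercised below:
// concurrent_unordered_map exposes begin()/end(), so a C++11 range-based
// for visits each element exactly once. Illustrative only; the helper name
// sum_of_mapped_values is hypothetical and not part of this test file, and
// it assumes <utility> is available through the existing includes.
static int sum_of_mapped_values() {
    tbb::concurrent_unordered_map<int, int> m;
    for ( int i = 1; i <= 100; ++i )
        m.insert( std::make_pair( i, i ) );
    int total = 0;
    for ( const std::pair<const int, int>& kv : m )
        total += kv.second;
    return total; // 100 * 101 / 2 == 5050
}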
-void TestRangeBasedFor() { - using namespace range_based_for_support_tests; - - REMARK( "testing range based for loop compatibility \n" ); - typedef tbb::concurrent_unordered_map<int, int> cu_map; - cu_map a_cu_map; - const int sequence_length = 100; - for ( int i = 1; i <= sequence_length; ++i ) { - a_cu_map.insert( cu_map::value_type( i, i ) ); - } - - ASSERT( range_based_for_accumulate( a_cu_map, pair_second_summer(), 0 ) == gauss_summ_of_int_sequence( sequence_length ), "incorrect accumulated value generated via range based for ?" ); -} -#endif //if __TBB_RANGE_BASED_FOR_PRESENT - -#if __TBB_CPP11_RVALUE_REF_PRESENT -struct cu_map_type : unordered_move_traits_base { - template<typename element_type, typename allocator_type> - struct apply { - typedef tbb::concurrent_unordered_map<element_type, element_type, tbb::tbb_hash<element_type>, std::equal_to<element_type>, allocator_type > type; - }; - - typedef FooPairIterator init_iterator_type; -}; - -struct cu_multimap_type : unordered_move_traits_base { - template<typename element_type, typename allocator_type> - struct apply { - typedef tbb::concurrent_unordered_multimap<element_type, element_type, tbb::tbb_hash<element_type>, std::equal_to<element_type>, allocator_type > type; - }; - - typedef FooPairIterator init_iterator_type; -}; -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - -template <typename Table> -class TestOperatorSquareBrackets : NoAssign { - typedef typename Table::value_type ValueType; - Table &my_c; - const ValueType &my_value; -public: - TestOperatorSquareBrackets( Table &c, const ValueType &value ) : my_c( c ), my_value( value ) {} - void operator()() const { - ASSERT( Harness::IsEqual()(my_c[my_value.first], my_value.second), NULL ); - } -}; - -template <bool defCtorPresent, typename Key, typename Element, typename Hasher, typename Equality, typename Allocator> -void TestMapSpecificMethods( tbb::concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator> &c, - const typename tbb::concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator>::value_type &value ) { - typedef tbb::concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator> Table; - CallIf<defCtorPresent>()(TestOperatorSquareBrackets<Table>( c, value )); - ASSERT( Harness::IsEqual()(c.at( value.first ), value.second), NULL ); - const Table &constC = c; - ASSERT( Harness::IsEqual()(constC.at( value.first ), value.second), NULL ); -} - -template <bool defCtorPresent, typename ValueType> -void TestTypesMap( const std::list<ValueType> &lst ) { - typedef typename ValueType::first_type KeyType; - typedef typename ValueType::second_type ElemType; - TypeTester< defCtorPresent, tbb::concurrent_unordered_map<KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual>, - tbb::concurrent_unordered_map< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual, debug_allocator<ValueType> > >( lst ); - TypeTester< defCtorPresent, tbb::concurrent_unordered_multimap<KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual>, - tbb::concurrent_unordered_multimap< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual, debug_allocator<ValueType> > >( lst ); -} - -void TestTypes() { - const int NUMBER = 10; - - std::list< std::pair<const int, int> > arrIntInt; - for ( int i = 0; i < NUMBER; ++i ) arrIntInt.push_back( std::make_pair( i, NUMBER - i ) ); - TestTypesMap</*def_ctor_present = */true>( arrIntInt ); - - std::list< std::pair< const int, tbb::atomic<int> > > arrIntTbb; - for ( int i = 0; i < NUMBER; ++i ) { - tbb::atomic<int> b; - b = 
NUMBER - i; - arrIntTbb.push_back( std::make_pair( i, b ) ); - } - TestTypesMap</*defCtorPresent = */true>( arrIntTbb ); - -#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT - std::list< std::pair<const std::reference_wrapper<const int>, int> > arrRefInt; - for ( std::list< std::pair<const int, int> >::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it ) - arrRefInt.push_back( std::make_pair( std::reference_wrapper<const int>( it->first ), it->second ) ); - TestTypesMap</*defCtorPresent = */true>( arrRefInt ); - - std::list< std::pair<const int, std::reference_wrapper<int> > > arrIntRef; - for ( std::list< std::pair<const int, int> >::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it ) { - // Using std::make_pair below causes compilation issues with early implementations of std::reference_wrapper. - arrIntRef.push_back( std::pair<const int, std::reference_wrapper<int> >( it->first, std::reference_wrapper<int>( it->second ) ) ); - } - TestTypesMap</*defCtorPresent = */false>( arrIntRef ); -#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT */ - -#if __TBB_CPP11_SMART_POINTERS_PRESENT - std::list< std::pair< const std::shared_ptr<int>, std::shared_ptr<int> > > arrShrShr; - for ( int i = 0; i < NUMBER; ++i ) arrShrShr.push_back( std::make_pair( std::make_shared<int>( i ), std::make_shared<int>( NUMBER - i ) ) ); - TestTypesMap</*defCtorPresent = */true>( arrShrShr ); - - std::list< std::pair< const std::weak_ptr<int>, std::weak_ptr<int> > > arrWkWk; - std::copy( arrShrShr.begin(), arrShrShr.end(), std::back_inserter( arrWkWk ) ); - TestTypesMap</*defCtorPresent = */true>( arrWkWk ); -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ -} - -int TestMain() { - test_machine(); - - test_basic<MyMap>( "concurrent unordered Map" ); - test_concurrent<MyMap>( "concurrent unordered Map" ); - test_basic<MyMultiMap>( "concurrent unordered MultiMap" ); - test_concurrent<MyMultiMap>( "concurrent unordered MultiMap" ); - test_concurrent<MyMultiMap>( "concurrent unordered MultiMap asymptotic", true ); - - { Check<MyCheckedMap::value_type> checkit; test_basic<MyCheckedMap>( "concurrent unordered map (checked)" ); } - { Check<MyCheckedMap::value_type> checkit; test_concurrent<MyCheckedMap>( "concurrent unordered map (checked)" ); } - - { Check<MyCheckedMultiMap::value_type> checkit; test_basic<MyCheckedMultiMap>( "concurrent unordered MultiMap (checked)" ); } - { Check<MyCheckedMultiMap::value_type> checkit; test_concurrent<MyCheckedMultiMap>( "concurrent unordered MultiMap (checked)" ); } - -#if __TBB_INITIALIZER_LISTS_PRESENT - TestInitList< tbb::concurrent_unordered_map<int, int>, - tbb::concurrent_unordered_multimap<int, int> >( {{1,1},{2,2},{3,3},{4,4},{5,5}} ); -#endif /* __TBB_INITIALIZER_LISTS_PRESENT */ - -#if __TBB_RANGE_BASED_FOR_PRESENT - TestRangeBasedFor(); -#endif - -#if __TBB_CPP11_RVALUE_REF_PRESENT - test_rvalue_ref_support<cu_map_type>( "concurrent unordered map" ); - test_rvalue_ref_support<cu_multimap_type>( "concurrent unordered multimap" ); -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - - TestTypes(); - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_concurrent_unordered_set.cpp b/src/tbb/src/test/test_concurrent_unordered_set.cpp deleted file mode 100644 index 3f6638c73..000000000 --- a/src/tbb/src/test/test_concurrent_unordered_set.cpp +++ /dev/null @@ -1,227 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks.
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" -#if !(__TBB_TEST_SECONDARY && __TBB_CPP11_STD_PLACEHOLDERS_LINKAGE_BROKEN) - -#define __TBB_EXTRA_DEBUG 1 -#include "tbb/concurrent_unordered_set.h" -#include "harness_assert.h" - -#if __TBB_TEST_SECONDARY - -#include "harness_runtime_loader.h" - -#else // __TBB_TEST_SECONDARY -#if __TBB_INITIALIZER_LISTS_PRESENT -// These operator== are used implicitly in test_initializer_list.h. -// For some unknown reason clang is not able to find the if they a declared after the -// inclusion of test_initializer_list.h. -template<typename container_type> -bool equal_containers( container_type const& lhs, container_type const& rhs ); -template<typename T> -bool operator==(tbb::concurrent_unordered_set<T> const& lhs, tbb::concurrent_unordered_set<T> const& rhs) { - return equal_containers( lhs, rhs ); -} - -template<typename T> -bool operator==(tbb::concurrent_unordered_multiset<T> const& lhs, tbb::concurrent_unordered_multiset<T> const& rhs) { - return equal_containers( lhs, rhs ); -} -#endif /* __TBB_INITIALIZER_LISTS_PRESENT */ -#include "test_concurrent_unordered_common.h" - -typedef tbb::concurrent_unordered_set<int, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> MySet; -typedef tbb::concurrent_unordered_set<check_type<int>, tbb::tbb_hash<check_type<int> >, std::equal_to<check_type<int> >, MyAllocator> MyCheckedSet; -typedef tbb::concurrent_unordered_multiset<int, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> MyMultiSet; -typedef tbb::concurrent_unordered_multiset<check_type<int>, tbb::tbb_hash<check_type<int> >, std::equal_to<check_type<int> >, MyAllocator> MyCheckedMultiSet; - -template<> -class AssignBody<MySet>: NoAssign{ - MySet &table; -public: - AssignBody( MySet &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - table.insert( i ); - } -}; - -template<> -class AssignBody<MyCheckedSet>: NoAssign{ - MyCheckedSet &table; -public: - AssignBody( MyCheckedSet &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - table.insert( check_type<int>( i ) ); - } -}; - -// multiset: for i, inserts i i%3+1 times -template<> -class AssignBody<MyMultiSet>: NoAssign{ - MyMultiSet &table; -public: - AssignBody( MyMultiSet &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - int num = i % 3 + 1; - for ( int j = 0; j < num; ++j ) { 
- table.insert( i ); - } - } -}; - -// multiset: for i, inserts i i%3+1 times -template<> -class AssignBody<MyCheckedMultiSet>: NoAssign{ - MyCheckedMultiSet &table; -public: - AssignBody( MyCheckedMultiSet &t ) : NoAssign( ), table( t ) {} - void operator()( int i ) const { - int num = i % 3 + 1; - for ( int j = 0; j < num; ++j ) { - table.insert( i ); - } - } -}; - -#if __TBB_CPP11_RVALUE_REF_PRESENT -struct cu_set_type : unordered_move_traits_base { - template<typename element_type, typename allocator_type> - struct apply { - typedef tbb::concurrent_unordered_set<element_type, tbb::tbb_hash<element_type>, std::equal_to<element_type>, allocator_type > type; - }; - - typedef FooIterator init_iterator_type; -}; - -struct cu_multiset_type : unordered_move_traits_base { - template<typename element_type, typename allocator_type> - struct apply { - typedef tbb::concurrent_unordered_multiset<element_type, tbb::tbb_hash<element_type>, std::equal_to<element_type>, allocator_type > type; - }; - - typedef FooIterator init_iterator_type; -}; -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - -template <bool defCtorPresent, typename value_type> -void TestTypesSet( const std::list<value_type> &lst ) { - TypeTester< defCtorPresent, tbb::concurrent_unordered_set<value_type, tbb::tbb_hash<value_type>, Harness::IsEqual>, - tbb::concurrent_unordered_set< value_type, tbb::tbb_hash<value_type>, Harness::IsEqual, debug_allocator<value_type> > >( lst ); - TypeTester< defCtorPresent, tbb::concurrent_unordered_multiset<value_type, tbb::tbb_hash<value_type>, Harness::IsEqual>, - tbb::concurrent_unordered_multiset< value_type, tbb::tbb_hash<value_type>, Harness::IsEqual, debug_allocator<value_type> > >( lst ); -} - -void TestTypes( ) { - const int NUMBER = 10; - - std::list<int> arrInt; - for ( int i = 0; i<NUMBER; ++i ) arrInt.push_back( i ); - TestTypesSet</*defCtorPresent = */true>( arrInt ); - - std::list< tbb::atomic<int> > arrTbb(NUMBER); - int seq = 0; - for ( std::list< tbb::atomic<int> >::iterator it = arrTbb.begin(); it != arrTbb.end(); ++it, ++seq ) *it = seq; - TestTypesSet</*defCtorPresent = */true>( arrTbb ); - -#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT - std::list< std::reference_wrapper<int> > arrRef; - for ( std::list<int>::iterator it = arrInt.begin( ); it != arrInt.end( ); ++it ) - arrRef.push_back( std::reference_wrapper<int>(*it) ); - TestTypesSet</*defCtorPresent = */false>( arrRef ); -#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT */ - -#if __TBB_CPP11_SMART_POINTERS_PRESENT - std::list< std::shared_ptr<int> > arrShr; - for ( int i = 0; i<NUMBER; ++i ) arrShr.push_back( std::make_shared<int>( i ) ); - TestTypesSet</*defCtorPresent = */true>( arrShr ); - - std::list< std::weak_ptr<int> > arrWk; - std::copy( arrShr.begin( ), arrShr.end( ), std::back_inserter( arrWk ) ); - TestTypesSet</*defCtorPresent = */true>( arrWk ); -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ -} -#endif // __TBB_TEST_SECONDARY - -#if !__TBB_TEST_SECONDARY -#define INITIALIZATION_TIME_TEST_NAMESPACE initialization_time_test -#define TEST_INITIALIZATION_TIME_OPERATIONS_NAME test_initialization_time_operations -void test_initialization_time_operations_external(); -#else -#define INITIALIZATION_TIME_TEST_NAMESPACE initialization_time_test_external -#define TEST_INITIALIZATION_TIME_OPERATIONS_NAME test_initialization_time_operations_external -#endif - -namespace INITIALIZATION_TIME_TEST_NAMESPACE { - tbb::concurrent_unordered_set<int> static_init_time_set; - int any_non_zero_value = 89432; - bool static_init_time_inserted = 
(static_init_time_set.insert( any_non_zero_value )).second; - bool static_init_time_found = ((static_init_time_set.find( any_non_zero_value )) != static_init_time_set.end( )); -} -void TEST_INITIALIZATION_TIME_OPERATIONS_NAME( ) { - using namespace INITIALIZATION_TIME_TEST_NAMESPACE; -#define LOCATION ",in function: " __TBB_STRING(TEST_INITIALIZATION_TIME_OPERATIONS_NAME) - ASSERT( static_init_time_inserted, "failed to insert an item during initialization of global objects" LOCATION ); - ASSERT( static_init_time_found, "failed to find an item during initialization of global objects" LOCATION ); - - bool static_init_time_found_in_main = ((static_init_time_set.find( any_non_zero_value )) != static_init_time_set.end( )); - ASSERT( static_init_time_found_in_main, "failed to find during main() an item inserted during initialization of global objects" LOCATION ); -#undef LOCATION -} - -#if !__TBB_TEST_SECONDARY -int TestMain() { - test_machine( ); - - test_basic<MySet>( "concurrent unordered Set" ); - test_concurrent<MySet>("concurrent unordered Set"); - test_basic<MyMultiSet>("concurrent unordered MultiSet"); - test_concurrent<MyMultiSet>( "concurrent unordered MultiSet" ); - test_concurrent<MyMultiSet>( "concurrent unordered MultiSet asymptotic", true ); - - { Check<MyCheckedSet::value_type> checkit; test_basic<MyCheckedSet>( "concurrent_unordered_set (checked)" ); } - { Check<MyCheckedSet::value_type> checkit; test_concurrent<MyCheckedSet>( "concurrent unordered set (checked)" ); } - - { Check<MyCheckedMultiSet::value_type> checkit; test_basic<MyCheckedMultiSet>("concurrent_unordered_multiset (checked)"); } - { Check<MyCheckedMultiSet::value_type> checkit; test_concurrent<MyCheckedMultiSet>( "concurrent unordered multiset (checked)" ); } - - test_initialization_time_operations( ); -#if !__TBB_CPP11_STD_PLACEHOLDERS_LINKAGE_BROKEN - test_initialization_time_operations_external( ); -#else - REPORT( "Known issue: global objects initialization time tests skipped.\n" ); -#endif //!__TBB_CPP11_STD_PLACEHOLDERS_LINKING_BROKEN - -#if __TBB_INITIALIZER_LISTS_PRESENT - TestInitList< tbb::concurrent_unordered_set<int>, - tbb::concurrent_unordered_multiset<int> >( {1,2,3,4,5} ); -#endif - -#if __TBB_CPP11_RVALUE_REF_PRESENT - test_rvalue_ref_support<cu_set_type>( "concurrent unordered set" ); - test_rvalue_ref_support<cu_multiset_type>( "concurrent unordered multiset" ); -#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ - - TestTypes(); - - return Harness::Done; -} -#endif //#if !__TBB_TEST_SECONDARY -#endif //!(__TBB_TEST_SECONDARY && __TBB_CPP11_STD_PLACEHOLDERS_LINKING_BROKEN) diff --git a/src/tbb/src/test/test_concurrent_vector.cpp b/src/tbb/src/test/test_concurrent_vector.cpp deleted file mode 100644 index 96e29da8b..000000000 --- a/src/tbb/src/test/test_concurrent_vector.cpp +++ /dev/null @@ -1,1744 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/concurrent_vector.h" -#include "tbb/tbb_allocator.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_exception.h" -#include <cstdio> -#include <cstdlib> -#include <functional> -#include <vector> -#include <numeric> -#include "harness_report.h" -#include "harness_assert.h" -#include "harness_allocator.h" -#include "harness_defs.h" -#include "test_container_move_support.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4800) -#endif - -#if TBB_USE_EXCEPTIONS -static bool known_issue_verbose = false; -#define KNOWN_ISSUE(msg) if(!known_issue_verbose) known_issue_verbose = true, REPORT(msg) -#endif /* TBB_USE_EXCEPTIONS */ - -inline void NextSize( int& s ) { - if( s<=32 ) ++s; - else s += s/10; -} - -//! Check vector have expected size and filling -template<typename vector_t> -static void CheckVector( const vector_t& cv, size_t expected_size, size_t old_size ) { - ASSERT( cv.capacity()>=expected_size, NULL ); - ASSERT( cv.size()==expected_size, NULL ); - ASSERT( cv.empty()==(expected_size==0), NULL ); - for( int j=0; j<int(expected_size); ++j ) { - if( cv[j].bar()!=~j ) - REPORT("ERROR on line %d for old_size=%ld expected_size=%ld j=%d\n",__LINE__,long(old_size),long(expected_size),j); - } -} - -//! Test of assign, grow, copying with various sizes -void TestResizeAndCopy() { - typedef static_counting_allocator<debug_allocator<Foo,std::allocator>, std::size_t> allocator_t; - typedef tbb::concurrent_vector<Foo, allocator_t> vector_t; - allocator_t::init_counters(); - for( int old_size=0; old_size<=128; NextSize( old_size ) ) { - for( int new_size=0; new_size<=1280; NextSize( new_size ) ) { - size_t count = FooCount; - vector_t v; - ASSERT( count==FooCount, NULL ); - v.assign(old_size/2, Foo() ); - ASSERT( count+old_size/2==FooCount, NULL ); - for( int j=0; j<old_size/2; ++j ) - ASSERT( v[j].state == Foo::CopyInitialized, NULL); - v.assign(FooIterator(0), FooIterator(old_size)); - v.resize(new_size, Foo(33) ); - ASSERT( count+new_size==FooCount, NULL ); - for( int j=0; j<new_size; ++j ) { - int expected = j<old_size ? 
j : 33; - if( v[j].bar()!=expected ) - REPORT("ERROR on line %d for old_size=%ld new_size=%ld v[%ld].bar()=%d != %d\n",__LINE__,long(old_size),long(new_size),long(j),v[j].bar(), expected); - } - ASSERT( v.size()==size_t(new_size), NULL ); - for( int j=0; j<new_size; ++j ) { - v[j].bar() = ~j; - } - const vector_t& cv = v; - // Try copy constructor - vector_t copy_of_v(cv); - CheckVector(cv,new_size,old_size); - ASSERT( !(v != copy_of_v), NULL ); - v.clear(); - ASSERT( v.empty(), NULL ); - swap(v, copy_of_v); - ASSERT( copy_of_v.empty(), NULL ); - CheckVector(v,new_size,old_size); - } - } - ASSERT( allocator_t::items_allocated == allocator_t::items_freed, NULL); - ASSERT( allocator_t::allocations == allocator_t::frees, NULL); -} - -//! Test reserve, compact, capacity -void TestCapacity() { - typedef static_counting_allocator<debug_allocator<Foo,tbb::cache_aligned_allocator>, std::size_t> allocator_t; - typedef tbb::concurrent_vector<Foo, allocator_t> vector_t; - allocator_t::init_counters(); - for( size_t old_size=0; old_size<=11000; old_size=(old_size<5 ? old_size+1 : 3*old_size) ) { - for( size_t new_size=0; new_size<=11000; new_size=(new_size<5 ? new_size+1 : 3*new_size) ) { - size_t count = FooCount; - { - vector_t v; v.reserve(old_size); - ASSERT( v.capacity()>=old_size, NULL ); - v.reserve( new_size ); - ASSERT( v.capacity()>=old_size, NULL ); - ASSERT( v.capacity()>=new_size, NULL ); - ASSERT( v.empty(), NULL ); - size_t fill_size = 2*new_size; - for( size_t i=0; i<fill_size; ++i ) { - ASSERT( size_t(FooCount)==count+i, NULL ); - size_t j = v.grow_by(1) - v.begin(); - ASSERT( j==i, NULL ); - v[j].bar() = int(~j); - } - vector_t copy_of_v(v); // should allocate first segment with same size as for shrink_to_fit() - if(__TBB_Log2(/*reserved size*/old_size|1) > __TBB_Log2(fill_size|1) ) - ASSERT( v.capacity() != copy_of_v.capacity(), NULL ); - v.shrink_to_fit(); - ASSERT( v.capacity() == copy_of_v.capacity(), NULL ); - CheckVector(v, new_size*2, old_size); // check vector correctness - ASSERT( v==copy_of_v, NULL ); // TODO: check also segments layout equality - } - ASSERT( FooCount==count, NULL ); - } - } - ASSERT( allocator_t::items_allocated == allocator_t::items_freed, NULL); - ASSERT( allocator_t::allocations == allocator_t::frees, NULL); -} - -struct AssignElement { - typedef tbb::concurrent_vector<int>::range_type::iterator iterator; - iterator base; - void operator()( const tbb::concurrent_vector<int>::range_type& range ) const { - for( iterator i=range.begin(); i!=range.end(); ++i ) { - if( *i!=0 ) - REPORT("ERROR for v[%ld]\n", long(i-base)); - *i = int(i-base); - } - } - AssignElement( iterator base_ ) : base(base_) {} -}; - -struct CheckElement { - typedef tbb::concurrent_vector<int>::const_range_type::iterator iterator; - iterator base; - void operator()( const tbb::concurrent_vector<int>::const_range_type& range ) const { - for( iterator i=range.begin(); i!=range.end(); ++i ) - if( *i != int(i-base) ) - REPORT("ERROR for v[%ld]\n", long(i-base)); - } - CheckElement( iterator base_ ) : base(base_) {} -}; - -#include "tbb/tick_count.h" -#include "tbb/parallel_for.h" -#include "harness.h" - -//! 
Test parallel access by iterators -void TestParallelFor( int nthread ) { - typedef tbb::concurrent_vector<int> vector_t; - vector_t v; - v.resize(N); - tbb::tick_count t0 = tbb::tick_count::now(); - REMARK("Calling parallel_for with %ld threads\n",long(nthread)); - tbb::parallel_for( v.range(10000), AssignElement(v.begin()) ); - tbb::tick_count t1 = tbb::tick_count::now(); - const vector_t& u = v; - tbb::parallel_for( u.range(10000), CheckElement(u.begin()) ); - tbb::tick_count t2 = tbb::tick_count::now(); - REMARK("Time for parallel_for: assign time = %8.5f, check time = %8.5f\n", - (t1-t0).seconds(),(t2-t1).seconds()); - for( long i=0; size_t(i)<v.size(); ++i ) - if( v[i]!=i ) - REPORT("ERROR for v[%ld]\n", i); -} - -template<typename Iterator1, typename Iterator2> -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j); - ASSERT( i==j, NULL ); - ASSERT( !(i!=j), NULL ); - Iterator1 k; - k = j; - ASSERT( k==j, NULL ); - ASSERT( !(k!=j), NULL ); -} - -template<typename Range1, typename Range2> -void TestRangeAssignment( Range2 r2 ) { - Range1 r1(r2); r1 = r2; -} - -template<typename Iterator, typename T> -void TestIteratorTraits() { - AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) ); - AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) ); - AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) ); - AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::random_access_iterator_tag*>(0) ); - T x; - typename Iterator::reference xr = x; - typename Iterator::pointer xp = &x; - ASSERT( &xr==xp, NULL ); -} - -template<typename Vector, typename Iterator> -void CheckConstIterator( const Vector& u, int i, const Iterator& cp ) { - typename Vector::const_reference pref = *cp; - if( pref.bar()!=i ) - REPORT("ERROR for u[%ld] using const_iterator\n", long(i)); - typename Vector::difference_type delta = cp-u.begin(); - ASSERT( delta==i, NULL ); - if( u[i].bar()!=i ) - REPORT("ERROR for u[%ld] using subscripting\n", long(i)); - ASSERT( u.begin()[i].bar()==i, NULL ); -} - -template<typename Iterator1, typename Iterator2, typename V> -void CheckIteratorComparison( V& u ) { - V u2 = u; - Iterator1 i = u.begin(); - - for( int i_count=0; i_count<100; ++i_count ) { - Iterator2 j = u.begin(); - Iterator2 i2 = u2.begin(); - for( int j_count=0; j_count<100; ++j_count ) { - ASSERT( (i==j)==(i_count==j_count), NULL ); - ASSERT( (i!=j)==(i_count!=j_count), NULL ); - ASSERT( (i-j)==(i_count-j_count), NULL ); - ASSERT( (i<j)==(i_count<j_count), NULL ); - ASSERT( (i>j)==(i_count>j_count), NULL ); - ASSERT( (i<=j)==(i_count<=j_count), NULL ); - ASSERT( (i>=j)==(i_count>=j_count), NULL ); - ASSERT( !(i==i2), NULL ); - ASSERT( i!=i2, NULL ); - ++j; - ++i2; - } - ++i; - } -} - -template<typename Vector, typename T> -void TestGrowToAtLeastWithSourceParameter(T const& src){ - static const size_t vector_size = 10; - Vector v1(vector_size,src); - Vector v2; - v2.grow_to_at_least(vector_size,src); - ASSERT(v1==v2,"grow_to_at_least(vector_size,src) did not properly initialize new elements ?"); -} -//! Test sequential iterators for vector type V. -/** Also does timing. 
*/ -template<typename T> -void TestSequentialFor() { - typedef tbb::concurrent_vector<FooWithAssign> V; - V v(N); - ASSERT(v.grow_by(0) == v.grow_by(0, FooWithAssign()), NULL); - - // Check iterator - tbb::tick_count t0 = tbb::tick_count::now(); - typename V::iterator p = v.begin(); - ASSERT( !(*p).is_const(), NULL ); - ASSERT( !p->is_const(), NULL ); - for( int i=0; size_t(i)<v.size(); ++i, ++p ) { - if( (*p).state!=Foo::DefaultInitialized ) - REPORT("ERROR for v[%ld]\n", long(i)); - typename V::reference pref = *p; - pref.bar() = i; - typename V::difference_type delta = p-v.begin(); - ASSERT( delta==i, NULL ); - ASSERT( -delta<=0, "difference type not signed?" ); - } - tbb::tick_count t1 = tbb::tick_count::now(); - - // Check const_iterator going forwards - const V& u = v; - typename V::const_iterator cp = u.begin(); - ASSERT( cp == v.cbegin(), NULL ); - ASSERT( (*cp).is_const(), NULL ); - ASSERT( cp->is_const(), NULL ); - ASSERT( *cp == v.front(), NULL); - for( int i=0; size_t(i)<u.size(); ++i ) { - CheckConstIterator(u,i,cp); - V::const_iterator &cpr = ++cp; - ASSERT( &cpr == &cp, "pre-increment not returning a reference?"); - } - tbb::tick_count t2 = tbb::tick_count::now(); - REMARK("Time for serial for: assign time = %8.5f, check time = %8.5f\n", - (t1-t0).seconds(),(t2-t1).seconds()); - - // Now go backwards - cp = u.end(); - ASSERT( cp == v.cend(), NULL ); - for( int i=int(u.size()); i>0; ) { - --i; - V::const_iterator &cpr = --cp; - ASSERT( &cpr == &cp, "pre-decrement not returning a reference?"); - if( i>0 ) { - typename V::const_iterator cp_old = cp--; - intptr_t here = (*cp_old).bar(); - ASSERT( here==u[i].bar(), NULL ); - typename V::const_iterator cp_new = cp++; - intptr_t prev = (*cp_new).bar(); - ASSERT( prev==u[i-1].bar(), NULL ); - } - CheckConstIterator(u,i,cp); - } - - // Now go forwards and backwards - ptrdiff_t k = 0; - cp = u.begin(); - for( size_t i=0; i<u.size(); ++i ) { - CheckConstIterator(u,int(k),cp); - typename V::difference_type delta = i*3 % u.size(); - if( 0<=k+delta && size_t(k+delta)<u.size() ) { - V::const_iterator &cpr = (cp += delta); - ASSERT( &cpr == &cp, "+= not returning a reference?"); - k += delta; - } - delta = i*7 % u.size(); - if( 0<=k-delta && size_t(k-delta)<u.size() ) { - if( i&1 ) { - V::const_iterator &cpr = (cp -= delta); - ASSERT( &cpr == &cp, "-= not returning a reference?"); - } else - cp = cp - delta; // Test operator- - k -= delta; - } - } - - for( int i=0; size_t(i)<u.size(); i=(i<50?i+1:i*3) ) - for( int j=-i; size_t(i+j)<u.size(); j=(j<50?j+1:j*5) ) { - ASSERT( (u.begin()+i)[j].bar()==i+j, NULL ); - ASSERT( (v.begin()+i)[j].bar()==i+j, NULL ); - ASSERT((v.cbegin()+i)[j].bar()==i+j, NULL ); - ASSERT( (i+u.begin())[j].bar()==i+j, NULL ); - ASSERT( (i+v.begin())[j].bar()==i+j, NULL ); - ASSERT((i+v.cbegin())[j].bar()==i+j, NULL ); - } - - CheckIteratorComparison<typename V::iterator, typename V::iterator>(v); - CheckIteratorComparison<typename V::iterator, typename V::const_iterator>(v); - CheckIteratorComparison<typename V::const_iterator, typename V::iterator>(v); - CheckIteratorComparison<typename V::const_iterator, typename V::const_iterator>(v); - - TestIteratorAssignment<typename V::const_iterator>( u.begin() ); - TestIteratorAssignment<typename V::const_iterator>( v.begin() ); - TestIteratorAssignment<typename V::const_iterator>( v.cbegin() ); - TestIteratorAssignment<typename V::iterator>( v.begin() ); - // doesn't compile as expected: TestIteratorAssignment<typename V::iterator>( u.begin() ); - - 
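// Conversions are intentionally one-directional: iterator converts to
// const_iterator but not the reverse, which is why the line above is
// expected not to compile. The range assignments below check the same
// property for range_type versus const_range_type.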
TestRangeAssignment<typename V::const_range_type>( u.range() ); - TestRangeAssignment<typename V::const_range_type>( v.range() ); - TestRangeAssignment<typename V::range_type>( v.range() ); - // doesn't compile as expected: TestRangeAssignment<typename V::range_type>( u.range() ); - - // Check reverse_iterator - typename V::reverse_iterator rp = v.rbegin(); - for( size_t i=v.size(); i>0; --i, ++rp ) { - typename V::reference pref = *rp; - ASSERT( size_t(pref.bar())==i-1, NULL ); - ASSERT( rp!=v.rend(), NULL ); - } - ASSERT( rp==v.rend(), NULL ); - - // Check const_reverse_iterator - typename V::const_reverse_iterator crp = u.rbegin(); - ASSERT( crp == v.crbegin(), NULL ); - ASSERT( *crp == v.back(), NULL); - for( size_t i=v.size(); i>0; --i, ++crp ) { - typename V::const_reference cpref = *crp; - ASSERT( size_t(cpref.bar())==i-1, NULL ); - ASSERT( crp!=u.rend(), NULL ); - } - ASSERT( crp == u.rend(), NULL ); - ASSERT( crp == v.crend(), NULL ); - - TestIteratorAssignment<typename V::const_reverse_iterator>( u.rbegin() ); - TestIteratorAssignment<typename V::reverse_iterator>( v.rbegin() ); - - // test compliance with C++ Standard 2003, clause 23.1.1p9 - { - tbb::concurrent_vector<int> v1, v2(1, 100); - v1.assign(1, 100); ASSERT(v1 == v2, NULL); - ASSERT(v1.size() == 1 && v1[0] == 100, "used integral iterators"); - } - - // cross-allocator tests -#if !defined(_WIN64) || defined(_CPPLIB_VER) - typedef local_counting_allocator<std::allocator<int>, size_t> allocator1_t; - typedef tbb::cache_aligned_allocator<void> allocator2_t; - typedef tbb::concurrent_vector<FooWithAssign, allocator1_t> V1; - typedef tbb::concurrent_vector<FooWithAssign, allocator2_t> V2; - V1 v1( v ); // checking cross-allocator copying - V2 v2( 10 ); v2 = v1; // checking cross-allocator assignment - ASSERT( (v1 == v) && !(v2 != v), NULL); - ASSERT( !(v1 < v) && !(v2 > v), NULL); - ASSERT( (v1 <= v) && (v2 >= v), NULL); -#endif -} - -static const size_t Modulus = 7; - -namespace test_grow_to_at_least_helpers { - template<typename MyVector > - class GrowToAtLeast: NoAssign { - typedef typename MyVector::const_reference const_reference; - - const bool my_use_two_args_form ; - MyVector& my_vector; - const_reference my_init_from; - public: - void operator()( const tbb::blocked_range<size_t>& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - size_t n = my_vector.size(); - size_t req = (i % (2*n+1))+1; - - typename MyVector::iterator p; - Foo::State desired_state; - if (my_use_two_args_form){ - p = my_vector.grow_to_at_least(req,my_init_from); - desired_state = Foo::CopyInitialized; - }else{ - p = my_vector.grow_to_at_least(req); - desired_state = Foo::DefaultInitialized; - } - if( p-my_vector.begin() < typename MyVector::difference_type(req) ) - ASSERT( p->state == desired_state || p->state == Foo::ZeroInitialized, NULL ); - ASSERT( my_vector.size()>=req, NULL ); - } - } - GrowToAtLeast(bool use_two_args_form, MyVector& vector, const_reference init_from ) - : my_use_two_args_form(use_two_args_form), my_vector(vector), my_init_from(init_from) {} - }; -} - -template<bool use_two_arg_form> -void TestConcurrentGrowToAtLeastImpl() { - using namespace test_grow_to_at_least_helpers; - typedef static_counting_allocator< tbb::zero_allocator<Foo> > MyAllocator; - typedef tbb::concurrent_vector<Foo, MyAllocator> MyVector; - Foo copy_from; - MyAllocator::init_counters(); - MyVector v(2, Foo(), MyAllocator()); - for( size_t s=1; s<1000; s*=10 ) { - tbb::parallel_for( tbb::blocked_range<size_t>(0,10000*s,s), 
GrowToAtLeast<MyVector>(use_two_arg_form, v, copy_from), tbb::simple_partitioner() ); - } - v.clear(); - ASSERT( 0 == v.get_allocator().frees, NULL); - v.shrink_to_fit(); - size_t items_allocated = v.get_allocator().items_allocated, - items_freed = v.get_allocator().items_freed; - size_t allocations = v.get_allocator().allocations, - frees = v.get_allocator().frees; - ASSERT( items_allocated == items_freed, NULL); - ASSERT( allocations == frees, NULL); -} - -void TestConcurrentGrowToAtLeast() { - TestConcurrentGrowToAtLeastImpl<false>(); - TestConcurrentGrowToAtLeastImpl<true>(); -} - -struct grain_map: NoAssign { - enum grow_method_enum { - grow_by_range = 1, - grow_by_default, - grow_by_copy, - grow_by_init_list, - push_back, - push_back_move, - emplace_back, - last_method - }; - - struct range_part { - size_t number_of_parts; - grain_map::grow_method_enum method; - bool distribute; - Foo::State expected_element_state; - }; - - const std::vector<range_part> distributed; - const std::vector<range_part> batched; - const size_t total_number_of_parts; - - grain_map(const range_part* begin, const range_part* end) - : distributed(separate(begin,end, &distributed::is_not)) - , batched(separate(begin,end, &distributed::is_yes)) - , total_number_of_parts(std::accumulate(begin, end, (size_t)0, &sum_number_of_parts::sum)) - {} - -private: - struct sum_number_of_parts{ - static size_t sum(size_t accumulator, grain_map::range_part const& rp){ return accumulator + rp.number_of_parts;} - }; - - template <typename functor_t> - static std::vector<range_part> separate(const range_part* begin, const range_part* end, functor_t f){ - std::vector<range_part> part; - part.reserve(std::distance(begin,end)); - //copy all that false==f(*it) - std::remove_copy_if(begin, end, std::back_inserter(part), f); - - return part; - } - - struct distributed { - static bool is_not(range_part const& rp){ return !rp.distribute;} - static bool is_yes(range_part const& rp){ return rp.distribute;} - }; -}; - -//! 
Test concurrent invocations of method concurrent_vector::grow_by -template<typename MyVector> -class GrowBy: NoAssign { - MyVector& my_vector; - const grain_map& my_grain_map; - size_t my_part_weight; -public: - void operator()( const tbb::blocked_range<size_t>& range ) const { - ASSERT( range.begin() < range.end(), NULL ); - - size_t current_adding_index_in_cvector = range.begin(); - - for(size_t index=0; index < my_grain_map.batched.size(); ++index){ - const grain_map::range_part& batch_part = my_grain_map.batched[index]; - const size_t number_of_items_to_add = batch_part.number_of_parts * my_part_weight; - const size_t end = current_adding_index_in_cvector + number_of_items_to_add; - - switch(batch_part.method){ - case grain_map::grow_by_range : { - my_vector.grow_by(FooIterator(current_adding_index_in_cvector),FooIterator(end)); - } break; - case grain_map::grow_by_default : { - typename MyVector::iterator const s = my_vector.grow_by(number_of_items_to_add); - for( size_t k = 0; k < number_of_items_to_add; ++k ) - s[k].bar() = current_adding_index_in_cvector + k; - } break; -#if __TBB_INITIALIZER_LISTS_PRESENT - case grain_map::grow_by_init_list : { - FooIterator curr(current_adding_index_in_cvector); - for ( size_t k = 0; k < number_of_items_to_add; ++k ) { - if ( k + 4 < number_of_items_to_add ) { - my_vector.grow_by( { *curr++, *curr++, *curr++, *curr++, *curr++ } ); - k += 4; - } else { - my_vector.grow_by( { *curr++ } ); - } - } - ASSERT( curr == FooIterator(end), NULL ); - } break; -#endif - default : { ASSERT(false, "using unimplemented method of batch add in ConcurrentGrow test.");} break; - }; - - current_adding_index_in_cvector = end; - } - - std::vector<size_t> items_left_to_add(my_grain_map.distributed.size()); - for (size_t i=0; i<my_grain_map.distributed.size(); ++i ){ - items_left_to_add[i] = my_grain_map.distributed[i].number_of_parts * my_part_weight; - } - - for (;current_adding_index_in_cvector < range.end(); ++current_adding_index_in_cvector){ - size_t method_index = current_adding_index_in_cvector % my_grain_map.distributed.size(); - - if (! 
items_left_to_add[method_index]) {
- struct not_zero{
- static bool is(size_t items_to_add){ return items_to_add;}
- };
- method_index = std::distance(items_left_to_add.begin(), std::find_if(items_left_to_add.begin(), items_left_to_add.end(), &not_zero::is));
- ASSERT(method_index < my_grain_map.distributed.size(), "incorrect test setup - wrong expected distribution: left free space but no elements to add?");
- };
-
- ASSERT(items_left_to_add[method_index], "logic error?");
- const grain_map::range_part& distributed_part = my_grain_map.distributed[method_index];
-
- typename MyVector::iterator r;
- typename MyVector::value_type source;
- source.bar() = current_adding_index_in_cvector;
-
- switch(distributed_part.method){
- case grain_map::grow_by_default : {
- (r = my_vector.grow_by(1))->bar() = current_adding_index_in_cvector;
- } break;
- case grain_map::grow_by_copy : {
- r = my_vector.grow_by(1, source);
- } break;
- case grain_map::push_back : {
- r = my_vector.push_back(source);
- } break;
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- case grain_map::push_back_move : {
- r = my_vector.push_back(std::move(source));
- } break;
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- case grain_map::emplace_back : {
- r = my_vector.emplace_back(current_adding_index_in_cvector);
- } break;
-#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
-
- default : { ASSERT(false, "using unimplemented method of batch add in ConcurrentGrow test.");} break;
- };
-
- ASSERT( static_cast<size_t>(r->bar()) == current_adding_index_in_cvector, NULL );
- }
- }
-
- GrowBy( MyVector& vector, const grain_map& m, size_t part_weight )
- : my_vector(vector)
- , my_grain_map(m)
- , my_part_weight(part_weight)
- {
- }
-};
-
-const grain_map::range_part concurrent_grow_single_range_map [] = {
-// number_of_parts, method, distribute, expected_element_state
- {3, grain_map::grow_by_range, false,
- #if __TBB_CPP11_RVALUE_REF_PRESENT
- Foo::MoveInitialized
- #else
- Foo::CopyInitialized
- #endif
- },
-#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
- {1, grain_map::grow_by_init_list, false, Foo::CopyInitialized},
-#endif
- {2, grain_map::grow_by_default, false, Foo::DefaultInitialized},
- {1, grain_map::grow_by_default, true, Foo::DefaultInitialized},
- {1, grain_map::grow_by_copy, true, Foo::CopyInitialized},
- {1, grain_map::push_back, true, Foo::CopyInitialized},
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- {1, grain_map::push_back_move, true, Foo::MoveInitialized},
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- {1, grain_map::emplace_back, true, Foo::DirectInitialized},
-#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
-};
-
-//!
Test concurrent invocations of grow methods -void TestConcurrentGrowBy( int nthread ) { - - typedef static_counting_allocator<debug_allocator<Foo> > MyAllocator; - typedef tbb::concurrent_vector<Foo, MyAllocator> MyVector; - -#if __TBB_INITIALIZER_LISTS_PRESENT && __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN - static bool is_reported = false; - if ( !is_reported ) { - REPORT( "Known issue: concurrent tests of grow_by(std::initializer_list) are skipped.\n" ); - is_reported = true; - } -#endif - - MyAllocator::init_counters(); - { - grain_map m(concurrent_grow_single_range_map, Harness::end(concurrent_grow_single_range_map)); - - static const size_t desired_grain_size = 100; - - static const size_t part_weight = desired_grain_size / m.total_number_of_parts; - static const size_t grain_size = part_weight * m.total_number_of_parts; - static const size_t number_of_grains = 8; //this should be (power of two) in order to get minimal ranges equal to grain_size - static const size_t range_size = grain_size * number_of_grains; - - MyAllocator a; - MyVector v( a ); - tbb::parallel_for( tbb::blocked_range<size_t>(0,range_size,grain_size), GrowBy<MyVector>(v, m, part_weight), tbb::simple_partitioner() ); - ASSERT( v.size()==size_t(range_size), NULL ); - - // Verify that v is a permutation of 0..m - size_t inversions = 0, direct_inits = 0, def_inits = 0, copy_inits = 0, move_inits = 0; - std::vector<bool> found(range_size, 0); - for( size_t i=0; i<range_size; ++i ) { - if( v[i].state == Foo::DefaultInitialized ) ++def_inits; - else if( v[i].state == Foo::DirectInitialized ) ++direct_inits; - else if( v[i].state == Foo::CopyInitialized ) ++copy_inits; - else if( v[i].state == Foo::MoveInitialized ) ++move_inits; - else { - REMARK("i: %d ", i); - ASSERT( false, "v[i] seems not initialized"); - } - intptr_t index = v[i].bar(); - ASSERT( !found[index], NULL ); - found[index] = true; - if( i>0 ) - inversions += v[i].bar()<v[i-1].bar(); - } - for( size_t i=0; i<range_size; ++i ) { - ASSERT( found[i], NULL ); - ASSERT( nthread>1 || v[i].bar() == static_cast<intptr_t>(i), "sequential execution is wrong" ); - } - - REMARK("Initialization by default constructor: %d, by copy: %d, by move: %d\n", def_inits, copy_inits, move_inits); - - size_t expected_direct_inits = 0, expected_def_inits = 0, expected_copy_inits = 0, expected_move_inits = 0; - for (size_t i=0; i<Harness::array_length(concurrent_grow_single_range_map); ++i){ - const grain_map::range_part& rp =concurrent_grow_single_range_map[i]; - switch (rp.expected_element_state){ - case Foo::DefaultInitialized: { expected_def_inits += rp.number_of_parts ; } break; - case Foo::DirectInitialized: { expected_direct_inits += rp.number_of_parts ;} break; - case Foo::MoveInitialized: { expected_move_inits += rp.number_of_parts ;} break; - case Foo::CopyInitialized: { expected_copy_inits += rp.number_of_parts ;} break; - default: {ASSERT(false, "unexpected expected state");}break; - }; - } - - expected_def_inits *= part_weight * number_of_grains; - expected_move_inits *= part_weight * number_of_grains; - expected_copy_inits *= part_weight * number_of_grains; - expected_direct_inits *= part_weight * number_of_grains; - - ASSERT( def_inits == expected_def_inits , NULL); - ASSERT( copy_inits == expected_copy_inits , NULL); - ASSERT( move_inits == expected_move_inits , NULL); - ASSERT( direct_inits == expected_direct_inits , NULL); - - if( nthread>1 && inversions<range_size/20 ) - REPORT("Warning: not much concurrency in TestConcurrentGrowBy (%d inversions)\n", 
inversions); - } - //TODO: factor this into separate thing, as it seems to used in big number of tests - size_t items_allocated = MyAllocator::items_allocated, - items_freed = MyAllocator::items_freed; - size_t allocations = MyAllocator::allocations, - frees = MyAllocator::frees; - ASSERT( items_allocated == items_freed, NULL); - ASSERT( allocations == frees, NULL); -} - -template <typename Vector> -void test_grow_by_empty_range( Vector &v, typename Vector::value_type* range_begin_end ) { - const Vector v_copy = v; - ASSERT( v.grow_by( range_begin_end, range_begin_end ) == v.end(), "grow_by(empty_range) returned a wrong iterator." ); - ASSERT( v == v_copy, "grow_by(empty_range) has changed the vector." ); -} - -void TestSerialGrowByRange( bool segmented_vector ) { - tbb::concurrent_vector<int> v; - if ( segmented_vector ) { - v.reserve( 1 ); - } - int init_range[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; - ASSERT( v.grow_by( init_range, init_range + (Harness::array_length( init_range )) ) == v.begin(), "grow_by(I,I) returned a wrong iterator." ); - ASSERT( std::equal( v.begin(), v.end(), init_range ), "grow_by(I,I) did not properly copied all elements ?" ); - test_grow_by_empty_range( v, init_range ); - test_grow_by_empty_range( v, (int*)NULL ); -} - -//TODO: move this to more appropriate place, smth like test_harness.cpp -void TestArrayLength(){ - int five_element_array[5] = {0}; - ASSERT(Harness::array_length(five_element_array)==5,"array_length failed to determine length of non empty non dynamic array"); -} - -#if __TBB_INITIALIZER_LISTS_PRESENT -#include "test_initializer_list.h" - -struct test_grow_by { - template<typename container_type, typename element_type> - static void do_test( std::initializer_list<element_type> const& il, container_type const& expected ) { - container_type vd; - vd.grow_by( il ); - ASSERT( vd == expected, "grow_by with an initializer list failed" ); - } -}; - -void TestInitList() { - REMARK( "testing initializer_list methods \n" ); - using namespace initializer_list_support_tests; - TestInitListSupport<tbb::concurrent_vector<char>, test_grow_by>( { 1, 2, 3, 4, 5 } ); - TestInitListSupport<tbb::concurrent_vector<int>, test_grow_by>( {} ); -} -#endif //if __TBB_INITIALIZER_LISTS_PRESENT - -#if __TBB_RANGE_BASED_FOR_PRESENT -#include "test_range_based_for.h" - -void TestRangeBasedFor(){ - using namespace range_based_for_support_tests; - - REMARK("testing range based for loop compatibility \n"); - typedef tbb::concurrent_vector<int> c_vector; - c_vector a_c_vector; - - const int sequence_length = 100; - for (int i =1; i<= sequence_length; ++i){ - a_c_vector.push_back(i); - } - - ASSERT( range_based_for_accumulate(a_c_vector, std::plus<int>(), 0) == gauss_summ_of_int_sequence(sequence_length), "incorrect accumulated value generated via range based for ?"); -} -#endif //if __TBB_RANGE_BASED_FOR_PRESENT - -#if TBB_USE_EXCEPTIONS -#endif //TBB_USE_EXCEPTIONS - -#if __TBB_CPP11_RVALUE_REF_PRESENT -namespace move_semantics_helpers{ - struct move_only_type:NoCopy{ - const int* my_pointer; - move_only_type(move_only_type && other): my_pointer(other.my_pointer){other.my_pointer=NULL;} - explicit move_only_type(const int* value): my_pointer(value) {} - }; -} - -void TestPushBackMoveOnlyContainee(){ - using namespace move_semantics_helpers; - typedef tbb::concurrent_vector<move_only_type > vector_t; - vector_t v; - static const int magic_number =7; - move_only_type src(&magic_number); - v.push_back(std::move(src)); - ASSERT(v[0].my_pointer == &magic_number,"item was incorrectly 
moved during push_back?"); - ASSERT(src.my_pointer == NULL,"item was incorrectly moved during push_back?"); -} - -namespace emplace_helpers{ - struct wrapper_type:NoCopy{ - int value1; - int value2; - explicit wrapper_type(int v1, int v2) : value1 (v1), value2(v2) {} - friend bool operator==(const wrapper_type& lhs, const wrapper_type& rhs){ - return (lhs.value1 == rhs.value1) && (lhs.value2 == rhs.value2 ); - } - }; -} -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -//TODO: extend the test to number of types e.g. std::string -void TestEmplaceBack(){ - using namespace emplace_helpers; - typedef tbb::concurrent_vector<wrapper_type > vector_t; - vector_t v; - v.emplace_back(1,2); - ASSERT(v[0] == wrapper_type(1,2),"incorrectly in-place constructed item during emplace_back?"); -} -#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -//! Test the assignment operator and swap -void TestAssign() { - typedef tbb::concurrent_vector<FooWithAssign, local_counting_allocator<std::allocator<FooWithAssign>, size_t > > vector_t; - local_counting_allocator<std::allocator<FooWithAssign>, size_t > init_alloc; - init_alloc.allocations = 100; - for( int dst_size=1; dst_size<=128; NextSize( dst_size ) ) { - for( int src_size=2; src_size<=128; NextSize( src_size ) ) { - vector_t u(FooIterator(0), FooIterator(src_size), init_alloc); - for( int i=0; i<src_size; ++i ) - ASSERT( u[i].bar()==i, NULL ); - vector_t v(dst_size, FooWithAssign(), init_alloc); - for( int i=0; i<dst_size; ++i ) { - ASSERT( v[i].state==Foo::CopyInitialized, NULL ); - v[i].bar() = ~i; - } - ASSERT( v != u, NULL); - v.swap(u); - CheckVector(u, dst_size, src_size); - u.swap(v); - // using assignment - v = u; - ASSERT( v == u, NULL); - u.clear(); - ASSERT( u.size()==0, NULL ); - ASSERT( v.size()==size_t(src_size), NULL ); - for( int i=0; i<src_size; ++i ) - ASSERT( v[i].bar()==i, NULL ); - ASSERT( 0 == u.get_allocator().frees, NULL); - u.shrink_to_fit(); // deallocate unused memory - size_t items_allocated = u.get_allocator().items_allocated, - items_freed = u.get_allocator().items_freed; - size_t allocations = u.get_allocator().allocations, - frees = u.get_allocator().frees + 100; - ASSERT( items_allocated == items_freed, NULL); - ASSERT( allocations == frees, NULL); - } - } -} - -struct c_vector_type : default_container_traits { - template<typename element_type, typename allocator_type> - struct apply{ - typedef tbb::concurrent_vector<element_type, allocator_type > type; - }; - - typedef FooIterator init_iterator_type; - enum{ expected_number_of_items_to_allocate_for_steal_move = 0 }; - - template<typename element_type, typename allocator_type, typename iterator> - static bool equal(tbb::concurrent_vector<element_type, allocator_type > const& c, iterator begin, iterator end){ - bool equal_sizes = (size_t)std::distance(begin, end) == c.size(); - return equal_sizes && std::equal(c.begin(), c.end(), begin); - } -}; - -#if __TBB_CPP11_RVALUE_REF_PRESENT -void TestSerialGrowByWithMoveIterators(){ - typedef default_stateful_fixture_make_helper<c_vector_type>::type fixture_t; - typedef fixture_t::container_t vector_t; - - fixture_t fixture("TestSerialGrowByWithMoveIterators"); - - vector_t dst(fixture.dst_allocator); - dst.grow_by(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end())); - - fixture.verify_content_deep_moved(dst); -} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT -// Test the comparison operators -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception 
handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <string> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -void TestComparison() { - std::string str[3]; str[0] = "abc"; - str[1].assign("cba"); - str[2].assign("abc"); // same as 0th - tbb::concurrent_vector<char> var[3]; - var[0].assign(str[0].begin(), str[0].end()); - var[1].assign(str[0].rbegin(), str[0].rend()); - var[2].assign(var[1].rbegin(), var[1].rend()); // same as 0th - for (int i = 0; i < 3; ++i) { - for (int j = 0; j < 3; ++j) { - ASSERT( (var[i] == var[j]) == (str[i] == str[j]), NULL ); - ASSERT( (var[i] != var[j]) == (str[i] != str[j]), NULL ); - ASSERT( (var[i] < var[j]) == (str[i] < str[j]), NULL ); - ASSERT( (var[i] > var[j]) == (str[i] > str[j]), NULL ); - ASSERT( (var[i] <= var[j]) == (str[i] <= str[j]), NULL ); - ASSERT( (var[i] >= var[j]) == (str[i] >= str[j]), NULL ); - } - } -} - -//------------------------------------------------------------------------ -// Regression test for problem where on oversubscription caused -// concurrent_vector::grow_by to run very slowly (TR#196). -//------------------------------------------------------------------------ - -#include "tbb/task_scheduler_init.h" -#include <math.h> - -typedef unsigned long Number; - -static tbb::concurrent_vector<Number> Primes; - -class FindPrimes { - bool is_prime( Number val ) const { - int limit, factor = 3; - if( val<5u ) - return val==2; - else { - limit = long(sqrtf(float(val))+0.5f); - while( factor<=limit && val % factor ) - ++factor; - return factor>limit; - } - } -public: - void operator()( const tbb::blocked_range<Number>& r ) const { - for( Number i=r.begin(); i!=r.end(); ++i ) { - if( i%2 && is_prime(i) ) { - Primes.push_back( i ); - } - } - } -}; - -double TimeFindPrimes( int nthread ) { - Primes.clear(); - Primes.reserve(1000000);// TODO: or compact()? - tbb::task_scheduler_init init(nthread); - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for( tbb::blocked_range<Number>(0,1000000,500), FindPrimes() ); - tbb::tick_count t1 = tbb::tick_count::now(); - return (t1-t0).seconds(); -} - -void TestFindPrimes() { - // Time fully subscribed run. - double t2 = TimeFindPrimes( tbb::task_scheduler_init::automatic ); - - // Time parallel run that is very likely oversubscribed. -#if _XBOX - double t128 = TimeFindPrimes(32); //XBOX360 can't handle too many threads -#else - double t128 = TimeFindPrimes(128); -#endif - REMARK("TestFindPrimes: t2==%g t128=%g k=%g\n", t2, t128, t128/t2); - - // We allow the 128-thread run a little extra time to allow for thread overhead. - // Theoretically, following test will fail on machine with >128 processors. - // But that situation is not going to come up in the near future, - // and the generalization to fix the issue is not worth the trouble. - if( t128 > 1.3*t2 ) { - REPORT("Warning: grow_by is pathetically slow: t2==%g t128=%g k=%g\n", t2, t128, t128/t2); - } -} - -//------------------------------------------------------------------------ -// Test compatibility with STL sort. 
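Before the test itself, here is the gist of what "compatibility with STL sort" requires in isolation: the container exposes random-access iterators, so std::sort can operate on it directly when no concurrent mutation is in flight. A minimal sketch, using tbb::concurrent_vector as TestSort() does (filled serially here):

    // std::sort works on concurrent_vector because its iterators are
    // random-access; sorting must only be done while no other thread
    // is growing the vector.
    #include <algorithm>
    #include <cassert>
    #include "tbb/concurrent_vector.h"

    int main() {
        tbb::concurrent_vector<int> v;
        for (int i = 9; i >= 0; --i)
            v.push_back(i);             // fill in descending order
        std::sort(v.begin(), v.end());  // legal: iterators are random-access
        for (int i = 0; i < 10; ++i)
            assert(v[i] == i);
        return 0;
    }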
-//------------------------------------------------------------------------ - -#include <algorithm> - -void TestSort() { - for( int n=0; n<100; n=n*3+1 ) { - tbb::concurrent_vector<int> array(n); - for( int i=0; i<n; ++i ) - array.at(i) = (i*7)%n; - std::sort( array.begin(), array.end() ); - for( int i=0; i<n; ++i ) - ASSERT( array[i]==i, NULL ); - } -} - -#if TBB_USE_EXCEPTIONS - -template<typename c_vector> -size_t get_early_size(c_vector & v){ - return v.grow_by(0) - v.begin(); -} - -void verify_c_vector_size(size_t size, size_t capacity, size_t early_size, const char * const test_name){ - ASSERT_IN_TEST( size <= capacity, "", test_name); - ASSERT_IN_TEST( early_size >= size, "", test_name); -} - -template<typename c_vector_t> -void verify_c_vector_size(c_vector_t & c_v, const char * const test_name){ - verify_c_vector_size(c_v.size(), c_v.capacity(), get_early_size(c_v), test_name); -} - -void verify_c_vector_capacity_is_below(size_t capacity, size_t high, const char * const test_name){ - ASSERT_IN_TEST(capacity > 0, "unexpected capacity", test_name); - ASSERT_IN_TEST(capacity < high, "unexpected capacity", test_name); -} - -template<typename vector_t> -void verify_last_segment_allocation_failed(vector_t const& victim, const char* const test_name){ - ASSERT_THROWS_IN_TEST(victim.at(victim.size()), std::range_error, "",test_name ); -} - -template<typename vector_t> -void verify_assignment_operator_throws_bad_last_alloc(vector_t & victim, const char* const test_name){ - vector_t copy_of_victim(victim, victim.get_allocator()); - ASSERT_THROWS_IN_TEST(victim = copy_of_victim, tbb::bad_last_alloc, "", test_name); -} - -template<typename vector_t> -void verify_copy_and_assign_from_produce_the_same(vector_t const& victim, const char* const test_name){ - //TODO: remove explicit copy of allocator when full support of C++11 allocator_traits in concurrent_vector is present - vector_t copy_of_victim(victim, victim.get_allocator()); - ASSERT_IN_TEST(copy_of_victim == victim, "copy doesn't match original", test_name); - vector_t copy_of_victim2(10, victim[0], victim.get_allocator()); - copy_of_victim2 = victim; - ASSERT_IN_TEST(copy_of_victim == copy_of_victim2, "assignment doesn't match copying", test_name); -} - -template<typename allocator_t> -void verify_vector_partially_copied( - tbb::concurrent_vector<FooWithAssign, allocator_t> const& victim, size_t planned_victim_size, - tbb::concurrent_vector<FooWithAssign, allocator_t> const& src, bool is_memory_allocation_failure ,const char* const test_name) -{ - if (is_memory_allocation_failure) { // allocator generated exception - typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t; - ASSERT_IN_TEST( victim == vector_t(src.begin(), src.begin() + victim.size(), src.get_allocator()), "failed to properly copy of source ?", test_name ); - }else{ - ASSERT_IN_TEST( std::equal(victim.begin(), victim.begin() + planned_victim_size, src.begin()), "failed to properly copy items before the exception?", test_name ); - ASSERT_IN_TEST( ::all_of( victim.begin() + planned_victim_size, victim.end(), &is_state<Foo::ZeroInitialized> ), "failed to zero-initialize items left not constructed after the exception?", test_name ); - } -} - -//------------------------------------------------------------------------ -// Test exceptions safety (from allocator and items constructors) -//------------------------------------------------------------------------ -void TestExceptions() { - typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> 
allocator_t; - typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t; - - enum methods { - zero_method = 0, - ctor_copy, ctor_size, assign_nt, assign_ir, reserve, compact, - all_methods - }; - ASSERT( !FooCount, NULL ); - - try { - vector_t src(FooIterator(0), FooIterator(N)); // original data - - for(int t = 0; t < 2; ++t) // exception type - for(int m = zero_method+1; m < all_methods; ++m) - { - track_foo_count<__LINE__> check_all_foo_destroyed_on_exit("TestExceptions"); - track_allocator_memory<allocator_t> verify_no_leak_at_exit("TestExceptions"); - allocator_t::init_counters(); - if(t) MaxFooCount = FooCount + N/4; - else allocator_t::set_limits(N/4); - vector_t victim; - try { - switch(m) { - case ctor_copy: { - vector_t acopy(src); - } break; // auto destruction after exception is checked by ~Foo - case ctor_size: { - vector_t sized(N); - } break; // auto destruction after exception is checked by ~Foo - // Do not test assignment constructor due to reusing of same methods as below - case assign_nt: { - victim.assign(N, FooWithAssign()); - } break; - case assign_ir: { - victim.assign(FooIterator(0), FooIterator(N)); - } break; - case reserve: { - try { - victim.reserve(victim.max_size()+1); - } catch(std::length_error &) { - } catch(...) { - KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n"); - } - victim.reserve(N); - } break; - case compact: { - if(t) MaxFooCount = 0; else allocator_t::set_limits(); // reset limits - victim.reserve(2); victim = src; // fragmented assignment - if(t) MaxFooCount = FooCount + 10; else allocator_t::set_limits(1, false); // block any allocation, check NULL return from allocator - victim.shrink_to_fit(); // should start defragmenting first segment - } break; - default:; - } - if(!t || m != reserve) ASSERT(false, "should throw an exception"); - } catch(std::bad_alloc &e) { - allocator_t::set_limits(); MaxFooCount = 0; - size_t capacity = victim.capacity(); - size_t size = victim.size(); - - size_t req_size = get_early_size(victim); - - verify_c_vector_size(size, capacity, req_size, "TestExceptions"); - - switch(m) { - case reserve: - if(t) ASSERT(false, NULL); - case assign_nt: - case assign_ir: - if(!t) { - ASSERT(capacity < N/2, "unexpected capacity"); - ASSERT(size == 0, "unexpected size"); - break; - } else { - ASSERT(size == N, "unexpected size"); - ASSERT(capacity >= N, "unexpected capacity"); - int i; - for(i = 1; ; ++i) - if(!victim[i].zero_bar()) break; - else ASSERT(victim[i].bar() == (m == assign_ir)? i : initial_value_of_bar, NULL); - for(; size_t(i) < size; ++i) ASSERT(!victim[i].zero_bar(), NULL); - ASSERT(size_t(i) == size, NULL); - break; - } - case compact: - ASSERT(capacity > 0, "unexpected capacity"); - ASSERT(victim == src, "shrink_to_fit() is broken"); - break; - - default:; // nothing to check here - } - REMARK("Exception %d: %s\t- ok\n", m, e.what()); - } - } - } catch(...) 
{ - ASSERT(false, "unexpected exception"); - } -} - -//TODO: split into two separate tests -//TODO: remove code duplication in exception safety tests -void TestExceptionSafetyGuaranteesForAssignOperator(){ - //TODO: use __FUNCTION__ for test name - const char* const test_name = "TestExceptionSafetyGuaranteesForAssignOperator"; - typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t; - typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t; - - track_foo_count<__LINE__> check_all_foo_destroyed_on_exit(test_name); - track_allocator_memory<allocator_t> verify_no_leak_at_exit(test_name); - - vector_t src(FooIterator(0), FooIterator(N)); // original data - - const size_t planned_victim_size = N/4; - - for(int t = 0; t < 2; ++t) {// exception type - vector_t victim; - victim.reserve(2); // get fragmented assignment - - ASSERT_THROWS_IN_TEST( - { - limit_foo_count_in_scope foo_limit(FooCount + planned_victim_size, t); - limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + planned_victim_size, !t); - - victim = src; // fragmented assignment - }, - std::bad_alloc, "", test_name - ); - - verify_c_vector_size(victim, test_name); - - if(!t) { - verify_c_vector_capacity_is_below(victim.capacity(), N, test_name); - } - - verify_vector_partially_copied(victim, planned_victim_size, src, !t, test_name); - verify_last_segment_allocation_failed(victim, test_name); - verify_copy_and_assign_from_produce_the_same(victim, test_name); - verify_assignment_operator_throws_bad_last_alloc(victim, test_name); - } -} -//TODO: split into two separate tests -void TestExceptionSafetyGuaranteesForConcurrentGrow(){ - const char* const test_name = "TestExceptionSafetyGuaranteesForConcurrentGrow"; - typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t; - typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t; - - track_foo_count<__LINE__> check_all_foo_destroyed_on_exit(test_name); - track_allocator_memory<allocator_t> verify_no_leak_at_exit(test_name); - - vector_t src(FooIterator(0), FooIterator(N)); // original data - - const size_t planned_victim_size = N/4; - static const int grain_size = 70; - - tbb::task_scheduler_init init(2); - - for(int t = 0; t < 2; ++t) {// exception type - vector_t victim; - -#if TBB_USE_CAPTURED_EXCEPTION - #define EXPECTED_EXCEPTION tbb::captured_exception -#else - #define EXPECTED_EXCEPTION std::bad_alloc -#endif - - ASSERT_THROWS_IN_TEST( - { - limit_foo_count_in_scope foo_limit(FooCount + 31, t); // these numbers help to reproduce the live lock for versions < TBB2.2 - limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + planned_victim_size, !t); - - grain_map m(concurrent_grow_single_range_map, Harness::end(concurrent_grow_single_range_map)); - - static const size_t part_weight = grain_size / m.total_number_of_parts; - - tbb::parallel_for( - tbb::blocked_range<size_t>(0, N, grain_size), - GrowBy<vector_t>(victim, m, part_weight) - ); - }, - EXPECTED_EXCEPTION, "", test_name - ); - - verify_c_vector_size(victim, test_name); - - if(!t) { - verify_c_vector_capacity_is_below(victim.capacity(), N, test_name); - } - - for(int i = 0; ; ++i) { - try { - Foo &foo = victim.at(i); - ASSERT( foo.is_valid_or_zero(),"" ); - } catch(std::range_error &) { // skip broken segment - ASSERT( size_t(i) < get_early_size(victim), NULL ); - } catch(std::out_of_range &){ - ASSERT( i > 0, NULL ); break; - } catch(...) 
{ - KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n"); break; - } - } - - verify_copy_and_assign_from_produce_the_same(victim, test_name); - } -} - -#if __TBB_CPP11_RVALUE_REF_PRESENT -void TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorMemoryFailure(){ - const char* const test_name = "TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorMemoryFailure"; - - //TODO: add ability to inject debug_allocator into stateful_allocator_fixture::allocator_t - //typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t; - typedef default_stateful_fixture_make_helper<c_vector_type, Harness::false_type>::type fixture_t; - typedef arena_allocator_fixture<FooWithAssign, Harness::false_type> arena_allocator_fixture_t; - typedef fixture_t::allocator_t allocator_t; - typedef fixture_t::container_t vector_t; - - fixture_t fixture(test_name); - arena_allocator_fixture_t arena_allocator_fixture(4 * fixture.container_size); - - const size_t allocation_limit = fixture.container_size/4; - - vector_t victim(arena_allocator_fixture.allocator); - victim.reserve(2); // get fragmented assignment - - ASSERT_THROWS_IN_TEST( - { - limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + allocation_limit); - victim = std::move(fixture.source); // fragmented assignment - }, - std::bad_alloc, "", test_name - ); - - verify_c_vector_size(victim, test_name); - verify_c_vector_capacity_is_below(victim.capacity(), allocation_limit + 2, test_name); - - fixture.verify_part_of_content_deep_moved(victim, victim.size()); - - verify_last_segment_allocation_failed(victim, test_name); - verify_copy_and_assign_from_produce_the_same(victim, test_name); - verify_assignment_operator_throws_bad_last_alloc(victim, test_name); -} - -void TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorExceptionInElementCtor(){ - const char* const test_name = "TestExceptionSafetyGuaranteesForMoveAssignOperator"; - //typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t; - typedef default_stateful_fixture_make_helper<c_vector_type, Harness::false_type>::type fixture_t; - typedef arena_allocator_fixture<FooWithAssign, Harness::false_type> arena_allocator_fixture_t; - typedef fixture_t::container_t vector_t; - - fixture_t fixture(test_name); - const size_t planned_victim_size = fixture.container_size/4; - arena_allocator_fixture_t arena_allocator_fixture(4 * fixture.container_size); - - vector_t victim(arena_allocator_fixture.allocator); - victim.reserve(2); // get fragmented assignment - - ASSERT_THROWS_IN_TEST( - { - limit_foo_count_in_scope foo_limit(FooCount + planned_victim_size); - victim = std::move(fixture.source); // fragmented assignment - }, - std::bad_alloc, "", test_name - ); - - verify_c_vector_size(victim, test_name); - - fixture.verify_part_of_content_deep_moved(victim, planned_victim_size); - - verify_last_segment_allocation_failed(victim, test_name); - verify_copy_and_assign_from_produce_the_same(victim, test_name); - verify_assignment_operator_throws_bad_last_alloc(victim, test_name); -} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - -namespace push_back_exception_safety_helpers{ - //TODO: remove code duplication with emplace_helpers::wrapper_type - struct throwing_foo:Foo{ - int value1; - int value2; - explicit throwing_foo(int v1, int v2) : value1 (v1), value2(v2) { } - }; - - template< typename foo_t = throwing_foo> - struct fixture{ - typedef 
tbb::concurrent_vector<foo_t, debug_allocator<foo_t> > vector_t;
- vector_t v;
-
- void test( void(*p_test)(vector_t&), const char * test_name){
- track_foo_count<__LINE__> verify_no_foo_leaked_during_exception(test_name);
- ASSERT_IN_TEST(v.empty(),"incorrect test setup?", test_name );
- ASSERT_THROWS_IN_TEST(p_test(v), Foo_exception, "", test_name);
- ASSERT_IN_TEST(is_state<Foo::ZeroInitialized>(v[0]),"incorrectly filled item during exception in emplace_back?", test_name);
- }
- };
-}
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-void TestPushBackMoveExceptionSafety(){
- typedef push_back_exception_safety_helpers::fixture<Foo> fixture_t;
- fixture_t t;
-
- limit_foo_count_in_scope foo_limit(FooCount + 1);
-
- struct test{
- static void test_move_push_back(fixture_t::vector_t& v){
- Foo f;
- v.push_back(std::move(f));
- }
- };
- t.test(&test::test_move_push_back, "TestPushBackMoveExceptionSafety");
-}
-
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-void TestEmplaceBackExceptionSafety(){
- typedef push_back_exception_safety_helpers::fixture<> fixture_t;
- fixture_t t;
-
- Foo dummy; // make FooCount non-zero
- Harness::suppress_unused_warning(dummy);
- limit_foo_count_in_scope foo_limit(FooCount);
-
- struct test{
- static void test_emplace(fixture_t::vector_t& v){
- v.emplace_back(1,2);
- }
- };
- t.test(&test::test_emplace, "TestEmplaceBackExceptionSafety");
-}
-#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
-
-#endif /* TBB_USE_EXCEPTIONS */
-
-//------------------------------------------------------------------------
-// Test support for SIMD instructions
-//------------------------------------------------------------------------
-#include "harness_m128.h"
-
-#if HAVE_m128 || HAVE_m256
-
-template<typename ClassWithVectorType>
-void TestVectorTypes() {
- tbb::concurrent_vector<ClassWithVectorType> v;
- for( int i=0; i<100; ++i ) {
- // VC8 does not properly align a temporary value; to work around, use explicit variable
- ClassWithVectorType foo(i);
- v.push_back(foo);
- for( int j=0; j<i; ++j ) {
- ClassWithVectorType bar(j);
- ASSERT( v[j]==bar, NULL );
- }
- }
-}
-#endif /* HAVE_m128 | HAVE_m256 */
-
-//------------------------------------------------------------------------
-
-namespace v3_backward_compatibility{
- namespace segment_t_layout_helpers{
- // this is the previous definition of the corresponding inner class of concurrent_vector_base_v3
- struct segment_t_v3 {
- void* array;
- };
- //helper class to access protected members of concurrent_vector_base
- struct access_vector_fields :tbb::internal::concurrent_vector_base_v3 {
- using tbb::internal::concurrent_vector_base_v3::segment_t;
- using tbb::internal::concurrent_vector_base_v3::segment_index_t;
- using tbb::internal::concurrent_vector_base_v3::pointers_per_long_table;
- using tbb::internal::concurrent_vector_base_v3::internal_segments_table;
- };
- // this is the previous definition of the corresponding inner class of concurrent_vector_base_v3
- struct internal_segments_table_v3 {
- access_vector_fields::segment_index_t first_block;
- segment_t_v3 table[access_vector_fields::pointers_per_long_table];
- };
-
- template <typename checked_type>
- struct alignment_check_helper{
- char dummy;
- checked_type checked;
- };
- }
- void TestSegmentTLayout(){
- using namespace segment_t_layout_helpers;
- typedef alignment_check_helper<segment_t_v3> structure_with_old_segment_type;
- typedef alignment_check_helper<access_vector_fields::segment_t> structure_with_new_segment_type;
-
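The alignment_check_helper trick above can be shown in isolation: placing the checked type after a single char member makes the wrapper's size reflect both the size and the alignment of the wrapped type, so two ABI-compatible definitions must yield equally sized wrappers. A sketch, with illustrative stand-in types (old_segment, new_segment, alignment_probe are not the test's names) and a C++11 static_assert for brevity:

    #include <cstddef>

    struct old_segment { void* array; };
    struct new_segment { void* array; };  // stand-in for an updated layout

    // Wrapper whose size depends on sizeof(T) and alignof(T).
    template <typename T>
    struct alignment_probe { char dummy; T checked; };

    // ABI-compatible layouts must produce identically sized probes.
    static_assert(sizeof(alignment_probe<old_segment>) ==
                  sizeof(alignment_probe<new_segment>),
                  "layout of the new segment type differs from the old one");

    int main() { return 0; }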
ASSERT((sizeof(structure_with_old_segment_type)==sizeof(structure_with_new_segment_type)) - ,"layout of new segment_t and old one differ?"); - } - - void TestInternalSegmentsTableLayout(){ - using namespace segment_t_layout_helpers; - typedef alignment_check_helper<internal_segments_table_v3> structure_with_old_segment_table_type; - typedef alignment_check_helper<access_vector_fields::internal_segments_table> structure_with_new_segment_table_type; - - ASSERT((sizeof(structure_with_old_segment_table_type)==sizeof(structure_with_new_segment_table_type)) - ,"layout of new internal_segments_table and old one differ?"); - } -} -void TestV3BackwardCompatibility(){ - using namespace v3_backward_compatibility; - TestSegmentTLayout(); - TestInternalSegmentsTableLayout(); -} - -#include "harness_defs.h" - -#include <vector> -#include <numeric> -#include <functional> - -// The helper to run a test only when a default construction is present. -template <bool default_construction_present> struct do_default_construction_test { - template<typename FuncType> void operator() ( FuncType func ) const { func(); } -}; -template <> struct do_default_construction_test<false> { - template<typename FuncType> void operator()( FuncType ) const {} -}; - -template <typename Type, typename Allocator> -class test_grow_by_and_resize : NoAssign { - tbb::concurrent_vector<Type, Allocator> &my_c; -public: - test_grow_by_and_resize( tbb::concurrent_vector<Type, Allocator> &c ) : my_c(c) {} - void operator()() const { - const typename tbb::concurrent_vector<Type, Allocator>::size_type sz = my_c.size(); - my_c.grow_by( 5 ); - ASSERT( my_c.size() == sz + 5, NULL ); - my_c.resize( sz ); - ASSERT( my_c.size() == sz, NULL ); - } -}; - -template <typename Type, typename Allocator> -void CompareVectors( const tbb::concurrent_vector<Type, Allocator> &c1, const tbb::concurrent_vector<Type, Allocator> &c2 ) { - ASSERT( !(c1 == c2) && c1 != c2, NULL ); - ASSERT( c1 <= c2 && c1 < c2 && c2 >= c1 && c2 > c1, NULL ); -} - -#if __TBB_CPP11_SMART_POINTERS_PRESENT -template <typename Type, typename Allocator> -void CompareVectors( const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> &, const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> & ) { - /* do nothing for std::weak_ptr */ -} -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ - -template <bool default_construction_present, typename Type, typename Allocator> -void Examine( tbb::concurrent_vector<Type, Allocator> c, const std::vector<Type> &vec ) { - typedef tbb::concurrent_vector<Type, Allocator> vector_t; - typedef typename vector_t::size_type size_type_t; - - ASSERT( c.size() == vec.size(), NULL ); - for ( size_type_t i=0; i<c.size(); ++i ) ASSERT( Harness::IsEqual()(c[i], vec[i]), NULL ); - do_default_construction_test<default_construction_present>()(test_grow_by_and_resize<Type,Allocator>(c)); - c.grow_by( size_type_t(5), c[0] ); - c.grow_to_at_least( c.size()+5, c.at(0) ); - vector_t c2; - c2.reserve( 5 ); - std::copy( c.begin(), c.begin() + 5, std::back_inserter( c2 ) ); - - c.grow_by( c2.begin(), c2.end() ); - ASSERT( Harness::IsEqual()(c.front(), *(c2.rend()-1)), NULL ); - ASSERT( Harness::IsEqual()(c.back(), *c2.rbegin()), NULL); - ASSERT( Harness::IsEqual()(*c.cbegin(), *(c.crend()-1)), NULL ); - ASSERT( Harness::IsEqual()(*(c.cend()-1), *c.crbegin()), NULL ); - c.swap( c2 ); - ASSERT( c.size() == 5, NULL ); - CompareVectors( c, c2 ); - c.swap( c2 ); - c2.clear(); - ASSERT( c2.size() == 0, NULL ); - c2.shrink_to_fit(); - Allocator a = c.get_allocator(); - 
a.deallocate( a.allocate(1), 1 ); -} - -template <typename Type> -class test_default_construction : NoAssign { - const std::vector<Type> &my_vec; -public: - test_default_construction( const std::vector<Type> &vec ) : my_vec(vec) {} - void operator()() const { - // Construction with initial size specified by argument n. - tbb::concurrent_vector<Type> c7( my_vec.size() ); - std::copy( my_vec.begin(), my_vec.end(), c7.begin() ); - Examine</*default_construction_present = */true>( c7, my_vec ); - tbb::concurrent_vector< Type, debug_allocator<Type> > c8( my_vec.size() ); - std::copy( c7.begin(), c7.end(), c8.begin() ); - Examine</*default_construction_present = */true>( c8, my_vec ); - } -}; - -template <bool default_construction_present, typename Type> -void TypeTester( const std::vector<Type> &vec ) { - __TBB_ASSERT( vec.size() >= 5, "Array should have at least 5 elements" ); - // Construct empty vector. - tbb::concurrent_vector<Type> c1; - std::copy( vec.begin(), vec.end(), std::back_inserter(c1) ); - Examine<default_construction_present>( c1, vec ); -#if __TBB_INITIALIZER_LISTS_PRESENT - // Constructor from initializer_list. - tbb::concurrent_vector<Type> c2({vec[0],vec[1],vec[2]}); - std::copy( vec.begin()+3, vec.end(), std::back_inserter(c2) ); - Examine<default_construction_present>( c2, vec ); -#endif - // Copying constructor. - tbb::concurrent_vector<Type> c3(c1); - Examine<default_construction_present>( c3, vec ); - // Construct with non-default allocator - tbb::concurrent_vector< Type, debug_allocator<Type> > c4; - std::copy( vec.begin(), vec.end(), std::back_inserter(c4) ); - Examine<default_construction_present>( c4, vec ); - // Copying constructor for vector with different allocator type. - tbb::concurrent_vector<Type> c5(c4); - Examine<default_construction_present>( c5, vec ); - tbb::concurrent_vector< Type, debug_allocator<Type> > c6(c3); - Examine<default_construction_present>( c6, vec ); - // Construction with initial size specified by argument n. - do_default_construction_test<default_construction_present>()(test_default_construction<Type>(vec)); - // Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance. - debug_allocator<Type> allocator; - tbb::concurrent_vector< Type, debug_allocator<Type> > c9(vec.size(), vec[1], allocator); - Examine<default_construction_present>( c9, std::vector<Type>(vec.size(), vec[1]) ); - // Construction with copying iteration range and given allocator instance. - tbb::concurrent_vector< Type, debug_allocator<Type> > c10(c1.begin(), c1.end(), allocator); - Examine<default_construction_present>( c10, vec ); - tbb::concurrent_vector<Type> c11(vec.begin(), vec.end()); - Examine<default_construction_present>( c11, vec ); -} - -void TestTypes() { - const int NUMBER = 100; - - std::vector<int> intArr; - for ( int i=0; i<NUMBER; ++i ) intArr.push_back(i); - TypeTester</*default_construction_present = */true>( intArr ); - -#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT - std::vector< std::reference_wrapper<int> > refArr; - // The constructor of std::reference_wrapper<T> from T& is explicit in some versions of libstdc++. 
- for ( int i=0; i<NUMBER; ++i ) refArr.push_back( std::reference_wrapper<int>(intArr[i]) ); - TypeTester</*default_construction_present = */false>( refArr ); -#else - REPORT( "Known issue: C++11 reference wrapper tests are skipped.\n" ); -#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT */ - - std::vector< tbb::atomic<int> > tbbIntArr( NUMBER ); - for ( int i=0; i<NUMBER; ++i ) tbbIntArr[i] = i; - TypeTester</*default_construction_present = */true>( tbbIntArr ); - -#if __TBB_CPP11_SMART_POINTERS_PRESENT - std::vector< std::shared_ptr<int> > shrPtrArr; - for ( int i=0; i<NUMBER; ++i ) shrPtrArr.push_back( std::make_shared<int>(i) ); - TypeTester</*default_construction_present = */true>( shrPtrArr ); - - std::vector< std::weak_ptr<int> > wkPtrArr; - std::copy( shrPtrArr.begin(), shrPtrArr.end(), std::back_inserter(wkPtrArr) ); - TypeTester</*default_construction_present = */true>( wkPtrArr ); -#else - REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" ); -#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */ -} - -int TestMain () { - if( MinThread<1 ) { - REPORT("ERROR: MinThread=%d, but must be at least 1\n",MinThread); MinThread = 1; - } - TestFoo(); - TestV3BackwardCompatibility(); - TestIteratorTraits<tbb::concurrent_vector<Foo>::iterator,Foo>(); - TestIteratorTraits<tbb::concurrent_vector<Foo>::const_iterator,const Foo>(); - TestArrayLength(); - TestAllOf(); -#if __TBB_INITIALIZER_LISTS_PRESENT - TestInitList(); -#else - REPORT("Known issue: initializer list tests are skipped.\n"); -#endif - TestSequentialFor<FooWithAssign> (); - TestResizeAndCopy(); - TestAssign(); -#if __TBB_CPP11_RVALUE_REF_PRESENT - TestMoveConstructor<c_vector_type>(); - TestMoveAssignOperator<c_vector_type>(); - TestConstructorWithMoveIterators<c_vector_type>(); - TestAssignWithMoveIterators<c_vector_type>(); - TestSerialGrowByWithMoveIterators(); -#else - REPORT("Known issue: tests for vector move constructor/assignment operator are skipped.\n"); -#endif - TestGrowToAtLeastWithSourceParameter<tbb::concurrent_vector<int> >(12345); - TestSerialGrowByRange(false); - TestSerialGrowByRange(true); -#if __TBB_CPP11_RVALUE_REF_PRESENT - TestPushBackMoveOnlyContainee(); -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - TestEmplaceBack(); -#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif //__TBB_CPP11_RVALUE_REF_PRESENT -#if HAVE_m128 - TestVectorTypes<ClassWithSSE>(); -#endif -#if HAVE_m256 - if (have_AVX()) TestVectorTypes<ClassWithAVX>(); -#endif - TestCapacity(); - ASSERT( !FooCount, NULL ); - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - tbb::task_scheduler_init init( nthread ); - TestParallelFor( nthread ); - TestConcurrentGrowToAtLeast(); - TestConcurrentGrowBy( nthread ); - } - ASSERT( !FooCount, NULL ); - TestComparison(); - TestFindPrimes(); - TestSort(); -#if __TBB_RANGE_BASED_FOR_PRESENT - TestRangeBasedFor(); -#endif //if __TBB_RANGE_BASED_FOR_PRESENT -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - REPORT("Known issue: exception safety test is skipped.\n"); -#elif TBB_USE_EXCEPTIONS - TestExceptions(); - TestExceptionSafetyGuaranteesForAssignOperator(); -#if __TBB_CPP11_RVALUE_REF_PRESENT - TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure<c_vector_type>(); - TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor<c_vector_type>(); - TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorMemoryFailure(); - TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorExceptionInElementCtor(); - 
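The exception-safety tests invoked in this run all follow one shape: inject a failure (allocator limit or throwing element constructor), catch the exception, then verify the container is still consistent and destructible. A minimal standard-library sketch of that shape, not the harness's ASSERT_THROWS_IN_TEST machinery, using std::vector as a stand-in:

    #include <cassert>
    #include <new>
    #include <stdexcept>
    #include <vector>

    int main() {
        std::vector<int> v;
        v.push_back(1); v.push_back(2); v.push_back(3);
        try {
            v.reserve(v.max_size());  // an absurd request: should throw
        } catch (const std::length_error&) {
        } catch (const std::bad_alloc&) {
        }
        // Whatever was thrown, the original contents must be intact.
        assert(v.size() == 3 && v[2] == 3);
        return 0;
    }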
TestPushBackMoveExceptionSafety();
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- TestEmplaceBackExceptionSafety();
-#endif /*__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */
-#else
- REPORT("Known issue: exception safety tests for move constructor/assignment operator, grow_by are skipped.\n");
-#endif /*__TBB_CPP11_RVALUE_REF_PRESENT */
-#endif /* TBB_USE_EXCEPTIONS */
- TestTypes();
- ASSERT( !FooCount, NULL );
- REMARK("sizeof(concurrent_vector<int>) == %d\n", (int)sizeof(tbb::concurrent_vector<int>));
- return Harness::Done;
-}
-
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
- #pragma warning (pop)
-#endif // warning 4800 is back
diff --git a/src/tbb/src/test/test_condition_variable.h b/src/tbb/src/test/test_condition_variable.h
deleted file mode 100644
index 58d2554fa..000000000
--- a/src/tbb/src/test/test_condition_variable.h
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
- This file is part of Threading Building Blocks. Threading Building Blocks is free software;
- you can redistribute it and/or modify it under the terms of the GNU General Public License
- version 2 as published by the Free Software Foundation. Threading Building Blocks is
- distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details. You should have received a copy of
- the GNU General Public License along with Threading Building Blocks; if not, write to the
- Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
- As a special exception, you may use this file as part of a free software library without
- restriction. Specifically, if other files instantiate templates or use macros or inline
- functions from this file, or you compile this file and link it with other files to produce
- an executable, this file does not by itself cause the resulting executable to be covered
- by the GNU General Public License. This exception does not however invalidate any other
- reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
- // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
- #pragma warning (push)
- #pragma warning (disable: 4530)
-#endif
-
-#include "tbb/compat/condition_variable"
-#include "tbb/mutex.h"
-#include "tbb/recursive_mutex.h"
-#include "tbb/tick_count.h"
-#include "tbb/atomic.h"
-
-#include <stdexcept>
-
-#include "harness.h"
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
- #pragma warning (pop)
-#endif
-
-
-// This test deliberately avoids a "using tbb" statement,
-// so that the error of putting types in the wrong namespace will be caught.
-
-using namespace std;
-
-template<typename M>
-struct Counter {
- typedef M mutex_type;
- M mutex;
- volatile long value;
- void flog_once_lock_guard( size_t mode );
- void flog_once_unique_lock( size_t mode );
-};
-
-template<typename M>
-void Counter<M>::flog_once_lock_guard(size_t mode)
-/** Increments counter once for each iteration in the iteration space. */
-{
- if( mode&1 ) {
- // Try acquire and release with implicit lock_guard
- // precondition: if mutex_type is not a recursive mutex, the calling thread does not own the mutex m.
- // if the precondition is not met, either deadlock or an incorrect 'value' would result.
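Both lock_guard idioms exercised in this function have direct standard-library counterparts; a sketch using std:: types rather than the tbb compat types (names here are illustrative only):

    #include <mutex>

    std::mutex m;
    long value = 0;

    void increment_plain() {
        std::lock_guard<std::mutex> lg(m);  // locks here, unlocks in dtor
        ++value;
    }

    void increment_adopt() {
        m.lock();                                            // caller already owns m
        std::lock_guard<std::mutex> lg(m, std::adopt_lock);  // takes over ownership, no second lock
        ++value;
    }

    int main() {
        increment_plain();
        increment_adopt();
        return value == 2 ? 0 : 1;
    }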
lock_guard<M> lg(mutex);
- value = value+1;
- } else {
- // Try acquire and release with adopt lock_guard
- // precondition: the calling thread owns the mutex m.
- // if the precondition is not met, an incorrect 'value' would result because the thread unlocks
- // a mutex that it does not own.
- mutex.lock();
- lock_guard<M> lg( mutex, adopt_lock );
- value = value+1;
- }
-}
-
-template<typename M>
-void Counter<M>::flog_once_unique_lock(size_t mode)
-/** Increments counter once for each iteration in the iteration space. */
-{
- switch( mode&7 ) {
- case 0:
- {// implicitly acquire and release mutex with unique_lock
- unique_lock<M> ul( mutex );
- value = value+1;
- ASSERT( ul==true, NULL );
- }
- break;
- case 1:
- {// unique_lock with defer_lock
- unique_lock<M> ul( mutex, defer_lock );
- ASSERT( ul.owns_lock()==false, NULL );
- ul.lock();
- value = value+1;
- ASSERT( ul.owns_lock()==true, NULL );
- }
- break;
- case 2:
- {// unique_lock::try_lock() with try_to_lock
- unique_lock<M> ul( mutex, try_to_lock );
- if( !ul )
- while( !ul.try_lock() )
- __TBB_Yield();
- value = value+1;
- }
- break;
- case 3:
- {// unique_lock::try_lock_for() with defer_lock
- unique_lock<M> ul( mutex, defer_lock );
- tbb::tick_count::interval_t i(1.0);
- while( !ul.try_lock_for( i ) )
- ;
- value = value+1;
- ASSERT( ul.owns_lock()==true, NULL );
- }
- break;
- case 4:
- {
- unique_lock<M> ul_o4;
- {// unique_lock with adopt_lock
- mutex.lock();
- unique_lock<M> ul( mutex, adopt_lock );
- value = value+1;
- ASSERT( ul.owns_lock()==true, NULL );
- ASSERT( ul.mutex()==&mutex, NULL );
- ASSERT( ul_o4.owns_lock()==false, NULL );
- ASSERT( ul_o4.mutex()==NULL, NULL );
- swap( ul, ul_o4 );
- ASSERT( ul.owns_lock()==false, NULL );
- ASSERT( ul.mutex()==NULL, NULL );
- ASSERT( ul_o4.owns_lock()==true, NULL );
- ASSERT( ul_o4.mutex()==&mutex, NULL );
- ul_o4.unlock();
- }
- ASSERT( ul_o4.owns_lock()==false, NULL );
- }
- break;
- case 5:
- {
- unique_lock<M> ul_o5;
- {// unique_lock with adopt_lock
- mutex.lock();
- unique_lock<M> ul( mutex, adopt_lock );
- value = value+1;
- ASSERT( ul.owns_lock()==true, NULL );
- ASSERT( ul.mutex()==&mutex, NULL );
- ASSERT( ul_o5.owns_lock()==false, NULL );
- ASSERT( ul_o5.mutex()==NULL, NULL );
- ul_o5.swap( ul );
- ASSERT( ul.owns_lock()==false, NULL );
- ASSERT( ul.mutex()==NULL, NULL );
- ASSERT( ul_o5.owns_lock()==true, NULL );
- ASSERT( ul_o5.mutex()==&mutex, NULL );
- ul_o5.unlock();
- }
- ASSERT( ul_o5.owns_lock()==false, NULL );
- }
- break;
- default:
- {// unique_lock with adopt_lock, and release()
- mutex.lock();
- unique_lock<M> ul( mutex, adopt_lock );
- ASSERT( ul==true, NULL );
- value = value+1;
- M* old_m = ul.release();
- old_m->unlock();
- ASSERT( ul.owns_lock()==false, NULL );
- }
- break;
- }
-}
-
-static tbb::atomic<size_t> Order;
-
-template<typename State, long TestSize>
-struct WorkForLocks: NoAssign {
- static const size_t chunk = 100;
- State& state;
- WorkForLocks( State& state_ ) : state(state_) {}
- void operator()( int ) const {
- size_t step;
- while( (step=Order.fetch_and_add<tbb::acquire>(chunk))<TestSize ) {
- for( size_t i=0; i<chunk && step<TestSize; ++i, ++step ) {
- state.flog_once_lock_guard(step);
- state.flog_once_unique_lock(step);
- }
- }
- }
-};
-
-template<typename M>
-void TestLocks( const char* name, int nthread ) {
- REMARK("testing %s in TestLocks\n",name);
- Counter<M> counter;
- counter.value = 0;
- Order = 0;
- // use the macro because of a gcc 4.6 bug
-#define TEST_SIZE 100000
- NativeParallelFor( nthread,
WorkForLocks<Counter<M>, TEST_SIZE>(counter) ); - - if( counter.value!=2*TEST_SIZE ) - REPORT("ERROR for %s in TestLocks: counter.value=%ld != 2 * %ld=test_size\n",name,counter.value,TEST_SIZE); -#undef TEST_SIZE -} - -static tbb::atomic<int> barrier; - -// Test if the constructor works and if native_handle() works -template<typename M> -struct WorkForCondVarCtor: NoAssign { - condition_variable& my_cv; - M& my_mtx; - WorkForCondVarCtor( condition_variable& cv_, M& mtx_ ) : my_cv(cv_), my_mtx(mtx_) {} - void operator()( int tid ) const { - ASSERT( tid<=1, NULL ); // test with 2 threads. - condition_variable::native_handle_type handle = my_cv.native_handle(); - if( tid&1 ) { - my_mtx.lock(); - ++barrier; -#if _WIN32||_WIN64 - if( !tbb::interface5::internal::internal_condition_variable_wait( *handle, &my_mtx ) ) { - int ec = GetLastError(); - ASSERT( ec!=WAIT_TIMEOUT, NULL ); - throw_exception( tbb::internal::eid_condvar_wait_failed ); - } -#else - if( pthread_cond_wait( handle, my_mtx.native_handle() ) ) - throw_exception( tbb::internal::eid_condvar_wait_failed ); -#endif - ++barrier; - my_mtx.unlock(); - } else { - bool res; - while( (res=my_mtx.try_lock())==true && barrier==0 ) { - my_mtx.unlock(); - __TBB_Yield(); - } - if( res ) my_mtx.unlock(); - do { -#if _WIN32||_WIN64 - tbb::interface5::internal::internal_condition_variable_notify_one( *handle ); -#else - pthread_cond_signal( handle ); -#endif - __TBB_Yield(); - } while ( barrier<2 ); - } - } -}; - -static condition_variable* test_cv; -static tbb::atomic<int> n_waiters; - -// Test if the destructor works -template<typename M> -struct WorkForCondVarDtor: NoAssign { - int nthread; - M& my_mtx; - WorkForCondVarDtor( int n, M& mtx_ ) : nthread(n), my_mtx(mtx_) {} - void operator()( int tid ) const { - if( tid==0 ) { - unique_lock<M> ul( my_mtx, defer_lock ); - test_cv = new condition_variable; - - while( n_waiters<nthread-1 ) - __TBB_Yield(); - ul.lock(); - test_cv->notify_all(); - ul.unlock(); - while( n_waiters>0 ) - __TBB_Yield(); - delete test_cv; - } else { - while( test_cv==NULL ) - __TBB_Yield(); - unique_lock<M> ul(my_mtx); - ++n_waiters; - test_cv->wait( ul ); - --n_waiters; - } - } -}; - -static const int max_ticket = 100; -static const int short_delay = 10; -static const int long_delay = 100; - -tbb::atomic<int> n_signaled; -tbb::atomic<int> n_done, n_done_1, n_done_2; -tbb::atomic<int> n_timed_out; - -static bool false_to_true; - -struct TestPredicateFalseToTrue { - TestPredicateFalseToTrue() {} - bool operator()() { return false_to_true; } -}; - -struct TestPredicateFalse { - TestPredicateFalse() {} - bool operator()() { return false; } -}; - -struct TestPredicateTrue { - TestPredicateTrue() {} - bool operator()() { return true; } -}; - -// Test timed wait and timed wait with pred -template<typename M> -struct WorkForCondVarTimedWait: NoAssign { - int nthread; - condition_variable& test_cv; - M& my_mtx; - WorkForCondVarTimedWait( int n_, condition_variable& cv_, M& mtx_ ) : nthread(n_), test_cv(cv_), my_mtx(mtx_) {} - void operator()( int tid ) const { - tbb::tick_count t1, t2; - - unique_lock<M> ul( my_mtx, defer_lock ); - - ASSERT( n_timed_out==0, NULL ); - ++barrier; - while( barrier<nthread ) __TBB_Yield(); - - // test if a thread times out with wait_for() - for( int i=1; i<10; ++i ) { - tbb::tick_count::interval_t intv((double)i*0.0999 /*seconds*/); - ul.lock(); - cv_status st = no_timeout; - __TBB_TRY { - /** Some version of glibc return EINVAL instead 0 when spurious wakeup occurs on pthread_cond_timedwait() **/ - st 
= test_cv.wait_for( ul, intv ); - } __TBB_CATCH( std::runtime_error& ) {} - ASSERT( ul, "mutex should have been reacquired" ); - ul.unlock(); - if( st==timeout ) - ++n_timed_out; - } - - ASSERT( n_timed_out>0, "should have been timed-out at least once\n" ); - ++n_done_1; - while( n_done_1<nthread ) __TBB_Yield(); - - for( int i=1; i<10; ++i ) { - tbb::tick_count::interval_t intv((double)i*0.0001 /*seconds*/); - ul.lock(); - __TBB_TRY { - /** Some version of glibc return EINVAL instead 0 when spurious wakeup occurs on pthread_cond_timedwait() **/ - ASSERT( false==test_cv.wait_for( ul, intv, TestPredicateFalse()), "incorrect return value" ); - } __TBB_CATCH( std::runtime_error& ) {} - ASSERT( ul, "mutex should have been reacquired" ); - ul.unlock(); - } - - if( tid==0 ) - n_waiters = 0; - // barrier - ++n_done_2; - while( n_done_2<nthread ) __TBB_Yield(); - - // at this point, we know wait_for() successfully times out. - // so test if a thread blocked on wait_for() could receive a signal before its waiting time elapses. - if( tid==0 ) { - // signaler - n_signaled = 0; - ASSERT( n_waiters==0, NULL ); - ++n_done_2; // open gate 1 - - while( n_waiters<(nthread-1) ) __TBB_Yield(); // wait until all other threads block on cv. flag_1 - - ul.lock(); - test_cv.notify_all(); - n_waiters = 0; - ul.unlock(); - - while( n_done_2<2*nthread ) __TBB_Yield(); - ASSERT( n_signaled>0, "too small an interval?" ); - n_signaled = 0; - - } else { - while( n_done_2<nthread+1 ) __TBB_Yield(); // gate 1 - - // sleeper - tbb::tick_count::interval_t intv((double)2.0 /*seconds*/); - ul.lock(); - ++n_waiters; // raise flag 1/(nthread-1) - t1 = tbb::tick_count::now(); - cv_status st = test_cv.wait_for( ul, intv ); // gate 2 - t2 = tbb::tick_count::now(); - ul.unlock(); - if( st==no_timeout ) { - ++n_signaled; - ASSERT( (t2-t1).seconds()<intv.seconds(), "got a signal after timed-out?" ); - } - } - - ASSERT( n_done==0, NULL ); - ++n_done_2; - - if( tid==0 ) { - ASSERT( n_waiters==0, NULL ); - ++n_done; // open gate 3 - - while( n_waiters<(nthread-1) ) __TBB_Yield(); // wait until all other threads block on cv. - for( int i=0; i<2*short_delay; ++i ) __TBB_Yield(); // give some time to waiters so that all of them in the waitq - ul.lock(); - false_to_true = true; - test_cv.notify_all(); // open gate 4 - ul.unlock(); - - while( n_done<nthread ) __TBB_Yield(); // wait until all other threads wake up. - ASSERT( n_signaled>0, "too small an interval?" 
); - } else { - - while( n_done<1 ) __TBB_Yield(); // gate 3 - - tbb::tick_count::interval_t intv((double)2.0 /*seconds*/); - ul.lock(); - ++n_waiters; - // wait_for w/ predicate - t1 = tbb::tick_count::now(); - ASSERT( test_cv.wait_for( ul, intv, TestPredicateFalseToTrue())==true, NULL ); // gate 4 - t2 = tbb::tick_count::now(); - ul.unlock(); - if( (t2-t1).seconds()<intv.seconds() ) - ++n_signaled; - ++n_done; - } - } -}; - -tbb::atomic<int> ticket_for_sleep, ticket_for_wakeup, signaled_ticket, wokeup_ticket; -tbb::atomic<unsigned> n_visit_to_waitq; -unsigned max_waitq_length; - -template<typename M> -struct WorkForCondVarWaitAndNotifyOne: NoAssign { - int nthread; - condition_variable& test_cv; - M& my_mtx; - WorkForCondVarWaitAndNotifyOne( int n_, condition_variable& cv_, M& mtx_ ) : nthread(n_), test_cv(cv_), my_mtx(mtx_) {} - void operator()( int tid ) const { - if( tid&1 ) { - // exercise signal part - while( ticket_for_wakeup<max_ticket ) { - int my_ticket = ++ticket_for_wakeup; // atomically grab the next ticket - if( my_ticket>max_ticket ) - break; - - for( ;; ) { - unique_lock<M> ul( my_mtx, defer_lock ); - ul.lock(); - if( n_waiters>0 && my_ticket<=ticket_for_sleep && my_ticket==(wokeup_ticket+1) ) { - signaled_ticket = my_ticket; - test_cv.notify_one(); - ++n_signaled; - ul.unlock(); - break; - } - ul.unlock(); - __TBB_Yield(); - } - - // give waiters time to go to sleep. - for( int m=0; m<short_delay; ++m ) - __TBB_Yield(); - } - } else { - while( ticket_for_sleep<max_ticket ) { - unique_lock<M> ul( my_mtx, defer_lock ); - ul.lock(); - // exercise wait part - int my_ticket = ++ticket_for_sleep; // grab my ticket - if( my_ticket>max_ticket ) break; - - // each waiter should go to sleep at least once - unsigned nw = ++n_waiters; - for( ;; ) { - // update to max_waitq_length - if( nw>max_waitq_length ) max_waitq_length = nw; - ++n_visit_to_waitq; - test_cv.wait( ul ); - // if( ret==false ) ++n_timedout; - ASSERT( ul, "mutex should have been locked" ); - --n_waiters; - if( signaled_ticket==my_ticket ) { - wokeup_ticket = my_ticket; - break; - } - if( n_waiters>0 ) - test_cv.notify_one(); - nw = ++n_waiters; // update to max_waitq_length occurs above - } - - ul.unlock(); - __TBB_Yield(); // give other threads a chance to run. - } - } - ++n_done; - spin_wait_until_eq( n_done, nthread ); - ASSERT( n_signaled==max_ticket, "incorrect number of notifications sent" ); - } -}; - -struct TestPredicate1 { - int target; - TestPredicate1( int i_ ) : target(i_) {} - bool operator()( ) { return signaled_ticket==target; } -}; - -template<typename M> -struct WorkForCondVarWaitPredAndNotifyAll: NoAssign { - int nthread; - condition_variable& test_cv; - M& my_mtx; - int multiple; - WorkForCondVarWaitPredAndNotifyAll( int n_, condition_variable& cv_, M& mtx_, int m_ ) : - nthread(n_), test_cv(cv_), my_mtx(mtx_), multiple(m_) {} - void operator()( int tid ) const { - if( tid&1 ) { - while( ticket_for_sleep<max_ticket ) { - unique_lock<M> ul( my_mtx, defer_lock ); - // exercise wait part - int my_ticket = ++ticket_for_sleep; // grab my ticket - if( my_ticket>max_ticket ) - break; - - ul.lock(); - ++n_visit_to_waitq; - unsigned nw = ++n_waiters; - if( nw>max_waitq_length ) max_waitq_length = nw; - test_cv.wait( ul, TestPredicate1( my_ticket ) ); - wokeup_ticket = my_ticket; - --n_waiters; - ASSERT( ul, "mutex should have been locked" ); - ul.unlock(); - - __TBB_Yield(); // give other threads a chance to run. 
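#if 0 // Editor's sketch, not part of the original test: the predicate overload
      // of wait() used just above is specified to behave like an explicit
      // re-check loop, which is why a spurious wakeup, or a notification meant
      // for a different ticket, cannot break the handshake. Spelled out:
      while( signaled_ticket!=my_ticket )  // re-test the predicate after every wakeup
          test_cv.wait( ul );              // atomically releases ul and blocks
#endif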
- } - } else { - // exercise signal part - while( ticket_for_wakeup<max_ticket ) { - int my_ticket = ++ticket_for_wakeup; // atomically grab the next ticket - if( my_ticket>max_ticket ) - break; - - for( ;; ) { - unique_lock<M> ul( my_mtx ); - if( n_waiters>0 && my_ticket<=ticket_for_sleep && my_ticket==(wokeup_ticket+1) ) { - signaled_ticket = my_ticket; - test_cv.notify_all(); - ++n_signaled; - ul.unlock(); - break; - } - ul.unlock(); - __TBB_Yield(); - } - - // give waiters time to go to sleep. - for( int m=0; m<long_delay*multiple; ++m ) - __TBB_Yield(); - } - } - ++n_done; - spin_wait_until_eq( n_done, nthread ); - ASSERT( n_signaled==max_ticket, "incorrect number of notifications sent" ); - } -}; - -void InitGlobalCounters() -{ - ticket_for_sleep = ticket_for_wakeup = signaled_ticket = wokeup_ticket = 0; - n_waiters = 0; - n_signaled = 0; - n_done = n_done_1 = n_done_2 = 0; - n_visit_to_waitq = 0; - n_timed_out = 0; -} - -template<typename M> -void TestConditionVariable( const char* name, int nthread ) -{ - REMARK("testing %s in TestConditionVariable\n",name); - Counter<M> counter; - M mtx; - - ASSERT( nthread>1, "at least two threads are needed for testing condition_variable" ); - REMARK(" - constructor\n" ); - // Test constructor. - { - condition_variable cv1; -#if _WIN32||_WIN64 - condition_variable::native_handle_type handle = cv1.native_handle(); - ASSERT( uintptr_t(&handle->cv_event)==uintptr_t(&handle->cv_native), NULL ); -#endif - M mtx1; - barrier = 0; - NativeParallelFor( 2, WorkForCondVarCtor<M>( cv1, mtx1 ) ); - } - - REMARK(" - destructor\n" ); - // Test destructor. - { - M mtx2; - test_cv = NULL; - n_waiters = 0; - NativeParallelFor( nthread, WorkForCondVarDtor<M>( nthread, mtx2 ) ); - } - - REMARK(" - timed_wait (i.e., wait_for)\n"); - // Test timed wait. - { - condition_variable cv_tw; - M mtx_tw; - barrier = 0; - InitGlobalCounters(); - int nthr = nthread>4?4:nthread; - NativeParallelFor( nthr, WorkForCondVarTimedWait<M>( nthr, cv_tw, mtx_tw ) ); - } - - REMARK(" - wait with notify_one\n"); - // Test wait and notify_one - do { - condition_variable cv3; - M mtx3; - InitGlobalCounters(); - NativeParallelFor( nthread, WorkForCondVarWaitAndNotifyOne<M>( nthread, cv3, mtx3 ) ); - } while( n_visit_to_waitq==0 || max_waitq_length==0 ); - - REMARK(" - predicated wait with notify_all\n"); - // Test wait_pred and notify_all - int delay_multiple = 1; - do { - condition_variable cv4; - M mtx4; - InitGlobalCounters(); - NativeParallelFor( nthread, WorkForCondVarWaitPredAndNotifyAll<M>( nthread, cv4, mtx4, delay_multiple ) ); - if( max_waitq_length<unsigned(nthread/2) ) - ++delay_multiple; - } while( n_visit_to_waitq<=0 || max_waitq_length<unsigned(nthread/2) ); -} - -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN -static tbb::atomic<int> err_count; - -#define TRY_AND_CATCH_RUNTIME_ERROR(op,msg) \ - try { \ - op; \ - ++err_count; \ - } catch( std::runtime_error& e ) {ASSERT( strstr(e.what(), msg) , NULL );} catch(...) 
{++err_count;} - -template<typename M> -void TestUniqueLockException( const char * name ) { - REMARK("testing %s TestUniqueLockException\n",name); - M mtx; - unique_lock<M> ul_0; - err_count = 0; - - TRY_AND_CATCH_RUNTIME_ERROR( ul_0.lock(), "Operation not permitted" ); - TRY_AND_CATCH_RUNTIME_ERROR( ul_0.try_lock(), "Operation not permitted" ); - - unique_lock<M> ul_1( mtx ); - - TRY_AND_CATCH_RUNTIME_ERROR( ul_1.lock(), "Resource deadlock" ); - TRY_AND_CATCH_RUNTIME_ERROR( ul_1.try_lock(), "Resource deadlock" ); - - ul_1.unlock(); - TRY_AND_CATCH_RUNTIME_ERROR( ul_1.unlock(), "Operation not permitted" ); - - ASSERT( !err_count, "Some exceptions are not thrown or incorrect ones are thrown" ); -} - -template<typename M> -void TestConditionVariableException( const char * name ) { - REMARK("testing %s in TestConditionVariableException; yet to be implemented\n",name); -} -#endif /* TBB_USE_EXCEPTIONS */ - -template<typename Mutex, typename RecursiveMutex> -void DoCondVarTest() -{ - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK( "testing with %d threads\n", p ); - TestLocks<Mutex>( "mutex", p ); - TestLocks<RecursiveMutex>( "recursive_mutex", p ); - - if( p<=1 ) continue; - - // for testing condition_variable, at least one sleeper and one notifier are needed - TestConditionVariable<Mutex>( "mutex", p ); - } -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - REPORT("Known issue: exception handling tests are skipped.\n"); -#elif TBB_USE_EXCEPTIONS - TestUniqueLockException<Mutex>( "mutex" ); - TestUniqueLockException<RecursiveMutex>( "recursive_mutex" ); - TestConditionVariableException<Mutex>( "mutex" ); -#endif /* TBB_USE_EXCEPTIONS */ -} diff --git a/src/tbb/src/test/test_container_move_support.h b/src/tbb/src/test/test_container_move_support.h deleted file mode 100644 index 6c0b69daa..000000000 --- a/src/tbb/src/test/test_container_move_support.h +++ /dev/null @@ -1,832 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_test_container_move_support_H -#define __TBB_test_container_move_support_H - -#include "harness.h" -#include "harness_assert.h" -#include "harness_allocator.h" -#include "tbb/atomic.h" -#include "tbb/aligned_space.h" -#include <stdexcept> - -tbb::atomic<size_t> FooCount; -size_t MaxFooCount = 0; - -//! Problem size -const size_t N = 500000; - -//! Exception for concurrent_container -class Foo_exception : public std::bad_alloc { -public: - virtual const char *what() const throw() { return "out of Foo limit"; } - virtual ~Foo_exception() throw() {} -}; - -static const intptr_t initial_value_of_bar = 42; - -struct Foo { - intptr_t my_bar; -public: - enum State { - ZeroInitialized =0, - DefaultInitialized =0xDEFAUL, - DirectInitialized =0xD1111, - CopyInitialized =0xC0314, - MoveInitialized =0xAAAAA, - Assigned =0x11AED, - MoveAssigned =0x22AED, - MovedFrom =0xFFFFF, - Destroyed =0xDEADF00 - } state; - bool is_valid() const { - return state == DefaultInitialized || state == DirectInitialized || state == CopyInitialized - || state == MoveInitialized || state == Assigned || state == MoveAssigned || state == MovedFrom; - } - bool is_valid_or_zero() const { - return is_valid()||(state==ZeroInitialized && !my_bar); - } - intptr_t& zero_bar() { - ASSERT( is_valid_or_zero(), NULL ); - return my_bar; - } - intptr_t zero_bar() const { - ASSERT( is_valid_or_zero(), NULL ); - return my_bar; - } - intptr_t& bar() { - ASSERT( is_valid(), NULL ); - return my_bar; - } - intptr_t bar() const { - ASSERT( is_valid(), NULL ); - return my_bar; - } - operator intptr_t() const{ return this->bar();} - Foo( intptr_t barr ) { - my_bar = barr; - if(MaxFooCount && FooCount >= MaxFooCount) - __TBB_THROW( Foo_exception() ); - FooCount++; - state = DirectInitialized; - } - Foo() { - my_bar = initial_value_of_bar; - if(MaxFooCount && FooCount >= MaxFooCount) - __TBB_THROW( Foo_exception() ); - FooCount++; - state = DefaultInitialized; - } - Foo( const Foo& foo ) { - my_bar = foo.my_bar; - ASSERT( foo.is_valid_or_zero(), "bad source for copy" ); - if(MaxFooCount && FooCount >= MaxFooCount) - __TBB_THROW( Foo_exception() ); - FooCount++; - state = CopyInitialized; - } -#if __TBB_CPP11_RVALUE_REF_PRESENT - Foo( Foo&& foo ) { - my_bar = foo.my_bar; - ASSERT( foo.is_valid_or_zero(), "bad source for move" ); - if(MaxFooCount && FooCount >= MaxFooCount) - __TBB_THROW( Foo_exception() ); - FooCount++; - state = MoveInitialized; - foo.state = MovedFrom; - //TODO: consider not using constant here, instead something like ~my_bar - foo.my_bar = -1; - } -#endif - ~Foo() { - ASSERT( is_valid_or_zero(), NULL ); - my_bar = ~initial_value_of_bar; - if(state != ZeroInitialized) --FooCount; - state = Destroyed; - } - bool operator==(const Foo &f) const { - ASSERT( is_valid_or_zero(), "comparing invalid objects ?" ); - ASSERT( f.is_valid_or_zero(), "comparing invalid objects ?" ); - return my_bar == f.my_bar; - } - bool operator<(const Foo &f) const { - ASSERT( is_valid_or_zero(), "comparing invalid objects ?" ); - ASSERT( f.is_valid_or_zero(), "comparing invalid objects ?" 
); - return my_bar < f.my_bar; - } - bool is_const() const {return true;} - bool is_const() {return false;} -protected: - char reserve[1]; - Foo& operator=( const Foo& x ) { - ASSERT( x.is_valid_or_zero(), "bad source for assignment" ); - ASSERT( is_valid_or_zero(), NULL ); - my_bar = x.my_bar; - state = Assigned; - return *this; - } -#if __TBB_CPP11_RVALUE_REF_PRESENT - Foo& operator=( Foo&& x ) { - ASSERT( x.is_valid_or_zero(), "bad source for assignment" ); - ASSERT( is_valid_or_zero(), NULL ); - my_bar = x.my_bar; - x.state = MovedFrom; - state = MoveAssigned; - x.my_bar = -1; - return *this; - } -#endif -}; - -struct FooWithAssign: public Foo { - FooWithAssign() : Foo(){} - FooWithAssign(intptr_t barr) : Foo(barr){} - FooWithAssign(FooWithAssign const& f) : Foo(f) {} - FooWithAssign& operator=(FooWithAssign const& f) { return static_cast<FooWithAssign&>(Foo::operator=(f)); } - - -#if __TBB_CPP11_RVALUE_REF_PRESENT - FooWithAssign(FooWithAssign && f) : Foo(std::move(f)) {} - FooWithAssign& operator=(FooWithAssign && f) { return static_cast<FooWithAssign&>(Foo::operator=(std::move(f))); } -#endif -}; - -template<typename FooIteratorType> -class FooIteratorBase { -protected: - intptr_t x_bar; -private: - FooIteratorType& as_derived(){ return *static_cast<FooIteratorType*>(this);} -public: - FooIteratorBase(intptr_t x) { - x_bar = x; - } - FooIteratorType &operator++() { - x_bar++; return as_derived(); - } - FooIteratorType operator++(int) { - FooIteratorType tmp(as_derived()); x_bar++; return tmp; - } - friend bool operator==(const FooIteratorType & lhs, const FooIteratorType & rhs) { return lhs.x_bar == rhs.x_bar; } - friend bool operator!=(const FooIteratorType & lhs, const FooIteratorType & rhs) { return !(lhs == rhs); } -}; - -class FooIterator: public std::iterator<std::input_iterator_tag,FooWithAssign>, public FooIteratorBase<FooIterator> { -public: - FooIterator(intptr_t x) : FooIteratorBase<FooIterator>(x) {} - - FooWithAssign operator*() { - return FooWithAssign(x_bar); - } -}; - -class FooPairIterator: public std::iterator<std::input_iterator_tag, std::pair<FooWithAssign,FooWithAssign> >, public FooIteratorBase<FooPairIterator> { -public: - FooPairIterator(intptr_t x) : FooIteratorBase<FooPairIterator>(x) {} - - std::pair<FooWithAssign,FooWithAssign> operator*() { - FooWithAssign foo; foo.bar() = x_bar; - - return std::make_pair(foo, foo); - } -}; - -namespace FooTests{ - template<typename Foo_type> - void TestDefaultConstructor(){ - Foo_type src; - ASSERT(src.state == Foo::DefaultInitialized, "incorrect state for default constructed Foo (derived) ?"); - } - - template<typename Foo_type> - void TestDirectConstructor(){ - Foo_type src(1); - ASSERT(src.state == Foo::DirectInitialized, "incorrect state for direct constructed Foo (derived) ?"); - } - -#if __TBB_CPP11_RVALUE_REF_PRESENT - template<typename Foo_type> - void TestMoveConstructor(){ - Foo_type src; - Foo_type dst(std::move(src)); - ASSERT(dst.state == Foo::MoveInitialized, "incorrect state for Move constructed Foo ?"); - ASSERT(src.state == Foo::MovedFrom, "incorrect state for Move from Foo ?"); - } - - template<typename Foo_type> - void TestMoveAssignOperator(){ - Foo_type src; - Foo_type dst; - dst = std::move(src); - - ASSERT(dst.state == Foo::MoveAssigned, "incorrect state for Move Assigned Foo ?"); - ASSERT(src.state == Foo::MovedFrom, "incorrect state for Moved from Foo ?"); - } - -#endif -} - -void TestFoo(){ - using namespace FooTests; - TestDefaultConstructor<Foo>(); - TestDefaultConstructor<FooWithAssign>(); - 
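#if 0 // Editor's sketch, not part of the original header: FooIterator defined
      // above is a counting generator -- dereferencing materializes
      // FooWithAssign(x) on the fly -- so a half-open iterator pair can
      // populate a container with the values 0 .. n-1 without building an
      // intermediate array first:
      std::vector<FooWithAssign> v( FooIterator(0), FooIterator(5) );
      ASSERT( v.size()==5 && v[3].bar()==3, "generator iterator misbehaved" );
#endif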
TestDirectConstructor<Foo>(); - TestDirectConstructor<FooWithAssign>(); -#if __TBB_CPP11_RVALUE_REF_PRESENT - TestMoveConstructor<Foo>(); - TestMoveConstructor<FooWithAssign>(); - TestMoveAssignOperator<FooWithAssign>(); -#endif -} - - -//TODO: replace _IN_TEST with separately defined macro IN_TEST(msg,test_name) -#define ASSERT_IN_TEST(p,message,test_name) ASSERT(p, (std::string(test_name) + ": " + message).c_str()); -//TODO: move to harness_assert -#define ASSERT_THROWS_IN_TEST(expression, exception_type, message, test_name) \ - try{ \ - expression; \ - ASSERT_IN_TEST(false, "should throw an exception", test_name); \ - }catch(exception_type &){ \ - }catch(...){ASSERT_IN_TEST(false, "unexpected exception", test_name);} \ - -#define ASSERT_THROWS(expression, exception_type, message) ASSERT_THROWS_IN_TEST(expression, exception_type, message, "") - -template<Foo::State desired_stated> -bool is_state(Foo const& f){ return f.state == desired_stated;} - -template<> -bool is_state<Foo::ZeroInitialized>(Foo const& f){ return f.state == Foo::ZeroInitialized && !f.zero_bar() ;} - -template<Foo::State desired_stated> -struct is_state_f { - bool operator()(Foo const& f){ return is_state<desired_stated>(f); } - //TODO: cu_map defines key as a const thus by default it is not moved, instead it is copied. Investigate how std::unordered_map behaves - bool operator()(std::pair<const FooWithAssign, FooWithAssign> const& p){ return /*is_state<desired_stated>(p.first) && */is_state<desired_stated>(p.second); } -}; - -template<typename iterator, typename unary_predicate> -bool all_of(iterator begin, iterator const& end, unary_predicate p){ - for (; begin != end; ++begin){ - if ( !p(*begin)) return false; - } - return true; -} - -template<typename container, typename unary_predicate> -bool all_of(container const& c, unary_predicate p){ - return ::all_of( c.begin(), c.end(), p ); -} - -void TestAllOf(){ - Foo foos[] = {Foo(), Foo(), Foo()}; - ASSERT(::all_of(foos, Harness::end(foos), &is_state<Foo::DefaultInitialized>), "all_of returned false while true expected"); - ASSERT(! 
::all_of(foos, Harness::end(foos), &is_state<Foo::CopyInitialized>), "all_of returned true while false expected "); -} - -template<typename static_counter_allocator_type> -struct track_allocator_memory: NoCopy{ - typedef typename static_counter_allocator_type::counters_t counters_t; - - counters_t previous_state; - const char* const test_name; - track_allocator_memory(const char* a_test_name): test_name(a_test_name) { static_counter_allocator_type::init_counters(); } - ~track_allocator_memory(){verify_no_allocator_memory_leaks();} - - void verify_no_allocator_memory_leaks() const{ - ASSERT_IN_TEST( static_counter_allocator_type::items_allocated == static_counter_allocator_type::items_freed, "memory leak?", test_name ); - ASSERT_IN_TEST( static_counter_allocator_type::allocations == static_counter_allocator_type::frees, "memory leak?", test_name ); - } - void save_allocator_counters(){ previous_state = static_counter_allocator_type::counters(); } - void verify_no_more_than_x_memory_items_allocated(size_t expected_number_of_items_to_allocate){ - counters_t now = static_counter_allocator_type::counters(); - ASSERT_IN_TEST( (now.items_allocated - previous_state.items_allocated) <= expected_number_of_items_to_allocate, "More than expected memory allocated?", test_name ); - } -}; - -#include <vector> -template<int line_n> -struct track_foo_count: NoCopy{ - bool active; - size_t previous_state; - const char* const test_name; - track_foo_count(const char* a_test_name): active(true), previous_state(FooCount), test_name(a_test_name) { } - ~track_foo_count(){ - if (active){ - this->verify_no_undestroyed_foo_left_and_dismiss(); - } - } - - //TODO: ideally in most places this check should be replaced with "no foo created or destroyed" - //TODO: deactivation of the check seems like a hack - void verify_no_undestroyed_foo_left_and_dismiss() { - ASSERT_IN_TEST( FooCount == previous_state, "Some instances of Foo were not destroyed ?", test_name ); - active = false; - } -}; - -//TODO: inactive mode in these limiters is a temporary workaround for usage in exception type loop of TestException - -struct limit_foo_count_in_scope: NoCopy{ - size_t previous_state; - bool active; - limit_foo_count_in_scope(size_t new_limit, bool an_active = true) : previous_state(MaxFooCount), active(an_active) { - if (active){ - MaxFooCount = new_limit; - } - } - ~limit_foo_count_in_scope(){ - if (active) { - MaxFooCount = previous_state; - } - } -}; - -template<typename static_counter_allocator_type> -struct limit_allocated_items_in_scope: NoCopy{ - size_t previous_state; - bool active; - limit_allocated_items_in_scope(size_t new_limit, bool an_active = true) : previous_state(static_counter_allocator_type::max_items), active(an_active) { - if (active){ - static_counter_allocator_type::set_limits(new_limit); - } - } - ~limit_allocated_items_in_scope(){ - if (active) { - static_counter_allocator_type::set_limits(previous_state); - } - } -}; - -struct default_container_traits{ - template <typename container_type, typename iterator_type> - static container_type& construct_container(tbb::aligned_space<container_type> & storage, iterator_type begin, iterator_type end){ - new (storage.begin()) container_type(begin, end); - return *storage.begin(); - } - - template <typename container_type, typename iterator_type, typename allocator_type> - static container_type& construct_container(tbb::aligned_space<container_type> & storage, iterator_type begin, iterator_type end, allocator_type const& a){ - new (storage.begin()) 
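/* Editor's note, an illustrative aside rather than original text: both
   construct_container overloads rely on placement new into tbb::aligned_space,
   which gives the fixture raw, correctly aligned storage plus full manual
   control of the container's lifetime (move_fixture's destructor below invokes
   ~container_t() by hand for the same reason). The idiom in isolation, with
   `T` and `args` as stand-in names:

       tbb::aligned_space<T> buf;          // raw storage, aligned for T
       T* p = new (buf.begin()) T(args);   // construct in place, no heap call
       p->~T();                            // must be destroyed explicitly
*/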
container_type(begin, end, a); - return *storage.begin(); - } -}; - -#if __TBB_CPP11_RVALUE_REF_PRESENT -template<typename container_traits, typename allocator_t> -struct move_fixture : NoCopy{ - typedef typename container_traits:: template apply<FooWithAssign, allocator_t>::type container_t; - typedef typename container_traits::init_iterator_type init_iterator_type; - enum {default_container_size = 100}; - const size_t container_size; - tbb::aligned_space<container_t> source_storage; - container_t & source; - //check that location of _all_ elements of container under test is changed/unchanged - std::vector<void*> locations; - - ~move_fixture(){ - source_storage.begin()->~container_t(); - } - - const char* const test_name; - move_fixture(const char* a_test_name, size_t a_container_size = default_container_size ) - : container_size(a_container_size) - , source(container_traits::construct_container(source_storage, init_iterator_type(0), init_iterator_type(container_size))) - , locations(container_size) - , test_name(a_test_name) - { - init("move_fixture::move_fixture()"); - } - - move_fixture(const char* a_test_name, allocator_t const& a, size_t a_container_size = default_container_size) - : container_size(a_container_size) - , source(container_traits::construct_container(source_storage, init_iterator_type(0), init_iterator_type(container_size), a)) - , locations(container_size) - , test_name(a_test_name) - { - init("move_fixture::move_fixture(allocator_t const& a)"); - } - - void init(const std::string& ctor_name){ - verify_size(source, ctor_name.c_str()); - verify_content_equal_to_source(source, "did not properly initialized source? Or can not check container for equality with expected ?: " + ctor_name); - verify_size(locations, "move_fixture:init "); - for (typename container_t::iterator it = source.begin(); it != source.end(); ++it){locations[std::distance(source.begin(), it)] = & *it;} - } - - bool content_location_unchanged(container_t const& dst){ - struct is_same_location{ - static bool compare(typename container_t::value_type const& v, void* location){ return &v == location;} - }; - - return std::equal(dst.begin(), dst.end(), locations.begin(), &is_same_location::compare); - } - - bool content_location_changed(container_t const& dst){ - struct is_not_same_location{ - static bool compare(typename container_t::value_type const& v, void* location){ return &v != location;} - }; - - return std::equal(dst.begin(), dst.end(), locations.begin(), &is_not_same_location::compare); - } - - template<typename container_type> - void verify_size(container_type const& dst, const char* a_test_name){ - ASSERT_IN_TEST(container_size == dst.size(), "Did not construct all the elements or allocate enough memory?, while should ?", a_test_name); - } - - void verify_content_equal_to_source(container_t const& dst, const std::string& msg){ - ASSERT_IN_TEST( container_traits::equal(dst, init_iterator_type(0), init_iterator_type(container_size)), msg.c_str(), test_name); - } - - void verify_content_equal_to_source(container_t const& dst){ - verify_content_equal_to_source(dst, "content changed during move/copy ?"); - } - - void verify_content_equal_to_source(container_t const& dst, size_t number_of_constructed_items){ - ASSERT_IN_TEST(number_of_constructed_items <= dst.size(), "incorrect test expectation/input parameters?", test_name); - ASSERT_IN_TEST(std::equal(dst.begin(), dst.begin() + number_of_constructed_items, init_iterator_type(0)), "content changed during move/copy ?", test_name); - } - - //TODO: better 
name ? e.g. "content_was_stolen" - void verify_content_shallow_moved(container_t const& dst){ - verify_size(dst, test_name); - ASSERT_IN_TEST(content_location_unchanged(dst), "container move constructor changed element locations, while it should not", test_name); - ASSERT_IN_TEST(source.empty(), "Moved-from container instance should not contain any elements", test_name); - verify_content_equal_to_source(dst); - } - - //TODO: better name ? e.g. "element move" - void verify_content_deep_moved(container_t const& dst){ - verify_size(dst, test_name); - ASSERT_IN_TEST(content_location_changed(dst), "container did not change element locations for unequal allocators, while it should", test_name); - ASSERT_IN_TEST(all_of(dst, is_state_f<Foo::MoveInitialized>()), "container did not move construct some elements?", test_name); - ASSERT_IN_TEST(all_of(source, is_state_f<Foo::MovedFrom>()), "container did not move all the elements?", test_name); - verify_content_equal_to_source(dst); - } - - void verify_part_of_content_deep_moved(container_t const& dst, size_t number_of_constructed_items){ - ASSERT_IN_TEST(content_location_changed(dst), "Vector did not change element locations for unequal allocators, while it should", test_name); - ASSERT_IN_TEST(::all_of(dst.begin(), dst.begin() + number_of_constructed_items, &is_state<Foo::MoveInitialized>), "Vector did not move construct some elements?", test_name); - if (dst.size() != number_of_constructed_items) { - ASSERT_IN_TEST(::all_of(dst.begin() + number_of_constructed_items, dst.end(), &is_state<Foo::ZeroInitialized> ), "Failed to zero-initialize items left not constructed after the exception?", test_name ); - } - verify_content_equal_to_source(dst, number_of_constructed_items); - - ASSERT_IN_TEST(::all_of(source.begin(), source.begin() + number_of_constructed_items, &is_state<Foo::MovedFrom>), "Vector did not move all the elements?", test_name); - ASSERT_IN_TEST(::all_of(source.begin() + number_of_constructed_items, source.end(), std::not1(std::ptr_fun(&is_state<Foo::MovedFrom>))), "Vector changed elements in source after exception point?", test_name); - } -}; - - -template <typename T, typename pocma = Harness::false_type> -struct arena_allocator_fixture : NoCopy{ - typedef arena<T, pocma> allocator_t; - typedef typename allocator_t::arena_data_t arena_data_t; - - std::vector<tbb::aligned_space<T, 1> > storage; - arena_data_t arena_data; - allocator_t allocator; - - arena_allocator_fixture(size_t size_to_allocate) - : storage(size_to_allocate) - , arena_data((*storage.begin()).begin(), storage.size()) - , allocator(arena_data) - {} -}; - -//TODO: add ability to inject debug_allocator into stateful_allocator_fixture::allocator_t -template <typename T, typename pocma = Harness::false_type> -struct two_memory_arenas_fixture : NoCopy{ - typedef arena_allocator_fixture<T, pocma> arena_fixture_t; - typedef typename arena_fixture_t::allocator_t allocator_t; - - arena_fixture_t source_arena_fixture; - arena_fixture_t dst_arena_fixture; - - allocator_t& source_allocator; - allocator_t& dst_allocator; - - const char* test_name; - - two_memory_arenas_fixture(size_t size_to_allocate, const char* a_test_name) - : source_arena_fixture(size_to_allocate) - , dst_arena_fixture(size_to_allocate) - , source_allocator(source_arena_fixture.allocator) - , dst_allocator(dst_arena_fixture.allocator) - , test_name(a_test_name) - { - ASSERT_IN_TEST(&*source_arena_fixture.storage.begin() != &*dst_arena_fixture.storage.begin(), "source and destination arena 
instances should use different memory regions", test_name); - ASSERT_IN_TEST(source_allocator != dst_allocator, "arenas using different memory regions should not compare equal", test_name); - ASSERT_IN_TEST(pocma::value == tbb::internal::allocator_traits<allocator_t>::propagate_on_container_move_assignment::value, "This test requires proper allocator_traits support", test_name); - - //Some ISO C++11 allocator requirements enforcement: - allocator_t source_allocator_copy(source_allocator), dst(dst_allocator); - allocator_t source_previous_state(source_allocator); - ASSERT_IN_TEST(source_previous_state == source_allocator, "Copy of allocator should compare equal to its source", test_name); - dst = std::move(source_allocator_copy); - ASSERT_IN_TEST(dst == source_previous_state, "Move initialized instance of allocator should compare equal to its source state before movement", test_name); - } - - void verify_allocator_was_moved(const allocator_t& result_allocator){ - //TODO: add assert that allocator move constructor/assignment operator was called - ASSERT_IN_TEST(result_allocator == source_allocator, "allocator was not moved ?", test_name); - ASSERT_IN_TEST(result_allocator != dst_allocator, "allocator was not moved ?", test_name); - } - -// template <typename any_allocator_t> -// void verify_allocator_was_moved(const any_allocator_t& ){} -}; - -template <typename pocma = Harness::false_type> -struct std_stateful_allocator : NoCopy { - typedef stateful_allocator<FooWithAssign, pocma> allocator_t; - - allocator_t source_allocator; - allocator_t dst_allocator; - - const char* test_name; - - std_stateful_allocator(size_t , const char* a_test_name) - : test_name(a_test_name) - {} - - template <typename any_allocator_t> - void verify_allocator_was_moved(const any_allocator_t& ){} - -}; - -template<typename container_traits, typename pocma = Harness::false_type> -struct default_stateful_fixture_make_helper{ -// typedef std_stateful_allocator<pocma> allocator_fixture_t; - typedef two_memory_arenas_fixture<FooWithAssign, pocma> allocator_fixture_t; - typedef static_shared_counting_allocator<Harness::int_to_type<__LINE__>, typename allocator_fixture_t::allocator_t, std::size_t> allocator_t; - - typedef move_fixture<container_traits, allocator_t> move_fixture_t; - typedef track_allocator_memory<allocator_t> no_leaks_t; - typedef track_foo_count<__LINE__> no_foo_leaks_in_fixture_t; - typedef track_foo_count<__LINE__> no_foo_leaks_in_test_t; - - struct default_stateful_fixture : no_leaks_t, private no_foo_leaks_in_fixture_t, allocator_fixture_t, move_fixture_t, no_foo_leaks_in_test_t { - - default_stateful_fixture(const char* a_test_name) - : no_leaks_t(a_test_name) - , no_foo_leaks_in_fixture_t(a_test_name) - //TODO: calculate needed size more accurately - //allocate extra storage to handle the case when the copy constructor is called instead of the move one - , allocator_fixture_t(2*4 * move_fixture_t::default_container_size, a_test_name) - , move_fixture_t(a_test_name, allocator_fixture_t::source_allocator) - , no_foo_leaks_in_test_t(a_test_name) - { - no_leaks_t::save_allocator_counters(); - } - - void verify_no_more_than_x_memory_items_allocated(){ - no_leaks_t::verify_no_more_than_x_memory_items_allocated(container_traits::expected_number_of_items_to_allocate_for_steal_move); - } - using no_foo_leaks_in_test_t::verify_no_undestroyed_foo_left_and_dismiss; - typedef typename move_fixture_t::container_t::allocator_type allocator_t; - }; - - typedef default_stateful_fixture type; -}; - -template<typename 
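/* Editor's note, an illustrative aside rather than original text: the move
   tests that follow all hinge on the allocator trait POCMA
   (propagate_on_container_move_assignment), mirrored by the `pocma` typedefs
   above. The decision rule a conforming container implements is roughly:

       if( pocma::value || dst.get_allocator()==src.get_allocator() )
           steal the source's buffer;   // shallow move: element addresses unchanged
       else
           move element by element;     // deep move: Foo records MoveInitialized/MovedFrom

   which is exactly the "shallow moved" versus "deep moved" distinction the
   fixture verifiers above encode. */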
container_traits> -void TestMoveConstructorSingleArgument(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestMoveConstructorSingleArgument"); - - container_t dst(std::move(fixture.source)); - - fixture.verify_content_shallow_moved(dst); - fixture.verify_allocator_was_moved(dst.get_allocator()); - fixture.verify_no_more_than_x_memory_items_allocated(); - fixture.verify_no_undestroyed_foo_left_and_dismiss(); -} - -template<typename container_traits> -void TestMoveConstructorWithEqualAllocator(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestMoveConstructorWithEqualAllocator"); - - container_t dst(std::move(fixture.source), fixture.source.get_allocator()); - - fixture.verify_content_shallow_moved(dst); - fixture.verify_no_more_than_x_memory_items_allocated(); - fixture.verify_no_undestroyed_foo_left_and_dismiss(); -} - -template<typename container_traits> -void TestMoveConstructorWithUnEqualAllocator(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestMoveConstructorWithUnEqualAllocator"); - - container_t dst(std::move(fixture.source), fixture.dst_allocator); - - fixture.verify_content_deep_moved(dst); -} - -template<typename container_traits> -void TestMoveConstructor(){ - TestMoveConstructorSingleArgument<container_traits>(); - TestMoveConstructorWithEqualAllocator<container_traits>(); - TestMoveConstructorWithUnEqualAllocator<container_traits>(); -} - -template<typename container_traits> -void TestMoveAssignOperatorPOCMAStateful(){ - typedef typename default_stateful_fixture_make_helper<container_traits, Harness::true_type>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestMoveAssignOperatorPOCMAStateful"); - - container_t dst(fixture.dst_allocator); - - fixture.save_allocator_counters(); - - dst = std::move(fixture.source); - - fixture.verify_content_shallow_moved(dst); - fixture.verify_allocator_was_moved(dst.get_allocator()); - fixture.verify_no_more_than_x_memory_items_allocated(); - fixture.verify_no_undestroyed_foo_left_and_dismiss(); -} - -template<typename container_traits> -void TestMoveAssignOperatorPOCMANonStateful(){ - typedef std::allocator<FooWithAssign> allocator_t; - - typedef move_fixture<container_traits, allocator_t> fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestMoveAssignOperatorPOCMANonStateful"); - - ASSERT(fixture.source.get_allocator() == allocator_t(), "Incorrect test setup: allocator is stateful while should not?"); - - container_t dst; - dst = std::move(fixture.source); - - fixture.verify_content_shallow_moved(dst); - //TODO: add an assert that allocator was "moved" when POCMA is set -} - -template<typename container_traits> -void TestMoveAssignOperatorNotPOCMAWithUnEqualAllocator(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestMoveAssignOperatorNotPOCMAWithUnEqualAllocator"); - - container_t dst(fixture.dst_allocator); - dst = std::move(fixture.source); - - fixture.verify_content_deep_moved(dst); -} - -template<typename container_traits> -void 
TestMoveAssignOperatorNotPOCMAWithEqualAllocator(){ - typedef typename default_stateful_fixture_make_helper<container_traits, Harness::false_type>::type fixture_t; - typedef typename fixture_t::container_t container_t; - fixture_t fixture("TestMoveAssignOperatorNotPOCMAWithEqualAllocator"); - - container_t dst(fixture.source_allocator); - ASSERT(fixture.source.get_allocator() == dst.get_allocator(), "Incorrect test setup: allocators are not equal while should be?"); - - fixture.save_allocator_counters(); - - dst = std::move(fixture.source); - - fixture.verify_content_shallow_moved(dst); - fixture.verify_no_more_than_x_memory_items_allocated(); - fixture.verify_no_undestroyed_foo_left_and_dismiss(); -} - -template<typename container_traits> -void TestMoveAssignOperator(){ -#if __TBB_ALLOCATOR_TRAITS_PRESENT - TestMoveAssignOperatorPOCMANonStateful<container_traits>(); - TestMoveAssignOperatorPOCMAStateful<container_traits>(); -#endif - TestMoveAssignOperatorNotPOCMAWithUnEqualAllocator<container_traits>(); - TestMoveAssignOperatorNotPOCMAWithEqualAllocator<container_traits>(); -} - -template<typename container_traits> -void TestConstructorWithMoveIterators(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestConstructorWithMoveIterators"); - - container_t dst(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end()), fixture.dst_allocator); - - fixture.verify_content_deep_moved(dst); -} - -template<typename container_traits> -void TestAssignWithMoveIterators(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - fixture_t fixture("TestAssignWithMoveIterators"); - - container_t dst(fixture.dst_allocator); - dst.assign(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end())); - - fixture.verify_content_deep_moved(dst); -} - -#if TBB_USE_EXCEPTIONS -template<typename container_traits> -void TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - typedef typename container_t::allocator_type allocator_t; - const char* test_name = "TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure"; - fixture_t fixture(test_name); - - limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + fixture.container_size/4); - ASSERT_THROWS_IN_TEST(container_t dst(std::move(fixture.source), fixture.dst_allocator), std::bad_alloc, "", test_name); -} - -//TODO: add tests that verify that stealing move constructors/assign operators does not throw exceptions -template<typename container_traits> -void TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor(){ - typedef typename default_stateful_fixture_make_helper<container_traits>::type fixture_t; - typedef typename fixture_t::container_t container_t; - - const char* test_name = "TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor"; - fixture_t fixture(test_name); - - limit_foo_count_in_scope foo_limit(FooCount + fixture.container_size/4); - ASSERT_THROWS_IN_TEST(container_t dst(std::move(fixture.source), fixture.dst_allocator), std::bad_alloc, "", test_name); -} -#endif /* 
TBB_USE_EXCEPTIONS */ -#endif//__TBB_CPP11_RVALUE_REF_PRESENT - -namespace helper_stuff_tests { - void inline TestArena(){ - typedef int arena_element; - - arena_element arena_storage[10] = {0}; - typedef arena<arena_element> arena_t; - - arena_t::arena_data_t arena_data(arena_storage,Harness::array_length(arena_storage)); - arena_t a(arena_data); - - ASSERT(a.allocate(1) == arena_storage, ""); - ASSERT(a.allocate(2) == &arena_storage[1], ""); - ASSERT(a.allocate(2) == &arena_storage[2+1], ""); - } - - template<typename static_counting_allocator_type> - void inline TestStaticCountingAllocatorRebound(){ - static_counting_allocator_type::set_limits(1); - typedef typename static_counting_allocator_type:: template rebind<std::pair<int,int> >::other rebound_type; - ASSERT(rebound_type::max_items == static_counting_allocator_type::max_items, "rebound allocator should use the same limits"); - static_counting_allocator_type::set_limits(0); - } - - void inline TestStatefulAllocator(){ - stateful_allocator<int> a1,a2; - stateful_allocator<int> copy_of_a1(a1); - ASSERT(a1 != a2,"non_equal_allocator are designed to simulate stateful allocators"); - ASSERT(copy_of_a1 == a1,""); - } -} -struct TestHelperStuff{ - TestHelperStuff(){ - using namespace helper_stuff_tests; - TestFoo(); - TestAllOf(); - TestArena(); - TestStaticCountingAllocatorRebound<static_shared_counting_allocator<int, arena<int> > >(); - TestStatefulAllocator(); - } -}; -static TestHelperStuff TestHelperStuff_s; -#endif /* __TBB_test_container_move_support_H */ diff --git a/src/tbb/src/test/test_continue_node.cpp b/src/tbb/src/test/test_continue_node.cpp deleted file mode 100644 index 37c62ae13..000000000 --- a/src/tbb/src/test/test_continue_node.cpp +++ /dev/null @@ -1,370 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "harness_graph.h" - -#include "tbb/task_scheduler_init.h" - -#define N 1000 -#define MAX_NODES 4 -#define C 8 - -struct empty_no_assign : private NoAssign { - empty_no_assign() {} - empty_no_assign( int ) {} - operator int() { return 0; } -}; - -// A class to use as a fake predecessor of continue_node -struct fake_continue_sender : public tbb::flow::sender<tbb::flow::continue_msg> -{ - // Define implementations of virtual methods that are abstract in the base class - /*override*/ bool register_successor( successor_type& ) { return false; } - /*override*/ bool remove_successor( successor_type& ) { return false; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void internal_add_built_successor( successor_type &) { } - /*override*/void internal_delete_built_successor( successor_type &) { } - /*override*/void copy_successors(std::vector<successor_type *> &) {} - /*override*/size_t successor_count() {return 0;} -#endif -}; - -template< typename InputType > -struct parallel_puts : private NoAssign { - - tbb::flow::receiver< InputType > * const my_exe_node; - - parallel_puts( tbb::flow::receiver< InputType > &exe_node ) : my_exe_node(&exe_node) {} - - void operator()( int ) const { - for ( int i = 0; i < N; ++i ) { - // the nodes will accept all puts - ASSERT( my_exe_node->try_put( InputType() ) == true, NULL ); - } - } - -}; - -template< typename OutputType > -void run_continue_nodes( int p, tbb::flow::graph& g, tbb::flow::continue_node< OutputType >& n ) { - fake_continue_sender fake_sender; - for (size_t i = 0; i < N; ++i) { - n.register_predecessor( fake_sender ); - } - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers]; - harness_graph_executor<tbb::flow::continue_msg, OutputType>::execute_count = 0; - - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( n, receivers[r] ); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(n.successor_count() == (size_t)num_receivers, NULL); - ASSERT(n.predecessor_count() == 0, NULL); - typename tbb::flow::continue_node<OutputType>::successor_vector_type my_succs; - n.copy_successors(my_succs); - ASSERT(my_succs.size() == num_receivers, NULL); -#endif - - NativeParallelFor( p, parallel_puts<tbb::flow::continue_msg>(n) ); - g.wait_for_all(); - - // 2) the nodes will receive puts from multiple predecessors simultaneously, - size_t ec = harness_graph_executor<tbb::flow::continue_msg, OutputType>::execute_count; - ASSERT( (int)ec == p, NULL ); - for (size_t r = 0; r < num_receivers; ++r ) { - size_t c = receivers[r].my_count; - // 3) the nodes will send to multiple successors. 
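#if 0 // Editor's sketch, not part of the original test, of the property the
      // assertion below relies on: a continue_node runs its body once per
      // *complete set* of predecessor signals, not once per signal. With two
      // predecessors, one put on each side yields exactly one execution
      // (add_to_counter is the helper defined further down in this file):
      tbb::flow::graph g2;
      int fired = 0;
      tbb::flow::broadcast_node<tbb::flow::continue_msg> a(g2), b(g2);
      tbb::flow::continue_node<tbb::flow::continue_msg> n2( g2, add_to_counter(fired) );
      tbb::flow::make_edge( a, n2 ); tbb::flow::make_edge( b, n2 );
      a.try_put( tbb::flow::continue_msg() );  // 1 of 2 signals: body must not run yet
      b.try_put( tbb::flow::continue_msg() );  // 2 of 2 signals: body runs once
      g2.wait_for_all();
      ASSERT( fired==1, "continue_node should fire once per complete signal set" );
#endif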
- ASSERT( (int)c == p, NULL ); - } - - for (size_t r = 0; r < num_receivers; ++r ) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - tbb::flow::remove_edge( n, *(my_succs[r]) ); -#else - tbb::flow::remove_edge( n, receivers[r] ); -#endif - } - } -} - -template< typename OutputType, typename Body > -void continue_nodes( Body body ) { - for (int p = 1; p < 2*MaxThread; ++p) { - tbb::flow::graph g; - tbb::flow::continue_node< OutputType > exe_node( g, body ); - run_continue_nodes( p, g, exe_node); - exe_node.try_put(tbb::flow::continue_msg()); - tbb::flow::continue_node< OutputType > exe_node_copy( exe_node ); - run_continue_nodes( p, g, exe_node_copy); - } -} - -const size_t Offset = 123; -tbb::atomic<size_t> global_execute_count; - -template< typename OutputType > -struct inc_functor { - - tbb::atomic<size_t> local_execute_count; - inc_functor( ) { local_execute_count = 0; } - inc_functor( const inc_functor &f ) { local_execute_count = f.local_execute_count; } - void operator=(const inc_functor &f) { local_execute_count = f.local_execute_count; } - - OutputType operator()( tbb::flow::continue_msg ) { - ++global_execute_count; - ++local_execute_count; - return OutputType(); - } - -}; - -template< typename OutputType > -void continue_nodes_with_copy( ) { - - for (int p = 1; p < 2*MaxThread; ++p) { - tbb::flow::graph g; - inc_functor<OutputType> cf; - cf.local_execute_count = Offset; - global_execute_count = Offset; - - tbb::flow::continue_node< OutputType > exe_node( g, cf ); - fake_continue_sender fake_sender; - for (size_t i = 0; i < N; ++i) { - exe_node.register_predecessor( fake_sender ); - } - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers]; - - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( exe_node, receivers[r] ); - } - - NativeParallelFor( p, parallel_puts<tbb::flow::continue_msg>(exe_node) ); - g.wait_for_all(); - - // 2) the nodes will receive puts from multiple predecessors simultaneously, - for (size_t r = 0; r < num_receivers; ++r ) { - size_t c = receivers[r].my_count; - // 3) the nodes will send to multiple successors. 
- ASSERT( (int)c == p, NULL ); - } - } - - // validate that the local body matches the global execute_count and both are correct - inc_functor<OutputType> body_copy = tbb::flow::copy_body< inc_functor<OutputType> >( exe_node ); - const size_t expected_count = p*MAX_NODES + Offset; - size_t global_count = global_execute_count; - size_t inc_count = body_copy.local_execute_count; - ASSERT( global_count == expected_count && global_count == inc_count, NULL ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_reset_bodies); - body_copy = tbb::flow::copy_body< inc_functor<OutputType> >( exe_node ); - inc_count = body_copy.local_execute_count; - ASSERT( Offset == inc_count, "reset(rf_reset_bodies) did not reset functor" ); -#endif - - } -} - -template< typename OutputType > -void run_continue_nodes() { - harness_graph_executor< tbb::flow::continue_msg, OutputType>::max_executors = 0; - #if __TBB_LAMBDAS_PRESENT - continue_nodes<OutputType>( []( tbb::flow::continue_msg i ) -> OutputType { return harness_graph_executor<tbb::flow::continue_msg, OutputType>::func(i); } ); - #endif - continue_nodes<OutputType>( &harness_graph_executor<tbb::flow::continue_msg, OutputType>::func ); - continue_nodes<OutputType>( typename harness_graph_executor<tbb::flow::continue_msg, OutputType>::functor() ); - continue_nodes_with_copy<OutputType>(); -} - -//! Tests limited concurrency cases for nodes that accept data messages -void test_concurrency(int num_threads) { - tbb::task_scheduler_init init(num_threads); - run_continue_nodes<tbb::flow::continue_msg>(); - run_continue_nodes<int>(); - run_continue_nodes<empty_no_assign>(); -} -/* - * Connection of two graphs is not currently supported, but works to some limited extent. - * This test is included to check for backward compatibility. It checks that a continue_node - * with predecessors in two different graphs receives the required - * number of continue messages before it executes. 
- */ -using namespace tbb::flow; - -struct add_to_counter { - int* counter; - add_to_counter(int& var):counter(&var){} - void operator()(continue_msg){*counter+=1;} -}; - -void test_two_graphs(){ - int count=0; - - //graph g with broadcast_node and continue_node - graph g; - broadcast_node<continue_msg> start_g(g); - continue_node<continue_msg> first_g(g, add_to_counter(count)); - - //graph h with broadcast_node - graph h; - broadcast_node<continue_msg> start_h(h); - - //making two edges to first_g from the two graphs - make_edge(start_g,first_g); - make_edge(start_h, first_g); - - //two try_puts from the two graphs - start_g.try_put(continue_msg()); - start_h.try_put(continue_msg()); - g.wait_for_all(); - ASSERT(count==1, "Not all continue messages received"); - - //two try_puts from the graph that doesn't contain the node - count=0; - start_h.try_put(continue_msg()); - start_h.try_put(continue_msg()); - g.wait_for_all(); - ASSERT(count==1, "Not all continue messages received -1"); - - //only one try_put - count=0; - start_g.try_put(continue_msg()); - g.wait_for_all(); - ASSERT(count==0, "Node executed without waiting for all predecessors"); -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -void test_extract() { - int my_count = 0; - tbb::flow::continue_msg cm; - tbb::flow::graph g; - tbb::flow::broadcast_node<tbb::flow::continue_msg> b0(g); - tbb::flow::broadcast_node<tbb::flow::continue_msg> b1(g); - tbb::flow::continue_node<tbb::flow::continue_msg> c0(g, add_to_counter(my_count)); - tbb::flow::queue_node<tbb::flow::continue_msg> q0(g); - - tbb::flow::make_edge(b0, c0); - tbb::flow::make_edge(b1, c0); - tbb::flow::make_edge(c0, q0); - for( int i = 0; i < 2; ++i ) { - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 1, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(c0.predecessor_count() == 2 && c0.successor_count() == 1, "c0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 1 && q0.successor_count() == 0, "q0 has incorrect counts"); - - /* b0 */ - /* \ */ - /* c0 - q0 */ - /* / */ - /* b1 */ - - b0.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(my_count == 0, "continue_node fired too soon"); - b1.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(my_count == 1, "continue_node didn't fire"); - ASSERT(q0.try_get(cm), "continue_node didn't forward"); - - b0.extract(); - - /* b0 */ - /* */ - /* c0 - q0 */ - /* / */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(c0.predecessor_count() == 1 && c0.successor_count() == 1, "c0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 1 && q0.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(tbb::flow::continue_msg()); - b0.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(my_count == 1, "b0 messages being forwarded to continue_node even though it is disconnected"); - b1.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(my_count == 2, "continue_node didn't fire though it has only one predecessor"); - ASSERT(q0.try_get(cm), "continue_node didn't forward second time"); - - c0.extract(); - - /* b0 */ - /* */ - /* c0 q0 */ - /* */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 0, "b1 has 
incorrect counts"); - ASSERT(c0.predecessor_count() == 0 && c0.successor_count() == 0, "c0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(tbb::flow::continue_msg()); - b0.try_put(tbb::flow::continue_msg()); - b1.try_put(tbb::flow::continue_msg()); - b1.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(my_count == 2, "continue didn't fire though it has only one predecessor"); - ASSERT(!q0.try_get(cm), "continue_node forwarded though it shouldn't"); - make_edge(b0, c0); - - /* b0 */ - /* \ */ - /* c0 q0 */ - /* */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 1, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 0, "b1 has incorrect counts"); - ASSERT(c0.predecessor_count() == 1 && c0.successor_count() == 0, "c0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - - b0.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - - ASSERT(my_count == 3, "continue didn't fire though it has only one predecessor"); - ASSERT(!q0.try_get(cm), "continue_node forwarded though it shouldn't"); - - tbb::flow::make_edge(b1, c0); - tbb::flow::make_edge(c0, q0); - my_count = 0; - } -} -#endif - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - test_concurrency(p); - } - test_two_graphs(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_extract(); -#endif - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_critical_section.cpp b/src/tbb/src/test/test_critical_section.cpp deleted file mode 100644 index 8bc8e6cce..000000000 --- a/src/tbb/src/test/test_critical_section.cpp +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// test critical section -// -#include "tbb/critical_section.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/enumerable_thread_specific.h" -#include "tbb/tick_count.h" -#include "harness_assert.h" -#include "harness.h" -#include <math.h> - -#include "harness_barrier.h" -Harness::SpinBarrier sBarrier; -tbb::critical_section cs; -const int MAX_WORK = 300; - -struct BusyBody : NoAssign { - tbb::enumerable_thread_specific<double> &locals; - const int nThread; - const int WorkRatiox100; - int &unprotected_count; - bool test_throw; - - BusyBody( int nThread_, int workRatiox100_, tbb::enumerable_thread_specific<double> &locals_, int &unprotected_count_, bool test_throw_) : - locals(locals_), - nThread(nThread_), - WorkRatiox100(workRatiox100_), - unprotected_count(unprotected_count_), - test_throw(test_throw_) { - sBarrier.initialize(nThread_); - } - - void operator()(const int /* threadID */ ) const { - int nIters = MAX_WORK/nThread; - sBarrier.wait(); - tbb::tick_count t0 = tbb::tick_count::now(); - for(int j = 0; j < nIters; j++) { - - for(int i = 0; i < MAX_WORK * (100 - WorkRatiox100); i++) { - locals.local() += 1.0; - } - cs.lock(); - ASSERT( !cs.try_lock(), "recursive try_lock must fail" ); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - if(test_throw && j == (nIters / 2)) { - bool was_caught = false, - unknown_exception = false; - try { - cs.lock(); - } - catch(tbb::improper_lock& e) { - ASSERT( e.what(), "Error message is absent" ); - was_caught = true; - } - catch(...) { - was_caught = unknown_exception = true; - } - ASSERT(was_caught, "Recursive lock attempt did not throw"); - ASSERT(!unknown_exception, "tbb::improper_lock exception is expected"); - } -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ - for(int i = 0; i < MAX_WORK * WorkRatiox100; i++) { - locals.local() += 1.0; - } - unprotected_count++; - cs.unlock(); - } - locals.local() = (tbb::tick_count::now() - t0).seconds(); - } -}; - -struct BusyBodyScoped : NoAssign { - tbb::enumerable_thread_specific<double> &locals; - const int nThread; - const int WorkRatiox100; - int &unprotected_count; - bool test_throw; - - BusyBodyScoped( int nThread_, int workRatiox100_, tbb::enumerable_thread_specific<double> &locals_, int &unprotected_count_, bool test_throw_) : - locals(locals_), - nThread(nThread_), - WorkRatiox100(workRatiox100_), - unprotected_count(unprotected_count_), - test_throw(test_throw_) { - sBarrier.initialize(nThread_); - } - - void operator()(const int /* threadID */ ) const { - int nIters = MAX_WORK/nThread; - sBarrier.wait(); - tbb::tick_count t0 = tbb::tick_count::now(); - for(int j = 0; j < nIters; j++) { - - for(int i = 0; i < MAX_WORK * (100 - WorkRatiox100); i++) { - locals.local() += 1.0; - } - { - tbb::critical_section::scoped_lock my_lock(cs); - for(int i = 0; i < MAX_WORK * WorkRatiox100; i++) { - locals.local() += 1.0; - } - unprotected_count++; - } - } - locals.local() = (tbb::tick_count::now() - t0).seconds(); - } -}; - -void -RunOneCriticalSectionTest(int nThreads, int csWorkRatio, bool test_throw) { - tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); - tbb::enumerable_thread_specific<double> test_locals; - int myCount = 0; - BusyBody myBody(nThreads, csWorkRatio, test_locals, myCount, test_throw); - BusyBodyScoped myScopedBody(nThreads, csWorkRatio, test_locals, myCount, test_throw); - init.initialize(nThreads); - tbb::tick_count t0; - { - t0 = tbb::tick_count::now(); - myCount = 0; - NativeParallelFor(nThreads, 
myBody); - ASSERT(myCount == (MAX_WORK - (MAX_WORK % nThreads)), NULL); - REMARK("%d threads, work ratio %d per cent, time %g", nThreads, csWorkRatio, (tbb::tick_count::now() - t0).seconds()); - if (nThreads > 1) { - double etsSum = 0; - double etsMax = 0; - double etsMin = 0; - double etsSigmaSq = 0; - double etsSigma = 0; - - for(tbb::enumerable_thread_specific<double>::const_iterator ci = test_locals.begin(); ci != test_locals.end(); ci++) { - etsSum += *ci; - if(etsMax==0.0) { - etsMin = *ci; - } - else { - if(etsMin > *ci) etsMin = *ci; - } - if(etsMax < *ci) etsMax = *ci; - } - double etsAvg = etsSum / (double)nThreads; - for(tbb::enumerable_thread_specific<double>::const_iterator ci = test_locals.begin(); ci != test_locals.end(); ci++) { - etsSigma = etsAvg - *ci; - etsSigmaSq += etsSigma * etsSigma; - } - // an attempt to gauge the "fairness" of the scheduling of the threads. We figure - // the standard deviation, and compare it with the maximum deviation from the - // average time. If the difference is 0 that means all threads finished in the same - // amount of time. If non-zero, the difference is divided by the time, and the - // negative log is taken. If > 2, then the difference is on the order of 0.01*T - // where T is the average time. We arbitrarily define this as "fair." - etsSigma = sqrt(etsSigmaSq/double(nThreads)); - etsMax -= etsAvg; // max - a == delta1 - etsMin = etsAvg - etsMin; // a - min == delta2 - if(etsMax < etsMin) etsMax = etsMin; - etsMax -= etsSigma; - // ASSERT(etsMax >= 0, NULL); // shouldn't the maximum difference from the mean be > the stddev? - etsMax = (etsMax > 0.0) ? etsMax : 0.0; // possible rounding error - double fairness = etsMax / etsAvg; - if(fairness == 0.0) { - fairness = 100.0; - } - else fairness = - log10(fairness); - if(fairness > 2.0 ) { - REMARK(" Fair (%g)\n", fairness); - } - else { - REMARK(" Unfair (%g)\n", fairness); - } - } - myCount = 0; - NativeParallelFor(nThreads, myScopedBody); - ASSERT(myCount == (MAX_WORK - (MAX_WORK % nThreads)), NULL); - - } - - init.terminate(); -} - -void -RunParallelTests() { - for(int p = MinThread; p <= MaxThread; p++) { - for(int cs_ratio = 1; cs_ratio < 95; cs_ratio *= 2) { - RunOneCriticalSectionTest(p, cs_ratio, /*test_throw*/true); - } - } -} - -int TestMain () { - if(MinThread <= 0) MinThread = 1; - - if(MaxThread > 0) { - RunParallelTests(); - } - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_dynamic_link.cpp b/src/tbb/src/test/test_dynamic_link.cpp deleted file mode 100644 index 0ab0969ed..000000000 --- a/src/tbb/src/test/test_dynamic_link.cpp +++ /dev/null @@ -1,84 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -enum FOO_TYPE { - FOO_DUMMY, - FOO_IMPLEMENTATION -}; - -#if _WIN32 || _WIN64 -#define TEST_EXPORT -#else -#define TEST_EXPORT extern "C" -#endif /* _WIN32 || _WIN64 */ - -// foo "implementations". -TEST_EXPORT FOO_TYPE foo1() { return FOO_IMPLEMENTATION; } -TEST_EXPORT FOO_TYPE foo2() { return FOO_IMPLEMENTATION; } -// foo "dummies". -FOO_TYPE dummy_foo1() { return FOO_DUMMY; } -FOO_TYPE dummy_foo2() { return FOO_DUMMY; } - -// Handlers. -static FOO_TYPE (*foo1_handler)() = &dummy_foo1; -static FOO_TYPE (*foo2_handler)() = &dummy_foo2; - -#include "tbb/tbb_config.h" -// Suppress the weak symbol mechanism to avoid surplus compiler warnings. -#ifdef __TBB_WEAK_SYMBOLS_PRESENT -#undef __TBB_WEAK_SYMBOLS_PRESENT -#endif -// Use of harness assert to avoid the dependency on TBB -#include "harness_assert.h" -#define LIBRARY_ASSERT(p,message) ASSERT(p,message) -#include "tbb/dynamic_link.h" -// Table describing how to link the handlers. -static const tbb::internal::dynamic_link_descriptor LinkTable[] = { - { "foo1", (tbb::internal::pointer_to_handler*)(void*)(&foo1_handler) }, - { "foo2", (tbb::internal::pointer_to_handler*)(void*)(&foo2_handler) } -}; - -// The direct include since we want to test internal functionality. -#include "tbb/dynamic_link.cpp" -#include "harness_dynamic_libs.h" -#include "harness.h" - -#if !HARNESS_SKIP_TEST -int TestMain () { -#if !_WIN32 - // Check if the executable exports its symbols. - ASSERT( Harness::GetAddress( Harness::OpenLibrary(NULL), "foo1" ) && Harness::GetAddress( Harness::OpenLibrary(NULL), "foo2" ), - "The executable doesn't export its symbols. Is the -rdynamic switch set during linking?" ); -#endif /* !_WIN32 */ - // We want to link (or fail to link) to the symbols available from the - // executable, so it doesn't matter what library name is specified in - // the dynamic_link call - let it be an empty string. - // Generally speaking the test makes sense only on Linux, but on Windows it - // checks that dynamic_link behaves gracefully with an incorrect library name. - if ( tbb::internal::dynamic_link( "", LinkTable, sizeof(LinkTable)/sizeof(LinkTable[0]) ) ) { - ASSERT( foo1_handler && foo2_handler, "The symbols are corrupted by dynamic_link" ); - ASSERT( foo1_handler() == FOO_IMPLEMENTATION && foo2_handler() == FOO_IMPLEMENTATION, - "dynamic_link returned success but symbol(s) are wrong" ); - } else { - ASSERT( foo1_handler==dummy_foo1 && foo2_handler==dummy_foo2, "The symbols are corrupted by dynamic_link" ); - } - return Harness::Done; -} -#endif // HARNESS_SKIP_TEST diff --git a/src/tbb/src/test/test_eh_algorithms.cpp b/src/tbb/src/test/test_eh_algorithms.cpp deleted file mode 100644 index 46a6fcd59..000000000 --- a/src/tbb/src/test/test_eh_algorithms.cpp +++ /dev/null @@ -1,1580 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" - -#if __TBB_TASK_GROUP_CONTEXT - -#include <limits.h> // for INT_MAX -#include "tbb/task_scheduler_init.h" -#include "tbb/tbb_exception.h" -#include "tbb/task.h" -#include "tbb/atomic.h" -#include "tbb/parallel_for.h" -#include "tbb/parallel_reduce.h" -#include "tbb/parallel_do.h" -#include "tbb/pipeline.h" -#include "tbb/parallel_scan.h" -#include "tbb/blocked_range.h" -#include "harness_assert.h" - -#define FLAT_RANGE 100000 -#define FLAT_GRAIN 100 -#define OUTER_RANGE 100 -#define OUTER_GRAIN 10 -#define INNER_RANGE (FLAT_RANGE / OUTER_RANGE) -#define INNER_GRAIN (FLAT_GRAIN / OUTER_GRAIN) - -tbb::atomic<intptr_t> g_FedTasksCount; // number of tasks added by parallel_do feeder -tbb::atomic<intptr_t> g_OuterParCalls; // number of actual invocations of the outer construct executed. -tbb::atomic<intptr_t> g_TGCCancelled; // Number of times a task sees its group cancelled at start - -inline intptr_t Existed () { return INT_MAX; } - -#include "harness_eh.h" -/******************************** - Variables in test - -__ Test control variables - g_ExceptionInMaster -- only the master thread is allowed to throw. If false, the master cannot throw - g_SolitaryException -- only one throw may be executed. - --- controls for ThrowTestException for pipeline tests - g_NestedPipelines -- are inner pipelines being run? - g_PipelinesStarted -- how many pipelines have run their first filter at least once. - --- Information variables - - g_Master -- Thread ID of the "master" thread - In pipelines sometimes the master thread does not participate, so the tests have to be resilient to this. - --- Measurement variables - - g_OuterParCalls -- how many outer parallel ranges or filters started - g_TGCCancelled -- how many inner parallel ranges or filters saw task::self().is_cancelled() - g_ExceptionsThrown -- number of throws executed (counted in ThrowTestException) - g_MasterExecutedThrow -- number of times master thread actually executed a throw - g_NonMasterExecutedThrow -- number of times non-master thread actually executed a throw - g_ExceptionCaught -- one of PropagatedException or unknown exception was caught. (Other exceptions cause assertions.) - - -- Tallies for the task bodies which have executed (counted in each inner body, sampled in ThrowTestException) - g_CurExecuted -- total number of inner ranges or filters which executed - g_ExecutedAtLastCatch -- value of g_CurExecuted when last catch was made, 0 if none. 
- g_ExecutedAtFirstCatch -- value of g_CurExecuted when first catch is made, 0 if none. - *********************************/ - -inline void ResetGlobals ( bool throwException = true, bool flog = false ) { - ResetEhGlobals( throwException, flog ); - g_FedTasksCount = 0; - g_OuterParCalls = 0; - g_NestedPipelines = false; - g_TGCCancelled = 0; -} - -//////////////////////////////////////////////////////////////////////////////// -// Tests for tbb::parallel_for and tbb::parallel_reduce - -typedef size_t count_type; -typedef tbb::blocked_range<count_type> range_type; - -inline intptr_t CountSubranges(range_type r) { - if(!r.is_divisible()) return intptr_t(1); - range_type r2(r,tbb::split()); - return CountSubranges(r) + CountSubranges(r2); -} - -inline intptr_t NumSubranges ( intptr_t length, intptr_t grain ) { - return CountSubranges(range_type(0,length,grain)); -} - -template<class Body> -intptr_t TestNumSubrangesCalculation ( intptr_t length, intptr_t grain, intptr_t inner_length, intptr_t inner_grain ) { - ResetGlobals(); - g_ThrowException = false; - intptr_t outerCalls = NumSubranges(length, grain), - innerCalls = NumSubranges(inner_length, inner_grain), - maxExecuted = outerCalls * (innerCalls + 1); - tbb::parallel_for( range_type(0, length, grain), Body() ); - ASSERT (g_CurExecuted == maxExecuted, "Wrong estimation of bodies invocation count"); - return maxExecuted; -} - -class NoThrowParForBody { -public: - void operator()( const range_type& r ) const { - volatile count_type x = 0; - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - count_type end = r.end(); - for( count_type i=r.begin(); i<end; ++i ) - x += i; - } -}; - -#if TBB_USE_EXCEPTIONS - -void Test0 () { - ResetGlobals(); - tbb::simple_partitioner p; - for( size_t i=0; i<10; ++i ) { - tbb::parallel_for( range_type(0, 0, 1), NoThrowParForBody() ); - tbb::parallel_for( range_type(0, 0, 1), NoThrowParForBody(), p ); - tbb::parallel_for( range_type(0, 128, 8), NoThrowParForBody() ); - tbb::parallel_for( range_type(0, 128, 8), NoThrowParForBody(), p ); - } -} // void Test0 () - -//! Template that creates a functor suitable for parallel_reduce from a functor for parallel_for. -template<typename ParForBody> -class SimpleParReduceBody: NoAssign { - ParForBody m_Body; -public: - void operator()( const range_type& r ) const { m_Body(r); } - SimpleParReduceBody() {} - SimpleParReduceBody( SimpleParReduceBody& left, tbb::split ) : m_Body(left.m_Body) {} - void join( SimpleParReduceBody& /*right*/ ) {} -}; // SimpleParReduceBody - -//! Test parallel_for and parallel_reduce for a given partitioner. -/** The Body need only be suitable for a parallel_for. */ -template<typename ParForBody, typename Partitioner> -void TestParallelLoopAux() { - Partitioner partitioner; - for( int i=0; i<2; ++i ) { - ResetGlobals(); - TRY(); - if( i==0 ) - tbb::parallel_for( range_type(0, FLAT_RANGE, FLAT_GRAIN), ParForBody(), partitioner ); - else { - SimpleParReduceBody<ParForBody> rb; - tbb::parallel_reduce( range_type(0, FLAT_RANGE, FLAT_GRAIN), rb, partitioner ); - } - CATCH_AND_ASSERT(); - // two cases: g_SolitaryException and !g_SolitaryException - // 1) g_SolitaryException: only one thread actually threw. There is only one context, so the exception - // (when caught) will cause that context to be cancelled. After this event, there may be one or - // more threads which are "in-flight", up to g_NumThreads, but no more will be started. 
The threads, - // when they start, if they see they are cancelled, TGCCancelled is incremented. - // 2) !g_SolitaryException: more than one thread can throw. The number of threads that actually - // threw is g_MasterExecutedThrow if only the master is allowed, else g_NonMasterExecutedThrow. - // Only one context, so TGCCancelled should be <= g_NumThreads. - // - // the reasoning is similar for nested algorithms in a single context (Test2). - // - // If a thread throws in a context, more than one subsequent task body may see the - // cancelled state (if they are scheduled before the state is propagated.) this is - // infrequent, but it occurs. So what was to be an assertion must be a remark. - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks ran after exception thrown"); - if( g_TGCCancelled > g_NumThreads) REMARK( "Too many tasks ran after exception thrown (%d vs. %d)\n", - (int)g_TGCCancelled, (int)g_NumThreads); - ASSERT(g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - if ( g_SolitaryException ) { - ASSERT(g_NumExceptionsCaught == 1, "No try_blocks in any body expected in this test"); - ASSERT(g_NumExceptionsCaught == (g_ExceptionInMaster ? g_MasterExecutedThrow : g_NonMasterExecutedThrow), - "Not all throws were caught"); - ASSERT(g_ExecutedAtFirstCatch == g_ExecutedAtLastCatch, "Too many exceptions occurred"); - } - else { - ASSERT(g_NumExceptionsCaught >= 1, "No try blocks in any body expected in this test"); - } - } -} // TestParallelLoopAux - -//! Test with parallel_for and parallel_reduce, over all three kinds of partitioners. -/** The Body only needs to be suitable for tbb::parallel_for. */ -template<typename Body> -void TestParallelLoop() { - // The simple and auto partitioners should be const, but not the affinity partitioner. - TestParallelLoopAux<Body, const tbb::simple_partitioner >(); - TestParallelLoopAux<Body, const tbb::auto_partitioner >(); -#define __TBB_TEMPORARILY_DISABLED 1 -#if !__TBB_TEMPORARILY_DISABLED - // TODO: Improve the test so that it tolerates delayed start of tasks with affinity_partitioner - TestParallelLoopAux<Body, /***/ tbb::affinity_partitioner>(); -#endif -#undef __TBB_TEMPORARILY_DISABLED -} - -class SimpleParForBody: NoAssign { -public: - void operator()( const range_type& r ) const { - Harness::ConcurrencyTracker ct; - volatile long x = 0; - ++g_CurExecuted; - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - for( count_type i = r.begin(); i != r.end(); ++i ) - x += 0; - WaitUntilConcurrencyPeaks(); - ThrowTestException(1); - } -}; - -void Test1() { - // non-nested parallel_for/reduce with throwing body, one context - TestParallelLoop<SimpleParForBody>(); -} // void Test1 () - -class OuterParForBody: NoAssign { -public: - void operator()( const range_type& ) const { - Harness::ConcurrencyTracker ct; - ++g_OuterParCalls; - tbb::parallel_for( tbb::blocked_range<size_t>(0, INNER_RANGE, INNER_GRAIN), SimpleParForBody() ); - } -}; - -//! Uses parallel_for body containing an inner parallel_for with the default context not wrapped by a try-block. -/** Inner algorithms are spawned inside the new bound context by default. Since - exceptions thrown from the inner parallel_for are not handled by the caller - (outer parallel_for body) in this test, they will cancel all the sibling inner - algorithms. 
**/ -void Test2 () { - TestParallelLoop<OuterParForBody>(); -} // void Test2 () - -class OuterParForBodyWithIsolatedCtx { -public: - void operator()( const range_type& ) const { - tbb::task_group_context ctx(tbb::task_group_context::isolated); - ++g_OuterParCalls; - tbb::parallel_for( tbb::blocked_range<size_t>(0, INNER_RANGE, INNER_GRAIN), SimpleParForBody(), tbb::simple_partitioner(), ctx ); - } -}; - -//! Uses parallel_for body invoking an inner parallel_for with an isolated context without a try-block. -/** Even though exceptions thrown from the inner parallel_for are not handled - by the caller in this test, they will not affect sibling inner algorithms - already running because of the isolated contexts. However because the first - exception cancels the root parallel_for only the first g_NumThreads subranges - will be processed (which launch inner parallel_fors) **/ -void Test3 () { - ResetGlobals(); - typedef OuterParForBodyWithIsolatedCtx body_type; - intptr_t innerCalls = NumSubranges(INNER_RANGE, INNER_GRAIN), - // we expect one thread to throw without counting, the rest to run to completion - // this formula assumes g_numThreads outer pfor ranges will be started, but that is not the - // case; the SimpleParFor subranges are started up as part of the outer ones, and when - // the amount of concurrency reaches g_NumThreads no more outer Pfor ranges are started. - // so we have to count the number of outer Pfors actually started. - minExecuted = (g_NumThreads - 1) * innerCalls; - TRY(); - tbb::parallel_for( range_type(0, OUTER_RANGE, OUTER_GRAIN), body_type() ); - CATCH_AND_ASSERT(); - minExecuted = (g_OuterParCalls - 1) * innerCalls; // see above - - // The first formula above assumes all ranges of the outer parallel for are executed, and one - // cancels. In the event, we have a smaller number of ranges that start before the exception - // is caught. - // - // g_SolitaryException:One inner range throws. Outer parallel_For is cancelled, but sibling - // parallel_fors continue to completion (unless the threads that execute - // are not allowed to throw, in which case we will not see any exceptions). - // !g_SolitaryException:multiple inner ranges may throw. Any which throws will stop, and the - // corresponding range of the outer pfor will stop also. - // - // In either case, once the outer pfor gets the exception it will stop executing further ranges. - - // if the only threads executing were not allowed to throw, then not seeing an exception is okay. 
- bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecuted) || (!g_ExceptionInMaster && !g_NonMasterExecuted); - if ( g_SolitaryException ) { - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived exception"); - ASSERT (g_CurExecuted > minExecuted, "Too few tasks survived exception"); - ASSERT (g_CurExecuted <= minExecuted + (g_ExecutedAtLastCatch + g_NumThreads), "Too many tasks survived exception"); - ASSERT (g_NumExceptionsCaught == 1 || okayNoExceptionsCaught, "No try_blocks in any body expected in this test"); - } - else { - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - ASSERT (g_NumExceptionsCaught >= 1 || okayNoExceptionsCaught, "No try_blocks in any body expected in this test"); - } -} // void Test3 () - -class OuterParForExceptionSafeBody { -public: - void operator()( const range_type& ) const { - tbb::task_group_context ctx(tbb::task_group_context::isolated); - ++g_OuterParCalls; - TRY(); - tbb::parallel_for( tbb::blocked_range<size_t>(0, INNER_RANGE, INNER_GRAIN), SimpleParForBody(), tbb::simple_partitioner(), ctx ); - CATCH(); // this macro sets g_ExceptionCaught - } -}; - -//! Uses parallel_for body invoking an inner parallel_for (with isolated context) inside a try-block. -/** Since exception(s) thrown from the inner parallel_for are handled by the caller - in this test, they affect neither other tasks of the root parallel_for - nor sibling inner algorithms. **/ -void Test4 () { - ResetGlobals( true, true ); - intptr_t innerCalls = NumSubranges(INNER_RANGE, INNER_GRAIN), - outerCalls = NumSubranges(OUTER_RANGE, OUTER_GRAIN); - TRY(); - tbb::parallel_for( range_type(0, OUTER_RANGE, OUTER_GRAIN), OuterParForExceptionSafeBody() ); - CATCH(); - // g_SolitaryException : one inner pfor will throw, the rest will execute to completion. - // so the count should be (outerCalls -1) * innerCalls, if a throw happened. - // !g_SolitaryException : possible multiple inner pfor throws. Should be approximately - // (outerCalls - g_NumExceptionsCaught) * innerCalls, give or take a few - intptr_t minExecuted = (outerCalls - g_NumExceptionsCaught) * innerCalls; - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecuted) || (!g_ExceptionInMaster && !g_NonMasterExecuted); - if ( g_SolitaryException ) { - // only one task had an exception thrown. That task had at least one execution (the one that threw). - // There may be an arbitrary number of ranges executed after the throw but before the exception - // is caught in the scheduler and cancellation is signaled. (seen 9, 11 and 62 (!) for 8 threads) - ASSERT (g_NumExceptionsCaught == 1 || okayNoExceptionsCaught, "No exception registered"); - ASSERT (g_CurExecuted >= minExecuted, "Too few tasks executed"); - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived exception"); - // a small number of threads can execute in a throwing sub-pfor, if the task which is - // to do the solitary throw swaps out after registering its intent to throw but before it - // actually does so. (Or is this caused by the extra threads participating? No, the - // number of extra tasks is sometimes far greater than the number of extra threads.) - ASSERT (g_CurExecuted <= minExecuted + g_NumThreads, "Too many tasks survived exception"); - if(g_CurExecuted > minExecuted + g_NumThreads) REMARK("Unusual number of tasks executed after signal (%d vs. 
%d)\n", - (int)g_CurExecuted, minExecuted + g_NumThreads); - } - else { - ASSERT ((g_NumExceptionsCaught >= 1 && g_NumExceptionsCaught <= outerCalls) || okayNoExceptionsCaught, "Unexpected actual number of exceptions"); - ASSERT (g_CurExecuted >= minExecuted, "Too few executed tasks reported"); - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived multiple exceptions"); - if(g_CurExecuted > g_ExecutedAtLastCatch + g_NumThreads) REMARK("Unusual number of tasks executed after signal (%d vs. %d)\n", - (int)g_CurExecuted, g_ExecutedAtLastCatch + g_NumThreads); - ASSERT (g_CurExecuted <= outerCalls * (1 + g_NumThreads), "Too many tasks survived exception"); - } -} // void Test4 () - -#endif /* TBB_USE_EXCEPTIONS */ - -class ParForBodyToCancel { -public: - void operator()( const range_type& ) const { - ++g_CurExecuted; - CancellatorTask::WaitUntilReady(); - } -}; - -template<class B> -class ParForLauncherTask : public tbb::task { - tbb::task_group_context &my_ctx; - - tbb::task* execute () { - tbb::parallel_for( range_type(0, FLAT_RANGE, FLAT_GRAIN), B(), tbb::simple_partitioner(), my_ctx ); - return NULL; - } -public: - ParForLauncherTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {} -}; - -//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm). -void TestCancelation1 () { - ResetGlobals( false ); - RunCancellationTest<ParForLauncherTask<ParForBodyToCancel>, CancellatorTask>( NumSubranges(FLAT_RANGE, FLAT_GRAIN) / 4 ); -} - -class CancellatorTask2 : public tbb::task { - tbb::task_group_context &m_GroupToCancel; - - tbb::task* execute () { - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks(); - m_GroupToCancel.cancel_group_execution(); - g_ExecutedAtLastCatch = g_CurExecuted; - return NULL; - } -public: - CancellatorTask2 ( tbb::task_group_context& ctx, intptr_t ) : m_GroupToCancel(ctx) {} -}; - -class ParForBodyToCancel2 { -public: - void operator()( const range_type& ) const { - ++g_CurExecuted; - Harness::ConcurrencyTracker ct; - // The test will hang (and be timed out by the test system) if is_cancelled() is broken - while( !tbb::task::self().is_cancelled() ) - __TBB_Yield(); - } -}; - -//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm). -/** This version also tests task::is_cancelled() method. 
**/ -void TestCancelation2 () { - ResetGlobals(); - RunCancellationTest<ParForLauncherTask<ParForBodyToCancel2>, CancellatorTask2>(); - ASSERT (g_ExecutedAtLastCatch < g_NumThreads, "Somehow worker tasks started their execution before the cancellator task"); - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived cancellation"); - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Some tasks were executed after cancellation"); -} - -//////////////////////////////////////////////////////////////////////////////// -// Regression test based on the contribution by the author of the following forum post: -// http://softwarecommunity.intel.com/isn/Community/en-US/forums/thread/30254959.aspx - -class Worker { - static const int max_nesting = 3; - static const int reduce_range = 1024; - static const int reduce_grain = 256; -public: - int DoWork (int level); - int Validate (int start_level) { - int expected = 1; // identity for multiplication - for(int i=start_level+1; i<max_nesting; ++i) - expected *= reduce_range; - return expected; - } -}; - -class RecursiveParReduceBodyWithSharedWorker { - Worker * m_SharedWorker; - int m_NestingLevel; - int m_Result; -public: - RecursiveParReduceBodyWithSharedWorker ( RecursiveParReduceBodyWithSharedWorker& src, tbb::split ) - : m_SharedWorker(src.m_SharedWorker) - , m_NestingLevel(src.m_NestingLevel) - , m_Result(0) - {} - RecursiveParReduceBodyWithSharedWorker ( Worker *w, int outer ) - : m_SharedWorker(w) - , m_NestingLevel(outer) - , m_Result(0) - {} - - void operator() ( const tbb::blocked_range<size_t>& r ) { - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - for (size_t i = r.begin (); i != r.end (); ++i) { - m_Result += m_SharedWorker->DoWork (m_NestingLevel); - } - } - void join (const RecursiveParReduceBodyWithSharedWorker & x) { - m_Result += x.m_Result; - } - int result () { return m_Result; } -}; - -int Worker::DoWork ( int level ) { - ++level; - if ( level < max_nesting ) { - RecursiveParReduceBodyWithSharedWorker rt (this, level); - tbb::parallel_reduce (tbb::blocked_range<size_t>(0, reduce_range, reduce_grain), rt); - return rt.result(); - } - else - return 1; -} - -//! 
Regression test for hanging that occurred with the first version of cancellation propagation -void TestCancelation3 () { - Worker w; - int result = w.DoWork (0); - int expected = w.Validate(0); - ASSERT ( result == expected, "Wrong calculation result"); -} - -struct StatsCounters { - tbb::atomic<size_t> my_total_created; - tbb::atomic<size_t> my_total_deleted; - StatsCounters() { - my_total_created = 0; - my_total_deleted = 0; - } -}; - -class ParReduceBody { - StatsCounters* my_stats; - size_t my_id; - bool my_exception; - -public: - ParReduceBody( StatsCounters& s_, bool e_ ) : my_stats(&s_), my_exception(e_) { - my_id = my_stats->my_total_created++; - } - - ParReduceBody( const ParReduceBody& lhs ) { - my_stats = lhs.my_stats; - my_id = my_stats->my_total_created++; - } - - ParReduceBody( ParReduceBody& lhs, tbb::split ) { - my_stats = lhs.my_stats; - my_id = my_stats->my_total_created++; - } - - ~ParReduceBody(){ ++my_stats->my_total_deleted; } - - void operator()( const tbb::blocked_range<std::size_t>& /*range*/ ) const { - //Do nothing, except for one task (chosen arbitrarily) - if( my_id >= 12 ) { - if( my_exception ) - ThrowTestException(1); - else - tbb::task::self().cancel_group_execution(); - } - } - - void join( ParReduceBody& /*rhs*/ ) {} -}; - -void TestCancelation4() { - StatsCounters statsObj; - __TBB_TRY { - tbb::task_group_context tgc1, tgc2; - ParReduceBody body_for_cancellation(statsObj, false), body_for_exception(statsObj, true); - tbb::parallel_reduce( tbb::blocked_range<std::size_t>(0,100000000,100), body_for_cancellation, tbb::simple_partitioner(), tgc1 ); - tbb::parallel_reduce( tbb::blocked_range<std::size_t>(0,100000000,100), body_for_exception, tbb::simple_partitioner(), tgc2 ); - } __TBB_CATCH(...) {} - ASSERT ( statsObj.my_total_created==statsObj.my_total_deleted, "Not all parallel_reduce body objects created were reclaimed"); -} - -void RunParForAndReduceTests () { - REMARK( "parallel for and reduce tests\n" ); - tbb::task_scheduler_init init (g_NumThreads); - g_Master = Harness::CurrentTid(); - -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - Test0(); - Test1(); - Test2(); - Test3(); - Test4(); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ - TestCancelation1(); - TestCancelation2(); - TestCancelation3(); - TestCancelation4(); -} - -//////////////////////////////////////////////////////////////////////////////// -// Tests for tbb::parallel_do - -#define ITER_RANGE 1000 -#define ITEMS_TO_FEED 50 -#define INNER_ITER_RANGE 100 -#define OUTER_ITER_RANGE 50 - -#define PREPARE_RANGE(Iterator, rangeSize) \ - size_t test_vector[rangeSize + 1]; \ - for (int i =0; i < rangeSize; i++) \ - test_vector[i] = i; \ - Iterator begin(&test_vector[0]); \ - Iterator end(&test_vector[rangeSize]) - -void Feed ( tbb::parallel_do_feeder<size_t> &feeder, size_t val ) { - if (g_FedTasksCount < ITEMS_TO_FEED) { - ++g_FedTasksCount; - feeder.add(val); - } -} - -#include "harness_iterator.h" - -#if TBB_USE_EXCEPTIONS - -// Simple functor object with exception -class SimpleParDoBody { -public: - void operator() ( size_t &value ) const { - ++g_CurExecuted; - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - Harness::ConcurrencyTracker ct; - value += 1000; - WaitUntilConcurrencyPeaks(); - ThrowTestException(1); - } -}; - -// Simple functor object with exception and feeder -class SimpleParDoBodyWithFeeder : SimpleParDoBody { 
-public: - void operator() ( size_t &value, tbb::parallel_do_feeder<size_t> &feeder ) const { - Feed(feeder, 0); - SimpleParDoBody::operator()(value); - } -}; - -// Tests exceptions without nesting -template <class Iterator, class simple_body> -void Test1_parallel_do () { - ResetGlobals(); - PREPARE_RANGE(Iterator, ITER_RANGE); - TRY(); - tbb::parallel_do<Iterator, simple_body>(begin, end, simple_body() ); - CATCH_AND_ASSERT(); - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived cancellation"); - ASSERT (g_NumExceptionsCaught == 1, "No try_blocks in any body expected in this test"); - if ( !g_SolitaryException ) - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - -} // void Test1_parallel_do () - -template <class Iterator> -class OuterParDoBody { -public: - void operator()( size_t& /*value*/ ) const { - ++g_OuterParCalls; - PREPARE_RANGE(Iterator, INNER_ITER_RANGE); - tbb::parallel_do<Iterator, SimpleParDoBody>(begin, end, SimpleParDoBody()); - } -}; - -template <class Iterator> -class OuterParDoBodyWithFeeder : OuterParDoBody<Iterator> { -public: - void operator()( size_t& value, tbb::parallel_do_feeder<size_t>& feeder ) const { - Feed(feeder, 0); - OuterParDoBody<Iterator>::operator()(value); - } -}; - -//! Uses parallel_do body containing an inner parallel_do with the default context not wrapped by a try-block. -/** Inner algorithms are spawned inside the new bound context by default. Since - exceptions thrown from the inner parallel_do are not handled by the caller - (outer parallel_do body) in this test, they will cancel all the sibling inner - algorithms. **/ -template <class Iterator, class outer_body> -void Test2_parallel_do () { - ResetGlobals(); - PREPARE_RANGE(Iterator, ITER_RANGE); - TRY(); - tbb::parallel_do<Iterator, outer_body >(begin, end, outer_body() ); - CATCH_AND_ASSERT(); - //if ( g_SolitaryException ) - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived cancellation"); - ASSERT (g_NumExceptionsCaught == 1, "No try_blocks in any body expected in this test"); - if ( !g_SolitaryException ) - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); -} // void Test2_parallel_do () - -template <class Iterator> -class OuterParDoBodyWithIsolatedCtx { -public: - void operator()( size_t& /*value*/ ) const { - tbb::task_group_context ctx(tbb::task_group_context::isolated); - ++g_OuterParCalls; - PREPARE_RANGE(Iterator, INNER_ITER_RANGE); - tbb::parallel_do<Iterator, SimpleParDoBody>(begin, end, SimpleParDoBody(), ctx); - } -}; - -template <class Iterator> -class OuterParDoBodyWithIsolatedCtxWithFeeder : OuterParDoBodyWithIsolatedCtx<Iterator> { -public: - void operator()( size_t& value, tbb::parallel_do_feeder<size_t> &feeder ) const { - Feed(feeder, 0); - OuterParDoBodyWithIsolatedCtx<Iterator>::operator()(value); - } -}; - -//! Uses parallel_do body invoking an inner parallel_do with an isolated context without a try-block. -/** Even though exceptions thrown from the inner parallel_do are not handled - by the caller in this test, they will not affect sibling inner algorithms - already running because of the isolated contexts. 
However, because the first - exception cancels the root parallel_do, at most the first g_NumThreads subranges - will be processed (which launch inner parallel_dos) **/ -template <class Iterator, class outer_body> -void Test3_parallel_do () { - ResetGlobals(); - PREPARE_RANGE(Iterator, OUTER_ITER_RANGE); - intptr_t innerCalls = INNER_ITER_RANGE, - // The assumption here is the same as in outer parallel fors. - minExecuted = (g_NumThreads - 1) * innerCalls; - g_Master = Harness::CurrentTid(); - TRY(); - tbb::parallel_do<Iterator, outer_body >(begin, end, outer_body()); - CATCH_AND_ASSERT(); - // figure actual number of expected executions given the number of outer PDos started. - minExecuted = (g_OuterParCalls - 1) * innerCalls; - // one extra thread may run a task that sees cancellation. Infrequent but possible - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived exception"); - if(g_TGCCancelled > g_NumThreads) REMARK("Extra thread(s) executed after cancel (%d vs. %d)\n", - (int)g_TGCCancelled, (int)g_NumThreads); - if ( g_SolitaryException ) { - ASSERT (g_CurExecuted > minExecuted, "Too few tasks survived exception"); - ASSERT (g_CurExecuted <= minExecuted + (g_ExecutedAtLastCatch + g_NumThreads), "Too many tasks survived exception"); - } - ASSERT (g_NumExceptionsCaught == 1, "No try_blocks in any body expected in this test"); - if ( !g_SolitaryException ) - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); -} // void Test3_parallel_do () - -template <class Iterator> -class OuterParDoWithEhBody { -public: - void operator()( size_t& /*value*/ ) const { - tbb::task_group_context ctx(tbb::task_group_context::isolated); - ++g_OuterParCalls; - PREPARE_RANGE(Iterator, INNER_ITER_RANGE); - TRY(); - tbb::parallel_do<Iterator, SimpleParDoBody>(begin, end, SimpleParDoBody(), ctx); - CATCH(); - } -}; - -template <class Iterator> -class OuterParDoWithEhBodyWithFeeder : NoAssign, OuterParDoWithEhBody<Iterator> { -public: - void operator()( size_t &value, tbb::parallel_do_feeder<size_t> &feeder ) const { - Feed(feeder, 0); - OuterParDoWithEhBody<Iterator>::operator()(value); - } -}; - -//! Uses parallel_do body invoking an inner parallel_do (with isolated context) inside a try-block. -/** Since exception(s) thrown from the inner parallel_do are handled by the caller - in this test, they affect neither other tasks of the root parallel_do - nor sibling inner algorithms. 
**/ -template <class Iterator, class outer_body_with_eh> -void Test4_parallel_do () { - ResetGlobals( true, true ); - PREPARE_RANGE(Iterator, OUTER_ITER_RANGE); - g_Master = Harness::CurrentTid(); - TRY(); - tbb::parallel_do<Iterator, outer_body_with_eh>(begin, end, outer_body_with_eh()); - CATCH(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "All exceptions must have been handled in the parallel_do body"); - intptr_t innerCalls = INNER_ITER_RANGE, - outerCalls = OUTER_ITER_RANGE + g_FedTasksCount, - maxExecuted = outerCalls * innerCalls, - minExecuted = 0; - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived exception"); - if ( g_SolitaryException ) { - minExecuted = maxExecuted - innerCalls; - ASSERT (g_NumExceptionsCaught == 1, "No exception registered"); - ASSERT (g_CurExecuted >= minExecuted, "Too few tasks executed"); - // This test has the same property as Test4 (parallel_for); the exception can be - // thrown, but some number of tasks from the outer Pdo can execute after the throw but - // before the cancellation is signaled (have seen 36). - ASSERT_WARNING(g_CurExecuted < maxExecuted || g_TGCCancelled, "All tasks survived exception. Oversubscription?"); - } - else { - minExecuted = g_NumExceptionsCaught; - ASSERT (g_NumExceptionsCaught > 1 && g_NumExceptionsCaught <= outerCalls, "Unexpected actual number of exceptions"); - ASSERT (g_CurExecuted >= minExecuted, "Too few executed tasks reported"); - ASSERT (g_CurExecuted < g_ExecutedAtLastCatch + g_NumThreads + outerCalls, "Too many tasks survived multiple exceptions"); - ASSERT (g_CurExecuted <= outerCalls * (1 + g_NumThreads), "Too many tasks survived exception"); - } -} // void Test4_parallel_do () - -// This body throws an exception only if the task was added by feeder -class ParDoBodyWithThrowingFeederTasks { -public: - //! This form of the function call operator can be used when the body needs to add more work during the processing - void operator() ( size_t &value, tbb::parallel_do_feeder<size_t> &feeder ) const { - ++g_CurExecuted; - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - Feed(feeder, 1); - if (value == 1) - ThrowTestException(1); - } -}; // class ParDoBodyWithThrowingFeederTasks - -// Test an exception in a task which was added by the feeder. -template <class Iterator> -void Test5_parallel_do () { - ResetGlobals(); - PREPARE_RANGE(Iterator, ITER_RANGE); - g_Master = Harness::CurrentTid(); - TRY(); - tbb::parallel_do<Iterator, ParDoBodyWithThrowingFeederTasks>(begin, end, ParDoBodyWithThrowingFeederTasks()); - CATCH(); - if (g_SolitaryException) { - // Failure occurs when g_ExceptionInMaster is false, but all the 1 values in the range - // are handled by the master thread. In this case no throw occurs. 
- ASSERT (l_ExceptionCaughtAtCurrentLevel // we saw an exception - || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) // non-master throws but none tried - || (g_ExceptionInMaster && !g_MasterExecutedThrow) // master throws but master didn't try - , "At least one exception should occur"); - if(!g_ExceptionCaught) { - if(g_ExceptionInMaster) - REMARK("PDo exception not thrown; non-masters handled all throwing values.\n"); - else - REMARK("PDo exception not thrown; master handled all throwing values.\n"); - } - } -} // void Test5_parallel_do () - -#endif /* TBB_USE_EXCEPTIONS */ - -class ParDoBodyToCancel { -public: - void operator()( size_t& /*value*/ ) const { - ++g_CurExecuted; - CancellatorTask::WaitUntilReady(); - } -}; - -class ParDoBodyToCancelWithFeeder : ParDoBodyToCancel { -public: - void operator()( size_t& value, tbb::parallel_do_feeder<size_t> &feeder ) const { - Feed(feeder, 0); - ParDoBodyToCancel::operator()(value); - } -}; - -template<class B, class Iterator> -class ParDoWorkerTask : public tbb::task { - tbb::task_group_context &my_ctx; - - tbb::task* execute () { - PREPARE_RANGE(Iterator, INNER_ITER_RANGE); - tbb::parallel_do<Iterator, B>( begin, end, B(), my_ctx ); - return NULL; - } -public: - ParDoWorkerTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {} -}; - -//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm). -template <class Iterator, class body_to_cancel> -void TestCancelation1_parallel_do () { - ResetGlobals( false ); - intptr_t threshold = 10; - tbb::task_group_context ctx; - ctx.reset(); - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count(3); - r.spawn( *new( r.allocate_child() ) CancellatorTask(ctx, threshold) ); - __TBB_Yield(); - r.spawn( *new( r.allocate_child() ) ParDoWorkerTask<body_to_cancel, Iterator>(ctx) ); - TRY(); - r.wait_for_all(); - CATCH_AND_FAIL(); - ASSERT (g_CurExecuted < g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks were executed after cancellation"); - r.destroy(r); -} - -class ParDoBodyToCancel2 { -public: - void operator()( size_t& /*value*/ ) const { - ++g_CurExecuted; - Harness::ConcurrencyTracker ct; - // The test will hang (and be timed out by the test system) if is_cancelled() is broken - while( !tbb::task::self().is_cancelled() ) - __TBB_Yield(); - } -}; - -class ParDoBodyToCancel2WithFeeder : ParDoBodyToCancel2 { -public: - void operator()( size_t& value, tbb::parallel_do_feeder<size_t> &feeder ) const { - Feed(feeder, 0); - ParDoBodyToCancel2::operator()(value); - } -}; - -//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm). -/** This version also tests task::is_cancelled() method. 
**/ -template <class Iterator, class body_to_cancel> -void TestCancelation2_parallel_do () { - ResetGlobals(); - RunCancellationTest<ParDoWorkerTask<body_to_cancel, Iterator>, CancellatorTask2>(); -} - -#define RunWithSimpleBody(func, body) \ - func<Harness::RandomIterator<size_t>, body>(); \ - func<Harness::RandomIterator<size_t>, body##WithFeeder>(); \ - func<Harness::ForwardIterator<size_t>, body>(); \ - func<Harness::ForwardIterator<size_t>, body##WithFeeder>() - -#define RunWithTemplatedBody(func, body) \ - func<Harness::RandomIterator<size_t>, body<Harness::RandomIterator<size_t> > >(); \ - func<Harness::RandomIterator<size_t>, body##WithFeeder<Harness::RandomIterator<size_t> > >(); \ - func<Harness::ForwardIterator<size_t>, body<Harness::ForwardIterator<size_t> > >(); \ - func<Harness::ForwardIterator<size_t>, body##WithFeeder<Harness::ForwardIterator<size_t> > >() - -void RunParDoTests() { - REMARK( "parallel do tests\n" ); - tbb::task_scheduler_init init (g_NumThreads); - g_Master = Harness::CurrentTid(); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - RunWithSimpleBody(Test1_parallel_do, SimpleParDoBody); - RunWithTemplatedBody(Test2_parallel_do, OuterParDoBody); - RunWithTemplatedBody(Test3_parallel_do, OuterParDoBodyWithIsolatedCtx); - RunWithTemplatedBody(Test4_parallel_do, OuterParDoWithEhBody); - Test5_parallel_do<Harness::ForwardIterator<size_t> >(); - Test5_parallel_do<Harness::RandomIterator<size_t> >(); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ - RunWithSimpleBody(TestCancelation1_parallel_do, ParDoBodyToCancel); - RunWithSimpleBody(TestCancelation2_parallel_do, ParDoBodyToCancel2); -} - -//////////////////////////////////////////////////////////////////////////////// -// Tests for tbb::pipeline - -#define NUM_ITEMS 100 - -const size_t c_DataEndTag = size_t(~0); - -int g_NumTokens = 0; - -// Simple input filter class, it assigns 1 to all array members -// It stops when it receives item equal to -1 -class InputFilter: public tbb::filter { - tbb::atomic<size_t> m_Item; - size_t m_Buffer[NUM_ITEMS + 1]; -public: - InputFilter() : tbb::filter(parallel) { - m_Item = 0; - for (size_t i = 0; i < NUM_ITEMS; ++i ) - m_Buffer[i] = 1; - m_Buffer[NUM_ITEMS] = c_DataEndTag; - } - - void* operator()( void* ) { - size_t item = m_Item.fetch_and_increment(); - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - if(item == 1) { - ++g_PipelinesStarted; // count on emitting the first item. - } - if ( item >= NUM_ITEMS ) - return NULL; - m_Buffer[item] = 1; - return &m_Buffer[item]; - } - - size_t* buffer() { return m_Buffer; } -}; // class InputFilter - -// Pipeline filter, without exceptions throwing -class NoThrowFilter : public tbb::filter { - size_t m_Value; -public: - enum operation { - addition, - subtraction, - multiplication - } m_Operation; - - NoThrowFilter(operation _operation, size_t value, bool is_parallel) - : filter(is_parallel? 
tbb::filter::parallel : tbb::filter::serial_in_order), - m_Value(value), m_Operation(_operation) - {} - void* operator()(void* item) { - size_t &value = *(size_t*)item; - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - ASSERT(value != c_DataEndTag, "terminator element is being processed"); - switch (m_Operation){ - case addition: - value += m_Value; - break; - case subtraction: - value -= m_Value; - break; - case multiplication: - value *= m_Value; - break; - default: - ASSERT(0, "Wrong operation parameter passed to NoThrowFilter"); - } // switch (m_Operation) - return item; - } -}; - -// Test pipeline without exceptions throwing -void Test0_pipeline () { - ResetGlobals(); - // Run test when serial filter is the first non-input filter - InputFilter inputFilter; //Emits NUM_ITEMS items - NoThrowFilter filter1(NoThrowFilter::addition, 99, false); - NoThrowFilter filter2(NoThrowFilter::subtraction, 90, true); - NoThrowFilter filter3(NoThrowFilter::multiplication, 5, false); - // Result should be 50 for all items except the last - tbb::pipeline p; - p.add_filter(inputFilter); - p.add_filter(filter1); - p.add_filter(filter2); - p.add_filter(filter3); - p.run(8); - for (size_t i = 0; i < NUM_ITEMS; ++i) - ASSERT(inputFilter.buffer()[i] == 50, "pipeline didn't process items properly"); -} // void Test0_pipeline () - -#if TBB_USE_EXCEPTIONS - -// Simple filter with exception throwing. If parallel, will wait until -// as many parallel filters start as there are threads. -class SimpleFilter : public tbb::filter { - bool m_canThrow; -public: - SimpleFilter (tbb::filter::mode _mode, bool canThrow ) : filter (_mode), m_canThrow(canThrow) {} - void* operator()(void* item) { - ++g_CurExecuted; - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled() ) ++g_TGCCancelled; - if ( m_canThrow ) { - if ( !is_serial() ) { - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks( min(g_NumTokens, g_NumThreads) ); - } - ThrowTestException(1); - } - return item; - } -}; // class SimpleFilter - -// This enumeration represents filters order in pipeline -struct FilterSet { - tbb::filter::mode mode1, - mode2; - bool throw1, - throw2; - - FilterSet( tbb::filter::mode m1, tbb::filter::mode m2, bool t1, bool t2 ) - : mode1(m1), mode2(m2), throw1(t1), throw2(t2) - {} -}; // struct FilterSet - -FilterSet serial_parallel( tbb::filter::serial, tbb::filter::parallel, /*throw1*/false, /*throw2*/true ); - -template<typename InFilter, typename Filter> -class CustomPipeline : protected tbb::pipeline { - InFilter inputFilter; - Filter filter1; - Filter filter2; -public: - CustomPipeline( const FilterSet& filters ) - : filter1(filters.mode1, filters.throw1), filter2(filters.mode2, filters.throw2) - { - add_filter(inputFilter); - add_filter(filter1); - add_filter(filter2); - } - void run () { tbb::pipeline::run(g_NumTokens); } - void run ( tbb::task_group_context& ctx ) { tbb::pipeline::run(g_NumTokens, ctx); } - - using tbb::pipeline::add_filter; -}; - -typedef CustomPipeline<InputFilter, SimpleFilter> SimplePipeline; - -// Tests exceptions without nesting -void Test1_pipeline ( const FilterSet& filters ) { - ResetGlobals(); - SimplePipeline testPipeline(filters); - TRY(); - testPipeline.run(); - if ( g_CurExecuted == 2 * NUM_ITEMS ) { - // all the items were processed, though an exception was supposed to occur. 
- if(!g_ExceptionInMaster && g_NonMasterExecutedThrow > 0) { - // if !g_ExceptionInMaster, the master thread is not allowed to throw. - // if g_NonMasterExecutedThrow > 0 then a thread besides the master tried to throw. - ASSERT(filters.mode1 != tbb::filter::parallel && filters.mode2 != tbb::filter::parallel, "Unusual count"); - } - else { - REMARK("Test1_pipeline with %d threads: Only the master thread tried to throw, and it is not allowed to.\n", (int)g_NumThreads); - } - // In case of all serial filters they might be all executed in the thread(s) - // where exceptions are not allowed by the common test logic. So we just quit. - return; - } - CATCH_AND_ASSERT(); - ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived exception"); - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - ASSERT (g_NumExceptionsCaught == 1, "No try_blocks in any body expected in this test"); - if ( !g_SolitaryException ) - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - -} // void Test1_pipeline () - -// Filter with nesting -class OuterFilter : public tbb::filter { -public: - OuterFilter (tbb::filter::mode _mode, bool ) : filter (_mode) {} - - void* operator()(void* item) { - ++g_OuterParCalls; - SimplePipeline testPipeline(serial_parallel); - testPipeline.run(); - return item; - } -}; // class OuterFilter - -//! Uses pipeline containing an inner pipeline with the default context not wrapped by a try-block. -/** Inner algorithms are spawned inside the new bound context by default. Since - exceptions thrown from the inner pipeline are not handled by the caller - (outer pipeline body) in this test, they will cancel all the sibling inner - algorithms. **/ -void Test2_pipeline ( const FilterSet& filters ) { - ResetGlobals(); - g_NestedPipelines = true; - CustomPipeline<InputFilter, OuterFilter> testPipeline(filters); - TRY(); - testPipeline.run(); - CATCH_AND_ASSERT(); - bool okayNoExceptionCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow); - ASSERT (g_NumExceptionsCaught == 1 || okayNoExceptionCaught, "No try_blocks in any body expected in this test"); - if ( g_SolitaryException ) { - if( g_TGCCancelled > g_NumThreads) REMARK( "Extra tasks ran after exception thrown (%d vs. %d)\n", - (int)g_TGCCancelled, (int)g_NumThreads); - } - else { - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived exception"); - } -} // void Test2_pipeline () - -//! Creates an isolated inner pipeline and runs it. -class OuterFilterWithIsolatedCtx : public tbb::filter { -public: - OuterFilterWithIsolatedCtx(tbb::filter::mode m, bool ) : filter(m) {} - - void* operator()(void* item) { - ++g_OuterParCalls; - tbb::task_group_context ctx(tbb::task_group_context::isolated); - // create inner pipeline with serial input, parallel output filter, second filter throws - SimplePipeline testPipeline(serial_parallel); - testPipeline.run(ctx); - return item; - } -}; // class OuterFilterWithIsolatedCtx - -//! Uses pipeline invoking an inner pipeline with an isolated context without a try-block. -/** Even though exceptions thrown from the inner pipeline are not handled - by the caller in this test, they will not affect sibling inner algorithms - already running because of the isolated contexts. 
However, because the first - exception cancels the root pipeline, only the first g_NumThreads subranges - will be processed (which launch inner pipelines) **/ -void Test3_pipeline ( const FilterSet& filters ) { - for( int nTries = 1; nTries <= 4; ++nTries) { - ResetGlobals(); - g_NestedPipelines = true; - g_Master = Harness::CurrentTid(); - intptr_t innerCalls = NUM_ITEMS, - minExecuted = (g_NumThreads - 1) * innerCalls; - CustomPipeline<InputFilter, OuterFilterWithIsolatedCtx> testPipeline(filters); - TRY(); - testPipeline.run(); - CATCH_AND_ASSERT(); - - bool okayNoExceptionCaught = (g_ExceptionInMaster && !g_MasterExecuted) || - (!g_ExceptionInMaster && !g_NonMasterExecuted); - // only test assertions if the test threw an exception (or we don't care) - bool testSucceeded = okayNoExceptionCaught || g_NumExceptionsCaught > 0; - if(testSucceeded) { - if (g_SolitaryException) { - - // The test is one outer pipeline with two NestedFilters that each start an inner pipeline. - // Each time the input filter of a pipeline delivers its first item, it increments - // g_PipelinesStarted. When g_SolitaryException is set, the throw will not occur until - // g_PipelinesStarted >= 3. (This is so at least a second pipeline in its own isolated - // context will start; that is what we're testing.) - // - // There are two pipelines which will NOT run to completion when a solitary throw - // happens in an isolated inner context: the outer pipeline and the pipeline which - // throws. All the other pipelines which start should run to completion. But only - // inner body invocations are counted. - // - // So g_CurExecuted should be about - // - // (2*NUM_ITEMS) * (g_PipelinesStarted - 2) + 1 - // ^ executions for each completed pipeline - // ^ completing pipelines (remembering two will not complete) - // ^ one for the inner throwing pipeline - - minExecuted = (2*NUM_ITEMS) * (g_PipelinesStarted - 2) + 1; - // each failing pipeline must execute at least two tasks - ASSERT(g_CurExecuted >= minExecuted, "Too few tasks survived exception"); - // no more than g_NumThreads tasks will be executed in a cancelled context. Otherwise - // tasks not executing at throw were scheduled. - ASSERT( g_TGCCancelled <= g_NumThreads, "Tasks not in-flight were executed"); - ASSERT(g_NumExceptionsCaught == 1, "Should have only one exception"); - // if we're only throwing from the master thread, and that thread didn't - // participate in the pipelines, then no throw occurred. 
- if(g_ExceptionInMaster && !g_MasterExecuted) {
- REMARK_ONCE("Master expected to throw, but didn't participate.\n");
- }
- else if(!g_ExceptionInMaster && !g_NonMasterExecuted) {
- REMARK_ONCE("Non-master expected to throw, but didn't participate.\n");
- }
- }
- ASSERT (g_NumExceptionsCaught == 1 || okayNoExceptionCaught, "No try_blocks in any body expected in this test");
- ASSERT ((g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads) || okayNoExceptionCaught, "Too many tasks survived exception");
- if(nTries > 1) REMARK("Test3_pipeline succeeded on try %d\n", nTries);
- return;
- }
- }
- REMARK_ONCE("Test3_pipeline failed for g_NumThreads==%d, g_ExceptionInMaster==%s, g_SolitaryException==%s\n",
- g_NumThreads, g_ExceptionInMaster?"T":"F", g_SolitaryException?"T":"F");
-} // void Test3_pipeline ()
-
-class OuterFilterWithEhBody : public tbb::filter {
-public:
- OuterFilterWithEhBody(tbb::filter::mode m, bool ) : filter(m) {}
-
- void* operator()(void* item) {
- tbb::task_group_context ctx(tbb::task_group_context::isolated);
- ++g_OuterParCalls;
- SimplePipeline testPipeline(serial_parallel);
- TRY();
- testPipeline.run(ctx);
- CATCH();
- return item;
- }
-}; // class OuterFilterWithEhBody
-
-//! Uses pipeline body invoking an inner pipeline (with isolated context) inside a try-block.
-/** Since exception(s) thrown from the inner pipeline are handled by the caller
- in this test, they do not affect other tasks of the root pipeline
- nor sibling inner algorithms. **/
-void Test4_pipeline ( const FilterSet& filters ) {
-#if __GNUC__ && !__INTEL_COMPILER
- if ( strncmp(__VERSION__, "4.1.0", 5) == 0 ) {
- REMARK_ONCE("Known issue: one of exception handling tests is skipped.\n");
- return;
- }
-#endif
- ResetGlobals( true, true );
- // each outer pipeline stage will start NUM_ITEMS inner pipelines.
- // each inner pipeline that doesn't throw will process NUM_ITEMS items.
- // for solitary exception there will be one pipeline that only processes one stage, one item.
- // innerCalls should be 2*NUM_ITEMS
- intptr_t innerCalls = 2*NUM_ITEMS,
- outerCalls = 2 * NUM_ITEMS,
- maxExecuted = outerCalls * innerCalls; // the number of invocations of the inner pipelines
- CustomPipeline<InputFilter, OuterFilterWithEhBody> testPipeline(filters);
- TRY();
- testPipeline.run();
- CATCH_AND_ASSERT();
- intptr_t minExecuted = 0;
- bool okayNoExceptionCaught = (g_ExceptionInMaster && !g_MasterExecuted) ||
- (!g_ExceptionInMaster && !g_NonMasterExecuted);
- if ( g_SolitaryException ) {
- minExecuted = maxExecuted - innerCalls; // one throwing inner pipeline
- ASSERT (g_NumExceptionsCaught == 1 || okayNoExceptionCaught, "No exception registered");
- ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived exception"); // probably will assert.
- }
- else {
- // we assume throwing pipelines will not count
- minExecuted = (outerCalls - g_NumExceptionsCaught) * innerCalls;
- ASSERT((g_NumExceptionsCaught >= 1 && g_NumExceptionsCaught <= outerCalls)||okayNoExceptionCaught, "Unexpected actual number of exceptions");
- ASSERT (g_CurExecuted >= minExecuted, "Too few executed tasks reported");
- // too many already-scheduled tasks are started after the first exception is
- // thrown. And g_ExecutedAtLastCatch is updated every time an exception is caught.
- // So with multiple exceptions there are a variable number of tasks that have been
- // discarded because of the signals.
- // each throw is caught, so we will see many cancelled tasks.
g_ExecutedAtLastCatch is - // updated with each throw, so the value will be the number of tasks executed at the last - ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks survived multiple exceptions"); - } -} // void Test4_pipeline () - -//! Testing filter::finalize method -#define BUFFER_SIZE 32 -#define NUM_BUFFERS 1024 - -tbb::atomic<size_t> g_AllocatedCount; // Number of currently allocated buffers -tbb::atomic<size_t> g_TotalCount; // Total number of allocated buffers - -//! Base class for all filters involved in finalize method testing -class FinalizationBaseFilter : public tbb::filter { -public: - FinalizationBaseFilter ( tbb::filter::mode m ) : filter(m) {} - - // Deletes buffers if exception occurred - virtual void finalize( void* item ) { - size_t* m_Item = (size_t*)item; - delete[] m_Item; - --g_AllocatedCount; - } -}; - -//! Input filter to test finalize method -class InputFilterWithFinalization: public FinalizationBaseFilter { -public: - InputFilterWithFinalization() : FinalizationBaseFilter(tbb::filter::serial) { - g_TotalCount = 0; - } - void* operator()( void* ){ - if (g_TotalCount == NUM_BUFFERS) - return NULL; - size_t* item = new size_t[BUFFER_SIZE]; - for (int i = 0; i < BUFFER_SIZE; i++) - item[i] = 1; - ++g_TotalCount; - ++g_AllocatedCount; - return item; - } -}; - -// The filter multiplies each buffer item by 10. -class ProcessingFilterWithFinalization : public FinalizationBaseFilter { -public: - ProcessingFilterWithFinalization (tbb::filter::mode _mode, bool) : FinalizationBaseFilter (_mode) {} - - void* operator()( void* item) { - if(g_Master == Harness::CurrentTid()) g_MasterExecuted = true; - else g_NonMasterExecuted = true; - if( tbb::task::self().is_cancelled()) ++g_TGCCancelled; - if (g_TotalCount > NUM_BUFFERS / 2) - ThrowTestException(1); - size_t* m_Item = (size_t*)item; - for (int i = 0; i < BUFFER_SIZE; i++) - m_Item[i] *= 10; - return item; - } -}; - -// Output filter deletes previously allocated buffer -class OutputFilterWithFinalization : public FinalizationBaseFilter { -public: - OutputFilterWithFinalization (tbb::filter::mode m) : FinalizationBaseFilter (m) {} - - void* operator()( void* item){ - size_t* m_Item = (size_t*)item; - delete[] m_Item; - --g_AllocatedCount; - return NULL; - } -}; - -//! Tests filter::finalize method -void Test5_pipeline ( const FilterSet& filters ) { - ResetGlobals(); - g_AllocatedCount = 0; - CustomPipeline<InputFilterWithFinalization, ProcessingFilterWithFinalization> testPipeline(filters); - OutputFilterWithFinalization my_output_filter(tbb::filter::parallel); - - testPipeline.add_filter(my_output_filter); - TRY(); - testPipeline.run(); - CATCH(); - ASSERT (g_AllocatedCount == 0, "Memory leak: Some my_object weren't destroyed"); -} // void Test5_pipeline () - -//! Tests pipeline function passed with different combination of filters -template<void testFunc(const FilterSet&)> -void TestWithDifferentFilters() { - const int NumFilterTypes = 3; - const tbb::filter::mode modes[NumFilterTypes] = { - tbb::filter::parallel, - tbb::filter::serial, - tbb::filter::serial_out_of_order - }; - for ( int i = 0; i < NumFilterTypes; ++i ) { - for ( int j = 0; j < NumFilterTypes; ++j ) { - for ( int k = 0; k < 2; ++k ) - testFunc( FilterSet(modes[i], modes[j], k == 0, k != 0) ); - } - } -} - -#endif /* TBB_USE_EXCEPTIONS */ - -class FilterToCancel : public tbb::filter { -public: - FilterToCancel(bool is_parallel) - : filter( is_parallel ? 
tbb::filter::parallel : tbb::filter::serial_in_order )
- {}
- void* operator()(void* item) {
- ++g_CurExecuted;
- CancellatorTask::WaitUntilReady();
- return item;
- }
-}; // class FilterToCancel
-
-template <class Filter_to_cancel>
-class PipelineLauncherTask : public tbb::task {
- tbb::task_group_context &my_ctx;
-public:
- PipelineLauncherTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {}
-
- tbb::task* execute () {
- // Run test when serial filter is the first non-input filter
- InputFilter inputFilter;
- Filter_to_cancel filterToCancel(true);
- tbb::pipeline p;
- p.add_filter(inputFilter);
- p.add_filter(filterToCancel);
- p.run(g_NumTokens, my_ctx);
- return NULL;
- }
-};
-
-//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
-void TestCancelation1_pipeline () {
- ResetGlobals();
- g_ThrowException = false;
- intptr_t threshold = 10;
- tbb::task_group_context ctx;
- ctx.reset();
- tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
- r.set_ref_count(3);
- r.spawn( *new( r.allocate_child() ) CancellatorTask(ctx, threshold) );
- __TBB_Yield();
- r.spawn( *new( r.allocate_child() ) PipelineLauncherTask<FilterToCancel>(ctx) );
- TRY();
- r.wait_for_all();
- CATCH_AND_FAIL();
- r.destroy(r);
- ASSERT( g_TGCCancelled <= g_NumThreads, "Too many tasks survived cancellation");
- ASSERT (g_CurExecuted < g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks were executed after cancellation");
-}
-
-class FilterToCancel2 : public tbb::filter {
-public:
- FilterToCancel2(bool is_parallel)
- : filter ( is_parallel ? tbb::filter::parallel : tbb::filter::serial_in_order)
- {}
-
- void* operator()(void* item) {
- ++g_CurExecuted;
- Harness::ConcurrencyTracker ct;
- // The test will hang (and be timed out by the test system) if is_cancelled() is broken
- while( !tbb::task::self().is_cancelled() )
- __TBB_Yield();
- return item;
- }
-};
-
-//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
-/** This version also tests task::is_cancelled() method. **/
-void TestCancelation2_pipeline () {
- ResetGlobals();
- RunCancellationTest<PipelineLauncherTask<FilterToCancel2>, CancellatorTask2>();
- // g_CurExecuted is always >= g_ExecutedAtLastCatch, because the latter is always a snapshot of the
- // former, and g_CurExecuted is monotonically increasing, so the comparison should be at least ==.
- // If another filter is started after cancel but before cancellation is propagated, then the
- // number will be larger.
- ASSERT (g_CurExecuted <= g_ExecutedAtLastCatch, "Some tasks were executed after cancellation"); -} - -void RunPipelineTests() { - REMARK( "pipeline tests\n" ); - tbb::task_scheduler_init init (g_NumThreads); - g_Master = Harness::CurrentTid(); - g_NumTokens = 2 * g_NumThreads; - - Test0_pipeline(); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - TestWithDifferentFilters<Test1_pipeline>(); - TestWithDifferentFilters<Test2_pipeline>(); - TestWithDifferentFilters<Test3_pipeline>(); - TestWithDifferentFilters<Test4_pipeline>(); - TestWithDifferentFilters<Test5_pipeline>(); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ - TestCancelation1_pipeline(); - TestCancelation2_pipeline(); -} - - -#if TBB_USE_EXCEPTIONS - -class MyCapturedException : public tbb::captured_exception { -public: - static int m_refCount; - - MyCapturedException () : tbb::captured_exception("MyCapturedException", "test") { ++m_refCount; } - ~MyCapturedException () throw() { --m_refCount; } - - MyCapturedException* move () throw() { - MyCapturedException* movee = (MyCapturedException*)malloc(sizeof(MyCapturedException)); - return ::new (movee) MyCapturedException; - } - void destroy () throw() { - this->~MyCapturedException(); - free(this); - } - void operator delete ( void* p ) { free(p); } -}; - -int MyCapturedException::m_refCount = 0; - -void DeleteTbbException ( volatile tbb::tbb_exception* pe ) { - delete pe; -} - -void TestTbbExceptionAPI () { - const char *name = "Test captured exception", - *reason = "Unit testing"; - tbb::captured_exception e(name, reason); - ASSERT (strcmp(e.name(), name) == 0, "Setting captured exception name failed"); - ASSERT (strcmp(e.what(), reason) == 0, "Setting captured exception reason failed"); - tbb::captured_exception c(e); - ASSERT (strcmp(c.name(), e.name()) == 0, "Copying captured exception name failed"); - ASSERT (strcmp(c.what(), e.what()) == 0, "Copying captured exception reason failed"); - tbb::captured_exception *m = e.move(); - ASSERT (strcmp(m->name(), name) == 0, "Moving captured exception name failed"); - ASSERT (strcmp(m->what(), reason) == 0, "Moving captured exception reason failed"); - ASSERT (!e.name() && !e.what(), "Moving semantics broken"); - m->destroy(); - - MyCapturedException mce; - MyCapturedException *mmce = mce.move(); - ASSERT( MyCapturedException::m_refCount == 2, NULL ); - DeleteTbbException(mmce); - ASSERT( MyCapturedException::m_refCount == 1, NULL ); -} - -#endif /* TBB_USE_EXCEPTIONS */ - -/** If min and max thread numbers specified on the command line are different, - the test is run only for 2 sizes of the thread pool (MinThread and MaxThread) - to be able to test the high and low contention modes while keeping the test reasonably fast **/ -int TestMain () { - if(tbb::task_scheduler_init::default_num_threads() == 1) { - REPORT("Known issue: tests require multiple hardware threads\n"); - return Harness::Skipped; - } - REMARK ("Using %s\n", TBB_USE_CAPTURED_EXCEPTION ? 
"tbb:captured_exception" : "exact exception propagation"); - MinThread = min(tbb::task_scheduler_init::default_num_threads(), max(2, MinThread)); - MaxThread = max(MinThread, min(tbb::task_scheduler_init::default_num_threads(), MaxThread)); - ASSERT (FLAT_RANGE >= FLAT_GRAIN * MaxThread, "Fix defines"); - int step = max((MaxThread - MinThread + 1)/2, 1); - for ( g_NumThreads = MinThread; g_NumThreads <= MaxThread; g_NumThreads += step ) { - REMARK ("Number of threads %d\n", g_NumThreads); - // Execute in all the possible modes - for ( size_t j = 0; j < 4; ++j ) { - g_ExceptionInMaster = (j & 1) != 0; - g_SolitaryException = (j & 2) != 0; - REMARK("g_ExceptionInMaster==%s, g_SolitaryException==%s\n", g_ExceptionInMaster?"T":"F", g_SolitaryException?"T":"F"); - RunParForAndReduceTests(); - RunParDoTests(); - RunPipelineTests(); - } - } -#if TBB_USE_EXCEPTIONS - TestTbbExceptionAPI(); -#endif -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - REPORT("Known issue: exception handling tests are skipped.\n"); -#endif - return Harness::Done; -} - -#else /* !__TBB_TASK_GROUP_CONTEXT */ - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__TBB_TASK_GROUP_CONTEXT */ diff --git a/src/tbb/src/test/test_eh_flow_graph.cpp b/src/tbb/src/test/test_eh_flow_graph.cpp deleted file mode 100644 index fc411bb2f..000000000 --- a/src/tbb/src/test/test_eh_flow_graph.cpp +++ /dev/null @@ -1,2035 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" - -#if _MSC_VER - #pragma warning (disable: 4503) // Suppress "decorated name length exceeded, name was truncated" warning - #if !TBB_USE_EXCEPTIONS - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (disable: 4530) - #endif -#endif - -#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - // Suppress "unreachable code" warning by VC++ 17.0-18.0 (VS 2012 or newer) - #pragma warning (disable: 4702) -#endif - -#include "harness.h" - -// global task_scheduler_observer is an imperfect tool to find how many threads are really -// participating. That was the hope, but it counts the entries into the marketplace, -// not the arena. 
-// #define USE_TASK_SCHEDULER_OBSERVER 1
-
-#if _MSC_VER && defined(__INTEL_COMPILER) && !TBB_USE_DEBUG
- #define TBB_RUN_BUFFERING_TEST __INTEL_COMPILER > 1210
-#else
- #define TBB_RUN_BUFFERING_TEST 1
-#endif
-
-#if TBB_USE_EXCEPTIONS
-#if USE_TASK_SCHEDULER_OBSERVER
-#include "tbb/task_scheduler_observer.h"
-#endif
-#include "tbb/flow_graph.h"
-#include "tbb/task_scheduler_init.h"
-#include <iostream>
-#include <vector>
-#include "harness_assert.h"
-#include "harness_checktype.h"
-
-inline intptr_t Existed() { return INT_MAX; } // resolve Existed in harness_eh.h
-
-#include "harness_eh.h"
-#include <stdexcept>
-
-#define NUM_ITEMS 15
-int g_NumItems;
-
-tbb::atomic<unsigned> nExceptions;
-tbb::atomic<intptr_t> g_TGCCancelled;
-
-enum TestNodeTypeEnum { nonThrowing, isThrowing };
-
-static const size_t unlimited_type = 0;
-static const size_t serial_type = 1;
-static const size_t limited_type = 4;
-
-template<TestNodeTypeEnum T> struct TestNodeTypeName;
-template<> struct TestNodeTypeName<nonThrowing> { static const char *name() { return "nonThrowing"; } };
-template<> struct TestNodeTypeName<isThrowing> { static const char *name() { return "isThrowing"; } };
-
-template<size_t Conc> struct concurrencyName;
-template<> struct concurrencyName<serial_type>{ static const char *name() { return "serial"; } };
-template<> struct concurrencyName<unlimited_type>{ static const char *name() { return "unlimited"; } };
-template<> struct concurrencyName<limited_type>{ static const char *name() { return "limited"; } };
-
-// Class that provides waiting and throwing behavior. If we are not throwing, do nothing.
-// If serial, we can't wait for concurrency to peak; we may be the bottleneck and will
-// stop further processing. We will execute g_NumThreads + 10 times (the "10" is somewhat
-// arbitrary, and just makes sure there are enough items in the graph to keep it flowing).
-// If parallel or serial and throwing, use Harness::ConcurrencyTracker to wait.
-
-template<size_t Conc, TestNodeTypeEnum t = nonThrowing>
-class WaitThrow;
-
-template<>
-class WaitThrow<serial_type,nonThrowing> {
-protected:
- void WaitAndThrow(int cnt, const char * /*name*/) {
- if(cnt > g_NumThreads + 10) {
- Harness::ConcurrencyTracker ct;
- WaitUntilConcurrencyPeaks();
- }
- }
-};
-
-template<>
-class WaitThrow<serial_type,isThrowing> {
-protected:
- void WaitAndThrow(int cnt, const char * /*name*/) {
- if(cnt > g_NumThreads + 10) {
- Harness::ConcurrencyTracker ct;
- WaitUntilConcurrencyPeaks();
- ThrowTestException(1);
- }
- }
-};
-
-// for nodes with limited concurrency, if that concurrency is < g_NumThreads, we need
-// to make sure enough other nodes wait for concurrency to peak. If we are attached to
-// N successors, for each item we pass to a successor, we will get N executions of the
-// "absorbers" (because we broadcast to successors.) For an odd number of threads we
-// need (g_NumThreads - limited + 1) / 2 items (that will give us one extra execution
-// of an "absorber", but we can't change that without changing the behavior of the node.)
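-// For example (hypothetical numbers, illustrative only): with g_NumThreads == 8 and
-// limited == 4, the threshold is (8 - 4 + 1) / 2 == 2 under integer division, so the
-// first two calls fall out of WaitAndThrow() immediately and later calls wait for
-// concurrency to peak (and, in the isThrowing variant, then throw).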
-template<> -class WaitThrow<limited_type,nonThrowing> { -protected: - void WaitAndThrow(int cnt, const char * /*name*/) { - if(cnt <= (g_NumThreads - (int)limited_type + 1)/2) { - return; - } - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks(); - } -}; - -template<> -class WaitThrow<limited_type,isThrowing> { -protected: - void WaitAndThrow(int cnt, const char * /*name*/) { - Harness::ConcurrencyTracker ct; - if(cnt <= (g_NumThreads - (int)limited_type + 1)/2) { - return; - } - WaitUntilConcurrencyPeaks(); - ThrowTestException(1); - } -}; - -template<> -class WaitThrow<unlimited_type,nonThrowing> { -protected: - void WaitAndThrow(int /*cnt*/, const char * /*name*/) { - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks(); - } -}; - -template<> -class WaitThrow<unlimited_type,isThrowing> { -protected: - void WaitAndThrow(int /*cnt*/, const char * /*name*/) { - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks(); - ThrowTestException(1); - } -}; - -void -ResetGlobals(bool throwException = true, bool flog = false) { - nExceptions = 0; - g_TGCCancelled = 0; - ResetEhGlobals(throwException, flog); -} - -// -------source_node body ------------------ -template <class OutputType, TestNodeTypeEnum TType> -class test_source_body : WaitThrow<serial_type, TType> { - using WaitThrow<serial_type, TType>::WaitAndThrow; - tbb::atomic<int> *my_current_val; - int my_mult; -public: - test_source_body(tbb::atomic<int> &my_cnt, int multiplier = 1) : my_current_val(&my_cnt), my_mult(multiplier) { } - - bool operator()(OutputType & out) { - UPDATE_COUNTS(); - out = OutputType(my_mult * ++(*my_current_val)); - if(*my_current_val > g_NumItems) { - *my_current_val = g_NumItems; - return false; - } - WaitAndThrow((int)out,"test_source_body"); - return true; - } - - int count_value() { return (int)*my_current_val; } -}; - -template <TestNodeTypeEnum TType> -class test_source_body<tbb::flow::continue_msg, TType> : WaitThrow<serial_type, TType> { - using WaitThrow<serial_type, TType>::WaitAndThrow; - tbb::atomic<int> *my_current_val; -public: - test_source_body(tbb::atomic<int> &my_cnt) : my_current_val(&my_cnt) { } - - bool operator()(tbb::flow::continue_msg & out) { - UPDATE_COUNTS(); - int outint = ++(*my_current_val); - out = tbb::flow::continue_msg(); - if(*my_current_val > g_NumItems) { - *my_current_val = g_NumItems; - return false; - } - WaitAndThrow(outint,"test_source_body"); - return true; - } - - int count_value() { return (int)*my_current_val; } -}; - -// -------{function/continue}_node body ------------------ -template<class InputType, class OutputType, TestNodeTypeEnum T, size_t Conc> -class absorber_body : WaitThrow<Conc,T> { - using WaitThrow<Conc,T>::WaitAndThrow; - tbb::atomic<int> *my_count; -public: - absorber_body(tbb::atomic<int> &my_cnt) : my_count(&my_cnt) { } - OutputType operator()(const InputType &/*p_in*/) { - UPDATE_COUNTS(); - int out = ++(*my_count); - WaitAndThrow(out,"absorber_body"); - return OutputType(); - } - int count_value() { return *my_count; } -}; - -// -------multifunction_node body ------------------ - -// helper classes -template<int N,class PortsType> -struct IssueOutput { - typedef typename tbb::flow::tuple_element<N-1,PortsType>::type::output_type my_type; - - static void issue_tuple_element( PortsType &my_ports) { - ASSERT(tbb::flow::get<N-1>(my_ports).try_put(my_type()), "Error putting to successor"); - IssueOutput<N-1,PortsType>::issue_tuple_element(my_ports); - } -}; - -template<class PortsType> -struct IssueOutput<1,PortsType> { - 
typedef typename tbb::flow::tuple_element<0,PortsType>::type::output_type my_type; - - static void issue_tuple_element( PortsType &my_ports) { - ASSERT(tbb::flow::get<0>(my_ports).try_put(my_type()), "Error putting to successor"); - } -}; - -template<class InputType, class OutputTupleType, TestNodeTypeEnum T, size_t Conc> -class multifunction_node_body : WaitThrow<Conc,T> { - using WaitThrow<Conc,T>::WaitAndThrow; - static const int N = tbb::flow::tuple_size<OutputTupleType>::value; - typedef typename tbb::flow::multifunction_node<InputType,OutputTupleType> NodeType; - typedef typename NodeType::output_ports_type PortsType; - tbb::atomic<int> *my_count; -public: - multifunction_node_body(tbb::atomic<int> &my_cnt) : my_count(&my_cnt) { } - void operator()(const InputType& /*in*/, PortsType &my_ports) { - UPDATE_COUNTS(); - int out = ++(*my_count); - WaitAndThrow(out,"multifunction_node_body"); - // issue an item to each output port. - IssueOutput<N,PortsType>::issue_tuple_element(my_ports); - } - - int count_value() { return *my_count; } -}; - -// --------- body to sort items in sequencer_node -template<class BufferItemType> -struct sequencer_body { - size_t operator()(const BufferItemType &s) { - return size_t(s) - 1; - } -}; - -// --------- body to compare the "priorities" of objects for priority_queue_node five priority levels 0-4. -template<class T> -struct myLess { - bool operator()(const T &t1, const T &t2) { - return (int(t1) % 5) < (int(t2) % 5); - } -}; - -// --------- type for < comparison in priority_queue_node. -template<class ItemType> -struct less_body : public std::binary_function<ItemType,ItemType,bool> { - bool operator()(const ItemType &lhs, const ItemType &rhs) { - return ((int)lhs % 3) < ((int)rhs % 3); - } -}; - -// --------- tag methods for tag_matching join_node -template<typename TT> -class tag_func { - TT my_mult; -public: - tag_func(TT multiplier) : my_mult(multiplier) { } - void operator=( const tag_func& other){my_mult = other.my_mult;} - // operator() will return [0 .. Count) - tbb::flow::tag_value operator()( TT v) { - tbb::flow::tag_value t = tbb::flow::tag_value(v / my_mult); - return t; - } -}; - -// --------- Source body for split_node test. -template <class OutputTuple, TestNodeTypeEnum TType> -class tuple_test_source_body : WaitThrow<serial_type, TType> { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,OutputTuple>::type ItemType1; - using WaitThrow<serial_type, TType>::WaitAndThrow; - tbb::atomic<int> *my_current_val; -public: - tuple_test_source_body(tbb::atomic<int> &my_cnt) : my_current_val(&my_cnt) { } - - bool operator()(OutputTuple & out) { - UPDATE_COUNTS(); - int ival = ++(*my_current_val); - out = OutputTuple(ItemType0(ival),ItemType1(ival)); - if(*my_current_val > g_NumItems) { - *my_current_val = g_NumItems; // jam the final value; we assert on it later. - return false; - } - WaitAndThrow(ival,"tuple_test_source_body"); - return true; - } - - int count_value() { return (int)*my_current_val; } -}; - -// ------- end of node bodies - -// source_node is only-serial. source_node can throw, or the function_node can throw. -// graph being tested is -// -// source_node+---+parallel function_node -// -// After each run the graph is reset(), to test the reset functionality. 
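-//
-// For orientation, the wiring under test reduces to roughly the following sketch
-// (illustrative only; `body` and `absorb` are hypothetical functors, but the calls
-// are the same old-style flow-graph API the test itself uses):
-//
-//     tbb::flow::graph g;
-//     tbb::flow::source_node<int> src(g, body, /*is_active*/false);
-//     tbb::flow::function_node<int> sink(g, tbb::flow::unlimited, absorb);
-//     make_edge(src, sink);
-//     src.activate();       // sources are constructed inactive in these tests
-//     g.wait_for_all();     // an exception thrown in either body surfaces here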
-// - - -template<class ItemType, TestNodeTypeEnum srcThrowType, TestNodeTypeEnum absorbThrowType> -void run_one_source_node_test(bool throwException, bool flog) { - typedef test_source_body<ItemType,srcThrowType> src_body_type; - typedef absorber_body<ItemType, tbb::flow::continue_msg, absorbThrowType, unlimited_type> parallel_absorb_body_type; - tbb::atomic<int> source_body_count; - tbb::atomic<int> absorber_body_count; - source_body_count = 0; - absorber_body_count = 0; - - tbb::flow::graph g; - - g_Master = Harness::CurrentTid(); - -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - - tbb::flow::source_node<ItemType> sn(g, src_body_type(source_body_count),/*is_active*/false); - parallel_absorb_body_type ab2(absorber_body_count); - tbb::flow::function_node<ItemType> parallel_fn(g,tbb::flow::unlimited,ab2); - make_edge(sn, parallel_fn); - for(int runcnt = 0; runcnt < 2; ++runcnt) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - sn.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - sn.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int src_cnt = tbb::flow::copy_body<src_body_type>(sn).count_value(); - int sink_cnt = tbb::flow::copy_body<parallel_absorb_body_type>(parallel_fn).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception flag in flow::graph not set"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "canceled flag not set"); - ASSERT(src_cnt <= g_NumItems, "Too many source_node items emitted"); - ASSERT(sink_cnt <= src_cnt, "Too many source_node items received"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(src_cnt == g_NumItems, "Incorrect # source_node items emitted"); - ASSERT(sink_cnt == src_cnt, "Incorrect # source_node items received"); - } - g.reset(); // resets the body of the source_node and the absorb_nodes. 
- source_body_count = 0; - absorber_body_count = 0; - ASSERT(!g.exception_thrown(), "Reset didn't clear exception_thrown()"); - ASSERT(!g.is_cancelled(), "Reset didn't clear is_cancelled()"); - src_cnt = tbb::flow::copy_body<src_body_type>(sn).count_value(); - sink_cnt = tbb::flow::copy_body<parallel_absorb_body_type>(parallel_fn).count_value(); - ASSERT(src_cnt == 0, "source_node count not reset"); - ASSERT(sink_cnt == 0, "sink_node count not reset"); - } -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} // run_one_source_node_test - - -template<class ItemType, TestNodeTypeEnum srcThrowType, TestNodeTypeEnum absorbThrowType> -void run_source_node_test() { - run_one_source_node_test<ItemType,srcThrowType,absorbThrowType>(false,false); - run_one_source_node_test<ItemType,srcThrowType,absorbThrowType>(true,false); - run_one_source_node_test<ItemType,srcThrowType,absorbThrowType>(true,true); -} // run_source_node_test - -void test_source_node() { - REMARK("Testing source_node\n"); - check_type<int>::check_type_counter = 0; - g_Wakeup_Msg = "source_node(1): Missed wakeup or machine is overloaded?"; - run_source_node_test<check_type<int>, isThrowing, nonThrowing>(); - ASSERT(!check_type<int>::check_type_counter, "Some items leaked in test"); - g_Wakeup_Msg = "source_node(2): Missed wakeup or machine is overloaded?"; - run_source_node_test<int, isThrowing, nonThrowing>(); - g_Wakeup_Msg = "source_node(3): Missed wakeup or machine is overloaded?"; - run_source_node_test<int, nonThrowing, isThrowing>(); - g_Wakeup_Msg = "source_node(4): Missed wakeup or machine is overloaded?"; - run_source_node_test<int, isThrowing, isThrowing>(); - g_Wakeup_Msg = "source_node(5): Missed wakeup or machine is overloaded?"; - run_source_node_test<check_type<int>, isThrowing, isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; - ASSERT(!check_type<int>::check_type_counter, "Some items leaked in test"); -} - -// -------- utilities & types to test function_node and multifunction_node. - -// need to tell the template which node type I am using so it attaches successors correctly. -enum NodeFetchType { func_node_type, multifunc_node_type }; - -template<class NodeType, class ItemType, int indx, NodeFetchType NFT> -struct AttachPoint; - -template<class NodeType, class ItemType, int indx> -struct AttachPoint<NodeType,ItemType,indx,multifunc_node_type> { - static tbb::flow::sender<ItemType> &GetSender(NodeType &n) { - return tbb::flow::output_port<indx>(n); - } -}; - -template<class NodeType, class ItemType, int indx> -struct AttachPoint<NodeType,ItemType,indx,func_node_type> { - static tbb::flow::sender<ItemType> &GetSender(NodeType &n) { - return n; - } -}; - - -// common template for running function_node, multifunction_node. continue_node -// has different firing requirements, so it needs a different graph topology. 
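-//
-// The AttachPoint dispatch above is what lets one harness drive both node kinds.
-// For example (hypothetical node n and sink), for a multifunction_node type MFNodeType
-// emitting int on port 0:
-//
-//     make_edge(AttachPoint<MFNodeType, int, 0, multifunc_node_type>::GetSender(n), sink);
-//     // ...resolves to make_edge(tbb::flow::output_port<0>(n), sink);
-//
-// whereas for func_node_type, GetSender(n) simply returns the node itself.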
-template< - class SourceNodeType, - class SourceNodeBodyType0, - class SourceNodeBodyType1, - NodeFetchType NFT, - class TestNodeType, - class TestNodeBodyType, - class TypeToSink0, // what kind of item are we sending to sink0 - class TypeToSink1, // what kind of item are we sending to sink1 - class SinkNodeType0, // will be same for function; - class SinkNodeType1, // may differ for multifunction_node - class SinkNodeBodyType0, - class SinkNodeBodyType1, - size_t Conc - > -void -run_one_functype_node_test(bool throwException, bool flog, const char * /*name*/) { - - char mymsg[132]; - char *saved_msg = const_cast<char *>(g_Wakeup_Msg); - tbb::flow::graph g; - - tbb::atomic<int> source0_count; - tbb::atomic<int> source1_count; - tbb::atomic<int> sink0_count; - tbb::atomic<int> sink1_count; - tbb::atomic<int> test_count; - source0_count = source1_count = sink0_count = sink1_count = test_count = 0; - -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - - g_Master = Harness::CurrentTid(); - SourceNodeType source0(g, SourceNodeBodyType0(source0_count),/*is_active*/false); - SourceNodeType source1(g, SourceNodeBodyType1(source1_count),/*is_active*/false); - TestNodeType node_to_test(g, Conc, TestNodeBodyType(test_count)); - SinkNodeType0 sink0(g,tbb::flow::unlimited,SinkNodeBodyType0(sink0_count)); - SinkNodeType1 sink1(g,tbb::flow::unlimited,SinkNodeBodyType1(sink1_count)); - make_edge(source0, node_to_test); - make_edge(source1, node_to_test); - make_edge(AttachPoint<TestNodeType, TypeToSink0, 0, NFT>::GetSender(node_to_test), sink0); - make_edge(AttachPoint<TestNodeType, TypeToSink1, 1, NFT>::GetSender(node_to_test), sink1); - - for(int iter = 0; iter < 2; ++iter) { // run, reset, run again - sprintf(mymsg, "%s iter=%d, threads=%d, throw=%s, flog=%s", saved_msg, iter, g_NumThreads, - throwException?"T":"F", flog?"T":"F"); - g_Wakeup_Msg = mymsg; - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb0_cnt = tbb::flow::copy_body<SourceNodeBodyType0>(source0).count_value(); - int sb1_cnt = tbb::flow::copy_body<SourceNodeBodyType1>(source1).count_value(); - int t_cnt = tbb::flow::copy_body<TestNodeBodyType>(node_to_test).count_value(); - int nb0_cnt = tbb::flow::copy_body<SinkNodeBodyType0>(sink0).count_value(); - int nb1_cnt = tbb::flow::copy_body<SinkNodeBodyType1>(sink1).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb0_cnt + sb1_cnt <= 2*g_NumItems, "Too many items sent by sources"); - ASSERT(sb0_cnt + sb1_cnt >= t_cnt, "Too many items received by test node"); - ASSERT(nb0_cnt + nb1_cnt <= t_cnt*2, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb0_cnt + sb1_cnt == 2*g_NumItems, "Missing invocations of source_nodes"); - ASSERT(t_cnt == 2*g_NumItems, "Not all items reached test node"); - ASSERT(nb0_cnt == 2*g_NumItems && nb1_cnt == 
2*g_NumItems, "Missing items in absorbers");
- }
- g.reset(); // resets the body of the source_nodes, test_node and the absorb_nodes.
- source0_count = source1_count = sink0_count = sink1_count = test_count = 0;
- ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType0>(source0).count_value(),"Reset source 0 failed");
- ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType1>(source1).count_value(),"Reset source 1 failed");
- ASSERT(0 == tbb::flow::copy_body<TestNodeBodyType>(node_to_test).count_value(),"Reset test_node failed");
- ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType0>(sink0).count_value(),"Reset sink 0 failed");
- ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType1>(sink1).count_value(),"Reset sink 1 failed");
-
- g_Wakeup_Msg = saved_msg;
- }
-#if USE_TASK_SCHEDULER_OBSERVER
- o.observe(false);
-#endif
-}
-
-// Test function_node
-//
-// graph being tested is
-//
-// source_node -\ /- parallel function_node
-// \ /
-// +function_node+
-// / \ x
-// source_node -/ \- parallel function_node
-//
-// After each run the graph is reset(), to test the reset functionality.
-//
-template<
- TestNodeTypeEnum SType1, // does source node 1 throw?
- TestNodeTypeEnum SType2, // does source node 2 throw?
- class Item12, // type of item passed between sources and test node
- TestNodeTypeEnum FType, // does function node throw?
- class Item23, // type passed from function_node to sink nodes
- TestNodeTypeEnum NType1, // does sink node 1 throw?
- TestNodeTypeEnum NType2, // does sink node 2 throw?
- tbb::flow::graph_buffer_policy NodePolicy, // rejecting,queueing
- size_t Conc // is node concurrent? {serial | limited | unlimited}
->
-void run_function_node_test() {
-
- typedef test_source_body<Item12,SType1> SBodyType1;
- typedef test_source_body<Item12,SType2> SBodyType2;
- typedef absorber_body<Item12, Item23, FType, Conc> TestBodyType;
- typedef absorber_body<Item23,tbb::flow::continue_msg, NType1, unlimited_type> SinkBodyType1;
- typedef absorber_body<Item23,tbb::flow::continue_msg, NType2, unlimited_type> SinkBodyType2;
-
- typedef tbb::flow::source_node<Item12> SrcType;
- typedef tbb::flow::function_node<Item12, Item23, NodePolicy> TestType;
- typedef tbb::flow::function_node<Item23,tbb::flow::continue_msg> SnkType;
-
- for(int i = 0; i < 4; ++i ) {
- if(i != 2) { // doesn't make sense to flog a non-throwing test
- bool doThrow = (i & 0x1) != 0;
- bool doFlog = (i & 0x2) != 0;
- run_one_functype_node_test<
- /*SourceNodeType*/ SrcType,
- /*SourceNodeBodyType0*/ SBodyType1,
- /*SourceNodeBodyType1*/ SBodyType2,
- /* NFT */ func_node_type,
- /*TestNodeType*/ TestType,
- /*TestNodeBodyType*/ TestBodyType,
- /*TypeToSink0 */ Item23,
- /*TypeToSink1 */ Item23,
- /*SinkNodeType0*/ SnkType,
- /*SinkNodeType1*/ SnkType,
- /*SinkNodeBodyType0*/ SinkBodyType1,
- /*SinkNodeBodyType1*/ SinkBodyType2,
- /*Conc*/ Conc>
- (doThrow,doFlog,"function_node");
- }
- }
-} // run_function_node_test
-
-void test_function_node() {
- REMARK("Testing function_node\n");
- // serial rejecting
- g_Wakeup_Msg = "function_node(1a): Missed wakeup or machine is overloaded?";
- run_function_node_test<isThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, nonThrowing, tbb::flow::rejecting, serial_type>();
- g_Wakeup_Msg = "function_node(1b): Missed wakeup or machine is overloaded?";
- run_function_node_test<nonThrowing, nonThrowing, int, isThrowing, int, nonThrowing, nonThrowing, tbb::flow::rejecting, serial_type>();
- g_Wakeup_Msg = "function_node(1c): Missed wakeup or machine is overloaded?";
-
run_function_node_test<nonThrowing, nonThrowing, int, nonThrowing, int, isThrowing, nonThrowing, tbb::flow::rejecting, serial_type>();
-
- // serial queueing
- g_Wakeup_Msg = "function_node(2): Missed wakeup or machine is overloaded?";
- run_function_node_test<isThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, nonThrowing, tbb::flow::queueing, serial_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, isThrowing, int, nonThrowing, nonThrowing, tbb::flow::queueing, serial_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, nonThrowing, int, isThrowing, nonThrowing, tbb::flow::queueing, serial_type>();
- check_type<int>::check_type_counter = 0;
- run_function_node_test<nonThrowing, nonThrowing, check_type<int>, nonThrowing, check_type<int>, isThrowing, nonThrowing, tbb::flow::queueing, serial_type>();
- ASSERT(!check_type<int>::check_type_counter, "Some items leaked in test");
-
- // unlimited parallel rejecting
- g_Wakeup_Msg = "function_node(3): Missed wakeup or machine is overloaded?";
- run_function_node_test<isThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, nonThrowing, tbb::flow::rejecting, unlimited_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, isThrowing, int, nonThrowing, nonThrowing, tbb::flow::rejecting, unlimited_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, isThrowing, tbb::flow::rejecting, unlimited_type>();
-
- // limited parallel rejecting
- g_Wakeup_Msg = "function_node(4): Missed wakeup or machine is overloaded?";
- run_function_node_test<isThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, nonThrowing, tbb::flow::rejecting, limited_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, isThrowing, int, nonThrowing, nonThrowing, tbb::flow::rejecting, (size_t)limited_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, isThrowing, tbb::flow::rejecting, (size_t)limited_type>();
-
- // limited parallel queueing
- g_Wakeup_Msg = "function_node(5): Missed wakeup or machine is overloaded?";
- run_function_node_test<isThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, nonThrowing, tbb::flow::queueing, (size_t)limited_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, isThrowing, int, nonThrowing, nonThrowing, tbb::flow::queueing, (size_t)limited_type>();
- run_function_node_test<nonThrowing, nonThrowing, int, nonThrowing, int, nonThrowing, isThrowing, tbb::flow::queueing, (size_t)limited_type>();
-
- // everyone throwing
- g_Wakeup_Msg = "function_node(6): Missed wakeup or machine is overloaded?";
- run_function_node_test<isThrowing, isThrowing, int, isThrowing, int, isThrowing, isThrowing, tbb::flow::rejecting, unlimited_type>();
- g_Wakeup_Msg = g_Orig_Wakeup_Msg;
-}
-
-// ----------------------------------- multifunction_node ----------------------------------
-// Test multifunction_node.
-//
-// graph being tested is
-//
-// source_node -\ /- parallel function_node
-// \ /
-// +multifunction_node+
-// / \ x
-// source_node -/ \- parallel function_node
-//
-// After each run the graph is reset(), to test the reset functionality. The
-// multifunction_node will put an item to each successor for every item
-// received.
-//
-template<
- TestNodeTypeEnum SType0, // does source node 1 throw?
- TestNodeTypeEnum SType1, // does source node 2 throw?
- class Item12, // type of item passed between sources and test node
- TestNodeTypeEnum FType, // does multifunction node throw?
- class ItemTuple, // tuple of types passed from multifunction_node to sink nodes - TestNodeTypeEnum NType1, // does sink node 1 throw? - TestNodeTypeEnum NType2, // does sink node 2 throw? - tbb::flow::graph_buffer_policy NodePolicy, // rejecting,queueing - size_t Conc // is node concurrent? {serial | limited | unlimited} -> -void run_multifunction_node_test() { - - typedef typename tbb::flow::tuple_element<0,ItemTuple>::type Item23Type0; - typedef typename tbb::flow::tuple_element<1,ItemTuple>::type Item23Type1; - typedef test_source_body<Item12,SType0> SBodyType1; - typedef test_source_body<Item12,SType1> SBodyType2; - typedef multifunction_node_body<Item12, ItemTuple, FType, Conc> TestBodyType; - typedef absorber_body<Item23Type0,tbb::flow::continue_msg, NType1, unlimited_type> SinkBodyType1; - typedef absorber_body<Item23Type1,tbb::flow::continue_msg, NType2, unlimited_type> SinkBodyType2; - - typedef tbb::flow::source_node<Item12> SrcType; - typedef tbb::flow::multifunction_node<Item12, ItemTuple, NodePolicy> TestType; - typedef tbb::flow::function_node<Item23Type0,tbb::flow::continue_msg> SnkType0; - typedef tbb::flow::function_node<Item23Type1,tbb::flow::continue_msg> SnkType1; - - for(int i = 0; i < 4; ++i ) { - if(i != 2) { // doesn't make sense to flog a non-throwing test - bool doThrow = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_functype_node_test< - /*SourceNodeType*/ SrcType, - /*SourceNodeBodyType0*/ SBodyType1, - /*SourceNodeBodyType1*/ SBodyType2, - /*NFT*/ multifunc_node_type, - /*TestNodeType*/ TestType, - /*TestNodeBodyType*/ TestBodyType, - /*TypeToSink0*/ Item23Type0, - /*TypeToSink1*/ Item23Type1, - /*SinkNodeType0*/ SnkType0, - /*SinkNodeType1*/ SnkType1, - /*SinkNodeBodyType0*/ SinkBodyType1, - /*SinkNodeBodyType1*/ SinkBodyType2, - /*Conc*/ Conc> - (doThrow,doFlog,"multifunction_node"); - } - } -} // run_multifunction_node_test - -void test_multifunction_node() { - REMARK("Testing multifunction_node\n"); - g_Wakeup_Msg = "multifunction_node(source throws,rejecting,serial): Missed wakeup or machine is overloaded?"; - // serial rejecting - run_multifunction_node_test<isThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,float>, nonThrowing, nonThrowing, tbb::flow::rejecting, serial_type>(); - g_Wakeup_Msg = "multifunction_node(test throws,rejecting,serial): Missed wakeup or machine is overloaded?"; - run_multifunction_node_test<nonThrowing, nonThrowing, int, isThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::rejecting, serial_type>(); - g_Wakeup_Msg = "multifunction_node(sink throws,rejecting,serial): Missed wakeup or machine is overloaded?"; - run_multifunction_node_test<nonThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, isThrowing, nonThrowing, tbb::flow::rejecting, serial_type>(); - - g_Wakeup_Msg = "multifunction_node(2): Missed wakeup or machine is overloaded?"; - // serial queueing - run_multifunction_node_test<isThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::queueing, serial_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, isThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::queueing, serial_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, isThrowing, nonThrowing, tbb::flow::queueing, serial_type>(); - check_type<int>::check_type_counter = 0; - run_multifunction_node_test<nonThrowing, nonThrowing, check_type<int>, nonThrowing, 
tbb::flow::tuple<check_type<int>, check_type<int> >, isThrowing, nonThrowing, tbb::flow::queueing, serial_type>(); - ASSERT(!check_type<int>::check_type_counter, "Some items leaked in test"); - - g_Wakeup_Msg = "multifunction_node(3): Missed wakeup or machine is overloaded?"; - // unlimited parallel rejecting - run_multifunction_node_test<isThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::rejecting, unlimited_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, isThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::rejecting, unlimited_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, isThrowing, tbb::flow::rejecting, unlimited_type>(); - - g_Wakeup_Msg = "multifunction_node(4): Missed wakeup or machine is overloaded?"; - // limited parallel rejecting - run_multifunction_node_test<isThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::rejecting, limited_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, isThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::rejecting, (size_t)limited_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, isThrowing, tbb::flow::rejecting, (size_t)limited_type>(); - - g_Wakeup_Msg = "multifunction_node(5): Missed wakeup or machine is overloaded?"; - // limited parallel queueing - run_multifunction_node_test<isThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::queueing, (size_t)limited_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, isThrowing, tbb::flow::tuple<int,int>, nonThrowing, nonThrowing, tbb::flow::queueing, (size_t)limited_type>(); - run_multifunction_node_test<nonThrowing, nonThrowing, int, nonThrowing, tbb::flow::tuple<int,int>, nonThrowing, isThrowing, tbb::flow::queueing, (size_t)limited_type>(); - - g_Wakeup_Msg = "multifunction_node(6): Missed wakeup or machine is overloaded?"; - // everyone throwing - run_multifunction_node_test<isThrowing, isThrowing, int, isThrowing, tbb::flow::tuple<int,int>, isThrowing, isThrowing, tbb::flow::rejecting, unlimited_type>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// -// Continue node has T predecessors. when it receives messages (continue_msg) on T predecessors -// it executes the body of the node, and forwards a continue_msg to its successors. -// However many predecessors the continue_node has, that's how many continue_msgs it receives -// on input before forwarding a message. -// -// The graph will look like -// -// +broadcast_node+ -// / \ ___ -// source_node+------>+broadcast_node+ +continue_node+--->+absorber -// \ / -// +broadcast_node+ -// -// The continue_node has unlimited parallelism, no input buffering, and broadcasts to successors. -// The absorber is parallel, so each item emitted by the source will result in one thread -// spinning. So for N threads we pass N-1 continue_messages, then spin wait and then throw if -// we are allowed to. 
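-//
-// Sketch of the firing rule exercised here (illustrative only; a hypothetical
-// two-predecessor case, with names b2/b3 as in the graph below):
-//
-//     tbb::flow::continue_node<int> cn(g, body);   // body runs only once a
-//     make_edge(b2, cn);                           // continue_msg has arrived
-//     make_edge(b3, cn);                           // from BOTH predecessors
-//     b2.try_put(tbb::flow::continue_msg());       // 1 of 2: cn does not fire
-//     b3.try_put(tbb::flow::continue_msg());       // 2 of 2: cn fires once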
- -template < class SourceNodeType, class SourceNodeBodyType, class TTestNodeType, class TestNodeBodyType, - class SinkNodeType, class SinkNodeBodyType> -void run_one_continue_node_test (bool throwException, bool flog) { - tbb::flow::graph g; - - tbb::atomic<int> source_count; - tbb::atomic<int> test_count; - tbb::atomic<int> sink_count; - source_count = test_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceNodeType source(g, SourceNodeBodyType(source_count),/*is_active*/false); - TTestNodeType node_to_test(g, TestNodeBodyType(test_count)); - SinkNodeType sink(g,tbb::flow::unlimited,SinkNodeBodyType(sink_count)); - tbb::flow::broadcast_node<tbb::flow::continue_msg> b1(g), b2(g), b3(g); - make_edge(source, b1); - make_edge(b1,b2); - make_edge(b1,b3); - make_edge(b2,node_to_test); - make_edge(b3,node_to_test); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb_cnt = tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(); - int t_cnt = tbb::flow::copy_body<TestNodeBodyType>(node_to_test).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(sb_cnt >= t_cnt, "Too many items received by test node"); - ASSERT(nb_cnt <= t_cnt, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb_cnt == g_NumItems, "Missing invocations of source_node"); - ASSERT(t_cnt == g_NumItems, "Not all items reached test node"); - ASSERT(nb_cnt == g_NumItems, "Missing items in absorbers"); - } - g.reset(); // resets the body of the source_nodes, test_node and the absorb_nodes. - source_count = test_count = sink_count = 0; - ASSERT(0 == (int)test_count, "Atomic wasn't reset properly"); - ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<TestNodeBodyType>(node_to_test).count_value(),"Reset test_node failed"); - ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(),"Reset sink failed"); - } -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} - -template< - class ItemType, - TestNodeTypeEnum SType, // does source node throw? - TestNodeTypeEnum CType, // does continue_node throw? 
- TestNodeTypeEnum AType> // does absorber throw?
-void run_continue_node_test() {
- typedef test_source_body<tbb::flow::continue_msg,SType> SBodyType;
- typedef absorber_body<tbb::flow::continue_msg,ItemType,CType,unlimited_type> ContBodyType;
- typedef absorber_body<ItemType,tbb::flow::continue_msg, AType, unlimited_type> SinkBodyType;
-
- typedef tbb::flow::source_node<tbb::flow::continue_msg> SrcType;
- typedef tbb::flow::continue_node<ItemType> TestType;
- typedef tbb::flow::function_node<ItemType,tbb::flow::continue_msg> SnkType;
-
- for(int i = 0; i < 4; ++i ) {
- if(i == 2) continue; // don't run (false,true); it doesn't make sense.
- bool doThrow = (i & 0x1) != 0;
- bool doFlog = (i & 0x2) != 0;
- run_one_continue_node_test<
- /*SourceNodeType*/ SrcType,
- /*SourceNodeBodyType*/ SBodyType,
- /*TestNodeType*/ TestType,
- /*TestNodeBodyType*/ ContBodyType,
- /*SinkNodeType*/ SnkType,
- /*SinkNodeBodyType*/ SinkBodyType>
- (doThrow,doFlog);
- }
-}
-
-//
-void test_continue_node() {
- REMARK("Testing continue_node\n");
- g_Wakeup_Msg = "continue_node(non,is,non): Missed wakeup or machine is overloaded?";
- run_continue_node_test<int,nonThrowing,isThrowing,nonThrowing>();
- g_Wakeup_Msg = "continue_node(non,non,is): Missed wakeup or machine is overloaded?";
- run_continue_node_test<int,nonThrowing,nonThrowing,isThrowing>();
- g_Wakeup_Msg = "continue_node(is,non,non): Missed wakeup or machine is overloaded?";
- run_continue_node_test<int,isThrowing,nonThrowing,nonThrowing>();
- g_Wakeup_Msg = "continue_node(is,is,is): Missed wakeup or machine is overloaded?";
- run_continue_node_test<int,isThrowing,isThrowing,isThrowing>();
- check_type<double>::check_type_counter = 0;
- run_continue_node_test<check_type<double>,isThrowing,isThrowing,isThrowing>();
- ASSERT(!check_type<double>::check_type_counter, "Dropped objects in continue_node test");
- g_Wakeup_Msg = g_Orig_Wakeup_Msg;
-}
-
-// ---------- buffer_node queue_node overwrite_node --------------
-
-template<
- class BufferItemType, //
- class SourceNodeType,
- class SourceNodeBodyType,
- class TestNodeType,
- class SinkNodeType,
- class SinkNodeBodyType >
-void run_one_buffer_node_test(bool throwException,bool flog) {
- tbb::flow::graph g;
-
- tbb::atomic<int> source_count;
- tbb::atomic<int> sink_count;
- source_count = sink_count = 0;
-#if USE_TASK_SCHEDULER_OBSERVER
- eh_test_observer o;
- o.observe(true);
-#endif
- g_Master = Harness::CurrentTid();
- SourceNodeType source(g, SourceNodeBodyType(source_count),/*is_active*/false);
- TestNodeType node_to_test(g);
- SinkNodeType sink(g,tbb::flow::unlimited,SinkNodeBodyType(sink_count));
- make_edge(source,node_to_test);
- make_edge(node_to_test, sink);
- for(int iter = 0; iter < 2; ++iter) {
- ResetGlobals(throwException,flog);
- if(throwException) {
- TRY();
- source.activate();
- g.wait_for_all();
- CATCH_AND_ASSERT();
- }
- else {
- TRY();
- source.activate();
- g.wait_for_all();
- CATCH_AND_FAIL();
- }
- bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException;
- int sb_cnt = tbb::flow::copy_body<SourceNodeBodyType>(source).count_value();
- int nb_cnt = tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value();
- if(throwException) {
- ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph");
- ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph");
- ASSERT(sb_cnt <= g_NumItems, "Too many items sent by sources");
- ASSERT(nb_cnt <=
sb_cnt, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb_cnt == g_NumItems, "Missing invocations of source_node"); - ASSERT(nb_cnt == g_NumItems, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - node_to_test.try_put(BufferItemType()); - g.wait_for_all(); - g.reset(); - source_count = sink_count = 0; - BufferItemType tmp; - ASSERT(!node_to_test.try_get(tmp), "node not empty"); - make_edge(node_to_test, sink); - g.wait_for_all(); - } - else { - g.reset(); - source_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} -template<class BufferItemType, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> -void run_buffer_queue_and_overwrite_node_test() { - typedef test_source_body<BufferItemType,SourceThrowType> SourceBodyType; - typedef absorber_body<BufferItemType,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType; - - typedef tbb::flow::source_node<BufferItemType> SrcType; - typedef tbb::flow::buffer_node<BufferItemType> BufType; - typedef tbb::flow::queue_node<BufferItemType> QueType; - typedef tbb::flow::overwrite_node<BufferItemType> OvrType; - typedef tbb::flow::function_node<BufferItemType,tbb::flow::continue_msg> SnkType; - - for(int i = 0; i < 4; ++i) { - if(i == 2) continue; // no need to test flog w/o throws - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; -#if TBB_RUN_BUFFERING_TEST - run_one_buffer_node_test< - /* class BufferItemType*/ BufferItemType, - /*class SourceNodeType*/ SrcType, - /*class SourceNodeBodyType*/ SourceBodyType, - /*class TestNodeType*/ BufType, - /*class SinkNodeType*/ SnkType, - /*class SinkNodeBodyType*/ SinkBodyType - >(throwException, doFlog); - run_one_buffer_node_test< - /* class BufferItemType*/ BufferItemType, - /*class SourceNodeType*/ SrcType, - /*class SourceNodeBodyType*/ SourceBodyType, - /*class TestNodeType*/ QueType, - /*class SinkNodeType*/ SnkType, - /*class SinkNodeBodyType*/ SinkBodyType - >(throwException, doFlog); -#endif - run_one_buffer_node_test< - /* class BufferItemType*/ BufferItemType, - /*class SourceNodeType*/ SrcType, - /*class SourceNodeBodyType*/ SourceBodyType, - /*class TestNodeType*/ OvrType, - /*class SinkNodeType*/ SnkType, - /*class SinkNodeBodyType*/ SinkBodyType - >(throwException, doFlog); - } -} - -void test_buffer_queue_and_overwrite_node() { - REMARK("Testing buffer_node, queue_node and overwrite_node\n"); -#if TBB_RUN_BUFFERING_TEST -#else - REMARK("skip buffer and queue test (known issue)\n"); -#endif - g_Wakeup_Msg = "buffer, queue, overwrite(is,non): Missed wakeup or machine is overloaded?"; - run_buffer_queue_and_overwrite_node_test<int,isThrowing,nonThrowing>(); - g_Wakeup_Msg = "buffer, queue, overwrite(non,is): Missed wakeup or machine is overloaded?"; - run_buffer_queue_and_overwrite_node_test<int,nonThrowing,isThrowing>(); - g_Wakeup_Msg = "buffer, queue, overwrite(is,is): Missed wakeup or machine is overloaded?"; - run_buffer_queue_and_overwrite_node_test<int,isThrowing,isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// ---------- sequencer_node ------------------------- - - -template< 
- class BufferItemType, // - class SourceNodeType, - class SourceNodeBodyType, - class TestNodeType, - class SeqBodyType, - class SinkNodeType, - class SinkNodeBodyType > -void run_one_sequencer_node_test(bool throwException,bool flog) { - tbb::flow::graph g; - - tbb::atomic<int> source_count; - tbb::atomic<int> sink_count; - source_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceNodeType source(g, SourceNodeBodyType(source_count),/*is_active*/false); - TestNodeType node_to_test(g,SeqBodyType()); - SinkNodeType sink(g,tbb::flow::unlimited,SinkNodeBodyType(sink_count)); - make_edge(source,node_to_test); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb_cnt = tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(nb_cnt <= sb_cnt, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb_cnt == g_NumItems, "Missing invocations of source_node"); - ASSERT(nb_cnt == g_NumItems, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - node_to_test.try_put(BufferItemType(g_NumItems + 1)); - node_to_test.try_put(BufferItemType()); - g.wait_for_all(); - g.reset(); - source_count = sink_count = 0; - make_edge(node_to_test, sink); - g.wait_for_all(); - } - else { - g.reset(); - source_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} - -template<class BufferItemType, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> -void run_sequencer_node_test() { - typedef test_source_body<BufferItemType,SourceThrowType> SourceBodyType; - typedef absorber_body<BufferItemType,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType; - typedef sequencer_body<BufferItemType> SeqBodyType; - - typedef tbb::flow::source_node<BufferItemType> SrcType; - typedef tbb::flow::sequencer_node<BufferItemType> SeqType; - typedef tbb::flow::function_node<BufferItemType,tbb::flow::continue_msg> SnkType; - - for(int i = 0; i < 4; ++i) { - if(i == 2) continue; // no need to test flog w/o throws - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_sequencer_node_test< - /* class BufferItemType*/ BufferItemType, - /*class SourceNodeType*/ SrcType, - /*class SourceNodeBodyType*/ SourceBodyType, - /*class TestNodeType*/ SeqType, - /*class SeqBodyType*/ SeqBodyType, - /*class 
SinkNodeType*/ SnkType, - /*class SinkNodeBodyType*/ SinkBodyType - >(throwException, doFlog); - } -} - - - -void test_sequencer_node() { - REMARK("Testing sequencer_node\n"); - g_Wakeup_Msg = "sequencer_node(is,non): Missed wakeup or machine is overloaded?"; - run_sequencer_node_test<int, isThrowing,nonThrowing>(); - check_type<int>::check_type_counter = 0; - g_Wakeup_Msg = "sequencer_node(non,is): Missed wakeup or machine is overloaded?"; - run_sequencer_node_test<check_type<int>, nonThrowing,isThrowing>(); - ASSERT(!check_type<int>::check_type_counter, "Dropped objects in sequencer_node test"); - g_Wakeup_Msg = "sequencer_node(is,is): Missed wakeup or machine is overloaded?"; - run_sequencer_node_test<int, isThrowing,isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// ------------ priority_queue_node ------------------ - -template< - class BufferItemType, - class SourceNodeType, - class SourceNodeBodyType, - class TestNodeType, - class SinkNodeType, - class SinkNodeBodyType > -void run_one_priority_queue_node_test(bool throwException,bool flog) { - tbb::flow::graph g; - - tbb::atomic<int> source_count; - tbb::atomic<int> sink_count; - source_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceNodeType source(g, SourceNodeBodyType(source_count),/*is_active*/false); - - TestNodeType node_to_test(g); - - SinkNodeType sink(g,tbb::flow::unlimited,SinkNodeBodyType(sink_count)); - - make_edge(source,node_to_test); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb_cnt = tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(nb_cnt <= sb_cnt, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb_cnt == g_NumItems, "Missing invocations of source_node"); - ASSERT(nb_cnt == g_NumItems, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - node_to_test.try_put(BufferItemType(g_NumItems + 1)); - node_to_test.try_put(BufferItemType(g_NumItems + 2)); - node_to_test.try_put(BufferItemType()); - g.wait_for_all(); - g.reset(); - source_count = sink_count = 0; - make_edge(node_to_test, sink); - g.wait_for_all(); - } - else { - g.reset(); - source_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} - -template<class BufferItemType, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> 
-void run_priority_queue_node_test() { - typedef test_source_body<BufferItemType,SourceThrowType> SourceBodyType; - typedef absorber_body<BufferItemType,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType; - typedef less_body<BufferItemType> LessBodyType; - - typedef tbb::flow::source_node<BufferItemType> SrcType; - typedef tbb::flow::priority_queue_node<BufferItemType,LessBodyType> PrqType; - typedef tbb::flow::function_node<BufferItemType,tbb::flow::continue_msg> SnkType; - - for(int i = 0; i < 4; ++i) { - if(i == 2) continue; // no need to test flog w/o throws - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_priority_queue_node_test< - /* class BufferItemType*/ BufferItemType, - /*class SourceNodeType*/ SrcType, - /*class SourceNodeBodyType*/ SourceBodyType, - /*class TestNodeType*/ PrqType, - /*class SinkNodeType*/ SnkType, - /*class SinkNodeBodyType*/ SinkBodyType - >(throwException, doFlog); - } -} - -void test_priority_queue_node() { - REMARK("Testing priority_queue_node\n"); - g_Wakeup_Msg = "priority_queue_node(is,non): Missed wakeup or machine is overloaded?"; - run_priority_queue_node_test<int, isThrowing,nonThrowing>(); - check_type<int>::check_type_counter = 0; - g_Wakeup_Msg = "priority_queue_node(non,is): Missed wakeup or machine is overloaded?"; - run_priority_queue_node_test<check_type<int>, nonThrowing,isThrowing>(); - ASSERT(!check_type<int>::check_type_counter, "Dropped objects in priority_queue_node test"); - g_Wakeup_Msg = "priority_queue_node(is,is): Missed wakeup or machine is overloaded?"; - run_priority_queue_node_test<int, isThrowing,isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// ------------------- join_node ---------------- -template<tbb::flow::graph_buffer_policy JP> struct graph_policy_name{ - static const char* name() {return "unknown"; } -}; -template<> struct graph_policy_name<tbb::flow::queueing> { - static const char* name() {return "queueing"; } -}; -template<> struct graph_policy_name<tbb::flow::reserving> { - static const char* name() {return "reserving"; } -}; -template<> struct graph_policy_name<tbb::flow::tag_matching> { - static const char* name() {return "tag_matching"; } -}; - - -template< - tbb::flow::graph_buffer_policy JP, - class OutputTuple, - class SourceType0, - class SourceBodyType0, - class SourceType1, - class SourceBodyType1, - class TestJoinType, - class SinkType, - class SinkBodyType - > -struct run_one_join_node_test { - run_one_join_node_test() {} - static void execute_test(bool throwException,bool flog) { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,OutputTuple>::type ItemType1; - - tbb::flow::graph g; - tbb::atomic<int>source0_count; - tbb::atomic<int>source1_count; - tbb::atomic<int>sink_count; - source0_count = source1_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceType0 source0(g, SourceBodyType0(source0_count),/*is_active*/false); - SourceType1 source1(g, SourceBodyType1(source1_count),/*is_active*/false); - TestJoinType node_to_test(g); - SinkType sink(g,tbb::flow::unlimited,SinkBodyType(sink_count)); - make_edge(source0,tbb::flow::input_port<0>(node_to_test)); - make_edge(source1,tbb::flow::input_port<1>(node_to_test)); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source0.activate(); - 
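// ------------------------------------------------------------------
// The activate / wait_for_all / flag-check pattern shared by all of
// these node tests reduces to the minimal sketch below. Illustrative
// only: the function name and "boom" payload are assumptions (and it
// assumes C++11 lambdas); every tbb::flow call is one already used in
// this file.
// ------------------------------------------------------------------
#include "tbb/flow_graph.h"
#include <stdexcept>

inline void sketch_graph_exception_propagation() {
    tbb::flow::graph g;
    tbb::flow::function_node<int, int> thrower(g, tbb::flow::unlimited,
        [](int v) -> int {
            if (v == 3) throw std::runtime_error("boom"); // a body throws...
            return v;
        });
    for (int i = 0; i < 8; ++i)
        thrower.try_put(i);
    try {
        g.wait_for_all();            // ...and the exception resurfaces here
    } catch (const std::runtime_error&) {
        // at this point g.exception_thrown() and g.is_cancelled() are
        // both true, which is exactly what the ASSERTs below verify
    }
}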
source1.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb0_cnt = tbb::flow::copy_body<SourceBodyType0>(source0).count_value(); - int sb1_cnt = tbb::flow::copy_body<SourceBodyType1>(source1).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb0_cnt <= g_NumItems && sb1_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(nb_cnt <= ((sb0_cnt < sb1_cnt) ? sb0_cnt : sb1_cnt), "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb0_cnt == g_NumItems, "Missing invocations of source_node0"); - ASSERT(sb1_cnt == g_NumItems, "Missing invocations of source_node1"); - ASSERT(nb_cnt == g_NumItems, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - tbb::flow::input_port<0>(node_to_test).try_put(ItemType0(g_NumItems + 1)); - tbb::flow::input_port<1>(node_to_test).try_put(ItemType1(g_NumItems + 2)); - g.wait_for_all(); - g.reset(); - source0_count = source1_count = sink_count = 0; - make_edge(node_to_test, sink); - g.wait_for_all(); - } - else { - g.wait_for_all(); - g.reset(); - source0_count = source1_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceBodyType0>(source0).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SourceBodyType1>(source1).count_value(),"Reset source failed"); - nb_cnt = tbb::flow::copy_body<SinkBodyType>(sink).count_value(); - ASSERT(0 == tbb::flow::copy_body<SinkBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif - } -}; // run_one_join_node_test - -template< - class OutputTuple, - class SourceType0, - class SourceBodyType0, - class SourceType1, - class SourceBodyType1, - class TestJoinType, - class SinkType, - class SinkBodyType - > -struct run_one_join_node_test< - tbb::flow::tag_matching, - OutputTuple, - SourceType0, - SourceBodyType0, - SourceType1, - SourceBodyType1, - TestJoinType, - SinkType, - SinkBodyType - > { - run_one_join_node_test() {} - static void execute_test(bool throwException,bool flog) { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,OutputTuple>::type ItemType1; - - tbb::flow::graph g; - - tbb::atomic<int>source0_count; - tbb::atomic<int>source1_count; - tbb::atomic<int>sink_count; - source0_count = source1_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceType0 source0(g, SourceBodyType0(source0_count, 2),/*is_active*/false); - SourceType1 source1(g, SourceBodyType1(source1_count, 3),/*is_active*/false); - TestJoinType node_to_test(g, tag_func<ItemType0>(ItemType0(2)), tag_func<ItemType1>(ItemType1(3))); - SinkType sink(g,tbb::flow::unlimited,SinkBodyType(sink_count)); - make_edge(source0,tbb::flow::input_port<0>(node_to_test)); - 
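// ------------------------------------------------------------------
// A minimal form of the tag_matching construction used here: the
// lambdas play the role of tag_func above, mapping each input to a
// tbb::flow::tag_value so matching tags are paired into one tuple.
// Illustrative only (assumes C++11 lambdas).
// ------------------------------------------------------------------
#include "tbb/flow_graph.h"

inline void sketch_tag_matching_join() {
    using namespace tbb::flow;
    graph g;
    typedef join_node< tuple<int, float>, tag_matching > join_t;
    join_t j(g,
             [](int v)   { return tag_value(v); },   // tag for port 0
             [](float v) { return tag_value(v); });  // tag for port 1
    queue_node< tuple<int, float> > q(g);
    make_edge(j, q);
    input_port<0>(j).try_put(1);      // tag 1 arrives on port 0
    input_port<1>(j).try_put(1.0f);   // tag 1 on port 1 -> tuple emitted
    g.wait_for_all();
    tuple<int, float> t;
    bool got = q.try_get(t);          // got == true, t == (1, 1.0f)
    (void) got;
}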
make_edge(source1,tbb::flow::input_port<1>(node_to_test)); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb0_cnt = tbb::flow::copy_body<SourceBodyType0>(source0).count_value(); - int sb1_cnt = tbb::flow::copy_body<SourceBodyType1>(source1).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb0_cnt <= g_NumItems && sb1_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(nb_cnt <= ((sb0_cnt < sb1_cnt) ? sb0_cnt : sb1_cnt), "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb0_cnt == g_NumItems, "Missing invocations of source_node0"); - ASSERT(sb1_cnt == g_NumItems, "Missing invocations of source_node1"); - ASSERT(nb_cnt == g_NumItems, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - tbb::flow::input_port<0>(node_to_test).try_put(ItemType0(g_NumItems + 4)); - tbb::flow::input_port<1>(node_to_test).try_put(ItemType1(g_NumItems + 2)); - g.wait_for_all(); // have to wait for the graph to stop again.... - g.reset(); // resets the body of the source_nodes, test_node and the absorb_nodes. - source0_count = source1_count = sink_count = 0; - make_edge(node_to_test, sink); - g.wait_for_all(); // have to wait for the graph to stop again.... 
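// ------------------------------------------------------------------
// The count_value() bookkeeping these ASSERTs rely on works because
// each body keeps a pointer to an atomic that survives the copies
// tbb::flow makes of the body, so copy_body() still sees the shared
// counter. A minimal form of such a body (names assumed):
// ------------------------------------------------------------------
#include "tbb/flow_graph.h"
#include "tbb/atomic.h"

struct counting_sink_body {
    tbb::atomic<int>* my_count;                     // shared across copies
    counting_sink_body(tbb::atomic<int>& c) : my_count(&c) {}
    tbb::flow::continue_msg operator()(int) {
        ++*my_count;
        return tbb::flow::continue_msg();
    }
    int count_value() const { return *my_count; }   // what the ASSERTs read
};
// Usage: construct a function_node with this body, then inspect it via
//   tbb::flow::copy_body<counting_sink_body>(node).count_value()
// exactly as done throughout this file.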
- } - else { - g.wait_for_all(); - g.reset(); - source0_count = source1_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceBodyType0>(source0).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SourceBodyType1>(source1).count_value(),"Reset source failed"); - nb_cnt = tbb::flow::copy_body<SinkBodyType>(sink).count_value(); - ASSERT(0 == tbb::flow::copy_body<SinkBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif - } -}; // run_one_join_node_test<tag_matching> - -template<tbb::flow::graph_buffer_policy JP, class OutputTuple, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> -void run_join_node_test() { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,OutputTuple>::type ItemType1; - typedef test_source_body<ItemType0,SourceThrowType> SourceBodyType0; - typedef test_source_body<ItemType1,SourceThrowType> SourceBodyType1; - typedef absorber_body<OutputTuple,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType; - - typedef typename tbb::flow::source_node<ItemType0> SourceType0; - typedef typename tbb::flow::source_node<ItemType1> SourceType1; - typedef typename tbb::flow::join_node<OutputTuple,JP> TestJoinType; - typedef typename tbb::flow::function_node<OutputTuple,tbb::flow::continue_msg> SinkType; - - for(int i = 0; i < 4; ++i) { - if(2 == i) continue; - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_join_node_test< - JP, - OutputTuple, - SourceType0, - SourceBodyType0, - SourceType1, - SourceBodyType1, - TestJoinType, - SinkType, - SinkBodyType>::execute_test(throwException,doFlog); - } -} - -template<tbb::flow::graph_buffer_policy JP> -void test_join_node() { - REMARK("Testing join_node<%s>\n", graph_policy_name<JP>::name()); - // only doing two-input joins - g_Wakeup_Msg = "join(is,non): Missed wakeup or machine is overloaded?"; - run_join_node_test<JP, tbb::flow::tuple<int,int>, isThrowing, nonThrowing>(); - check_type<int>::check_type_counter = 0; - g_Wakeup_Msg = "join(non,is): Missed wakeup or machine is overloaded?"; - run_join_node_test<JP, tbb::flow::tuple<check_type<int>,int>, nonThrowing, isThrowing>(); - ASSERT(!check_type<int>::check_type_counter, "Dropped items in test"); - g_Wakeup_Msg = "join(is,is): Missed wakeup or machine is overloaded?"; - run_join_node_test<JP, tbb::flow::tuple<int,int>, isThrowing, isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// ------------------- limiter_node ------------- - -template< - class BufferItemType, // - class SourceNodeType, - class SourceNodeBodyType, - class TestNodeType, - class SinkNodeType, - class SinkNodeBodyType > -void run_one_limiter_node_test(bool throwException,bool flog) { - tbb::flow::graph g; - - tbb::atomic<int> source_count; - tbb::atomic<int> sink_count; - source_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceNodeType source(g, SourceNodeBodyType(source_count),/*is_active*/false); - TestNodeType node_to_test(g,g_NumThreads + 1); - SinkNodeType sink(g,tbb::flow::unlimited,SinkNodeBodyType(sink_count)); - make_edge(source,node_to_test); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - 
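// ------------------------------------------------------------------
// The g_NumThreads + 1 constructor argument used for the limiter under
// test is its threshold. Reduced to essentials (illustrative only,
// names assumed):
// ------------------------------------------------------------------
#include "tbb/flow_graph.h"

inline void sketch_limiter_node() {
    tbb::flow::graph g;
    tbb::flow::limiter_node<int> limiter(g, 3);   // forwards at most 3 items
    tbb::flow::queue_node<int> q(g);
    tbb::flow::make_edge(limiter, q);
    for (int i = 0; i < 10; ++i)
        limiter.try_put(i);                       // puts beyond the 3rd are rejected
    g.wait_for_all();
    int v, n = 0;
    while (q.try_get(v)) ++n;                     // n == 3 until the limiter's
                                                  // decrement port is signalled
}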
source.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb_cnt = tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(nb_cnt <= sb_cnt, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - // we stop after limiter's limit, which is g_NumThreads + 1. The source_node - // is invoked one extra time, filling its buffer, so its limit is g_NumThreads + 2. - ASSERT(sb_cnt == g_NumThreads + 2, "Missing invocations of source_node"); - ASSERT(nb_cnt == g_NumThreads + 1, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - node_to_test.try_put(BufferItemType()); - node_to_test.try_put(BufferItemType()); - g.wait_for_all(); - g.reset(); - source_count = sink_count = 0; - BufferItemType tmp; - ASSERT(!node_to_test.try_get(tmp), "node not empty"); - make_edge(node_to_test, sink); - g.wait_for_all(); - } - else { - g.reset(); - source_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceNodeBodyType>(source).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SinkNodeBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} - -template<class BufferItemType, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> -void run_limiter_node_test() { - typedef test_source_body<BufferItemType,SourceThrowType> SourceBodyType; - typedef absorber_body<BufferItemType,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType; - - typedef tbb::flow::source_node<BufferItemType> SrcType; - typedef tbb::flow::limiter_node<BufferItemType> LmtType; - typedef tbb::flow::function_node<BufferItemType,tbb::flow::continue_msg> SnkType; - - for(int i = 0; i < 4; ++i) { - if(i == 2) continue; // no need to test flog w/o throws - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_limiter_node_test< - /* class BufferItemType*/ BufferItemType, - /*class SourceNodeType*/ SrcType, - /*class SourceNodeBodyType*/ SourceBodyType, - /*class TestNodeType*/ LmtType, - /*class SinkNodeType*/ SnkType, - /*class SinkNodeBodyType*/ SinkBodyType - >(throwException, doFlog); - } -} - -void test_limiter_node() { - REMARK("Testing limiter_node\n"); - g_Wakeup_Msg = "limiter_node(is,non): Missed wakeup or machine is overloaded?"; - run_limiter_node_test<int,isThrowing,nonThrowing>(); - g_Wakeup_Msg = "limiter_node(non,is): Missed wakeup or machine is overloaded?"; - run_limiter_node_test<int,nonThrowing,isThrowing>(); - g_Wakeup_Msg = "limiter_node(is,is): Missed wakeup or machine is overloaded?"; - run_limiter_node_test<int,isThrowing,isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// -------- split_node -------------------- - -template< - class InputTuple, - class SourceType, - class SourceBodyType, - class TestSplitType, - class SinkType0, - class 
SinkBodyType0, - class SinkType1, - class SinkBodyType1> -void run_one_split_node_test(bool throwException, bool flog) { - - tbb::flow::graph g; - - tbb::atomic<int> source_count; - tbb::atomic<int> sink0_count; - tbb::atomic<int> sink1_count; - source_count = sink0_count = sink1_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - - g_Master = Harness::CurrentTid(); - SourceType source(g, SourceBodyType(source_count),/*is_active*/false); - TestSplitType node_to_test(g); - SinkType0 sink0(g,tbb::flow::unlimited,SinkBodyType0(sink0_count)); - SinkType1 sink1(g,tbb::flow::unlimited,SinkBodyType1(sink1_count)); - make_edge(source, node_to_test); - make_edge(tbb::flow::output_port<0>(node_to_test), sink0); - make_edge(tbb::flow::output_port<1>(node_to_test), sink1); - - for(int iter = 0; iter < 2; ++iter) { // run, reset, run again - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - source.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb_cnt = tbb::flow::copy_body<SourceBodyType>(source).count_value(); - int nb0_cnt = tbb::flow::copy_body<SinkBodyType0>(sink0).count_value(); - int nb1_cnt = tbb::flow::copy_body<SinkBodyType1>(sink1).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb_cnt <= 2*g_NumItems, "Too many items sent by source"); - ASSERT(nb0_cnt + nb1_cnt <= sb_cnt*2, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb_cnt == g_NumItems, "Missing invocations of source_nodes"); - ASSERT(nb0_cnt == g_NumItems && nb1_cnt == g_NumItems, "Missing items in absorbers"); - } - g.reset(); // resets the body of the source_nodes and the absorb_nodes. 
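// ------------------------------------------------------------------
// split_node fans a tuple out element-by-element to its output ports,
// which is why this test wires one sink per port and expects
// g_NumItems at each. A minimal form (illustrative only):
// ------------------------------------------------------------------
#include "tbb/flow_graph.h"

inline void sketch_split_node() {
    using namespace tbb::flow;
    graph g;
    split_node< tuple<int, float> > s(g);
    queue_node<int>   q0(g);
    queue_node<float> q1(g);
    make_edge(output_port<0>(s), q0);   // element 0 goes to q0
    make_edge(output_port<1>(s), q1);   // element 1 goes to q1
    s.try_put(tuple<int, float>(7, 2.5f));
    g.wait_for_all();
    int i; float f;
    q0.try_get(i);                      // i == 7
    q1.try_get(f);                      // f == 2.5f
}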
- source_count = sink0_count = sink1_count = 0; - ASSERT(0 == tbb::flow::copy_body<SourceBodyType>(source).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SinkBodyType0>(sink0).count_value(),"Reset sink 0 failed"); - ASSERT(0 == tbb::flow::copy_body<SinkBodyType1>(sink1).count_value(),"Reset sink 1 failed"); - } -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} - -template<class InputTuple, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> -void run_split_node_test() { - typedef typename tbb::flow::tuple_element<0,InputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,InputTuple>::type ItemType1; - typedef tuple_test_source_body<InputTuple,SourceThrowType> SourceBodyType; - typedef absorber_body<ItemType0,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType0; - typedef absorber_body<ItemType1,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType1; - - typedef typename tbb::flow::source_node<InputTuple> SourceType; - typedef typename tbb::flow::split_node<InputTuple> TestSplitType; - typedef typename tbb::flow::function_node<ItemType0,tbb::flow::continue_msg> SinkType0; - typedef typename tbb::flow::function_node<ItemType1,tbb::flow::continue_msg> SinkType1; - - for(int i = 0; i < 4; ++i) { - if(2 == i) continue; - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_split_node_test< - InputTuple, - SourceType, - SourceBodyType, - TestSplitType, - SinkType0, - SinkBodyType0, - SinkType1, - SinkBodyType1> - (throwException,doFlog); - } -} - -void test_split_node() { - REMARK("Testing split_node\n"); - g_Wakeup_Msg = "split_node(is,non): Missed wakeup or machine is overloaded?"; - run_split_node_test<tbb::flow::tuple<int,int>, isThrowing, nonThrowing>(); - g_Wakeup_Msg = "split_node(non,is): Missed wakeup or machine is overloaded?"; - run_split_node_test<tbb::flow::tuple<int,int>, nonThrowing, isThrowing>(); - g_Wakeup_Msg = "split_node(is,is): Missed wakeup or machine is overloaded?"; - run_split_node_test<tbb::flow::tuple<int,int>, isThrowing, isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg; -} - -// --------- indexer_node ---------------------- - -template < class InputTuple, - class SourceType0, - class SourceBodyType0, - class SourceType1, - class SourceBodyType1, - class TestNodeType, - class SinkType, - class SinkBodyType> -void run_one_indexer_node_test(bool throwException,bool flog) { - typedef typename tbb::flow::tuple_element<0,InputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,InputTuple>::type ItemType1; - - tbb::flow::graph g; - - tbb::atomic<int> source0_count; - tbb::atomic<int> source1_count; - tbb::atomic<int> sink_count; - source0_count = source1_count = sink_count = 0; -#if USE_TASK_SCHEDULER_OBSERVER - eh_test_observer o; - o.observe(true); -#endif - g_Master = Harness::CurrentTid(); - SourceType0 source0(g, SourceBodyType0(source0_count),/*is_active*/false); - SourceType1 source1(g, SourceBodyType1(source1_count),/*is_active*/false); - TestNodeType node_to_test(g); - SinkType sink(g,tbb::flow::unlimited,SinkBodyType(sink_count)); - make_edge(source0,tbb::flow::input_port<0>(node_to_test)); - make_edge(source1,tbb::flow::input_port<1>(node_to_test)); - make_edge(node_to_test, sink); - for(int iter = 0; iter < 2; ++iter) { - ResetGlobals(throwException,flog); - if(throwException) { - TRY(); - source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_ASSERT(); - } - else { - TRY(); - 
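// ------------------------------------------------------------------
// Unlike join_node, indexer_node forwards each input as soon as it
// arrives, wrapped in a tagged message; that is why the sink below
// expects 2*g_NumItems. Minimal form (illustrative; the cast_to usage
// is assumed from the flow graph reference):
// ------------------------------------------------------------------
#include "tbb/flow_graph.h"

inline void sketch_indexer_node() {
    using namespace tbb::flow;
    graph g;
    typedef indexer_node<int, float> indexer_t;
    indexer_t idx(g);
    queue_node<indexer_t::output_type> q(g);
    make_edge(idx, q);
    input_port<0>(idx).try_put(7);      // forwarded immediately, tag 0
    input_port<1>(idx).try_put(2.5f);   // forwarded immediately, tag 1
    g.wait_for_all();
    indexer_t::output_type msg;
    while (q.try_get(msg)) {
        if (msg.tag() == 0) { int   i = cast_to<int>(msg);   (void) i; }
        else                { float f = cast_to<float>(msg); (void) f; }
    }
}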
source0.activate(); - source1.activate(); - g.wait_for_all(); - CATCH_AND_FAIL(); - } - bool okayNoExceptionsCaught = (g_ExceptionInMaster && !g_MasterExecutedThrow) || (!g_ExceptionInMaster && !g_NonMasterExecutedThrow) || !throwException; - int sb0_cnt = tbb::flow::copy_body<SourceBodyType0>(source0).count_value(); - int sb1_cnt = tbb::flow::copy_body<SourceBodyType1>(source1).count_value(); - int nb_cnt = tbb::flow::copy_body<SinkBodyType>(sink).count_value(); - if(throwException) { - ASSERT(g.exception_thrown() || okayNoExceptionsCaught, "Exception not caught by graph"); - ASSERT(g.is_cancelled() || okayNoExceptionsCaught, "Cancellation not signalled in graph"); - ASSERT(sb0_cnt <= g_NumItems && sb1_cnt <= g_NumItems, "Too many items sent by sources"); - ASSERT(nb_cnt <= sb0_cnt + sb1_cnt, "Too many items received by sink nodes"); - } - else { - ASSERT(!g.exception_thrown(), "Exception flag in flow::graph set but no throw occurred"); - ASSERT(!g.is_cancelled(), "canceled flag set but no throw occurred"); - ASSERT(sb0_cnt == g_NumItems, "Missing invocations of source_node0"); - ASSERT(sb1_cnt == g_NumItems, "Missing invocations of source_node1"); - ASSERT(nb_cnt == 2*g_NumItems, "Missing items in absorbers"); - } - if(iter == 0) { - remove_edge(node_to_test, sink); - tbb::flow::input_port<0>(node_to_test).try_put(ItemType0(g_NumItems + 4)); - tbb::flow::input_port<1>(node_to_test).try_put(ItemType1(g_NumItems + 2)); - g.wait_for_all(); - g.reset(); - source0_count = source1_count = sink_count = 0; - make_edge(node_to_test, sink); - g.wait_for_all(); - } - else { - g.wait_for_all(); - g.reset(); - source0_count = source1_count = sink_count = 0; - } - ASSERT(0 == tbb::flow::copy_body<SourceBodyType0>(source0).count_value(),"Reset source failed"); - ASSERT(0 == tbb::flow::copy_body<SourceBodyType1>(source1).count_value(),"Reset source failed"); - nb_cnt = tbb::flow::copy_body<SinkBodyType>(sink).count_value(); - ASSERT(0 == tbb::flow::copy_body<SinkBodyType>(sink).count_value(),"Reset sink failed"); - } - -#if USE_TASK_SCHEDULER_OBSERVER - o.observe(false); -#endif -} - -template<class InputTuple, - TestNodeTypeEnum SourceThrowType, - TestNodeTypeEnum SinkThrowType> -void run_indexer_node_test() { - typedef typename tbb::flow::tuple_element<0,InputTuple>::type ItemType0; - typedef typename tbb::flow::tuple_element<1,InputTuple>::type ItemType1; - typedef test_source_body<ItemType0,SourceThrowType> SourceBodyType0; - typedef test_source_body<ItemType1,SourceThrowType> SourceBodyType1; - typedef typename tbb::flow::indexer_node<ItemType0, ItemType1> TestNodeType; - typedef absorber_body<typename TestNodeType::output_type,tbb::flow::continue_msg,SinkThrowType,unlimited_type> SinkBodyType; - - typedef typename tbb::flow::source_node<ItemType0> SourceType0; - typedef typename tbb::flow::source_node<ItemType1> SourceType1; - typedef typename tbb::flow::function_node<typename TestNodeType::output_type,tbb::flow::continue_msg> SinkType; - - for(int i = 0; i < 4; ++i) { - if(2 == i) continue; - bool throwException = (i & 0x1) != 0; - bool doFlog = (i & 0x2) != 0; - run_one_indexer_node_test< - InputTuple, - SourceType0, - SourceBodyType0, - SourceType1, - SourceBodyType1, - TestNodeType, - SinkType, - SinkBodyType>(throwException,doFlog); - } -} - -void test_indexer_node() { - REMARK("Testing indexer_node\n"); - g_Wakeup_Msg = "indexer_node(is,non): Missed wakeup or machine is overloaded?"; - run_indexer_node_test<tbb::flow::tuple<int,int>, isThrowing, nonThrowing>(); - g_Wakeup_Msg = 
"indexer_node(non,is): Missed wakeup or machine is overloaded?"; - run_indexer_node_test<tbb::flow::tuple<int,int>, nonThrowing, isThrowing>(); - g_Wakeup_Msg = "indexer_node(is,is): Missed wakeup or machine is overloaded?"; - run_indexer_node_test<tbb::flow::tuple<int,int>, isThrowing, isThrowing>(); - g_Wakeup_Msg = g_Orig_Wakeup_Msg;; -} - -/////////////////////////////////////////////// -// whole-graph exception test - -class Foo { -private: - // std::vector<int>& m_vec; - std::vector<int>* m_vec; -public: - Foo(std::vector<int>& vec) : m_vec(&vec) { } - void operator() (tbb::flow::continue_msg) const { - ++nExceptions; - m_vec->at(m_vec->size()); // Will throw out_of_range exception - ASSERT(false, "Exception not thrown by invalid access"); - } -}; - -// test from user ahelwer: http://software.intel.com/en-us/forums/showthread.php?t=103786 -// exception thrown in graph node, not caught in wait_for_all() -void -test_flow_graph_exception0() { - // Initializes body - std::vector<int> vec; - vec.push_back(0); - Foo f(vec); - nExceptions = 0; - - // Construct graph and nodes - tbb::flow::graph g; - tbb::flow::broadcast_node<tbb::flow::continue_msg> start(g); - tbb::flow::continue_node<tbb::flow::continue_msg> fooNode(g, f); - - // Construct edge - tbb::flow::make_edge(start, fooNode); - - // Execute graph - ASSERT(!g.exception_thrown(), "exception_thrown flag already set"); - ASSERT(!g.is_cancelled(), "canceled flag already set"); - try { - start.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(false, "Exception not thrown"); - } - catch(std::out_of_range& ex) { - REMARK("Exception: %s (expected)\n", ex.what()); - } - catch(...) { - REMARK("Unknown exception caught (expected)\n"); - } - ASSERT(nExceptions > 0, "Exception caught, but no body signaled exception being thrown"); - nExceptions = 0; - ASSERT(g.exception_thrown(), "Exception not intercepted"); - // if exception set, cancellation also set. - ASSERT(g.is_cancelled(), "Exception cancellation not signaled"); - // in case we got an exception - try { - g.wait_for_all(); // context still signalled canceled, my_exception still set. - } - catch(...) { - ASSERT(false, "Second exception thrown but no task executing"); - } - ASSERT(nExceptions == 0, "body signaled exception being thrown, but no body executed"); - ASSERT(!g.exception_thrown(), "exception_thrown flag not reset"); - ASSERT(!g.is_cancelled(), "canceled flag not reset"); -} - -void TestOneThreadNum(int nThread) { - REMARK("Testing %d threads\n", nThread); - g_NumItems = ((nThread > NUM_ITEMS) ? nThread *2 : NUM_ITEMS); - g_NumThreads = nThread; - tbb::task_scheduler_init init(nThread); - // whole-graph exception catch and rethrow test - test_flow_graph_exception0(); - for(int i = 0; i < 4; ++i) { - g_ExceptionInMaster = (i & 1) != 0; - g_SolitaryException = (i & 2) != 0; - REMARK("g_ExceptionInMaster == %s, g_SolitaryException == %s\n", - g_ExceptionInMaster ? "T":"F", - g_SolitaryException ? 
"T":"F"); - test_source_node(); - test_function_node(); - test_continue_node(); // also test broadcast_node - test_multifunction_node(); - // single- and multi-item buffering nodes - test_buffer_queue_and_overwrite_node(); - test_sequencer_node(); - test_priority_queue_node(); - - // join_nodes - test_join_node<tbb::flow::queueing>(); - test_join_node<tbb::flow::reserving>(); - test_join_node<tbb::flow::tag_matching>(); - - test_limiter_node(); - test_split_node(); - // graph for write_once_node will be complicated by the fact the node will - // not do try_puts after it has been set. To get parallelism of N we have - // to attach N successor nodes to the write_once (or play some similar game). - // test_write_once_node(); - test_indexer_node(); - } -} -#endif // TBB_USE_EXCEPTIONS - -#if TBB_USE_EXCEPTIONS -int TestMain() { - // reversing the order of tests - for(int nThread=MaxThread; nThread >= MinThread; --nThread) { - TestOneThreadNum(nThread); - } - - return Harness::Done; -} -#else -int TestMain() { - return Harness::Skipped; -} -#endif // TBB_USE_EXCEPTIONS diff --git a/src/tbb/src/test/test_eh_tasks.cpp b/src/tbb/src/test/test_eh_tasks.cpp deleted file mode 100644 index eee222319..000000000 --- a/src/tbb/src/test/test_eh_tasks.cpp +++ /dev/null @@ -1,787 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" - -#if __TBB_TASK_GROUP_CONTEXT - -#define __TBB_ATOMICS_CODEGEN_BROKEN __SUNPRO_CC - -#define private public -#include "tbb/task.h" -#undef private - -#include "tbb/task_scheduler_init.h" -#include "tbb/spin_mutex.h" -#include "tbb/tick_count.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <string> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#define NUM_CHILD_TASKS 256 -#define NUM_ROOT_TASKS 32 -#define NUM_ROOTS_IN_GROUP 8 - -//! Statistics about number of tasks in different states -class TaskStats { - typedef tbb::spin_mutex::scoped_lock lock_t; - //! Number of tasks allocated that was ever allocated - volatile intptr_t m_Existed; - //! 
Number of tasks executed to the moment - volatile intptr_t m_Executed; - //! Number of tasks allocated but not yet destroyed to the moment - volatile intptr_t m_Existing; - - mutable tbb::spin_mutex m_Mutex; -public: - //! Assumes that assignment is noncontended for the left-hand operand - const TaskStats& operator= ( const TaskStats& rhs ) { - if ( this != &rhs ) { - lock_t lock(rhs.m_Mutex); - m_Existed = rhs.m_Existed; - m_Executed = rhs.m_Executed; - m_Existing = rhs.m_Existing; - } - return *this; - } - intptr_t Existed() const { return m_Existed; } - intptr_t Executed() const { return m_Executed; } - intptr_t Existing() const { return m_Existing; } - void IncExisted() { lock_t lock(m_Mutex); ++m_Existed; ++m_Existing; } - void IncExecuted() { lock_t lock(m_Mutex); ++m_Executed; } - void DecExisting() { lock_t lock(m_Mutex); --m_Existing; } - //! Assumed to be used in uncontended manner only - void Reset() { m_Executed = m_Existing = m_Existed = 0; } -}; - -TaskStats g_CurStat; - -inline intptr_t Existed () { return g_CurStat.Existed(); } - -#include "harness_eh.h" - -bool g_BoostExecutedCount = true; -volatile bool g_TaskWasCancelled = false; - -inline void ResetGlobals () { - ResetEhGlobals(); - g_BoostExecutedCount = true; - g_TaskWasCancelled = false; - g_CurStat.Reset(); -} - -#define ASSERT_TEST_POSTCOND() \ - ASSERT (g_CurStat.Existed() >= g_CurStat.Executed(), "Total number of tasks is less than executed"); \ - ASSERT (!g_CurStat.Existing(), "Not all task objects have been destroyed"); \ - ASSERT (!tbb::task::self().is_cancelled(), "Scheduler's default context has not been cleaned up properly"); - -inline void WaitForException () { - int n = 0; - while ( ++n < c_Timeout && !__TBB_load_with_acquire(g_ExceptionCaught) ) - __TBB_Yield(); - ASSERT_WARNING( n < c_Timeout, "WaitForException failed" ); -} - -class TaskBase : public tbb::task { - tbb::task* execute () { - tbb::task* t = NULL; - __TBB_TRY { - t = do_execute(); - } __TBB_CATCH( ... ) { - g_CurStat.IncExecuted(); - __TBB_RETHROW(); - } - g_CurStat.IncExecuted(); - return t; - } -protected: - TaskBase ( bool throwException = true ) : m_Throw(throwException) { g_CurStat.IncExisted(); } - ~TaskBase () { g_CurStat.DecExisting(); } - - virtual tbb::task* do_execute () = 0; - - bool m_Throw; -}; // class TaskBase - -class LeafTask : public TaskBase { - tbb::task* do_execute () { - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks(); - if ( g_BoostExecutedCount ) - ++g_CurExecuted; - if ( m_Throw ) - ThrowTestException(NUM_CHILD_TASKS/2); - if ( !g_ThrowException ) - __TBB_Yield(); - return NULL; - } -public: - LeafTask ( bool throw_exception = true ) : TaskBase(throw_exception) {} -}; - -class SimpleRootTask : public TaskBase { - tbb::task* do_execute () { - set_ref_count(NUM_CHILD_TASKS + 1); - for ( size_t i = 0; i < NUM_CHILD_TASKS; ++i ) - spawn( *new( allocate_child() ) LeafTask(m_Throw) ); - wait_for_all(); - return NULL; - } -public: - SimpleRootTask ( bool throw_exception = true ) : TaskBase(throw_exception) {} -}; - -#if TBB_USE_EXCEPTIONS - -class SimpleThrowingTask : public tbb::task { -public: - tbb::task* execute () { throw 0; } - ~SimpleThrowingTask() {} -}; - -//! 
Checks if innermost running task information is updated correctly during cancellation processing -void Test0 () { - tbb::task_scheduler_init init (1); - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - tbb::task_list tl; - tl.push_back( *new( r.allocate_child() ) SimpleThrowingTask ); - tl.push_back( *new( r.allocate_child() ) SimpleThrowingTask ); - r.set_ref_count( 3 ); - try { - r.spawn_and_wait_for_all( tl ); - } - catch (...) {} - r.destroy( r ); -} - -//! Default exception behavior test. -/** Allocates a root task that spawns a bunch of children, one or several of which throw - a test exception in a worker or master thread (depending on the global setting). **/ -void Test1 () { - ResetGlobals(); - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - ASSERT (!g_CurStat.Existing() && !g_CurStat.Existed() && !g_CurStat.Executed(), - "something wrong with the task accounting"); - r.set_ref_count(NUM_CHILD_TASKS + 1); - for ( int i = 0; i < NUM_CHILD_TASKS; ++i ) - r.spawn( *new( r.allocate_child() ) LeafTask ); - TRY(); - r.wait_for_all(); - CATCH_AND_ASSERT(); - r.destroy(r); - ASSERT_TEST_POSTCOND(); -} // void Test1 () - -//! Default exception behavior test. -/** Allocates and spawns root task that runs a bunch of children, one of which throws - a test exception in a worker thread. (Similar to Test1, except that the root task - is spawned by the test function, and children are created by the root task instead - of the test function body.) **/ -void Test2 () { - ResetGlobals(); - SimpleRootTask &r = *new( tbb::task::allocate_root() ) SimpleRootTask; - ASSERT (g_CurStat.Existing() == 1 && g_CurStat.Existed() == 1 && !g_CurStat.Executed(), - "something wrong with the task accounting"); - TRY(); - tbb::task::spawn_root_and_wait(r); - CATCH_AND_ASSERT(); - ASSERT (g_ExceptionCaught, "no exception occurred"); - ASSERT_TEST_POSTCOND(); -} // void Test2 () - -//! The same as Test2() except the root task has explicit context. -/** The context is initialized as bound in order to check correctness of its associating - with a root task. **/ -void Test3 () { - ResetGlobals(); - tbb::task_group_context ctx(tbb::task_group_context::bound); - SimpleRootTask &r = *new( tbb::task::allocate_root(ctx) ) SimpleRootTask; - ASSERT (g_CurStat.Existing() == 1 && g_CurStat.Existed() == 1 && !g_CurStat.Executed(), - "something wrong with the task accounting"); - TRY(); - tbb::task::spawn_root_and_wait(r); - CATCH_AND_ASSERT(); - ASSERT (g_ExceptionCaught, "no exception occurred"); - ASSERT_TEST_POSTCOND(); -} // void Test2 () - -class RootLauncherTask : public TaskBase { - tbb::task_group_context::kind_type m_CtxKind; - - tbb::task* do_execute () { - tbb::task_group_context ctx(m_CtxKind); - SimpleRootTask &r = *new( allocate_root() ) SimpleRootTask; - r.change_group(ctx); - TRY(); - spawn_root_and_wait(r); - // Give a child of our siblings a chance to throw the test exception - WaitForException(); - CATCH(); - ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || !g_UnknownException, "unknown exception was caught"); - return NULL; - } -public: - RootLauncherTask ( tbb::task_group_context::kind_type ctx_kind = tbb::task_group_context::isolated ) : m_CtxKind(ctx_kind) {} -}; - -/** Allocates and spawns a bunch of roots, which allocate and spawn new root with - isolated context, which at last spawns a bunch of children each, one of which - throws a test exception in a worker thread. 
**/ -void Test4 () { - ResetGlobals(); - tbb::task_list tl; - for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i ) - tl.push_back( *new( tbb::task::allocate_root() ) RootLauncherTask ); - TRY(); - tbb::task::spawn_root_and_wait(tl); - CATCH_AND_ASSERT(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "exception in this scope is unexpected"); - intptr_t num_tasks_expected = NUM_ROOT_TASKS * (NUM_CHILD_TASKS + 2); - ASSERT (g_CurStat.Existed() == num_tasks_expected, "Wrong total number of tasks"); - if ( g_SolitaryException ) - ASSERT (g_CurStat.Executed() >= num_tasks_expected - NUM_CHILD_TASKS, "Unexpected number of executed tasks"); - ASSERT_TEST_POSTCOND(); -} // void Test4 () - -/** The same as Test4, except the contexts are bound. **/ -void Test4_1 () { - ResetGlobals(); - tbb::task_list tl; - for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i ) - tl.push_back( *new( tbb::task::allocate_root() ) RootLauncherTask(tbb::task_group_context::bound) ); - TRY(); - tbb::task::spawn_root_and_wait(tl); - CATCH_AND_ASSERT(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "exception in this scope is unexpected"); - intptr_t num_tasks_expected = NUM_ROOT_TASKS * (NUM_CHILD_TASKS + 2); - ASSERT (g_CurStat.Existed() == num_tasks_expected, "Wrong total number of tasks"); - if ( g_SolitaryException ) - ASSERT (g_CurStat.Executed() >= num_tasks_expected - NUM_CHILD_TASKS, "Unexpected number of executed tasks"); - ASSERT_TEST_POSTCOND(); -} // void Test4_1 () - - -class RootsGroupLauncherTask : public TaskBase { - tbb::task* do_execute () { - tbb::task_group_context ctx (tbb::task_group_context::isolated); - tbb::task_list tl; - for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i ) - tl.push_back( *new( allocate_root(ctx) ) SimpleRootTask ); - TRY(); - spawn_root_and_wait(tl); - // Give worker a chance to throw exception - WaitForException(); - CATCH_AND_ASSERT(); - return NULL; - } -}; - -/** Allocates and spawns a bunch of roots, which allocate and spawn groups of roots - with an isolated context shared by all group members, which at last spawn a bunch - of children each, one of which throws a test exception in a worker thread. 
**/ -void Test5 () { - ResetGlobals(); - tbb::task_list tl; - for ( size_t i = 0; i < NUM_ROOTS_IN_GROUP; ++i ) - tl.push_back( *new( tbb::task::allocate_root() ) RootsGroupLauncherTask ); - TRY(); - tbb::task::spawn_root_and_wait(tl); - CATCH_AND_ASSERT(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "unexpected exception intercepted"); - if ( g_SolitaryException ) { - intptr_t num_tasks_expected = NUM_ROOTS_IN_GROUP * (1 + NUM_ROOT_TASKS * (1 + NUM_CHILD_TASKS)); - intptr_t min_num_tasks_executed = num_tasks_expected - NUM_ROOT_TASKS * (NUM_CHILD_TASKS + 1); - ASSERT (g_CurStat.Executed() >= min_num_tasks_executed, "Too few tasks executed"); - } - ASSERT_TEST_POSTCOND(); -} // void Test5 () - -class ThrowingRootLauncherTask : public TaskBase { - tbb::task* do_execute () { - tbb::task_group_context ctx (tbb::task_group_context::bound); - SimpleRootTask &r = *new( allocate_root(ctx) ) SimpleRootTask(false); - TRY(); - spawn_root_and_wait(r); - CATCH(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "unexpected exception intercepted"); - ThrowTestException(NUM_CHILD_TASKS); - g_TaskWasCancelled |= is_cancelled(); - return NULL; - } -}; - -class BoundHierarchyLauncherTask : public TaskBase { - bool m_Recover; - - void alloc_roots ( tbb::task_group_context& ctx, tbb::task_list& tl ) { - for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i ) - tl.push_back( *new( allocate_root(ctx) ) ThrowingRootLauncherTask ); - } - - tbb::task* do_execute () { - tbb::task_group_context ctx (tbb::task_group_context::isolated); - tbb::task_list tl; - alloc_roots(ctx, tl); - TRY(); - spawn_root_and_wait(tl); - CATCH_AND_ASSERT(); - ASSERT (l_ExceptionCaughtAtCurrentLevel, "no exception occurred"); - ASSERT (!tl.empty(), "task list was cleared somehow"); - if ( g_SolitaryException ) - ASSERT (g_TaskWasCancelled, "No tasks were cancelled despite of exception"); - if ( m_Recover ) { - // Test task_group_context::unbind and task_group_context::reset methods - g_ThrowException = false; - l_ExceptionCaughtAtCurrentLevel = false; - tl.clear(); - alloc_roots(ctx, tl); - ctx.reset(); - try { - spawn_root_and_wait(tl); - } - catch (...) { - l_ExceptionCaughtAtCurrentLevel = true; - } - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "unexpected exception occurred"); - } - return NULL; - } -public: - BoundHierarchyLauncherTask ( bool recover = false ) : m_Recover(recover) {} - -}; // class BoundHierarchyLauncherTask - -//! Test for bound contexts forming 2 level tree. Exception is thrown on the 1st (root) level. -/** Allocates and spawns a root that spawns a bunch of 2nd level roots sharing - the same isolated context, each of which in their turn spawns a single 3rd level - root with the bound context, and these 3rd level roots spawn bunches of leaves - in the end. Leaves do not generate exceptions. The test exception is generated - by one of the 2nd level roots. **/ -void Test6 () { - ResetGlobals(); - BoundHierarchyLauncherTask &r = *new( tbb::task::allocate_root() ) BoundHierarchyLauncherTask; - TRY(); - tbb::task::spawn_root_and_wait(r); - CATCH_AND_ASSERT(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "unexpected exception intercepted"); - // After the first of the branches (ThrowingRootLauncherTask) completes, - // the rest of the task tree may be collapsed before having a chance to execute leaves. - // A number of branches running concurrently with the first one will be able to spawn leaves though. 
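// ------------------------------------------------------------------
// The cancellation machinery these context-hierarchy tests exercise
// boils down to three task_group_context calls, all of which appear
// in this file (the surrounding scaffolding is elided):
// ------------------------------------------------------------------
#include "tbb/task.h"

inline void sketch_context_cancellation() {
    tbb::task_group_context ctx;
    // ... tasks spawned via allocate_root(ctx) run under this context ...
    ctx.cancel_group_execution();                  // request cancellation of the group
    bool c = ctx.is_group_execution_cancelled();   // true: pending tasks are skipped
    ctx.reset();                                   // clear the flag so ctx can be reused
    (void) c;
}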
- /// \todo: If additional checkpoints are added to scheduler the following assertion must weaken - intptr_t num_tasks_expected = 1 + NUM_ROOT_TASKS * (2 + NUM_CHILD_TASKS); - intptr_t min_num_tasks_created = 1 + g_NumThreads * 2 + NUM_CHILD_TASKS; - // 2 stands for BoundHierarchyLauncherTask and SimpleRootTask - // 1 corresponds to BoundHierarchyLauncherTask - intptr_t min_num_tasks_executed = 2 + 1 + NUM_CHILD_TASKS; - ASSERT (g_CurStat.Existed() <= num_tasks_expected, "Number of expected tasks is calculated incorrectly"); - ASSERT (g_CurStat.Existed() >= min_num_tasks_created, "Too few tasks created"); - ASSERT (g_CurStat.Executed() >= min_num_tasks_executed, "Too few tasks executed"); - ASSERT_TEST_POSTCOND(); -} // void Test6 () - -//! Tests task_group_context::unbind and task_group_context::reset methods. -/** Allocates and spawns a root that spawns a bunch of 2nd level roots sharing - the same isolated context, each of which in their turn spawns a single 3rd level - root with the bound context, and these 3rd level roots spawn bunches of leaves - in the end. Leaves do not generate exceptions. The test exception is generated - by one of the 2nd level roots. **/ -void Test7 () { - ResetGlobals(); - BoundHierarchyLauncherTask &r = *new( tbb::task::allocate_root() ) BoundHierarchyLauncherTask; - TRY(); - tbb::task::spawn_root_and_wait(r); - CATCH_AND_ASSERT(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "unexpected exception intercepted"); - ASSERT_TEST_POSTCOND(); -} // void Test6 () - -class BoundHierarchyLauncherTask2 : public TaskBase { - tbb::task* do_execute () { - tbb::task_group_context ctx; - tbb::task_list tl; - for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i ) - tl.push_back( *new( allocate_root(ctx) ) RootLauncherTask(tbb::task_group_context::bound) ); - TRY(); - spawn_root_and_wait(tl); - CATCH_AND_ASSERT(); - // Exception must be intercepted by RootLauncherTask - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "no exception occurred"); - return NULL; - } -}; // class BoundHierarchyLauncherTask2 - -//! Test for bound contexts forming 2 level tree. Exception is thrown in the 2nd (outer) level. -/** Allocates and spawns a root that spawns a bunch of 2nd level roots sharing - the same isolated context, each of which in their turn spawns a single 3rd level - root with the bound context, and these 3rd level roots spawn bunches of leaves - in the end. The test exception is generated by one of the leaves. 
**/ -void Test8 () { - ResetGlobals(); - BoundHierarchyLauncherTask2 &r = *new( tbb::task::allocate_root() ) BoundHierarchyLauncherTask2; - TRY(); - tbb::task::spawn_root_and_wait(r); - CATCH_AND_ASSERT(); - ASSERT (!l_ExceptionCaughtAtCurrentLevel, "unexpected exception intercepted"); - if ( g_SolitaryException ) { - intptr_t num_tasks_expected = 1 + NUM_ROOT_TASKS * (2 + NUM_CHILD_TASKS); - intptr_t min_num_tasks_created = 1 + g_NumThreads * (2 + NUM_CHILD_TASKS); - intptr_t min_num_tasks_executed = num_tasks_expected - (NUM_CHILD_TASKS + 1); - ASSERT (g_CurStat.Existed() <= num_tasks_expected, "Number of expected tasks is calculated incorrectly"); - ASSERT (g_CurStat.Existed() >= min_num_tasks_created, "Too few tasks created"); - ASSERT (g_CurStat.Executed() >= min_num_tasks_executed, "Too few tasks executed"); - } - ASSERT_TEST_POSTCOND(); -} // void Test8 () - -template<typename T> -void ThrowMovableException ( intptr_t threshold, const T& data ) { - if ( !IsThrowingThread() ) - return; - if ( !g_SolitaryException ) { -#if __TBB_ATOMICS_CODEGEN_BROKEN - g_ExceptionsThrown = g_ExceptionsThrown + 1; -#else - ++g_ExceptionsThrown; -#endif - throw tbb::movable_exception<T>(data); - } - while ( g_CurStat.Existed() < threshold ) - __TBB_Yield(); - if ( g_ExceptionsThrown.compare_and_swap(1, 0) == 0 ) - throw tbb::movable_exception<T>(data); -} - -const int g_IntExceptionData = -375; -const std::string g_StringExceptionData = "My test string"; - -// Exception data class implementing minimal requirements of tbb::movable_exception -class ExceptionData { - const ExceptionData& operator = ( const ExceptionData& src ); - explicit ExceptionData ( int n ) : m_Int(n), m_String(g_StringExceptionData) {} -public: - ExceptionData ( const ExceptionData& src ) : m_Int(src.m_Int), m_String(src.m_String) {} - ~ExceptionData () {} - - int m_Int; - std::string m_String; - - // Simple way to provide an instance when all initializing constructors are private - // and to avoid memory reclamation problems. - static ExceptionData s_data; -}; - -ExceptionData ExceptionData::s_data(g_IntExceptionData); - -typedef tbb::movable_exception<int> SolitaryMovableException; -typedef tbb::movable_exception<ExceptionData> MultipleMovableException; - -class LeafTaskWithMovableExceptions : public TaskBase { - tbb::task* do_execute () { - Harness::ConcurrencyTracker ct; - WaitUntilConcurrencyPeaks(); - if ( g_SolitaryException ) - ThrowMovableException<int>(NUM_CHILD_TASKS/2, g_IntExceptionData); - else - ThrowMovableException<ExceptionData>(NUM_CHILD_TASKS/2, ExceptionData::s_data); - return NULL; - } -}; - -void CheckException ( tbb::tbb_exception& e ) { - ASSERT (strcmp(e.name(), (g_SolitaryException ? 
typeid(SolitaryMovableException) - : typeid(MultipleMovableException)).name() ) == 0, - "Unexpected original exception name"); - ASSERT (strcmp(e.what(), "tbb::movable_exception") == 0, "Unexpected original exception info "); - if ( g_SolitaryException ) { - SolitaryMovableException& me = dynamic_cast<SolitaryMovableException&>(e); - ASSERT (me.data() == g_IntExceptionData, "Unexpected solitary movable_exception data"); - } - else { - MultipleMovableException& me = dynamic_cast<MultipleMovableException&>(e); - ASSERT (me.data().m_Int == g_IntExceptionData, "Unexpected multiple movable_exception int data"); - ASSERT (me.data().m_String == g_StringExceptionData, "Unexpected multiple movable_exception string data"); - } -} - -void CheckException () { - try { - throw; - } catch ( tbb::tbb_exception& e ) { - CheckException(e); - } - catch ( ... ) { - } -} - -//! Test for movable_exception behavior, and external exception recording. -/** Allocates a root task that spawns a bunch of children, one or several of which throw - a movable exception in a worker or master thread (depending on the global settings). - The test also checks the correctness of multiple rethrowing of the pending exception. **/ -void TestMovableException () { - REMARK( "TestMovableException\n" ); - ResetGlobals(); - bool bUnsupported = false; - tbb::task_group_context ctx; - tbb::empty_task *r = new( tbb::task::allocate_root() ) tbb::empty_task; - ASSERT (!g_CurStat.Existing() && !g_CurStat.Existed() && !g_CurStat.Executed(), - "something wrong with the task accounting"); - r->set_ref_count(NUM_CHILD_TASKS + 1); - for ( int i = 0; i < NUM_CHILD_TASKS; ++i ) - r->spawn( *new( r->allocate_child() ) LeafTaskWithMovableExceptions ); - TRY() - r->wait_for_all(); - } catch ( ... ) { - ASSERT (!ctx.is_group_execution_cancelled(), ""); - CheckException(); - try { - throw; - } catch ( tbb::tbb_exception& e ) { - CheckException(e); - g_ExceptionCaught = l_ExceptionCaughtAtCurrentLevel = true; - } - catch ( ... ) { - g_ExceptionCaught = true; - g_UnknownException = unknownException = true; - } - try { - ctx.register_pending_exception(); - } catch ( ... ) { - bUnsupported = true; - REPORT( "Warning: register_pending_exception() failed. This is expected in case of linking with static msvcrt\n" ); - } - ASSERT (ctx.is_group_execution_cancelled() || bUnsupported, "After exception registration the context must be in the cancelled state"); - } - r->destroy(*r); - ASSERT_EXCEPTION(); - ASSERT_TEST_POSTCOND(); - - r = new( tbb::task::allocate_root(ctx) ) tbb::empty_task; - r->set_ref_count(1); - g_ExceptionCaught = g_UnknownException = false; - try { - r->wait_for_all(); - } catch ( tbb::tbb_exception& e ) { - CheckException(e); - g_ExceptionCaught = true; - } - catch ( ... ) { - g_ExceptionCaught = true; - g_UnknownException = true; - } - ASSERT (g_ExceptionCaught || bUnsupported, "no exception occurred"); - ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || !g_UnknownException || bUnsupported, "unknown exception was caught"); - r->destroy(*r); -} // void Test10 () - -#endif /* TBB_USE_EXCEPTIONS */ - -template<class T> -class CtxLauncherTask : public tbb::task { - tbb::task_group_context &m_Ctx; - - tbb::task* execute () { - spawn_root_and_wait( *new( allocate_root(m_Ctx) ) T ); - return NULL; - } -public: - CtxLauncherTask ( tbb::task_group_context& ctx ) : m_Ctx(ctx) {} -}; - -//! Test for cancelling a task hierarchy from outside (from a task running in parallel with it). 
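// ------------------------------------------------------------------
// The movable_exception round trip that CheckException() unpacks
// above, in isolation. Illustrative only: the payload type is an
// assumption; the name()/what() behavior is exactly what the ASSERTs
// in CheckException() state.
// ------------------------------------------------------------------
#include "tbb/tbb_exception.h"

struct sketch_payload { int code; sketch_payload() : code(42) {} };

inline void sketch_movable_exception() {
    try {
        // thrower side: the payload is copied into the exception object,
        // so it can travel safely across threads
        throw tbb::movable_exception<sketch_payload>(sketch_payload());
    } catch (tbb::tbb_exception& e) {
        // e.what() is always "tbb::movable_exception"; e.name() reports
        // the typeid of the wrapped instantiation
        tbb::movable_exception<sketch_payload>& me =
            dynamic_cast<tbb::movable_exception<sketch_payload>&>(e);
        int c = me.data().code;   // payload recovered on the catcher side
        (void) c;
    }
}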
-void TestCancelation () { - ResetGlobals(); - g_ThrowException = false; - tbb::task_group_context ctx; - tbb::task_list tl; - tl.push_back( *new( tbb::task::allocate_root() ) CtxLauncherTask<SimpleRootTask>(ctx) ); - tl.push_back( *new( tbb::task::allocate_root() ) CancellatorTask(ctx, NUM_CHILD_TASKS / 4) ); - TRY(); - tbb::task::spawn_root_and_wait(tl); - CATCH_AND_FAIL(); - ASSERT (g_CurStat.Executed() <= g_ExecutedAtLastCatch + g_NumThreads, "Too many tasks were executed after cancellation"); - ASSERT_TEST_POSTCOND(); -} // void Test9 () - -class CtxDestroyerTask : public tbb::task { - int m_nestingLevel; - - tbb::task* execute () { - ASSERT ( m_nestingLevel >= 0 && m_nestingLevel < MaxNestingDepth, "Wrong nesting level. The test is broken" ); - tbb::task_group_context ctx; - tbb::task *t = new( allocate_root(ctx) ) tbb::empty_task; - int level = ++m_nestingLevel; - if ( level < MaxNestingDepth ) { - execute(); - } - else { - if ( !CancellatorTask::WaitUntilReady() ) - REPORT( "Warning: missing wakeup\n" ); - ++g_CurExecuted; - } - if ( ctx.is_group_execution_cancelled() ) - ++s_numCancelled; - t->destroy(*t); - return NULL; - } -public: - CtxDestroyerTask () : m_nestingLevel(0) { s_numCancelled = 0; } - - static const int MaxNestingDepth = 256; - static int s_numCancelled; -}; - -int CtxDestroyerTask::s_numCancelled = 0; - -//! Test for data race between cancellation propagation and context destruction. -/** If the data race ever occurs, an assertion inside TBB will be triggered. **/ -void TestCtxDestruction () { - REMARK( "TestCtxDestruction\n" ); - for ( size_t i = 0; i < 10; ++i ) { - tbb::task_group_context ctx; - tbb::task_list tl; - ResetGlobals(); - g_BoostExecutedCount = false; - g_ThrowException = false; - CancellatorTask::Reset(); - - tl.push_back( *new( tbb::task::allocate_root() ) CtxLauncherTask<CtxDestroyerTask>(ctx) ); - tl.push_back( *new( tbb::task::allocate_root() ) CancellatorTask(ctx, 1) ); - tbb::task::spawn_root_and_wait(tl); - ASSERT( g_CurExecuted == 1, "Test is broken" ); - ASSERT( CtxDestroyerTask::s_numCancelled <= CtxDestroyerTask::MaxNestingDepth, "Test is broken" ); - } -} // void TestCtxDestruction() - -#include <algorithm> -#include "harness_barrier.h" - -class CtxConcurrentDestroyer : NoAssign, Harness::NoAfterlife { - static const int ContextsPerThread = 512; - - static int s_Concurrency; - static int s_NumContexts; - static tbb::task_group_context** s_Contexts; - static char* s_Buffer; - static Harness::SpinBarrier s_Barrier; - static Harness::SpinBarrier s_ExitBarrier; - - struct Shuffler { - void operator() () const { std::random_shuffle(s_Contexts, s_Contexts + s_NumContexts); } - }; -public: - static void Init ( int p ) { - s_Concurrency = p; - s_NumContexts = p * ContextsPerThread; - s_Contexts = new tbb::task_group_context*[s_NumContexts]; - s_Buffer = new char[s_NumContexts * sizeof(tbb::task_group_context)]; - s_Barrier.initialize( p ); - s_ExitBarrier.initialize( p ); - } - static void Uninit () { - for ( int i = 0; i < s_NumContexts; ++i ) { - tbb::internal::context_list_node_t &node = s_Contexts[i]->my_node; - ASSERT( !node.my_next && !node.my_prev, "Destroyed context was written to during context chain update" ); - } - delete []s_Contexts; - delete []s_Buffer; - } - - void operator() ( int id ) const { - int begin = ContextsPerThread * id, - end = begin + ContextsPerThread; - for ( int i = begin; i < end; ++i ) - s_Contexts[i] = new( s_Buffer + i * sizeof(tbb::task_group_context) ) tbb::task_group_context; - s_Barrier.wait( Shuffler() ); 
- for ( int i = begin; i < end; ++i ) { - s_Contexts[i]->tbb::task_group_context::~task_group_context(); - memset( s_Contexts[i], 0, sizeof(tbb::task_group_context) ); - } - s_ExitBarrier.wait(); - } -}; // class CtxConcurrentDestroyer - -int CtxConcurrentDestroyer::s_Concurrency; -int CtxConcurrentDestroyer::s_NumContexts; -tbb::task_group_context** CtxConcurrentDestroyer::s_Contexts; -char* CtxConcurrentDestroyer::s_Buffer; -Harness::SpinBarrier CtxConcurrentDestroyer::s_Barrier; -Harness::SpinBarrier CtxConcurrentDestroyer::s_ExitBarrier; - -void TestConcurrentCtxDestruction () { - REMARK( "TestConcurrentCtxDestruction\n" ); - CtxConcurrentDestroyer::Init(g_NumThreads); - NativeParallelFor( g_NumThreads, CtxConcurrentDestroyer() ); - CtxConcurrentDestroyer::Uninit(); -} - -void RunTests () { - REMARK ("Number of threads %d\n", g_NumThreads); - tbb::task_scheduler_init init (g_NumThreads); - g_Master = Harness::CurrentTid(); -#if TBB_USE_EXCEPTIONS - Test1(); - Test2(); - Test3(); - Test4(); - Test4_1(); - Test5(); - Test6(); - Test7(); - Test8(); - TestMovableException(); -#endif /* TBB_USE_EXCEPTIONS */ - TestCancelation(); - TestCtxDestruction(); -#if !RML_USE_WCRM - TestConcurrentCtxDestruction(); -#endif -} - -int TestMain () { - REMARK ("Using %s\n", TBB_USE_CAPTURED_EXCEPTION ? "tbb:captured_exception" : "exact exception propagation"); - MinThread = min(NUM_ROOTS_IN_GROUP, min(tbb::task_scheduler_init::default_num_threads(), max(2, MinThread))); - MaxThread = min(NUM_ROOTS_IN_GROUP, max(MinThread, min(tbb::task_scheduler_init::default_num_threads(), MaxThread))); - ASSERT (NUM_ROOTS_IN_GROUP < NUM_ROOT_TASKS, "Fix defines"); -#if TBB_USE_EXCEPTIONS - // Test0 always runs on one thread - Test0(); -#endif /* TBB_USE_EXCEPTIONS */ - g_SolitaryException = 0; - for ( g_NumThreads = MinThread; g_NumThreads <= MaxThread; ++g_NumThreads ) - RunTests(); - return Harness::Done; -} - -#else /* !__TBB_TASK_GROUP_CONTEXT */ - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__TBB_TASK_GROUP_CONTEXT */ diff --git a/src/tbb/src/test/test_enumerable_thread_specific.cpp b/src/tbb/src/test/test_enumerable_thread_specific.cpp deleted file mode 100644 index ce676f9bd..000000000 --- a/src/tbb/src/test/test_enumerable_thread_specific.cpp +++ /dev/null @@ -1,1040 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/enumerable_thread_specific.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/parallel_reduce.h" -#include "tbb/blocked_range.h" -#include "tbb/tick_count.h" -#include "tbb/tbb_allocator.h" -#include "tbb/tbb_thread.h" -#include "tbb/atomic.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> -#include <vector> -#include <deque> -#include <list> -#include <map> -#include <utility> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "harness_assert.h" -#include "harness.h" - -#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif - -static tbb::atomic<int> construction_counter; -static tbb::atomic<int> destruction_counter; - -#if TBB_USE_DEBUG -const int REPETITIONS = 4; -const int N = 10000; -const int RANGE_MIN=1000; -#else -const int REPETITIONS = 10; -const int N = 100000; -const int RANGE_MIN=10000; -#endif -const int VALID_NUMBER_OF_KEYS = 100; -const double EXPECTED_SUM = (REPETITIONS + 1) * N; - -//! A minimal class that occupies N bytes. -/** Defines default and copy constructor, and allows implicit operator&. - Hides operator=. */ -template<size_t N=tbb::internal::NFS_MaxLineSize> -class minimal: NoAssign { -private: - int my_value; - bool is_constructed; - char pad[N-sizeof(int) - sizeof(bool)]; -public: - minimal() : NoAssign(), my_value(0) { ++construction_counter; is_constructed = true; } - minimal( const minimal &m ) : NoAssign(), my_value(m.my_value) { ++construction_counter; is_constructed = true; } - ~minimal() { ++destruction_counter; ASSERT(is_constructed, NULL); is_constructed = false; } - void set_value( const int i ) { ASSERT(is_constructed, NULL); my_value = i; } - int value( ) const { ASSERT(is_constructed, NULL); return my_value; } -}; - -// -// A helper class that simplifies writing the tests since minimal does not -// define = or + operators. -// - -template< typename T > -struct test_helper { - static inline void init(T &e) { e = static_cast<T>(0); } - static inline void sum(T &e, const int addend ) { e += static_cast<T>(addend); } - static inline void sum(T &e, const double addend ) { e += static_cast<T>(addend); } - static inline void set(T &e, const int value ) { e = static_cast<T>(value); } - static inline double get(const T &e ) { return static_cast<double>(e); } -}; - -template<size_t N> -struct test_helper<minimal<N> > { - static inline void init(minimal<N> &sum) { sum.set_value( 0 ); } - static inline void sum(minimal<N> &sum, const int addend ) { sum.set_value( sum.value() + addend); } - static inline void sum(minimal<N> &sum, const double addend ) { sum.set_value( sum.value() + static_cast<int>(addend)); } - static inline void sum(minimal<N> &sum, const minimal<N> &addend ) { sum.set_value( sum.value() + addend.value()); } - static inline void set(minimal<N> &v, const int value ) { v.set_value( static_cast<int>(value) ); } - static inline double get(const minimal<N> &sum ) { return static_cast<double>(sum.value()); } -}; - -//! Tag class used to make certain constructors hard to invoke accidentally. 
-struct SecretTagType {} SecretTag; - -//// functors and routines for initialization and combine - -// Addition - -template <typename T> -struct FunctorAddCombineRef { - T operator()(const T& left, const T& right) const { - return left+right; - } -}; - -template <size_t N> -struct FunctorAddCombineRef<minimal<N> > { - minimal<N> operator()(const minimal<N>& left, const minimal<N>& right) const { - minimal<N> result; - result.set_value( left.value() + right.value() ); - return result; - } -}; - -//! Counts instances of FunctorFinit -static tbb::atomic<int> FinitCounter; - -template <typename T, int Value> -struct FunctorFinit { - FunctorFinit( const FunctorFinit& ) {++FinitCounter;} - FunctorFinit( SecretTagType ) {++FinitCounter;} - ~FunctorFinit() {--FinitCounter;} - T operator()() { return Value; } -}; - -template <size_t N, int Value> -struct FunctorFinit<minimal<N>,Value> { - FunctorFinit( const FunctorFinit& ) {++FinitCounter;} - FunctorFinit( SecretTagType ) {++FinitCounter;} - ~FunctorFinit() {--FinitCounter;} - minimal<N> operator()() { - minimal<N> result; - result.set_value( Value ); - return result; - } -}; - -template <typename T> -struct FunctorAddCombine { - T operator()(T left, T right ) const { - return FunctorAddCombineRef<T>()( left, right ); - } -}; - -template <typename T> -T my_combine_ref( const T &left, const T &right) { - return FunctorAddCombineRef<T>()( left, right ); -} - -template <typename T> -T my_combine( T left, T right) { return my_combine_ref(left,right); } - -template <typename T> -class combine_one_helper { -public: - combine_one_helper(T& _result) : my_result(_result) {} - void operator()(const T& new_bit) { test_helper<T>::sum(my_result, new_bit); } - combine_one_helper& operator=(const combine_one_helper& other) { - test_helper<T>::set(my_result, test_helper<T>::get(other)); - return *this; - } -private: - T& my_result; -}; - -//// end functors and routines - -template< typename T > -void run_serial_scalar_tests(const char *test_name) { - tbb::tick_count t0; - T sum; - test_helper<T>::init(sum); - - REMARK("Testing serial %s... ", test_name); - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - for (int i = 0; i < N; ++i) { - test_helper<T>::sum(sum,1); - } - } - - double result_value = test_helper<T>::get(sum); - ASSERT( EXPECTED_SUM == result_value, NULL); - REMARK("done\nserial %s, 0, %g, %g\n", test_name, result_value, ( tbb::tick_count::now() - t0).seconds()); -} - - -template <typename T> -class parallel_scalar_body: NoAssign { - - tbb::enumerable_thread_specific<T> &sums; - -public: - - parallel_scalar_body ( tbb::enumerable_thread_specific<T> &_sums ) : sums(_sums) { } - - void operator()( const tbb::blocked_range<int> &r ) const { - for (int i = r.begin(); i != r.end(); ++i) - test_helper<T>::sum( sums.local(), 1 ); - } - -}; - -template< typename T > -void run_parallel_scalar_tests_nocombine(const char *test_name) { - - typedef tbb::enumerable_thread_specific<T> ets_type; - - // We assume that static_sums zero-initialized or has a default constructor that zeros it. - static ets_type static_sums = ets_type( T() ); - - T exemplar; - test_helper<T>::init(exemplar); - T exemplar23; - test_helper<T>::set(exemplar23,23); - - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing parallel %s on %d thread(s)... 
", test_name, p); - tbb::task_scheduler_init init(p); - tbb::tick_count t0; - - T iterator_sum; - test_helper<T>::init(iterator_sum); - - T finit_ets_sum; - test_helper<T>::init(finit_ets_sum); - - T const_iterator_sum; - test_helper<T>::init(const_iterator_sum); - - T range_sum; - test_helper<T>::init(range_sum); - - T const_range_sum; - test_helper<T>::init(const_range_sum); - - T cconst_sum; - test_helper<T>::init(cconst_sum); - - T assign_sum; - test_helper<T>::init(assign_sum); - - T cassgn_sum; - test_helper<T>::init(cassgn_sum); - T non_cassgn_sum; - test_helper<T>::init(non_cassgn_sum); - - T static_sum; - test_helper<T>::init(static_sum); - - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - - static_sums.clear(); - - ets_type sums(exemplar); - FunctorFinit<T,0> my_finit(SecretTag); - ets_type finit_ets(my_finit); - - ASSERT( sums.empty(), NULL); - tbb::parallel_for( tbb::blocked_range<int>( 0, N, RANGE_MIN ), parallel_scalar_body<T>( sums ) ); - ASSERT( !sums.empty(), NULL); - - ASSERT( finit_ets.empty(), NULL); - tbb::parallel_for( tbb::blocked_range<int>( 0, N, RANGE_MIN ), parallel_scalar_body<T>( finit_ets ) ); - ASSERT( !finit_ets.empty(), NULL); - - ASSERT(static_sums.empty(), NULL); - tbb::parallel_for( tbb::blocked_range<int>( 0, N, RANGE_MIN ), parallel_scalar_body<T>( static_sums ) ); - ASSERT( !static_sums.empty(), NULL); - - // use iterator - typename ets_type::size_type size = 0; - for ( typename ets_type::iterator i = sums.begin(); i != sums.end(); ++i ) { - ++size; - test_helper<T>::sum(iterator_sum, *i); - } - ASSERT( sums.size() == size, NULL); - - // use const_iterator - for ( typename ets_type::const_iterator i = sums.begin(); i != sums.end(); ++i ) { - test_helper<T>::sum(const_iterator_sum, *i); - } - - // use range_type - typename ets_type::range_type r = sums.range(); - for ( typename ets_type::range_type::const_iterator i = r.begin(); i != r.end(); ++i ) { - test_helper<T>::sum(range_sum, *i); - } - - // use const_range_type - typename ets_type::const_range_type cr = sums.range(); - for ( typename ets_type::const_range_type::iterator i = cr.begin(); i != cr.end(); ++i ) { - test_helper<T>::sum(const_range_sum, *i); - } - - // test copy constructor, with TLS-cached locals - typedef typename tbb::enumerable_thread_specific<T, tbb::cache_aligned_allocator<T>, tbb::ets_key_per_instance> cached_ets_type; - - cached_ets_type cconst(sums); - - for ( typename cached_ets_type::const_iterator i = cconst.begin(); i != cconst.end(); ++i ) { - test_helper<T>::sum(cconst_sum, *i); - } - - // test assignment - ets_type assigned; - assigned = sums; - - for ( typename ets_type::const_iterator i = assigned.begin(); i != assigned.end(); ++i ) { - test_helper<T>::sum(assign_sum, *i); - } - - // test assign to and from cached locals - cached_ets_type cassgn; - cassgn = sums; - for ( typename cached_ets_type::const_iterator i = cassgn.begin(); i != cassgn.end(); ++i ) { - test_helper<T>::sum(cassgn_sum, *i); - } - - ets_type non_cassgn; - non_cassgn = cassgn; - for ( typename ets_type::const_iterator i = non_cassgn.begin(); i != non_cassgn.end(); ++i ) { - test_helper<T>::sum(non_cassgn_sum, *i); - } - - // test finit-initialized ets - for(typename ets_type::const_iterator i = finit_ets.begin(); i != finit_ets.end(); ++i) { - test_helper<T>::sum(finit_ets_sum, *i); - } - - // test static ets - for(typename ets_type::const_iterator i = static_sums.begin(); i != static_sums.end(); ++i) { - test_helper<T>::sum(static_sum, *i); - } - - 
} - - ASSERT( EXPECTED_SUM == test_helper<T>::get(iterator_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(const_iterator_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(range_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(const_range_sum), NULL); - - ASSERT( EXPECTED_SUM == test_helper<T>::get(cconst_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(assign_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(cassgn_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(non_cassgn_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(finit_ets_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(static_sum), NULL); - - REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, test_helper<T>::get(iterator_sum), - ( tbb::tick_count::now() - t0).seconds()); - } -} - -template< typename T > -void run_parallel_scalar_tests(const char *test_name) { - - typedef tbb::enumerable_thread_specific<T> ets_type; - - // We assume that static_sums zero-initialized or has a default constructor that zeros it. - static ets_type static_sums = ets_type( T() ); - - T exemplar; - test_helper<T>::init(exemplar); - - run_parallel_scalar_tests_nocombine<T>(test_name); - - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); - tbb::task_scheduler_init init(p); - tbb::tick_count t0; - - T combine_sum; - test_helper<T>::init(combine_sum); - - T combine_ref_sum; - test_helper<T>::init(combine_ref_sum); - - T combine_one_sum; - test_helper<T>::init(combine_one_sum); - - T static_sum; - test_helper<T>::init(static_sum); - - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - - static_sums.clear(); - - ets_type sums(exemplar); - - ASSERT( sums.empty(), NULL); - tbb::parallel_for( tbb::blocked_range<int>( 0, N, RANGE_MIN ), parallel_scalar_body<T>( sums ) ); - ASSERT( !sums.empty(), NULL); - - ASSERT(static_sums.empty(), NULL); - tbb::parallel_for( tbb::blocked_range<int>( 0, N, RANGE_MIN ), parallel_scalar_body<T>( static_sums ) ); - ASSERT( !static_sums.empty(), NULL); - - - // Use combine - test_helper<T>::sum(combine_sum, sums.combine(my_combine<T>)); - test_helper<T>::sum(combine_ref_sum, sums.combine(my_combine_ref<T>)); - test_helper<T>::sum(static_sum, static_sums.combine(my_combine<T>)); - - combine_one_helper<T> my_helper(combine_one_sum); - sums.combine_each(my_helper); - } - - - ASSERT( EXPECTED_SUM == test_helper<T>::get(combine_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(combine_ref_sum), NULL); - ASSERT( EXPECTED_SUM == test_helper<T>::get(static_sum), NULL); - - REMARK("done\nparallel combine %s, %d, %g, %g\n", test_name, p, test_helper<T>::get(combine_sum), - ( tbb::tick_count::now() - t0).seconds()); - } -} - -template <typename T> -class parallel_vector_for_body: NoAssign { - - tbb::enumerable_thread_specific< std::vector<T, tbb::tbb_allocator<T> > > &locals; - -public: - - parallel_vector_for_body ( tbb::enumerable_thread_specific< std::vector<T, tbb::tbb_allocator<T> > > &_locals ) : locals(_locals) { } - - void operator()( const tbb::blocked_range<int> &r ) const { - T one; - test_helper<T>::set(one, 1); - - for (int i = r.begin(); i < r.end(); ++i) { - locals.local().push_back( one ); - } - } - -}; - -template <typename R, typename T> -struct parallel_vector_reduce_body { - - T sum; - size_t count; - - parallel_vector_reduce_body ( ) : count(0) { test_helper<T>::init(sum); } - parallel_vector_reduce_body ( 
parallel_vector_reduce_body<R, T> &, tbb::split ) : count(0) { test_helper<T>::init(sum); } - - void operator()( const R &r ) { - for (typename R::iterator ri = r.begin(); ri != r.end(); ++ri) { - const std::vector< T, tbb::tbb_allocator<T> > &v = *ri; - ++count; - for (typename std::vector<T, tbb::tbb_allocator<T> >::const_iterator vi = v.begin(); vi != v.end(); ++vi) { - test_helper<T>::sum(sum, *vi); - } - } - } - - void join( const parallel_vector_reduce_body &b ) { - test_helper<T>::sum(sum,b.sum); - count += b.count; - } - -}; - -template< typename T > -void run_parallel_vector_tests(const char *test_name) { - tbb::tick_count t0; - typedef std::vector<T, tbb::tbb_allocator<T> > container_type; - - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); - tbb::task_scheduler_init init(p); - - T sum; - test_helper<T>::init(sum); - - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - typedef typename tbb::enumerable_thread_specific< container_type > ets_type; - ets_type vs; - - ASSERT( vs.empty(), NULL); - tbb::parallel_for ( tbb::blocked_range<int> (0, N, RANGE_MIN), parallel_vector_for_body<T>( vs ) ); - ASSERT( !vs.empty(), NULL); - - // copy construct - ets_type vs2(vs); // this causes an assertion failure, related to allocators... - - // assign - ets_type vs3; - vs3 = vs; - - parallel_vector_reduce_body< typename tbb::enumerable_thread_specific< std::vector< T, tbb::tbb_allocator<T> > >::const_range_type, T > pvrb; - tbb::parallel_reduce ( vs.range(1), pvrb ); - - test_helper<T>::sum(sum, pvrb.sum); - - ASSERT( vs.size() == pvrb.count, NULL); - - tbb::flattened2d<ets_type> fvs = flatten2d(vs); - size_t ccount = fvs.size(); - size_t elem_cnt = 0; - for(typename tbb::flattened2d<ets_type>::const_iterator i = fvs.begin(); i != fvs.end(); ++i) { - ++elem_cnt; - }; - ASSERT(ccount == elem_cnt, NULL); - - elem_cnt = 0; - for(typename tbb::flattened2d<ets_type>::iterator i = fvs.begin(); i != fvs.end(); ++i) { - ++elem_cnt; - }; - ASSERT(ccount == elem_cnt, NULL); - } - - double result_value = test_helper<T>::get(sum); - ASSERT( EXPECTED_SUM == result_value, NULL); - REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, result_value, ( tbb::tick_count::now() - t0).seconds()); - } -} - -template<typename T> -void run_cross_type_vector_tests(const char *test_name) { - tbb::tick_count t0; - typedef std::vector<T, tbb::tbb_allocator<T> > container_type; - - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing parallel %s on %d thread(s)... 
", test_name, p); - tbb::task_scheduler_init init(p); - - T sum; - test_helper<T>::init(sum); - - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - typedef typename tbb::enumerable_thread_specific< container_type, tbb::cache_aligned_allocator<container_type>, tbb::ets_no_key > ets_nokey_type; - typedef typename tbb::enumerable_thread_specific< container_type, tbb::cache_aligned_allocator<container_type>, tbb::ets_key_per_instance > ets_tlskey_type; - ets_nokey_type vs; - - ASSERT( vs.empty(), NULL); - tbb::parallel_for ( tbb::blocked_range<int> (0, N, RANGE_MIN), parallel_vector_for_body<T>( vs ) ); - ASSERT( !vs.empty(), NULL); - - // copy construct - ets_tlskey_type vs2(vs); - - // assign - ets_nokey_type vs3; - vs3 = vs2; - - parallel_vector_reduce_body< typename tbb::enumerable_thread_specific< std::vector< T, tbb::tbb_allocator<T> > >::const_range_type, T > pvrb; - tbb::parallel_reduce ( vs3.range(1), pvrb ); - - test_helper<T>::sum(sum, pvrb.sum); - - ASSERT( vs3.size() == pvrb.count, NULL); - - tbb::flattened2d<ets_nokey_type> fvs = flatten2d(vs3); - size_t ccount = fvs.size(); - size_t elem_cnt = 0; - for(typename tbb::flattened2d<ets_nokey_type>::const_iterator i = fvs.begin(); i != fvs.end(); ++i) { - ++elem_cnt; - }; - ASSERT(ccount == elem_cnt, NULL); - - elem_cnt = 0; - for(typename tbb::flattened2d<ets_nokey_type>::iterator i = fvs.begin(); i != fvs.end(); ++i) { - ++elem_cnt; - }; - ASSERT(ccount == elem_cnt, NULL); - } - - double result_value = test_helper<T>::get(sum); - ASSERT( EXPECTED_SUM == result_value, NULL); - REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, result_value, ( tbb::tick_count::now() - t0).seconds()); - } -} - -template< typename T > -void run_serial_vector_tests(const char *test_name) { - tbb::tick_count t0; - T sum; - test_helper<T>::init(sum); - T one; - test_helper<T>::set(one, 1); - - REMARK("Testing serial %s... 
", test_name); - for (int t = -1; t < REPETITIONS; ++t) { - if (Verbose && t == 0) t0 = tbb::tick_count::now(); - std::vector<T, tbb::tbb_allocator<T> > v; - for (int i = 0; i < N; ++i) { - v.push_back( one ); - } - for (typename std::vector<T, tbb::tbb_allocator<T> >::const_iterator i = v.begin(); i != v.end(); ++i) - test_helper<T>::sum(sum, *i); - } - - double result_value = test_helper<T>::get(sum); - ASSERT( EXPECTED_SUM == result_value, NULL); - REMARK("done\nserial %s, 0, %g, %g\n", test_name, result_value, ( tbb::tick_count::now() - t0).seconds()); -} - -const size_t line_size = tbb::internal::NFS_MaxLineSize; - -void -run_serial_tests() { - run_serial_scalar_tests<int>("int"); - run_serial_scalar_tests<double>("double"); - run_serial_scalar_tests<minimal<> >("minimal<>"); - run_serial_vector_tests<int>("std::vector<int, tbb::tbb_allocator<int> >"); - run_serial_vector_tests<double>("std::vector<double, tbb::tbb_allocator<double> >"); -} - -void -run_parallel_tests() { - run_parallel_scalar_tests<int>("int"); - run_parallel_scalar_tests<double>("double"); - run_parallel_scalar_tests_nocombine<minimal<> >("minimal<>"); - run_parallel_vector_tests<int>("std::vector<int, tbb::tbb_allocator<int> >"); - run_parallel_vector_tests<double>("std::vector<double, tbb::tbb_allocator<double> >"); -} - -void -run_cross_type_tests() { - // cross-type scalar tests are part of run_serial_scalar_tests - run_cross_type_vector_tests<int>("std::vector<int, tbb::tbb_allocator<int> >"); - run_parallel_vector_tests<double>("std::vector<double, tbb::tbb_allocator<double> >"); -} - -typedef tbb::enumerable_thread_specific<minimal<line_size> > flogged_ets; - -class set_body { - flogged_ets *a; - -public: - set_body( flogged_ets*_a ) : a(_a) { } - - void operator() ( ) const { - for (int i = 0; i < VALID_NUMBER_OF_KEYS; ++i) { - a[i].local().set_value(i + 1); - } - } - -}; - -void do_tbb_threads( int max_threads, flogged_ets a[] ) { - std::vector< tbb::tbb_thread * > threads; - - for (int p = 0; p < max_threads; ++p) { - threads.push_back( new tbb::tbb_thread ( set_body( a ) ) ); - } - - for (int p = 0; p < max_threads; ++p) { - threads[p]->join(); - } - - for(int p = 0; p < max_threads; ++p) { - delete threads[p]; - } -} - -void -flog_key_creation_and_deletion() { - const int FLOG_REPETITIONS = 100; - - for (int p = MinThread; p <= MaxThread; ++p) { - REMARK("Testing repeated deletes on %d threads... ", p); - - for (int j = 0; j < FLOG_REPETITIONS; ++j) { - construction_counter = 0; - destruction_counter = 0; - - // causes VALID_NUMER_OF_KEYS exemplar instances to be constructed - flogged_ets* a = new flogged_ets[VALID_NUMBER_OF_KEYS]; - ASSERT(int(construction_counter) == 0, NULL); // no exemplars or actual locals have been constructed - ASSERT(int(destruction_counter) == 0, NULL); // and none have been destroyed - - // causes p * VALID_NUMBER_OF_KEYS minimals to be created - do_tbb_threads(p, a); - - for (int i = 0; i < VALID_NUMBER_OF_KEYS; ++i) { - int pcnt = 0; - for ( flogged_ets::iterator tli = a[i].begin(); tli != a[i].end(); ++tli ) { - ASSERT( (*tli).value() == i+1, NULL ); - ++pcnt; - } - ASSERT( pcnt == p, NULL); // should be one local per thread. - } - delete[] a; - } - - ASSERT( int(construction_counter) == (p)*VALID_NUMBER_OF_KEYS, NULL ); - ASSERT( int(destruction_counter) == (p)*VALID_NUMBER_OF_KEYS, NULL ); - - REMARK("done\nTesting repeated clears on %d threads... 
", p); - - construction_counter = 0; - destruction_counter = 0; - - // causes VALID_NUMER_OF_KEYS exemplar instances to be constructed - flogged_ets* a = new flogged_ets[VALID_NUMBER_OF_KEYS]; - - for (int j = 0; j < FLOG_REPETITIONS; ++j) { - - // causes p * VALID_NUMBER_OF_KEYS minimals to be created - do_tbb_threads(p, a); - - for (int i = 0; i < VALID_NUMBER_OF_KEYS; ++i) { - for ( flogged_ets::iterator tli = a[i].begin(); tli != a[i].end(); ++tli ) { - ASSERT( (*tli).value() == i+1, NULL ); - } - a[i].clear(); - ASSERT( static_cast<int>(a[i].end() - a[i].begin()) == 0, NULL ); - } - - } - - delete[] a; - - ASSERT( int(construction_counter) == (FLOG_REPETITIONS*p)*VALID_NUMBER_OF_KEYS, NULL ); - ASSERT( int(destruction_counter) == (FLOG_REPETITIONS*p)*VALID_NUMBER_OF_KEYS, NULL ); - - REMARK("done\n"); - } - -} - -template <typename inner_container> -void -flog_segmented_interator() { - - bool found_error = false; - typedef typename inner_container::value_type T; - typedef std::vector< inner_container > nested_vec; - inner_container my_inner_container; - my_inner_container.clear(); - nested_vec my_vec; - - // simple nested vector (neither level empty) - const int maxval = 10; - for(int i=0; i < maxval; i++) { - my_vec.push_back(my_inner_container); - for(int j = 0; j < maxval; j++) { - my_vec.at(i).push_back((T)(maxval * i + j)); - } - } - - tbb::internal::segmented_iterator<nested_vec, T> my_si(my_vec); - - T ii; - for(my_si=my_vec.begin(), ii=0; my_si != my_vec.end(); ++my_si, ++ii) { - if((*my_si) != ii) { - found_error = true; - REMARK( "*my_si=%d\n", int(*my_si)); - } - } - - // outer level empty - my_vec.clear(); - for(my_si=my_vec.begin(); my_si != my_vec.end(); ++my_si) { - found_error = true; - } - - // inner levels empty - my_vec.clear(); - for(int i =0; i < maxval; ++i) { - my_vec.push_back(my_inner_container); - } - for(my_si = my_vec.begin(); my_si != my_vec.end(); ++my_si) { - found_error = true; - } - - // every other inner container is empty - my_vec.clear(); - for(int i=0; i < maxval; ++i) { - my_vec.push_back(my_inner_container); - if(i%2) { - for(int j = 0; j < maxval; ++j) { - my_vec.at(i).push_back((T)(maxval * (i/2) + j)); - } - } - } - for(my_si = my_vec.begin(), ii=0; my_si != my_vec.end(); ++my_si, ++ii) { - if((*my_si) != ii) { - found_error = true; - REMARK("*my_si=%d, ii=%d\n", (int)(*my_si), (int)ii); - } - } - - tbb::internal::segmented_iterator<nested_vec, const T> my_csi(my_vec); - for(my_csi=my_vec.begin(), ii=0; my_csi != my_vec.end(); ++my_csi, ++ii) { - if((*my_csi) != ii) { - found_error = true; - REMARK( "*my_csi=%d\n", int(*my_csi)); - } - } - - // outer level empty - my_vec.clear(); - for(my_csi=my_vec.begin(); my_csi != my_vec.end(); ++my_csi) { - found_error = true; - } - - // inner levels empty - my_vec.clear(); - for(int i =0; i < maxval; ++i) { - my_vec.push_back(my_inner_container); - } - for(my_csi = my_vec.begin(); my_csi != my_vec.end(); ++my_csi) { - found_error = true; - } - - // every other inner container is empty - my_vec.clear(); - for(int i=0; i < maxval; ++i) { - my_vec.push_back(my_inner_container); - if(i%2) { - for(int j = 0; j < maxval; ++j) { - my_vec.at(i).push_back((T)(maxval * (i/2) + j)); - } - } - } - for(my_csi = my_vec.begin(), ii=0; my_csi != my_vec.end(); ++my_csi, ++ii) { - if((*my_csi) != ii) { - found_error = true; - REMARK("*my_csi=%d, ii=%d\n", (int)(*my_csi), (int)ii); - } - } - - - if(found_error) REPORT("segmented_iterator failed\n"); -} - -template <typename Key, typename Val> -void 
-flog_segmented_iterator_map() { - typedef typename std::map<Key, Val> my_map; - typedef std::vector< my_map > nested_vec; - my_map my_inner_container; - my_inner_container.clear(); - nested_vec my_vec; - my_vec.clear(); - bool found_error = false; - - // simple nested vector (neither level empty) - const int maxval = 4; - for(int i=0; i < maxval; i++) { - my_vec.push_back(my_inner_container); - for(int j = 0; j < maxval; j++) { - my_vec.at(i).insert(std::make_pair<Key,Val>(maxval * i + j, 2*(maxval*i + j))); - } - } - - tbb::internal::segmented_iterator<nested_vec, std::pair<const Key, Val> > my_si(my_vec); - Key ii; - for(my_si=my_vec.begin(), ii=0; my_si != my_vec.end(); ++my_si, ++ii) { - if(((*my_si).first != ii) || ((*my_si).second != 2*ii)) { - found_error = true; - REMARK( "ii=%d, (*my_si).first=%d, second=%d\n",ii, int((*my_si).first), int((*my_si).second)); - } - } - - tbb::internal::segmented_iterator<nested_vec, const std::pair<const Key, Val> > my_csi(my_vec); - for(my_csi=my_vec.begin(), ii=0; my_csi != my_vec.end(); ++my_csi, ++ii) { - if(((*my_csi).first != ii) || ((*my_csi).second != 2*ii)) { - found_error = true; - REMARK( "ii=%d, (*my_csi).first=%d, second=%d\n",ii, int((*my_csi).first), int((*my_csi).second)); - } - } - if(found_error) REPORT("segmented_iterator_map failed\n"); -} - -void -run_segmented_iterator_tests() { - // only the following containers can be used with the segmented iterator. - REMARK("Running Segmented Iterator Tests\n"); - flog_segmented_interator<std::vector< int > >(); - flog_segmented_interator<std::vector< double > >(); - flog_segmented_interator<std::deque< int > >(); - flog_segmented_interator<std::deque< double > >(); - flog_segmented_interator<std::list< int > >(); - flog_segmented_interator<std::list< double > >(); - - flog_segmented_iterator_map<int, int>(); - flog_segmented_iterator_map<int, double>(); -} - -template <typename T> -void -run_assign_and_copy_constructor_test(const char *test_name) { - REMARK("Testing assignment and copy construction for %s\n", test_name); - - // test initializer with exemplar - T initializer0; - test_helper<T>::init(initializer0); - T initializer7; - test_helper<T>::set(initializer7,7); - tbb::enumerable_thread_specific<T> create1(initializer7); - (void) create1.local(); // create an initialized value - ASSERT(7 == test_helper<T>::get(create1.local()), NULL); - - // test copy construction with exemplar initializer - create1.clear(); - tbb::enumerable_thread_specific<T> copy1(create1); - (void) copy1.local(); - ASSERT(7 == test_helper<T>::get(copy1.local()), NULL); - - // test copy assignment with exemplar initializer - create1.clear(); - tbb::enumerable_thread_specific<T> assign1(initializer0); - assign1 = create1; - (void) assign1.local(); - ASSERT(7 == test_helper<T>::get(assign1.local()), NULL); - - // test creation with finit function - FunctorFinit<T,7> my_finit7(SecretTag); - tbb::enumerable_thread_specific<T> create2(my_finit7); - (void) create2.local(); - ASSERT(7 == test_helper<T>::get(create2.local()), NULL); - - // test copy construction with function initializer - create2.clear(); - tbb::enumerable_thread_specific<T> copy2(create2); - (void) copy2.local(); - ASSERT(7 == test_helper<T>::get(copy2.local()), NULL); - - // test copy assignment with function initializer - create2.clear(); - FunctorFinit<T,0> my_finit(SecretTag); - tbb::enumerable_thread_specific<T> assign2(my_finit); - assign2 = create2; - (void) assign2.local(); - ASSERT(7 == test_helper<T>::get(assign2.local()), NULL); -} - -void 
-run_assignment_and_copy_constructor_tests() { - REMARK("Running assignment and copy constructor tests\n"); - run_assign_and_copy_constructor_test<int>("int"); - run_assign_and_copy_constructor_test<double>("double"); - // Try class sizes that are close to a cache line in size, in order to check padding calculations. - run_assign_and_copy_constructor_test<minimal<line_size-1> >("minimal<line_size-1>"); - run_assign_and_copy_constructor_test<minimal<line_size> >("minimal<line_size>"); - run_assign_and_copy_constructor_test<minimal<line_size+1> >("minimal<line_size+1>"); - ASSERT(FinitCounter==0, NULL); -} - -// Class with no default constructor -class HasNoDefaultConstructor { - HasNoDefaultConstructor(); -public: - HasNoDefaultConstructor( SecretTagType ) {} -}; - -// Initialization functor for a HasNoDefaultConstructor -struct HasNoDefaultConstructorFinit { - HasNoDefaultConstructor operator()() { - return HasNoDefaultConstructor(SecretTag); - } -}; - -struct HasNoDefaultConstructorCombine { - HasNoDefaultConstructor operator()( HasNoDefaultConstructor, HasNoDefaultConstructor ) { - return HasNoDefaultConstructor(SecretTag); - } -}; - -//! Test situations where only default constructor or copy constructor is required. -void TestInstantiation() { - // Test instantiation is possible when copy constructor is not required. - tbb::enumerable_thread_specific<NoCopy> ets1; - - // Test instantiation when default constructor is not required, because exemplar is provided. - HasNoDefaultConstructor x(SecretTag); - tbb::enumerable_thread_specific<HasNoDefaultConstructor> ets2(x); - ets2.combine(HasNoDefaultConstructorCombine()); - - // Test instantiation when default constructor is not required, because init function is provided. - HasNoDefaultConstructorFinit f; - tbb::enumerable_thread_specific<HasNoDefaultConstructor> ets3(f); - ets3.combine(HasNoDefaultConstructorCombine()); -} - -class BigType { -public: - BigType() { /* avoid cl warning C4345 about default initialization of POD types */ } - char my_data[12 * 1024 * 1024]; -}; - -void TestConstructor() { - typedef tbb::enumerable_thread_specific<BigType> CounterBigType; - // Test default constructor - CounterBigType MyCounters; - // Create a local instance. - CounterBigType::reference my_local = MyCounters.local(); - my_local.my_data[0] = 'a'; - // Test copy constructor - CounterBigType MyCounters2(MyCounters); -} - -int TestMain () { - TestInstantiation(); - run_segmented_iterator_tests(); - flog_key_creation_and_deletion(); - - if (MinThread == 0) { - run_serial_tests(); - MinThread = 1; - } - if (MaxThread > 0) { - run_parallel_tests(); - run_cross_type_tests(); - } - - run_assignment_and_copy_constructor_tests(); - - TestConstructor(); - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_examples_common_utility.cpp b/src/tbb/src/test/test_examples_common_utility.cpp deleted file mode 100644 index dcfa2ca67..000000000 --- a/src/tbb/src/test/test_examples_common_utility.cpp +++ /dev/null @@ -1,589 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" // for suppress_unused_warning -#include "harness_assert.h" -#include "../../examples/common/utility/utility.h" -#include <sstream> - -namespace implementation_unit_tests { - namespace argument_dest_test_suite{ - void test_type_impl_parse_and_store_simple_parse(){ - int a=0; - utility::internal::type_impl<int> a_("","",a); - a_.parse_and_store("9"); - ASSERT(a==9,""); - } - void test_default_value_of_is_matched(){ - //Testing for result of is_matched() for arguments not yet tried to be parsed. - //I.e. values were set up by argument::constructor. - using utility::internal::argument; - int i; - argument b("","",i); - ASSERT(!b.is_matched(),""); - - argument c = b; - ASSERT(!c.is_matched(),""); - - argument d = b; - d = c; - ASSERT(!d.is_matched(),""); - } - } - //TODO: test cases for argument type management - namespace compile_only{ - //TODO: enhance these to actually do checks by a negative test, or (if possible) - //by a positive test that at compile time selects between two alternatives, - //depending on whether operators exist or not (yes, SFINAE :)) - - //as non_pod class does provide the operators, and test do not check that compiler - //will reject types which don't have those. 
- using utility::cli_argument_pack; - void arg_chain(){ - cli_argument_pack p; - int size=0; - p.arg(size,"size","size"); - } - namespace tc_helper{ - struct non_pod{ - std::string s; - friend std::ostream& operator<<(std::ostream& o, non_pod){ return o;} - friend std::istream& operator>>(std::istream& i, non_pod){ return i;} - }; - } - void non_pod_dest_type(){ - cli_argument_pack p; - tc_helper::non_pod np; - p.arg(np,"",""); - } - } - namespace cli_argument_pack_suite{ - void test_copy_assign(){ - using utility::cli_argument_pack; - int i=9; - std::stringstream expected_output; using std::endl; - expected_output - << " Program usage is:" << endl - << " the_binary_name [i=value]" - << endl << endl - << " where:" << endl - << " i - i desc (9)" << endl - ; - cli_argument_pack copy(cli_argument_pack().arg(i,"i","i desc")); - ASSERT(copy.usage_string("the_binary_name") == expected_output.str(),"usage string is not as expected"); - cli_argument_pack assignee; assignee = copy; - ASSERT(assignee.usage_string("the_binary_name") == expected_output.str(),"Copying of cli_argument_pack breaks generation of usage string?"); - } - } -} - -#include <utility> -namespace high_level_api_tests { - using utility::cli_argument_pack; - using utility::internal::array_length; - - static const char * wrong_exception = "wrong exception thrown"; - static const char * wrong_exception_description = "caught exception has wrong description"; - void test_parse_basic(){ - char const* argv[]={"some.exe","1","a"}; - cli_argument_pack p; - int i=0; char a=' '; - p.positional_arg(i,"int","").positional_arg(a,"char",""); - p.parse(array_length(argv),argv); - ASSERT(i==1,""); - ASSERT(a=='a',""); - } - //helper function for test of named flag parsing - template<typename T, size_t N> - bool parse_silent_flag( T(& argv)[N]){ - cli_argument_pack p; - bool silent=false; - p.arg(silent,"silent","is extra info needed"); - p.parse(array_length(argv),argv); - return silent; - } - void test_named_flags_success(){ - char const* argv[]={"some.exe","silent"}; - ASSERT(true == parse_silent_flag(argv),""); - } - - void test_named_flags_failure(){ - try { - char const* argv[]={"some.exe","1"}; - parse_silent_flag(argv); - ASSERT(false,"exception was expected due to invalid argument, but not caught"); - } - catch(std::invalid_argument& e){ - ASSERT(e.what()==std::string("unknown parameter starting at:'1'"),wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - } - - //helper function for test of named flag parsing - template<typename T, size_t N> - std::pair<bool,int> parse_silent_flag_and_int( T(& argv)[N]){ - cli_argument_pack p; - bool silent=false; - int i=125; - p - .arg(silent,"silent","is extra info needed") - .positional_arg(i,"int",""); - p.parse(array_length(argv),argv); - return std::make_pair(silent,i); - } - - void test_named_flags_failure_and_other_arg(){ - char const* argv[]={"some.exe","1"}; - ASSERT(std::make_pair(false,1) == parse_silent_flag_and_int(argv),""); - } - - void test_named_flags_and_other_arg(){ - char const* argv[]={"some.exe","silent","7"}; - ASSERT(std::make_pair(true,7) == parse_silent_flag_and_int(argv),""); - } - - void test_named_flags_and_other_arg_different_order(){ - char const* argv[]={"some.exe","7","silent"}; - ASSERT(std::make_pair(true,7) == parse_silent_flag_and_int(argv),""); - } - - void test_flags_only_others_default(){ - char const* argv[]={"some.exe","silent"}; - ASSERT(std::make_pair(true,125) == parse_silent_flag_and_int(argv),""); - } - - namespace 
parameters_validation_test_suite{ - namespace test_validation_function_called_helpers{ - struct validator{ - static bool called; - static bool accept(const int & ){ - called = true; - return true; - } - }; - bool validator::called =false; - } - void test_validation_function_called(){ - using test_validation_function_called_helpers::validator; - - char const* argv[]={"some.exe","7"}; - cli_argument_pack p; - int size =0; - p.positional_arg(size,"size","",validator::accept); - p.parse(array_length(argv),argv); - ASSERT((validator::called),"validation function has not been called"); - } - void test_validation_failed(){ - struct validator{ - static bool reject(const int &){ - return false; - } - }; - char const* argv[]={"some.exe","7"}; - cli_argument_pack p; - int size =0; - p.positional_arg(size,"size","",validator::reject); - try { - p.parse(array_length(argv),argv); - ASSERT((false),"An exception was expected due to failed argument validation, " - "but no exception thrown"); - } - catch(std::invalid_argument& e){ - std::string error_msg("'7' is invalid value for argument 'size'"); - ASSERT(e.what()==error_msg , wrong_exception_description); - } - catch(...){ASSERT((false),wrong_exception);} - } - } - namespace error_handling { - void test_wrong_input(){ - char const* argv[]={"some.exe","silent"}; - cli_argument_pack p; - int size =0; - p.positional_arg(size,"size",""); - try{ - p.parse(array_length(argv),argv); - ASSERT(false,"An exception was expected due to wrong input, but no exception thrown"); - } - catch(std::invalid_argument & e){ - std::string error_msg("'silent' is incorrect input for argument 'size' (error converting string 'silent')"); - ASSERT(e.what()==error_msg, wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - } - void test_duplicate_arg_names(){ - cli_argument_pack p; - int a=0; - p.arg(a,"a",""); - try{ - int dup_a=0; - p.arg(dup_a,"a",""); - ASSERT(false, "An exception was expected due adding duplicate parameter name, but not thrown"); - } - catch(std::invalid_argument& e){ - ASSERT(e.what()==std::string("argument with name: 'a' already registered"),wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - } - void test_duplicate_positional_arg_names(){ - cli_argument_pack p; - int a=0; - p.positional_arg(a,"a",""); - try{ - int dup_a=0; - p.positional_arg(dup_a,"a",""); - ASSERT(false, "An exception was expected due adding duplicate parameter name, but not thrown"); - } - catch(std::invalid_argument& e){ - ASSERT(e.what()==std::string("argument with name: 'a' already registered"),wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - } - } - namespace usage_string { - void test_one_arg(){ - cli_argument_pack p; - int size =9; - p.arg(size,"size","size of problem domain"); - std::string const binary_name = "binary.exe"; - std::stringstream expected_output; - using std::endl; - expected_output << " Program usage is:" << endl - << " " << binary_name << " [size=value]" - << endl << endl - << " where:" << endl - << " size - size of problem domain (9)" << endl - ; - std::string usage= p.usage_string(binary_name); - ASSERT(usage==expected_output.str(),""); - } - void test_named_and_postional_args(){ - cli_argument_pack p; - int size =9; - int length =8; - int stride = 7; - p - .arg(size,"size","") - .positional_arg(length,"length","") - .positional_arg(stride,"stride",""); - std::string const binary_name = "binary.exe"; - std::stringstream expected_output; - using std::endl; - expected_output << " 
Program usage is:" << endl - << " " << binary_name << " [size=value] [length=value] [stride=value] [length [stride]]" - << endl << endl - << " where:" << endl - << " size - (9)" << endl - << " length - (8)" << endl - << " stride - (7)" << endl - ; - std::string usage= p.usage_string(binary_name); - ASSERT(usage==expected_output.str(),""); - } - void test_bool_flag(){ - bool flag=false; - cli_argument_pack p; - p.arg(flag,"flag",""); - std::string const binary_name = "binary.exe"; - std::stringstream expected_output; - using std::endl; - expected_output << " Program usage is:" << endl - << " " << binary_name << " [flag]" - << endl << endl - << " where:" << endl - << " flag - (0)" << endl - ; - std::string usage= p.usage_string(binary_name); - ASSERT(usage==expected_output.str(),""); - - } - - } - namespace name_positional_syntax { - void test_basic(){ - cli_argument_pack p; - int size =0; - int time = 0; - p - .positional_arg(size,"size","") - .positional_arg(time,"time",""); - char const* argv[]={"some.exe","1","2"}; - p.parse(array_length(argv),argv); - ASSERT(size==1,""); - ASSERT(time==2,""); - } - void test_positional_args_explicitly_named(){ - const char* no_or_wrong_exception_error_msg = "exception was expected but not thrown, or wrong exception caught"; - //TODO: Similar functionality is used all over the test. Generalize this helper further, and use as wide within the test as possible? - struct failed_with_exception{ - static bool _(cli_argument_pack & p, std::size_t argc, char const* argv[]){ - try{ - p.parse(argc,argv); - return false; - } - catch(std::exception &){ - return true; - } - catch(...){ - return false; - } - } - }; - { - cli_argument_pack p; - int a,b,c,d; - p - .positional_arg(a,"a","") - .positional_arg(b,"b","") - .positional_arg(c,"c","") - .positional_arg(d,"d",""); - char const* argv[]={"some.exe","a=7","0","1","2","4"}; - ASSERT(failed_with_exception::_(p,array_length(argv),argv),no_or_wrong_exception_error_msg); - } - { - cli_argument_pack p; - int a,b,c,d; - p - .positional_arg(a,"a","") - .positional_arg(b,"b","") - .positional_arg(c,"c","") - .positional_arg(d,"d",""); - char const* argv[]={"some.exe","a=7","0","1","2"}; - ASSERT(failed_with_exception::_(p,array_length(argv),argv),no_or_wrong_exception_error_msg); - } - { - cli_argument_pack p; - int a=-1,b=-1,c = -1,d=-1; - p - .positional_arg(a,"a","") - .positional_arg(b,"b","") - .positional_arg(c,"c","") - .positional_arg(d,"d",""); - char const* argv[]={"some.exe","0","1","d=7",}; - ASSERT(!failed_with_exception::_(p,array_length(argv),argv),"unexpected exception"); - ASSERT(a==0,""); ASSERT(b==1,""); ASSERT(c==-1,"");ASSERT(d==7,""); - } - } - } - namespace name_value_syntax { - void test_basic(){ - cli_argument_pack p; - int size =0; - p.arg(size,"size","size of problem domain"); - char const* argv[]={"some.exe","size=7"}; - p.parse(array_length(argv),argv); - ASSERT(size==7,""); - } - - void test_relaxed_order(){ - cli_argument_pack p; - int size =0; - int time=0; - p - .arg(size,"size","") - .arg(time,"time",""); - char const* argv[]={"some.exe","time=1","size=2"}; - p.parse(array_length(argv),argv); - ASSERT(size==2,""); - ASSERT(time==1,""); - } - - } - namespace number_of_argument_value{ - void test_only_single_values_allowed(){ - cli_argument_pack p; - int a=0; - p.arg(a,"a",""); - const char* argv[] = {"","a=7","a=8"}; - try { - p.parse(array_length(argv),argv); - ASSERT(false,"exception was expected due to duplicated values provided in input, but not thrown"); - } - 
catch(std::invalid_argument& e){ - //TODO: use patterns (regexp ?) to generate /validate exception descriptions - ASSERT(e.what() == std::string("several values specified for: 'a' argument"),wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - } - } - namespace thread_range_tests{ - using utility::thread_number_range; - using utility::internal::thread_range_step; - using utility::internal::step_function_multiply; - using utility::internal::step_function_plus; - using utility::internal::step_function_power2_ladder; - - int auto_value(){ - return 100; - } - bool operator ==(thread_range_step const& left, utility::internal::thread_range_step const& right){ - return (left.step_function == right.step_function) - && (left.step_function_argument == right.step_function_argument) - ; - } - - bool operator ==(thread_number_range const& left, thread_number_range const& right){ - return (left.auto_number_of_threads==right.auto_number_of_threads) - && (left.first == right.first) - && (left.last == right.last) - && (left.step == right.step) - ; - } - - void constructor_default_values(){ - thread_number_range r(auto_value); - const int default_num_threads = auto_value(); - ASSERT((r.first==1)&&(r.last==default_num_threads),""); - } - void validation(){ - try{ - thread_number_range range(auto_value,12,6); - Harness::suppress_unused_warning(range); - ASSERT(false,"exception was expected due to invalid range specified, but not thrown"); - } - catch(std::invalid_argument& e){ - ASSERT(e.what() == std::string("decreasing sequence not allowed"), wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - } - - thread_number_range thread_number_range_from_string(std::string const& string_to_parse){ - thread_number_range r(auto_value,0,0); - std::stringstream str(string_to_parse); str>>r; - return r; - } - static const char* thread_range_parse_failed = "error parsing thread range string"; - void post_process_single_value(){ - ASSERT(thread_number_range_from_string("auto") == - thread_number_range(auto_value,auto_value(),auto_value()) - ,thread_range_parse_failed - ); - } - void post_process_pair_value(){ - ASSERT(thread_number_range_from_string("1:auto") == - thread_number_range(auto_value,1,auto_value()) - ,thread_range_parse_failed - ); - - ASSERT(thread_number_range_from_string("auto:auto") == - thread_number_range(auto_value,auto_value(),auto_value()) - ,thread_range_parse_failed - ); - } - - void post_process_troika_value_with_plus_step(){ - ASSERT(thread_number_range_from_string("1:auto:+2") == - thread_number_range(auto_value,1,auto_value(),thread_range_step(step_function_plus,2)) - ,thread_range_parse_failed - ); - } - - void post_process_troika_value_with_multiply_step(){ - ASSERT(thread_number_range_from_string("1:auto:*2.6") == - thread_number_range(auto_value,1,auto_value(),thread_range_step(step_function_multiply,2.6)) - ,thread_range_parse_failed - ); - } - - void post_process_troika_value_with_ladder_step(){ - try{ - thread_number_range range = thread_number_range_from_string("1:16:#3"); - Harness::suppress_unused_warning(range); - ASSERT(false,"exception was expected due to invalid range specified, but not thrown"); - } - catch(std::invalid_argument& e){ - ASSERT(e.what() == std::string("the argument of # should be a power of 2"), wrong_exception_description); - } - catch(...){ASSERT(false,wrong_exception);} - - ASSERT(thread_number_range_from_string("1:32:#4") == - 
thread_number_range(auto_value,1,32,thread_range_step(step_function_power2_ladder,4)) - ,thread_range_parse_failed - ); - } - - void test_print_content(){ - std::stringstream str; - str<<thread_number_range(auto_value,1,8,thread_range_step(step_function_multiply,2)); - ASSERT(str.str() == "1:8:*2","Unexpected string"); - } - } -} - -void run_implementation_unit_tests(){ - using namespace implementation_unit_tests; - argument_dest_test_suite::test_type_impl_parse_and_store_simple_parse(); - argument_dest_test_suite::test_default_value_of_is_matched(); - - cli_argument_pack_suite::test_copy_assign(); -} -void run_high_level_api_tests(){ - using namespace high_level_api_tests; - - test_parse_basic(); - test_named_flags_success(); - test_named_flags_failure(); - test_named_flags_failure_and_other_arg(); - test_named_flags_and_other_arg(); - test_flags_only_others_default(); - test_named_flags_and_other_arg_different_order(); - - usage_string::test_one_arg(); - usage_string::test_named_and_postional_args(); - usage_string::test_bool_flag(); - - parameters_validation_test_suite::test_validation_function_called(); - parameters_validation_test_suite::test_validation_failed(); - - name_value_syntax::test_basic(); - name_value_syntax::test_relaxed_order(); - - number_of_argument_value::test_only_single_values_allowed(); - - name_positional_syntax::test_basic(); - name_positional_syntax::test_positional_args_explicitly_named(); - - error_handling::test_wrong_input(); - error_handling::test_duplicate_arg_names(); - error_handling::test_duplicate_positional_arg_names(); - - thread_range_tests::constructor_default_values(); - thread_range_tests::validation(); - thread_range_tests::post_process_single_value(); - thread_range_tests::post_process_pair_value(); - thread_range_tests::post_process_troika_value_with_plus_step(); - thread_range_tests::post_process_troika_value_with_multiply_step(); - thread_range_tests::post_process_troika_value_with_ladder_step(); - thread_range_tests::test_print_content(); -} - -#include "harness.h" -int TestMain(){ - Harness::suppress_unused_warning(utility::thread_number_range_desc); - try{ - run_implementation_unit_tests(); - run_high_level_api_tests(); - }catch(std::exception& e){ - //something went wrong , dump any possible details - std::stringstream str; str<< "run time error: " << e.what()<<std::endl; - ASSERT(false,str.str().c_str()); - } - return Harness::Done; - -} diff --git a/src/tbb/src/test/test_fast_random.cpp b/src/tbb/src/test/test_fast_random.cpp deleted file mode 100644 index 319173c58..000000000 --- a/src/tbb/src/test/test_fast_random.cpp +++ /dev/null @@ -1,202 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/** - The test checks that for different ranges of random numbers (from 0 to - [MinThread, MaxThread]) generated with different seeds the probability - of each number in the range deviates from the ideal random distribution - by no more than AcceptableDeviation percent. -**/ - -#define HARNESS_DEFAULT_MIN_THREADS 2 -#define HARNESS_DEFAULT_MAX_THREADS 32 - -#include <algorithm> // include it first to avoid error on define below -#define private public -#include "harness_inject_scheduler.h" -#undef private - -#define TEST_TOTAL_SEQUENCE 0 - -#include "harness.h" -#include "tbb/atomic.h" - -//! Coefficient defining tolerable deviation from ideal random distribution -const double AcceptableDeviation = 2.1; -//! Tolerable probability of failure to achieve tolerable distribution -const double AcceptableProbabilityOfOutliers = 1e-5; -//! Coefficient defining the length of random numbers series used to estimate the distribution -/** Number of random values generated per each range element. I.e. the larger is - the range, the longer is the series of random values. **/ -const uintptr_t SeriesBaseLen = 100; -//! Number of random numbers series to generate -const uintptr_t NumSeries = 100; -//! Number of random number generation series with different seeds -const uintptr_t NumSeeds = 100; - -tbb::atomic<uintptr_t> NumHighOutliers; -tbb::atomic<uintptr_t> NumLowOutliers; - -inline void CheckProbability ( double probability, double expectedProbability, int index, int numIndices, void* seed ) { - double lowerBound = expectedProbability / AcceptableDeviation, - upperBound = expectedProbability * AcceptableDeviation; - if ( probability < lowerBound ) { - if ( !NumLowOutliers ) - REMARK( "Warning: Probability %.3f of hitting index %d among %d elements is out of acceptable range (%.3f - %.3f) for seed %p\n", - probability, index, numIndices, lowerBound, upperBound, seed ); - ++NumLowOutliers; - } - else if ( probability > upperBound ) { - if ( !NumHighOutliers ) - REMARK( "Warning: Probability %.3f of hitting index %d among %d elements is out of acceptable range (%.3f - %.3f) for seed %p\n", - probability, index, numIndices, lowerBound, upperBound, seed ); - ++NumHighOutliers; - } -} - -struct CheckDistributionBody { - void operator() ( int id ) const { - uintptr_t randomRange = id + MinThread; - uintptr_t *curHits = new uintptr_t[randomRange] -#if TEST_TOTAL_SEQUENCE - , *totalHits = new uintptr_t[randomRange] -#endif - ; - double expectedProbability = 1./randomRange; - // Loop through different seeds - for ( uintptr_t i = 0; i < NumSeeds; ++i ) { - // Seed value mimics the one used by the TBB task scheduler - void* seed = (char*)&curHits + i * 16; - tbb::internal::FastRandom random( seed ); - // According to Section 3.2.1.2 of Volume 2 of Knuth's Art of Computer Programming - // the following conditions must be hold for m=2^32: - ASSERT((random.c&1)!=0, "c is relatively prime to m"); - ASSERT((random.a-1)%4==0, "a-1 is a multiple of p, for every prime p dividing m." 
- " And a-1 is a multiple of 4, if m is a multiple of 4"); - - memset( curHits, 0, randomRange * sizeof(uintptr_t) ); -#if TEST_TOTAL_SEQUENCE - memset( totalHits, 0, randomRange * sizeof(uintptr_t) ); -#endif - const uintptr_t seriesLen = randomRange * SeriesBaseLen, - experimentLen = NumSeries * seriesLen; - uintptr_t *curSeries = new uintptr_t[seriesLen], // circular buffer - randsGenerated = 0; - // Initialize statistics - while ( randsGenerated < seriesLen ) { - uintptr_t idx = random.get() % randomRange; - ++curHits[idx]; -#if TEST_TOTAL_SEQUENCE - ++totalHits[idx]; -#endif - curSeries[randsGenerated++] = idx; - } - while ( randsGenerated < experimentLen ) { - for ( uintptr_t j = 0; j < randomRange; ++j ) { - CheckProbability( double(curHits[j])/seriesLen, expectedProbability, j, randomRange, seed ); -#if TEST_TOTAL_SEQUENCE - CheckProbability( double(totalHits[j])/randsGenerated, expectedProbability, j, randomRange, seed ); -#endif - } - --curHits[curSeries[randsGenerated % seriesLen]]; - int idx = random.get() % randomRange; - ++curHits[idx]; -#if TEST_TOTAL_SEQUENCE - ++totalHits[idx]; -#endif - curSeries[randsGenerated++ % seriesLen] = idx; - } - delete [] curSeries; - } - delete [] curHits; -#if TEST_TOTAL_SEQUENCE - delete [] totalHits; -#endif - } -}; - -struct rng { - tbb::internal::FastRandom my_fast_random; - rng (unsigned seed):my_fast_random(seed) {} - unsigned short operator()(){return my_fast_random.get();} -}; - -template <std::size_t seriesLen > -struct SingleCheck{ - bool operator()(unsigned seed)const{ - std::size_t series1[seriesLen]={0}; - std::size_t series2[seriesLen]={0}; - std::generate(series1,series1+seriesLen,rng(seed)); - std::generate(series2,series2+seriesLen,rng(seed)); - return std::equal(series1,series1+seriesLen,series2); - } -}; - -template <std::size_t seriesLen ,size_t seedsNum> -struct CheckReproducibilityBody:NoAssign{ - unsigned short seeds[seedsNum]; - const std::size_t grainSize; - CheckReproducibilityBody(std::size_t GrainSize): grainSize(GrainSize){ - //first generate seeds to check on, and make sure that sequence is reproducible - ASSERT(SingleCheck<seedsNum>()(0),"Series generated by FastRandom must be reproducible"); - std::generate(seeds,seeds+seedsNum,rng(0)); - } - - void operator()(int id)const{ - for (size_t i=id*grainSize; (i<seedsNum)&&(i< ((id+1)*grainSize));++i ){ - ASSERT(SingleCheck<seriesLen>()(i),"Series generated by FastRandom must be reproducible"); - } - } - -}; -#include "tbb/tbb_thread.h" - -int TestMain () { - ASSERT( AcceptableDeviation < 100, NULL ); - MinThread = max(MinThread, 2); - MaxThread = max(MinThread, MaxThread); - double NumChecks = double(NumSeeds) * (MaxThread - MinThread + 1) * (MaxThread + MinThread) / 2.0 * (SeriesBaseLen * NumSeries - SeriesBaseLen); - REMARK( "Number of distribution quality checks %g\n", NumChecks ); - NumLowOutliers = NumHighOutliers = 0; - // Parallelism is used in this test only to speed up the long serial checks - // Essentially it is a loop over random number ranges - // Ideally tbb::parallel_for could be used to parallelize the outermost loop - // in CheckDistributionBody, but it is not used to avoid unit test contamination. 
- int P = tbb::tbb_thread::hardware_concurrency(); - enum {reproducibilitySeedsToTest=1000}; - enum {reproducibilitySeriesLen=100}; - CheckReproducibilityBody<reproducibilitySeriesLen,reproducibilitySeedsToTest> CheckReproducibility(reproducibilitySeedsToTest/MaxThread); - while ( MinThread <= MaxThread ) { - int ThreadsToRun = min(P, MaxThread - MinThread + 1); - REMARK("Checking random range [%d;%d)\n", MinThread, MinThread+ThreadsToRun); - NativeParallelFor( ThreadsToRun, CheckDistributionBody() ); - NativeParallelFor( ThreadsToRun, CheckReproducibility ); - MinThread += P; - } - double observedProbabilityOfOutliers = (NumLowOutliers + NumHighOutliers) / NumChecks; - if ( observedProbabilityOfOutliers > AcceptableProbabilityOfOutliers ) { - if ( NumLowOutliers ) - REPORT( "Warning: %d cases of too low probability of a given number detected\n", (int)NumLowOutliers ); - if ( NumHighOutliers ) - REPORT( "Warning: %d cases of too high probability of a given number detected\n", (int)NumHighOutliers ); - ASSERT( observedProbabilityOfOutliers <= AcceptableProbabilityOfOutliers, NULL ); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_flow_graph.cpp b/src/tbb/src/test/test_flow_graph.cpp deleted file mode 100644 index f71d6b809..000000000 --- a/src/tbb/src/test/test_flow_graph.cpp +++ /dev/null @@ -1,223 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "harness_graph.h" -#include "harness_barrier.h" -#include "tbb/task_scheduler_init.h" - -const int T = 4; -const int W = 4; - -struct decrement_wait : NoAssign { - - tbb::flow::graph * const my_graph; - bool * const my_done_flag; - - decrement_wait( tbb::flow::graph &h, bool *done_flag ) : my_graph(&h), my_done_flag(done_flag) {} - - void operator()(int i) const { - Harness::Sleep(10*i); - my_done_flag[i] = true; - my_graph->decrement_wait_count(); - } -}; - -static void test_wait_count() { - tbb::flow::graph h; - for (int i = 0; i < T; ++i ) { - bool done_flag[W]; - for (int j = 0; j < W; ++j ) { - for ( int w = 0; w < W; ++w ) done_flag[w] = false; - for ( int w = 0; w < j; ++w ) h.increment_wait_count(); - - NativeParallelFor( j, decrement_wait(h, done_flag) ); - h.wait_for_all(); - for ( int w = 0; w < W; ++w ) { - if ( w < j ) ASSERT( done_flag[w] == true, NULL ); - else ASSERT( done_flag[w] == false, NULL ); - } - } - } -} - -const int F = 100; - -#if __TBB_LAMBDAS_PRESENT -bool lambda_flag[F]; -#endif -bool functor_flag[F]; - -struct set_functor { - int my_i; - set_functor( int i ) : my_i(i) {} - void operator()() { functor_flag[my_i] = true; } -}; - -struct return_functor { - int my_i; - return_functor( int i ) : my_i(i) {} - int operator()() { return my_i; } -}; - -static void test_run() { - tbb::flow::graph h; - for (int i = 0; i < T; ++i ) { - - // Create receivers and flag arrays - #if __TBB_LAMBDAS_PRESENT - harness_mapped_receiver<int> lambda_r; - lambda_r.initialize_map( F, 1 ); - #endif - harness_mapped_receiver<int> functor_r; - functor_r.initialize_map( F, 1 ); - - // Initialize flag arrays - for (int j = 0; j < F; ++j ) { - #if __TBB_LAMBDAS_PRESENT - lambda_flag[j] = false; - #endif - functor_flag[j] = false; - } - - for ( int j = 0; j < F; ++j ) { - #if __TBB_LAMBDAS_PRESENT - h.run( [=]() { lambda_flag[j] = true; } ); - h.run( lambda_r, [=]() { return j; } ); - #endif - h.run( set_functor(j) ); - h.run( functor_r, return_functor(j) ); - } - h.wait_for_all(); - for ( int j = 0; j < F; ++j ) { - #if __TBB_LAMBDAS_PRESENT - ASSERT( lambda_flag[i] == true, NULL ); - #endif - ASSERT( functor_flag[i] == true, NULL ); - } - #if __TBB_LAMBDAS_PRESENT - lambda_r.validate(); - #endif - functor_r.validate(); - } -} - -// Encapsulate object we want to store in vector (because contained type must have -// copy constructor and assignment operator -class my_int_buffer { - tbb::flow::buffer_node<int> *b; - tbb::flow::graph& my_graph; -public: - my_int_buffer(tbb::flow::graph &g) : my_graph(g) { b = new tbb::flow::buffer_node<int>(my_graph); } - my_int_buffer(const my_int_buffer& other) : my_graph(other.my_graph) { - b = new tbb::flow::buffer_node<int>(my_graph); - } - ~my_int_buffer() { delete b; } - my_int_buffer& operator=(const my_int_buffer& /*other*/) { - return *this; - } -}; - -// test the graph iterator, delete nodes from graph, test again -void test_iterator() { - tbb::flow::graph g; - my_int_buffer a_buffer(g); - my_int_buffer b_buffer(g); - my_int_buffer c_buffer(g); - my_int_buffer *d_buffer = new my_int_buffer(g); - my_int_buffer e_buffer(g); - std::vector< my_int_buffer > my_buffer_vector(10, c_buffer); - - int count = 0; - for (tbb::flow::graph::iterator it = g.begin(); it != g.end(); ++it) { - count++; - } - ASSERT(count==15, "error in iterator count"); - - delete d_buffer; - - count = 0; - for (tbb::flow::graph::iterator it = g.begin(); it != g.end(); ++it) { - count++; - } - ASSERT(count==14, "error in iterator count"); - - my_buffer_vector.clear(); 
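test_wait_count above drives graph::increment_wait_count()/decrement_wait_count(), which let work running outside the graph keep wait_for_all() from returning. Those members do not survive into oneTBB; what follows is an editorial sketch of the same token-counting idea in standard C++ only:

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    class WaitCount {
        std::mutex m;
        std::condition_variable cv;
        int count = 0;
    public:
        void increment() { std::lock_guard<std::mutex> l(m); ++count; }
        void decrement() {
            std::lock_guard<std::mutex> l(m);
            if (--count == 0) cv.notify_all();
        }
        void wait_for_all() {
            std::unique_lock<std::mutex> l(m);
            cv.wait(l, [this] { return count == 0; });
        }
    };

    int main() {
        WaitCount wc;
        std::vector<std::thread> pool;
        for (int i = 0; i < 4; ++i) {
            wc.increment();                    // register before spawning
            pool.emplace_back([&wc] { wc.decrement(); });
        }
        wc.wait_for_all();                     // returns once all 4 decrement
        for (auto &t : pool) t.join();
        return 0;
    }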
- - count = 0; - for (tbb::flow::graph::iterator it = g.begin(); it != g.end(); ++it) { - count++; - } - ASSERT(count==4, "error in iterator count"); -} - -class AddRemoveBody : NoAssign { - tbb::flow::graph& g; - int nThreads; - Harness::SpinBarrier &barrier; -public: - AddRemoveBody(int nthr, Harness::SpinBarrier &barrier_, tbb::flow::graph& _g) : - g(_g), nThreads(nthr), barrier(barrier_) - {} - void operator()(const int /*threadID*/) const { - my_int_buffer b(g); - { - std::vector<my_int_buffer> my_buffer_vector(100, b); - barrier.wait(); // wait until all nodes are created - // now test that the proper number of nodes were created - int count = 0; - for (tbb::flow::graph::iterator it = g.begin(); it != g.end(); ++it) { - count++; - } - ASSERT(count==101*nThreads, "error in iterator count"); - barrier.wait(); // wait until all threads are done counting - } // all nodes but for the initial node on this thread are deleted - barrier.wait(); // wait until all threads have deleted all nodes in their vectors - // now test that all the nodes were deleted except for the initial node - int count = 0; - for (tbb::flow::graph::iterator it = g.begin(); it != g.end(); ++it) { - count++; - } - ASSERT(count==nThreads, "error in iterator count"); - barrier.wait(); // wait until all threads are done counting - } // initial node gets deleted -}; - -void test_parallel(int nThreads) { - tbb::flow::graph g; - Harness::SpinBarrier barrier(nThreads); - AddRemoveBody body(nThreads, barrier, g); - NativeParallelFor(nThreads, body); -} - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - test_wait_count(); - test_run(); - test_iterator(); - test_parallel(p); - } - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_flow_graph_whitebox.cpp b/src/tbb/src/test/test_flow_graph_whitebox.cpp deleted file mode 100644 index c9dbb33c8..000000000 --- a/src/tbb/src/test/test_flow_graph_whitebox.cpp +++ /dev/null @@ -1,755 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#define HARNESS_DEFAULT_MIN_THREADS 3 -#define HARNESS_DEFAULT_MAX_THREADS 4 - -#if _MSC_VER - #pragma warning (disable: 4503) // Suppress "decorated name length exceeded, name was truncated" warning - #if !TBB_USE_EXCEPTIONS - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (disable: 4530) - #endif - #if _MSC_VER==1700 && !defined(__INTEL_COMPILER) - // Suppress "unreachable code" warning by VC++ 17.0 (VS 2012) - #pragma warning (disable: 4702) - #endif -#endif - -#include "harness.h" - -#define TBB_PREVIEW_GRAPH_NODES 1 - -// need these to get proper external names for private methods in library. -#include "tbb/spin_mutex.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/task.h" - -#define WAIT_MAX 1000000 - -#define private public -#define protected public -#include "tbb/flow_graph.h" -#undef protected -#undef private -#include "tbb/task_scheduler_init.h" -#include "harness_graph.h" - -#define BACKOFF_WAIT(ex,msg) \ -{ \ - int wait_cnt = 0; \ - tbb::internal::atomic_backoff backoff; \ - do { \ - backoff.pause(); \ - ++wait_cnt; \ - } \ - while( (ex) && (wait_cnt < WAIT_MAX)); \ - ASSERT(wait_cnt < WAIT_MAX, msg); \ -} - -template<typename T> -struct receiverBody { - tbb::flow::continue_msg operator()(const T &/*in*/) { - return tbb::flow::continue_msg(); - } -}; - -// split_nodes cannot have predecessors -// they do not reject messages and always forward. -// they reject edge reversals from successors. -void TestSplitNode() { - typedef tbb::flow::split_node<tbb::flow::tuple<int> > snode_type; - tbb::flow::graph g; - snode_type snode(g); - tbb::flow::function_node<int> rcvr(g,tbb::flow::unlimited, receiverBody<int>()); - REMARK("Testing split_node\n"); - ASSERT(tbb::flow::output_port<0>(snode).my_successors.empty(), "Constructed split_node has successors"); - // tbb::flow::output_port<0>(snode) - tbb::flow::make_edge(tbb::flow::output_port<0>(snode), rcvr); - ASSERT(!(tbb::flow::output_port<0>(snode).my_successors.empty()), "after make_edge, split_node has no successor."); - snode.try_put(tbb::flow::tuple<int>(1)); - g.wait_for_all(); - g.reset(); - ASSERT(!(tbb::flow::output_port<0>(snode).my_successors.empty()), "after reset(), split_node has no successor."); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_extract); - ASSERT(tbb::flow::output_port<0>(snode).my_successors.empty(), "after reset(rf_extract), split_node has a successor."); -#endif -} - -// buffering nodes cannot have predecessors -// they do not reject messages and always save or forward -// they allow edge reversals from successors -template< typename B > -void TestBufferingNode(const char * name) { - tbb::flow::graph g; - B bnode(g); - tbb::flow::function_node<int,int,tbb::flow::rejecting> fnode(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0)); - REMARK("Testing %s:", name); - for(int icnt = 0; icnt < 2; icnt++) { - bool reverse_edge = (icnt & 0x2) != 0; - serial_fn_state0 = 0; // reset to waiting state. 
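The BACKOFF_WAIT macro defined above is the timing backbone of this whitebox test: spin (with tbb::internal::atomic_backoff pauses) until a condition clears or WAIT_MAX iterations elapse, then assert that it did not time out. A portable equivalent, with std::this_thread::yield() standing in for the internal pause:

    #include <atomic>
    #include <cassert>
    #include <thread>

    int main() {
        std::atomic<bool> done{false};
        std::thread t([&] { done.store(true, std::memory_order_release); });

        const int WAIT_MAX = 1000000;
        int wait_cnt = 0;
        while (!done.load(std::memory_order_acquire) && wait_cnt < WAIT_MAX) {
            std::this_thread::yield();   // stand-in for atomic_backoff::pause()
            ++wait_cnt;
        }
        assert(wait_cnt < WAIT_MAX && "timed out waiting");
        t.join();
        return 0;
    }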
- REMARK(" make_edge"); - tbb::flow::make_edge(bnode, fnode); - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after make_edge"); - REMARK(" try_put"); - bnode.try_put(1); // will forward to the fnode - BACKOFF_WAIT(serial_fn_state0 == 0, "Timed out waiting for first put"); - if(reverse_edge) { - REMARK(" try_put2"); - bnode.try_put(2); // will reverse the edge - // cannot do a wait_for_all here; the function_node is still executing - BACKOFF_WAIT(!bnode.my_successors.empty(), "Timed out waiting after 2nd put"); - // at this point the only task running is the one for the function_node. - ASSERT(bnode.my_successors.empty(), "successor not removed"); - } - else { - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after forwarding message"); - } - serial_fn_state0 = 0; // release the function_node. - if(reverse_edge) { - // have to do a second release because the function_node will get the 2nd item - BACKOFF_WAIT( serial_fn_state0 == 0, "Timed out waiting after 2nd put"); - serial_fn_state0 = 0; // release the function_node. - } - g.wait_for_all(); - REMARK(" remove_edge"); - tbb::flow::remove_edge(bnode, fnode); - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after remove_edge"); - } - tbb::flow::join_node<tbb::flow::tuple<int,int>,tbb::flow::reserving> jnode(g); - tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode)); // will spawn a task - g.wait_for_all(); - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after attaching to join"); - REMARK(" reverse"); - bnode.try_put(1); // the edge should reverse - g.wait_for_all(); - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving"); - REMARK(" reset()"); - g.wait_for_all(); - g.reset(); // should be in forward direction again - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after reset()"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - REMARK(" remove_edge"); - g.reset(tbb::flow::rf_extract); - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reset(rf_extract)"); - tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode)); // add edge again - // reverse edge by adding to buffer. 
- bnode.try_put(1); // the edge should reverse - g.wait_for_all(); - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving"); - REMARK(" remove_edge(reversed)"); - g.reset(tbb::flow::rf_extract); - ASSERT(bnode.my_successors.empty(), "buffering node has no successor after reset()"); - ASSERT(tbb::flow::input_port<0>(jnode).my_predecessors.empty(), "predecessor not reset"); -#endif - REMARK(" done\n"); - g.wait_for_all(); -} - -// continue_node has only predecessor count -// they do not have predecessors, only the counts -// successor edges cannot be reversed -void TestContinueNode() { - tbb::flow::graph g; - tbb::flow::function_node<int> fnode0(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0)); - tbb::flow::continue_node<int> cnode(g, 1, serial_continue_body<int>(serial_continue_state0)); - tbb::flow::function_node<int> fnode1(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state1)); - tbb::flow::make_edge(fnode0, cnode); - tbb::flow::make_edge(cnode, fnode1); - REMARK("Testing continue_node:"); - for( int icnt = 0; icnt < 2; ++icnt ) { - REMARK( " initial%d", icnt); - ASSERT(cnode.my_predecessor_count == 2, "predecessor addition didn't increment count"); - ASSERT(!cnode.successors().empty(), "successors empty though we added one"); - ASSERT(cnode.my_current_count == 0, "state of continue_receiver incorrect"); - serial_continue_state0 = 0; - serial_fn_state0 = 0; - serial_fn_state1 = 0; - - fnode0.try_put(1); // start the first function node. - BACKOFF_WAIT(!serial_fn_state0, "Timed out waiting for function_node to start"); - // Now the body of function_node 0 is executing. - serial_fn_state0 = 0; // release the node - // wait for node to count the message (or for the node body to execute, which would be wrong) - BACKOFF_WAIT(serial_continue_state0 == 0 && cnode.my_current_count == 0, "Timed out waiting for continue_state0 to change"); - ASSERT(serial_continue_state0 == 0, "Improperly released continue_node"); - ASSERT(cnode.my_current_count == 1, "state of continue_receiver incorrect"); - if(icnt == 0) { // first time through, let the continue_node fire - REMARK(" firing"); - fnode0.try_put(1); // second message - BACKOFF_WAIT(serial_fn_state0 == 0, "timeout waiting for continue_body to execute"); - // Now the body of function_node 0 is executing. - serial_fn_state0 = 0; // release the node - - BACKOFF_WAIT(!serial_continue_state0,"continue_node didn't start"); // now we wait for the continue_node. - ASSERT(cnode.my_current_count == 0, " my_current_count not reset before body of continue_node started"); - serial_continue_state0 = 0; // release the continue_node - BACKOFF_WAIT(!serial_fn_state1,"successor function_node didn't start"); // wait for the successor function_node to enter body - serial_fn_state1 = 0; // release successor function_node. - g.wait_for_all(); - - // try a try_get() - { - int i; - ASSERT(!cnode.try_get(i), "try_get not rejected"); - } - - REMARK(" reset"); - ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (before reset)"); - ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (before reset)"); - g.reset(); // should still be the same - ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (after reset)" ); - ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (after reset)"); - } - else { // we're going to see if the rf_extract resets things. 
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.wait_for_all(); - REMARK(" reset(rf_extract)"); - ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (before reset)"); - ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (before reset)"); - g.reset(tbb::flow::rf_extract); // should be in forward direction again - ASSERT(cnode.my_current_count == 0, "state of continue_receiver incorrect after reset(rf_extract)"); - ASSERT(cnode.my_successors.empty(), "buffering node has a successor after reset(rf_extract)"); - ASSERT(cnode.my_predecessor_count == cnode.my_initial_predecessor_count, "predecessor count not reset"); -#endif - } - } - - REMARK(" done\n"); - -} - -// function_node has predecessors and successors -// try_get() rejects -// successor edges cannot be reversed -// predecessors will reverse (only rejecting will reverse) -void TestFunctionNode() { - tbb::flow::graph g; - tbb::flow::queue_node<int> qnode0(g); - tbb::flow::function_node<int,int, tbb::flow::rejecting > fnode0(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0)); - // queueing function node - tbb::flow::function_node<int,int> fnode1(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0)); - - tbb::flow::queue_node<int> qnode1(g); - - tbb::flow::make_edge(fnode0, qnode1); - tbb::flow::make_edge(qnode0, fnode0); - - serial_fn_state0 = 2; // just let it go - // see if the darned thing will work.... - qnode0.try_put(1); - g.wait_for_all(); - int ii; - ASSERT(qnode1.try_get(ii) && ii == 1, "output not passed"); - tbb::flow::remove_edge(qnode0, fnode0); - tbb::flow::remove_edge(fnode0, qnode1); - - tbb::flow::make_edge(fnode1, qnode1); - tbb::flow::make_edge(qnode0, fnode1); - - serial_fn_state0 = 2; // just let it go - // see if the darned thing will work.... - qnode0.try_put(1); - g.wait_for_all(); - ASSERT(qnode1.try_get(ii) && ii == 1, "output not passed"); - tbb::flow::remove_edge(qnode0, fnode1); - tbb::flow::remove_edge(fnode1, qnode1); - - // rejecting - tbb::flow::make_edge(fnode0, qnode1); - REMARK("Testing rejecting function_node:"); - ASSERT(!fnode0.my_queue, "node should have no queue"); - ASSERT(!fnode0.my_successors.empty(), "successor edge not added"); - REMARK(" add_pred"); - ASSERT(fnode0.register_predecessor(qnode0), "Cannot register as predecessor"); - ASSERT(!fnode0.my_predecessors.empty(), "Missing predecessor"); - REMARK(" reset"); - g.wait_for_all(); - g.reset(); // should reverse the edge from the input to the function node. - ASSERT(!qnode0.my_successors.empty(), "empty successors after reset()"); - ASSERT(fnode0.my_predecessors.empty(), "predecessor not reversed"); - tbb::flow::remove_edge(qnode0, fnode0); - tbb::flow::remove_edge(fnode0, qnode1); - REMARK("\n"); - - // queueing - tbb::flow::make_edge(fnode1, qnode1); - REMARK("Testing queueing function_node:"); - ASSERT(fnode1.my_queue, "node should have no queue"); - ASSERT(!fnode1.my_successors.empty(), "successor edge not added"); - REMARK(" add_pred"); - ASSERT(fnode1.register_predecessor(qnode0), "Cannot register as predecessor"); - ASSERT(!fnode1.my_predecessors.empty(), "Missing predecessor"); - REMARK(" reset"); - g.wait_for_all(); - g.reset(); // should reverse the edge from the input to the function node. 
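TestFunctionNode contrasts the two concurrency policies: a rejecting serial function_node refuses try_put() while its body runs (so upstream edges reverse), while the default queueing node accepts everything into an internal queue. A hedged usage sketch, assuming a TBB/oneTBB installation, showing try_put() reporting acceptance via its bool return:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        using namespace tbb::flow;
        graph g;
        auto body = [](int v) { return v * 10; };
        function_node<int, int, rejecting> rej(g, serial, body);
        function_node<int, int>            que(g, serial, body);  // queueing

        // The rejecting node may return false while its body is busy;
        // the queueing node buffers the message and returns true.
        std::printf("rejecting accepted: %d\n", (int)rej.try_put(1));
        std::printf("queueing  accepted: %d\n", (int)que.try_put(1));
        g.wait_for_all();
        return 0;
    }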
- ASSERT(!qnode0.my_successors.empty(), "empty successors after reset()"); - ASSERT(fnode1.my_predecessors.empty(), "predecessor not reversed"); - tbb::flow::remove_edge(qnode0, fnode1); - tbb::flow::remove_edge(fnode1, qnode1); - REMARK("\n"); - - serial_fn_state0 = 0; // make the function_node wait - tbb::flow::make_edge(qnode0, fnode0); - REMARK(" start_func"); - qnode0.try_put(1); - BACKOFF_WAIT(serial_fn_state0 == 0, "Timed out waiting after 1st put"); - // now if we put an item to the queues the edges to the function_node will reverse. - REMARK(" put_node(2)"); - qnode0.try_put(2); // start queue node. - // wait for the edges to reverse - BACKOFF_WAIT(fnode0.my_predecessors.empty(), "Timed out waiting"); - ASSERT(!fnode0.my_predecessors.empty(), "function_node edge not reversed"); - g.my_root_task->cancel_group_execution(); - // release the function_node - serial_fn_state0 = 2; - g.wait_for_all(); - ASSERT(!fnode0.my_predecessors.empty() && qnode0.my_successors.empty(), "function_node edge not reversed"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_extract); - ASSERT(fnode0.my_predecessors.empty() && qnode0.my_successors.empty(), "function_node edge not removed"); - ASSERT(fnode0.my_successors.empty(), "successor to fnode not removed"); -#endif - REMARK(" done\n"); -} - -template<typename TT> -class tag_func { - TT my_mult; -public: - tag_func(TT multiplier) : my_mult(multiplier) { } - void operator=( const tag_func& other){my_mult = other.my_mult;} - // operator() will return [0 .. Count) - tbb::flow::tag_value operator()( TT v) { - tbb::flow::tag_value t = tbb::flow::tag_value(v / my_mult); - return t; - } -}; - -template<tbb::flow::graph_buffer_policy JNODE_TYPE> -void -TestSimpleSuccessorArc(const char *name) { - tbb::flow::graph g; - { - //tbb::flow::join_node<tbb::flow::tuple<int>, tbb::flow::queueing> qj(g); - REMARK("Join<%s> successor test ", name); - tbb::flow::join_node<tbb::flow::tuple<int>, JNODE_TYPE> qj(g); - tbb::flow::broadcast_node<tbb::flow::tuple<int> > bnode(g); - tbb::flow::make_edge(qj, bnode); - ASSERT(!qj.my_successors.empty(),"successor missing after linking"); - g.reset(); - ASSERT(!qj.my_successors.empty(),"successor missing after reset()"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_extract); - ASSERT(qj.my_successors.empty(), "successors not removed after reset(rf_extract)"); -#endif - } -} - -template<> -void -TestSimpleSuccessorArc<tbb::flow::tag_matching>(const char *name) { - tbb::flow::graph g; - { - REMARK("Join<%s> successor test ", name); - typedef tbb::flow::tuple<int,int> my_tuple; - tbb::flow::join_node<my_tuple, tbb::flow::tag_matching> qj(g, - tag_func<int>(1), - tag_func<int>(1) - ); - tbb::flow::broadcast_node<my_tuple > bnode(g); - tbb::flow::make_edge(qj, bnode); - ASSERT(!qj.my_successors.empty(),"successor missing after linking"); - g.reset(); - ASSERT(!qj.my_successors.empty(),"successor missing after reset()"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_extract); - ASSERT(qj.my_successors.empty(), "successors not removed after reset(rf_extract)"); -#endif - } -} - -void -TestJoinNode() { - tbb::flow::graph g; - - TestSimpleSuccessorArc<tbb::flow::queueing>("queueing"); - TestSimpleSuccessorArc<tbb::flow::reserving>("reserving"); - TestSimpleSuccessorArc<tbb::flow::tag_matching>("tag_matching"); - - // queueing and tagging join nodes have input queues, so the input ports do not reverse. 
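tag_func above maps each incoming value to a tag (the value divided by a multiplier); a tag_matching join_node emits a tuple once every input port has received a value whose tag functions agree. A hedged sketch against the classic-TBB API this file targets (oneTBB expresses the same idea as key_matching, and spellings such as tbb::flow::tuple may differ there):

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        using namespace tbb::flow;
        graph g;
        join_node<std::tuple<int, int>, tag_matching> j(g,
            [](int v) { return tag_value(v); },    // tag body for port 0
            [](int v) { return tag_value(v); });   // tag body for port 1
        function_node<std::tuple<int, int> > sink(g, unlimited,
            [](const std::tuple<int, int> &t) -> continue_msg {
                std::printf("matched %d with %d\n",
                            std::get<0>(t), std::get<1>(t));
                return continue_msg();
            });
        make_edge(j, sink);
        input_port<1>(j).try_put(7);   // waits for a tag-7 mate on port 0
        input_port<0>(j).try_put(7);   // tags match -> tuple emitted
        g.wait_for_all();
        return 0;
    }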
- REMARK(" reserving preds"); - { - tbb::flow::join_node<tbb::flow::tuple<int,int>, tbb::flow::reserving> rj(g); - tbb::flow::queue_node<int> q0(g); - tbb::flow::queue_node<int> q1(g); - tbb::flow::make_edge(q0,tbb::flow::input_port<0>(rj)); - tbb::flow::make_edge(q1,tbb::flow::input_port<1>(rj)); - q0.try_put(1); - g.wait_for_all(); // quiesce - ASSERT(!(tbb::flow::input_port<0>(rj).my_predecessors.empty()),"reversed port missing predecessor"); - ASSERT((tbb::flow::input_port<1>(rj).my_predecessors.empty()),"non-reversed port has pred"); - g.reset(); - ASSERT((tbb::flow::input_port<0>(rj).my_predecessors.empty()),"reversed port has pred after reset()"); - ASSERT((tbb::flow::input_port<1>(rj).my_predecessors.empty()),"non-reversed port has pred after reset()"); - q1.try_put(2); - g.wait_for_all(); // quiesce - ASSERT(!(tbb::flow::input_port<1>(rj).my_predecessors.empty()),"reversed port missing predecessor"); - ASSERT((tbb::flow::input_port<0>(rj).my_predecessors.empty()),"non-reversed port has pred"); - g.reset(); - ASSERT((tbb::flow::input_port<1>(rj).my_predecessors.empty()),"reversed port has pred after reset()"); - ASSERT((tbb::flow::input_port<0>(rj).my_predecessors.empty()),"non-reversed port has pred after reset()"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - // should reset predecessors just as regular reset. - q1.try_put(3); - g.wait_for_all(); // quiesce - ASSERT(!(tbb::flow::input_port<1>(rj).my_predecessors.empty()),"reversed port missing predecessor"); - ASSERT((tbb::flow::input_port<0>(rj).my_predecessors.empty()),"non-reversed port has pred"); - g.reset(tbb::flow::rf_extract); - ASSERT((tbb::flow::input_port<1>(rj).my_predecessors.empty()),"reversed port has pred after reset()"); - ASSERT((tbb::flow::input_port<0>(rj).my_predecessors.empty()),"non-reversed port has pred after reset()"); - ASSERT(q0.my_successors.empty(), "edge not removed by reset(rf_extract)"); - ASSERT(q1.my_successors.empty(), "edge not removed by reset(rf_extract)"); -#endif - } - REMARK(" done\n"); -} - -void -TestLimiterNode() { - int out_int; - tbb::flow::graph g; - tbb::flow::limiter_node<int> ln(g,1); - REMARK("Testing limiter_node: preds and succs"); - ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count"); - ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count"); - ASSERT(ln.decrement.my_current_count == 0, "error in current count"); - ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors"); - ASSERT(ln.my_threshold == 1, "error in my_threshold"); - tbb::flow::queue_node<int> inq(g); - tbb::flow::queue_node<int> outq(g); - tbb::flow::broadcast_node<tbb::flow::continue_msg> bn(g); - - tbb::flow::make_edge(inq,ln); - tbb::flow::make_edge(ln,outq); - tbb::flow::make_edge(bn,ln.decrement); - - g.wait_for_all(); - ASSERT(!(ln.my_successors.empty()),"successors empty after make_edge"); - ASSERT(ln.my_predecessors.empty(), "input edge reversed"); - inq.try_put(1); - g.wait_for_all(); - ASSERT(outq.try_get(out_int) && out_int == 1, "limiter_node didn't pass first value"); - ASSERT(ln.my_predecessors.empty(), "input edge reversed"); - inq.try_put(2); - g.wait_for_all(); - ASSERT(!outq.try_get(out_int), "limiter_node incorrectly passed second input"); - ASSERT(!ln.my_predecessors.empty(), "input edge to limiter_node not reversed"); - bn.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(outq.try_get(out_int) && out_int == 2, "limiter_node didn't pass second value"); - g.wait_for_all(); - ASSERT(!ln.my_predecessors.empty(), 
"input edge was reversed(after try_get())"); - g.reset(); - ASSERT(ln.my_predecessors.empty(), "input edge not reset"); - inq.try_put(3); - g.wait_for_all(); - ASSERT(outq.try_get(out_int) && out_int == 3, "limiter_node didn't pass third value"); - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - REMARK(" rf_extract"); - // currently the limiter_node will not pass another message - g.reset(tbb::flow::rf_extract); - ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count"); - ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count"); - ASSERT(ln.decrement.my_current_count == 0, "error in current count"); - ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors"); - ASSERT(ln.my_threshold == 1, "error in my_threshold"); - ASSERT(ln.my_predecessors.empty(), "preds not reset(rf_extract)"); - ASSERT(ln.my_successors.empty(), "preds not reset(rf_extract)"); - ASSERT(inq.my_successors.empty(), "Arc not removed on reset(rf_extract)"); - ASSERT(inq.my_successors.empty(), "Arc not removed on reset(rf_extract)"); - ASSERT(bn.my_successors.empty(), "control edge not removed on reset(rf_extract)"); - tbb::flow::make_edge(inq,ln); - tbb::flow::make_edge(ln,outq); - inq.try_put(4); - inq.try_put(5); - g.wait_for_all(); - ASSERT(outq.try_get(out_int),"missing output after reset(rf_extract)"); - ASSERT(out_int == 4, "input incorrect (4)"); - bn.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(!outq.try_get(out_int),"second output incorrectly passed (rf_extract)"); -#endif - REMARK(" done\n"); -} - -template<typename MF_TYPE> -struct mf_body { - tbb::atomic<int> *_flag; - mf_body( tbb::atomic<int> &myatomic) : _flag(&myatomic) { } - void operator()( const int& in, typename MF_TYPE::output_ports_type &outports) { - if(*_flag == 0) { - *_flag = 1; - BACKOFF_WAIT(*_flag == 1, "multifunction_node not released"); - } - - if(in & 0x1) tbb::flow::get<1>(outports).try_put(in); - else tbb::flow::get<0>(outports).try_put(in); - } -}; - -template<tbb::flow::graph_buffer_policy P, typename T> -struct test_reversal; -template<typename T> -struct test_reversal<tbb::flow::queueing, T> { - test_reversal() { REMARK("<queueing>"); } - // queueing node will not reverse. 
- bool operator()( T &node) { return node.my_predecessors.empty(); } -}; - -template<typename T> -struct test_reversal<tbb::flow::rejecting, T> { - test_reversal() { REMARK("<rejecting>"); } - bool operator()( T &node) { return !node.my_predecessors.empty(); } -}; - -template<tbb::flow::graph_buffer_policy P> -void -TestMultifunctionNode() { - typedef tbb::flow::multifunction_node<int, tbb::flow::tuple<int, int>, P> multinode_type; - REMARK("Testing multifunction_node"); - test_reversal<P,multinode_type> my_test; - REMARK(":"); - tbb::flow::graph g; - multinode_type mf(g, tbb::flow::serial, mf_body<multinode_type>(serial_fn_state0)); - tbb::flow::queue_node<int> qin(g); - tbb::flow::queue_node<int> qodd_out(g); - tbb::flow::queue_node<int> qeven_out(g); - tbb::flow::make_edge(qin,mf); - tbb::flow::make_edge(tbb::flow::output_port<0>(mf), qeven_out); - tbb::flow::make_edge(tbb::flow::output_port<1>(mf), qodd_out); - g.wait_for_all(); - for( int ii = 0; ii < -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - 2 -#else - 1 -#endif - ; ++ii) { - serial_fn_state0 = 0; - if(ii == 0) REMARK(" reset preds"); else REMARK(" 2nd"); - qin.try_put(0); - // wait for node to be active - BACKOFF_WAIT(serial_fn_state0 == 0, "timed out waiting for first put"); - qin.try_put(1); - BACKOFF_WAIT((!my_test(mf)), "Timed out waiting"); - ASSERT(my_test(mf), "fail second put test"); - g.my_root_task->cancel_group_execution(); - // release node - serial_fn_state0 = 2; - g.wait_for_all(); - ASSERT(my_test(mf), "fail cancel group test"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - if( ii == 1) { - REMARK(" rf_extract"); - g.reset(tbb::flow::rf_extract); - ASSERT(tbb::flow::output_port<0>(mf).my_successors.empty(), "output_port<0> not reset (rf_extract)"); - ASSERT(tbb::flow::output_port<1>(mf).my_successors.empty(), "output_port<1> not reset (rf_extract)"); - } - else -#endif - { - g.reset(); - } - ASSERT(mf.my_predecessors.empty(), "edge didn't reset"); - ASSERT((ii == 0 && !qin.my_successors.empty()) || (ii == 1 && qin.my_successors.empty()), "edge didn't reset"); - } - REMARK(" done\n"); -} - -// indexer_node is like a broadcast_node, in that none of its inputs reverse, and it -// never allows a successor to reverse its edge, so we only need test the successors. 
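mf_body above routes odd inputs to output port 1 and even inputs to port 0. A standalone version of that routing; classic TBB spells the port accessor tbb::flow::get, and std::get is used here on the assumption that output_ports_type is a std::tuple of ports, as in later TBB versions:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    int main() {
        using namespace tbb::flow;
        typedef multifunction_node<int, std::tuple<int, int> > mf_t;
        graph g;
        mf_t mf(g, unlimited, [](const int &in, mf_t::output_ports_type &p) {
            if (in & 0x1) std::get<1>(p).try_put(in);   // odd -> port 1
            else          std::get<0>(p).try_put(in);   // even -> port 0
        });
        queue_node<int> evens(g), odds(g);
        make_edge(output_port<0>(mf), evens);
        make_edge(output_port<1>(mf), odds);
        for (int i = 0; i < 6; ++i) mf.try_put(i);
        g.wait_for_all();
        int v;
        while (evens.try_get(v)) std::printf("even: %d\n", v);
        while (odds.try_get(v))  std::printf("odd:  %d\n", v);
        return 0;
    }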
-void -TestIndexerNode() { - tbb::flow::graph g; - typedef tbb::flow::indexer_node< int, int > indexernode_type; - indexernode_type inode(g); - REMARK("Testing indexer_node:"); - tbb::flow::queue_node<indexernode_type::output_type> qout(g); - tbb::flow::make_edge(inode,qout); - g.wait_for_all(); - ASSERT(!inode.my_successors.empty(), "successor of indexer_node missing"); - g.reset(); - ASSERT(!inode.my_successors.empty(), "successor of indexer_node missing after reset"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_extract); - ASSERT(inode.my_successors.empty(), "successor of indexer_node not removed by reset(rf_extract)"); -#endif - REMARK(" done\n"); -} - -template<typename Node> -void -TestScalarNode(const char *name) { - tbb::flow::graph g; - Node on(g); - tbb::flow::queue_node<int> qout(g); - REMARK("Testing %s:", name); - tbb::flow::make_edge(on,qout); - g.wait_for_all(); - ASSERT(!on.my_successors.empty(), "edge not added"); - g.reset(); - ASSERT(!on.my_successors.empty(), "edge improperly removed"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_extract); - ASSERT(on.my_successors.empty(), "edge not removed by reset(rf_extract)"); -#endif - REMARK(" done\n"); -} - -struct seq_body { - size_t operator()(const int &in) { - return size_t(in / 3); - } -}; - -// sequencer_node behaves like a queueing node, but requires a different constructor. -void -TestSequencerNode() { - tbb::flow::graph g; - tbb::flow::sequencer_node<int> bnode(g, seq_body()); - REMARK("Testing sequencer_node:"); - tbb::flow::function_node<int> fnode(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0)); - REMARK("Testing sequencer_node:"); - serial_fn_state0 = 0; // reset to waiting state. - REMARK(" make_edge"); - tbb::flow::make_edge(bnode, fnode); - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after make_edge"); - REMARK(" try_put"); - bnode.try_put(0); // will forward to the fnode - BACKOFF_WAIT( serial_fn_state0 == 0, "timeout waiting for function_node"); // wait for the function_node to fire up - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after forwarding message"); - serial_fn_state0 = 0; - g.wait_for_all(); - REMARK(" remove_edge"); - tbb::flow::remove_edge(bnode, fnode); - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after remove_edge"); - tbb::flow::join_node<tbb::flow::tuple<int,int>,tbb::flow::reserving> jnode(g); - tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode)); // will spawn a task - g.wait_for_all(); - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after attaching to join"); - REMARK(" reverse"); - bnode.try_put(3); // the edge should reverse - g.wait_for_all(); - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving"); - REMARK(" reset()"); - g.wait_for_all(); - g.reset(); // should be in forward direction again - ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after reset()"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - REMARK(" remove_edge"); - g.reset(tbb::flow::rf_extract); // should be in forward direction again - ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reset(rf_extract)"); - ASSERT(fnode.my_predecessors.empty(), "buffering node reversed after reset(rf_extract)"); -#endif - REMARK(" done\n"); - g.wait_for_all(); -} - -struct snode_body { - int max_cnt; - int my_cnt; - snode_body( const int &in) : max_cnt(in) { my_cnt = 0; } - bool operator()(int &out) { - if(max_cnt <= 
my_cnt++) return false; - out = my_cnt; - return true; - } -}; - -void -TestSourceNode() { - tbb::flow::graph g; - tbb::flow::source_node<int> sn(g, snode_body(4), false); - REMARK("Testing source_node:"); - tbb::flow::queue_node<int> qin(g); - tbb::flow::join_node<tbb::flow::tuple<int,int>, tbb::flow::reserving> jn(g); - tbb::flow::queue_node<tbb::flow::tuple<int,int> > qout(g); - - REMARK(" make_edges"); - tbb::flow::make_edge(sn, tbb::flow::input_port<0>(jn)); - tbb::flow::make_edge(qin, tbb::flow::input_port<1>(jn)); - tbb::flow::make_edge(jn,qout); - ASSERT(!sn.my_successors.empty(), "source node has no successor after make_edge"); - g.wait_for_all(); - g.reset(); - ASSERT(!sn.my_successors.empty(), "source node has no successor after reset"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.wait_for_all(); - g.reset(tbb::flow::rf_extract); - ASSERT(sn.my_successors.empty(), "source node has successor after reset(rf_extract)"); - tbb::flow::make_edge(sn, tbb::flow::input_port<0>(jn)); - tbb::flow::make_edge(qin, tbb::flow::input_port<1>(jn)); - tbb::flow::make_edge(jn,qout); - g.wait_for_all(); -#endif - REMARK(" activate"); - sn.activate(); // will forward to the fnode - REMARK(" wait1"); - BACKOFF_WAIT( !sn.my_successors.empty(), "Timed out waiting for edge to reverse"); - ASSERT(sn.my_successors.empty(), "source node has no successor after forwarding message"); - - g.wait_for_all(); - g.reset(); - ASSERT(!sn.my_successors.empty(), "source_node has no successors after reset"); - ASSERT(tbb::flow::input_port<0>(jn).my_predecessors.empty(), "successor if source_node has pred after reset."); - REMARK(" done\n"); -} - -int TestMain() { - - if(MinThread < 3) MinThread = 3; - tbb::task_scheduler_init init(MinThread); // tests presume at least three threads - - TestBufferingNode< tbb::flow::buffer_node<int> >("buffer_node"); - TestBufferingNode< tbb::flow::priority_queue_node<int> >("priority_queue_node"); - TestBufferingNode< tbb::flow::queue_node<int> >("queue_node"); - TestSequencerNode(); - - TestMultifunctionNode<tbb::flow::rejecting>(); - TestMultifunctionNode<tbb::flow::queueing>(); - TestSourceNode(); - TestContinueNode(); - TestFunctionNode(); - - TestJoinNode(); - - TestLimiterNode(); - TestIndexerNode(); - TestSplitNode(); - TestScalarNode<tbb::flow::broadcast_node<int> >("broadcast_node"); - TestScalarNode<tbb::flow::overwrite_node<int> >("overwrite_node"); - TestScalarNode<tbb::flow::write_once_node<int> >("write_once_node"); - - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_fp.cpp b/src/tbb/src/test/test_fp.cpp deleted file mode 100644 index 9c16bb7af..000000000 --- a/src/tbb/src/test/test_fp.cpp +++ /dev/null @@ -1,385 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
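snode_body, split across the chunk boundary above, follows the classic source_node contract: bool operator()(T &out) writes the next item and returns false when the stream is exhausted; the node is constructed inactive (third constructor argument false) and released with activate(). A condensed sketch of that contract; oneTBB later replaced source_node with input_node and a flow_control-based body:

    #include <tbb/flow_graph.h>
    #include <cstdio>

    struct counter_body {
        int max_cnt, my_cnt;
        explicit counter_body(int n) : max_cnt(n), my_cnt(0) {}
        bool operator()(int &out) {
            if (my_cnt >= max_cnt) return false;   // stop the source
            out = ++my_cnt;
            return true;
        }
    };

    int main() {
        using namespace tbb::flow;
        graph g;
        source_node<int> src(g, counter_body(4), /*is_active=*/false);
        queue_node<int> q(g);
        make_edge(src, q);
        src.activate();                            // begin emitting 1..4
        g.wait_for_all();
        int v;
        while (q.try_get(v)) std::printf("%d\n", v);
        return 0;
    }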
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/** This test checks the automatic propagation of master thread FPU settings - into the worker threads. **/ - -#include "harness_fp.h" -#include "harness.h" -#define private public -#include "tbb/task.h" -#undef private -#include "tbb/parallel_for.h" -#include "tbb/task_scheduler_init.h" - -const int N = 500000; - -#if ( __TBB_x86_32 || __TBB_x86_64 ) && __TBB_CPU_CTL_ENV_PRESENT && !defined(__TBB_WIN32_USE_CL_BUILTINS) -#include "harness_barrier.h" - -class CheckNoSseStatusPropagationBody : public NoAssign { - Harness::SpinBarrier &barrier; -public: - CheckNoSseStatusPropagationBody( Harness::SpinBarrier &_barrier ) : barrier(_barrier) {} - void operator()( const tbb::blocked_range<int>& ) const { - barrier.wait(); - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - ASSERT( (ctl.mxcsr & SSE_STATUS_MASK) == 0, "FPU control status bits have been propagated." ); - } -}; - -void CheckNoSseStatusPropagation() { - tbb::internal::cpu_ctl_env ctl; - ctl.get_env(); - ctl.mxcsr |= SSE_STATUS_MASK; - ctl.set_env(); - const int num_threads = tbb::task_scheduler_init::default_num_threads(); - Harness::SpinBarrier barrier(num_threads); - tbb::task_scheduler_init init(num_threads); - tbb::parallel_for( tbb::blocked_range<int>(0, num_threads), CheckNoSseStatusPropagationBody(barrier) ); - ctl.mxcsr &= ~SSE_STATUS_MASK; - ctl.set_env(); -} -#else /* Other archs */ -void CheckNoSseStatusPropagation() {} -#endif /* Other archs */ - -class RoundingModeCheckBody { - int m_mode; - int m_sseMode; -public: - void operator() ( int /*iter*/ ) const { - ASSERT( GetRoundingMode() == m_mode, "FPU control state has not been propagated." ); - ASSERT( GetSseMode() == m_sseMode, "SSE control state has not been propagated." ); - } - - RoundingModeCheckBody ( int mode, int sseMode ) : m_mode(mode), m_sseMode(sseMode) {} -}; - -void TestArenaFpuEnvPropagation( int id ) { - // TBB scheduler instance in a master thread captures the FPU control state - // at the moment of its initialization and passes it to the workers toiling - // on its behalf. 
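The mechanism under test: each OS thread starts with its own default floating-point environment, so TBB captures the master's control state (x87 control word and MXCSR on x86) at scheduler initialization and re-applies it in the workers. A portable sketch of that capture-and-replay step using only <cfenv>, on platforms that provide FE_UPWARD:

    #include <cassert>
    #include <cfenv>
    #include <thread>

    int main() {
        std::fesetround(FE_UPWARD);       // master picks a non-default mode
        std::fenv_t captured;
        std::fegetenv(&captured);         // capture, as TBB does at init

        std::thread worker([&captured] {
            std::fesetenv(&captured);     // replay in the worker
            assert(std::fegetround() == FE_UPWARD);
        });
        worker.join();
        return 0;
    }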
- for( int k = 0; k < NumSseModes; ++k ) { - int sse_mode = SseModes[(k + id) % NumSseModes]; - SetSseMode( sse_mode ); - for( int i = 0; i < NumRoundingModes; ++i ) { - int mode = RoundingModes[(i + id) % NumRoundingModes]; - SetRoundingMode( mode ); - // New mode must be set before TBB scheduler is initialized - tbb::task_scheduler_init init; - tbb::parallel_for( 0, N, 1, RoundingModeCheckBody(mode, sse_mode) ); - ASSERT( GetRoundingMode() == mode, NULL ); - } - } -} - -#if __TBB_FP_CONTEXT -void TestArenaFpuEnvPersistence( int id ) { - // Since the following loop uses auto-initialization, the scheduler instance - // implicitly created by the first parallel_for invocation will persist - // until the thread ends, and thus workers will use the mode set by the - // first iteration. - int captured_mode = RoundingModes[id % NumRoundingModes]; - int captured_sse_mode = SseModes[id % NumSseModes]; - for( int k = 0; k < NumSseModes; ++k ) { - int sse_mode = SseModes[(k + id) % NumSseModes]; - SetSseMode( sse_mode ); - for( int i = 0; i < NumRoundingModes; ++i ) { - int mode = RoundingModes[(i + id) % NumRoundingModes]; - SetRoundingMode( mode ); - tbb::parallel_for( 0, N, 1, RoundingModeCheckBody(captured_mode, captured_sse_mode) ); - ASSERT( GetRoundingMode() == mode, NULL ); - } - } -} -#endif - -class LauncherBody { -public: - void operator() ( int id ) const { - TestArenaFpuEnvPropagation( id ); -#if __TBB_FP_CONTEXT - TestArenaFpuEnvPersistence( id ); -#endif - } -}; - -void TestFpuEnvPropagation () { - const int p = tbb::task_scheduler_init::default_num_threads(); - // The test should be run in an oversubscription mode. So create 4*p threads but - // limit the oversubscription for big machines (p>32) with 4*32+(p-32) threads. - const int num_threads = p + (NumRoundingModes-1)*min(p,32); - NativeParallelFor ( num_threads, LauncherBody() ); -} - -void TestCpuCtlEnvApi () { - for( int k = 0; k < NumSseModes; ++k ) { - SetSseMode( SseModes[k] ); - for( int i = 0; i < NumRoundingModes; ++i ) { - SetRoundingMode( RoundingModes[i] ); - ASSERT( GetRoundingMode() == RoundingModes[i], NULL ); - ASSERT( GetSseMode() == SseModes[k], NULL ); - } - } -} - -#if __TBB_FP_CONTEXT -const int numModes = NumRoundingModes*NumSseModes; -const int numArenas = 4; -tbb::task_group_context *contexts[numModes]; -// +1 for a default context -int roundingModes[numModes+numArenas]; -int sseModes[numModes+numArenas]; - -class TestContextFpuEnvBody { - int arenaNum; - int mode; - int depth; -public: - TestContextFpuEnvBody( int _arenaNum, int _mode, int _depth = 0 ) : arenaNum(_arenaNum), mode(_mode), depth(_depth) {} - void operator()( const tbb::blocked_range<int> &r ) const; -}; - -inline void SetMode( int mode ) { - SetRoundingMode( roundingModes[mode] ); - SetSseMode( sseModes[mode] ); -} - -inline void AssertMode( int mode ) { - ASSERT( GetRoundingMode() == roundingModes[mode], "FPU control state has not been set correctly." ); - ASSERT( GetSseMode() == sseModes[mode], "SSE control state has not been set correctly." 
); -} - -inline int SetNextMode( int mode, int step ) { - const int nextMode = (mode+step)%numModes; - SetMode( nextMode ); - return nextMode; -} - -class TestContextFpuEnvTask : public tbb::task { - int arenaNum; - int mode; - int depth; -#if __TBB_CPU_CTL_ENV_PRESENT - static const int MAX_DEPTH = 3; -#else - static const int MAX_DEPTH = 4; -#endif -public: - TestContextFpuEnvTask( int _arenaNum, int _mode, int _depth = 0 ) : arenaNum(_arenaNum), mode(_mode), depth(_depth) {} - tbb::task* execute() { - AssertMode( mode ); - if ( depth < MAX_DEPTH ) { - // Test default context. - const int newMode1 = SetNextMode( mode, depth+1 ); - tbb::parallel_for( tbb::blocked_range<int>(0, numModes+1), TestContextFpuEnvBody( arenaNum, mode, depth+1 ) ); - AssertMode( newMode1 ); - - // Test user default context. - const int newMode2 = SetNextMode( newMode1, depth+1 ); - tbb::task_group_context ctx1; - const int newMode3 = SetNextMode( newMode2, depth+1 ); - tbb::parallel_for( tbb::blocked_range<int>(0, numModes+1), TestContextFpuEnvBody( arenaNum, mode, depth+1 ), ctx1 ); - AssertMode( newMode3 ); - - // Test user context which captured FPU control settings. - const int newMode4 = SetNextMode( newMode3, depth+1 ); - // Capture newMode4 - ctx1.capture_fp_settings(); - const int newMode5 = SetNextMode( newMode4, depth+1 ); - tbb::parallel_for( tbb::blocked_range<int>(0, numModes+1), TestContextFpuEnvBody( arenaNum, newMode4, depth+1 ), ctx1 ); - AssertMode( newMode5 ); - - // And again test user context which captured FPU control settings to check multiple captures. - const int newMode6 = SetNextMode( newMode5, depth+1 ); - // Capture newMode6 - ctx1.capture_fp_settings(); - const int newMode7 = SetNextMode( newMode6, depth+1 ); - tbb::parallel_for( tbb::blocked_range<int>(0, numModes+1), TestContextFpuEnvBody( arenaNum, newMode6, depth+1 ), ctx1 ); - AssertMode( newMode7 ); - - // Test an isolated context. The isolated context should use default FPU control settings. - const int newMode8 = SetNextMode( newMode7, depth+1 ); - tbb::task_group_context ctx2( tbb::task_group_context::isolated ); - const int newMode9 = SetNextMode( newMode8, depth+1 ); - tbb::parallel_for( tbb::blocked_range<int>(0, numModes+1), TestContextFpuEnvBody( arenaNum, numModes+arenaNum, depth+1 ), ctx2 ); - AssertMode( newMode9 ); - - // The binding should not owerrite captured FPU control settings. - const int newMode10 = SetNextMode( newMode9, depth+1 ); - tbb::task_group_context ctx3; - ctx3.capture_fp_settings(); - const int newMode11 = SetNextMode( newMode10, depth+1 ); - tbb::parallel_for( tbb::blocked_range<int>(0, numModes+1), TestContextFpuEnvBody( arenaNum, newMode10, depth+1 ), ctx3 ); - AssertMode( newMode11 ); - - // Restore initial mode since user code in tbb::task::execute should not change FPU settings. - SetMode( mode ); - } - - return NULL; - } -}; - -void TestContextFpuEnvBody::operator()( const tbb::blocked_range<int> &r ) const { - AssertMode( mode ); - - const int newMode = SetNextMode( mode, depth+2 ); - - int end = r.end(); - if ( end-1 == numModes ) { - // For a default context our mode should be inherited. 
- tbb::task::spawn_root_and_wait( - *new( tbb::task::allocate_root() ) TestContextFpuEnvTask( arenaNum, mode, depth ) ); - AssertMode( newMode ); - end--; - } - for ( int i=r.begin(); i<end; ++i ) { - tbb::task::spawn_root_and_wait( - *new( tbb::task::allocate_root(*contexts[i]) ) TestContextFpuEnvTask( arenaNum, i, depth ) ); - AssertMode( newMode ); - } - - // Restore initial mode since user code in tbb::task::execute should not change FPU settings. - SetMode( mode ); -} - -class TestContextFpuEnvNativeLoopBody { -public: - void operator() ( int arenaNum ) const { - SetMode(numModes+arenaNum); - tbb::task_scheduler_init init; - tbb::task::spawn_root_and_wait( *new (tbb::task::allocate_root() ) TestContextFpuEnvTask( arenaNum, numModes+arenaNum ) ); - } -}; - -#if TBB_USE_EXCEPTIONS -const int NUM_ITERS = 1000; -class TestContextFpuEnvEhBody { - int mode; - int eh_iter; - int depth; -public: - TestContextFpuEnvEhBody( int _mode, int _eh_iter, int _depth = 0 ) : mode(_mode), eh_iter(_eh_iter), depth(_depth) {} - void operator()( const tbb::blocked_range<int> &r ) const { - AssertMode( mode ); - if ( depth < 1 ) { - const int newMode1 = SetNextMode( mode, 1 ); - tbb::task_group_context ctx; - ctx.capture_fp_settings(); - const int newMode2 = SetNextMode( newMode1, 1 ); - try { - tbb::parallel_for( tbb::blocked_range<int>(0, NUM_ITERS), TestContextFpuEnvEhBody(newMode1,rand()%NUM_ITERS,1), tbb::simple_partitioner(), ctx ); - } catch (...) { - AssertMode( newMode2 ); - if ( r.begin() == eh_iter ) throw; - } - AssertMode( newMode2 ); - SetMode( mode ); - } else if ( r.begin() == eh_iter ) throw 0; - } -}; - -class TestContextFpuEnvEhNativeLoopBody { -public: - void operator() ( int arenaNum ) const { - SetMode( arenaNum%numModes ); - try { - tbb::parallel_for( tbb::blocked_range<int>(0, NUM_ITERS), TestContextFpuEnvEhBody((arenaNum+1)%numModes,rand()%NUM_ITERS), - tbb::simple_partitioner(), *contexts[(arenaNum+1)%numModes] ); - ASSERT( false, "parallel_for has not thrown an exception." ); - } catch (...) { - AssertMode( arenaNum%numModes ); - } - } -}; -#endif /* TBB_USE_EXCEPTIONS */ - -void TestContextFpuEnv() { - // Prepare contexts' fp modes. - for ( int i = 0, modeNum = 0; i < NumRoundingModes; ++i ) { - const int roundingMode = RoundingModes[i]; - SetRoundingMode( roundingMode ); - for( int j = 0; j < NumSseModes; ++j, ++modeNum ) { - const int sseMode = SseModes[j]; - SetSseMode( sseMode ); - - contexts[modeNum] = new tbb::task_group_context( tbb::task_group_context::isolated, - tbb::task_group_context::default_traits | tbb::task_group_context::fp_settings ); - roundingModes[modeNum] = roundingMode; - sseModes[modeNum] = sseMode; - } - } - // Prepare arenas' fp modes. 
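The contexts array being prepared above shows the per-context FP API this test exercises: construct a task_group_context with the fp_settings trait while the desired mode is current (or call capture_fp_settings() later), then hand the context to the algorithm. A sketch condensed to its essentials, against the same classic-TBB headers this file includes:

    #include <cfenv>
    #include <tbb/task.h>
    #include <tbb/parallel_for.h>

    int main() {
        std::fesetround(FE_DOWNWARD);
        // The fp_settings trait snapshots the FP state current right here.
        tbb::task_group_context ctx(
            tbb::task_group_context::isolated,
            tbb::task_group_context::default_traits |
            tbb::task_group_context::fp_settings);
        std::fesetround(FE_TONEAREST);    // the master moves on...

        tbb::parallel_for(0, 100, [](int) {
            // ...but tasks run under ctx still see FE_DOWNWARD.
        }, ctx);
        return 0;
    }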
- for ( int arenaNum = 0; arenaNum < numArenas; ++arenaNum ) { - roundingModes[numModes+arenaNum] = roundingModes[arenaNum%numModes]; - sseModes[numModes+arenaNum] = sseModes[arenaNum%numModes]; - } - NativeParallelFor( numArenas, TestContextFpuEnvNativeLoopBody() ); -#if TBB_USE_EXCEPTIONS - NativeParallelFor( numArenas, TestContextFpuEnvEhNativeLoopBody() ); -#endif - for ( int modeNum = 0; modeNum < numModes; ++modeNum ) - delete contexts[modeNum]; -} - -tbb::task_group_context glbIsolatedCtx( tbb::task_group_context::isolated ); -int glbIsolatedCtxMode = -1; - -struct TestGlobalIsolatedContextTask : public tbb::task { - tbb::task* execute() { - AssertFPMode( glbIsolatedCtxMode ); - return NULL; - } -}; - -#include "tbb/mutex.h" - -struct TestGlobalIsolatedContextNativeLoopBody { - void operator()( int threadId ) const { - FPModeContext fpGuard( threadId ); - static tbb::mutex rootAllocMutex; - rootAllocMutex.lock(); - if ( glbIsolatedCtxMode == -1 ) - glbIsolatedCtxMode = threadId; - tbb::task &root = *new (tbb::task::allocate_root( glbIsolatedCtx )) TestGlobalIsolatedContextTask(); - rootAllocMutex.unlock(); - tbb::task::spawn_root_and_wait( root ); - } -}; - -void TestGlobalIsolatedContext() { - ASSERT( numArenas > 1, NULL ); - NativeParallelFor( numArenas, TestGlobalIsolatedContextNativeLoopBody() ); -} -#endif /* __TBB_FP_CONTEXT */ - -int TestMain () { - TestCpuCtlEnvApi(); - TestFpuEnvPropagation(); - CheckNoSseStatusPropagation(); -#if __TBB_FP_CONTEXT - TestContextFpuEnv(); - TestGlobalIsolatedContext(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_function_node.cpp b/src/tbb/src/test/test_function_node.cpp deleted file mode 100644 index fed3a4857..000000000 --- a/src/tbb/src/test/test_function_node.cpp +++ /dev/null @@ -1,578 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_graph.h" - -#include "tbb/task_scheduler_init.h" -#include "tbb/spin_rw_mutex.h" - -#define N 100 -#define MAX_NODES 4 - -//! 
Performs test on function nodes with limited concurrency and buffering -/** These tests check: - 1) that the number of executing copies never exceeds the concurrency limit - 2) that the node never rejects - 3) that no items are lost - and 4) all of this happens even if there are multiple predecessors and successors -*/ - -template< typename InputType > -struct parallel_put_until_limit : private NoAssign { - - harness_counting_sender<InputType> *my_senders; - - parallel_put_until_limit( harness_counting_sender<InputType> *senders ) : my_senders(senders) {} - - void operator()( int i ) const { - if ( my_senders ) { - my_senders[i].try_put_until_limit(); - } - } - -}; - -template<typename IO> -struct pass_through { - IO operator()(const IO& i) { return i; } -}; - -template< typename InputType, typename OutputType, typename Body > -void buffered_levels( size_t concurrency, Body body ) { - - // Do for lc = 1 to concurrency level - for ( size_t lc = 1; lc <= concurrency; ++lc ) { - tbb::flow::graph g; - - // Set the execute_counter back to zero in the harness - harness_graph_executor<InputType, OutputType>::execute_count = 0; - // Set the number of current executors to zero. - harness_graph_executor<InputType, OutputType>::current_executors = 0; - // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded. - harness_graph_executor<InputType, OutputType>::max_executors = lc; - - // Create the function_node with the appropriate concurrency level, and use default buffering - tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, body ); - tbb::flow::function_node<InputType, InputType> pass_thru( g, tbb::flow::unlimited, pass_through<InputType>()); - - // Create a vector of identical exe_nodes and pass_thrus - std::vector< tbb::flow::function_node< InputType, OutputType > > exe_vec(2, exe_node); - std::vector< tbb::flow::function_node< InputType, InputType > > pass_thru_vec(2, pass_thru); - // Attach each pass_thru to its corresponding exe_node - for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) { - tbb::flow::make_edge(pass_thru_vec[node_idx], exe_vec[node_idx]); - } - - // TODO: why is the test executed serially for the node pairs, not concurrently? - for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) { - // For num_receivers = 1 to MAX_NODES - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - // Create num_receivers counting receivers and connect the exe_vec[node_idx] to them.
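Before the harness details, the shape of the node under test: with the default (queueing) policy a function_node buffers work it cannot start immediately, so try_put() never fails even once the concurrency cap is reached. A hedged sketch of just that behavior, with illustrative names and assuming this TBB generation's flow graph API:

#include "tbb/flow_graph.h"

void buffered_node_sketch() {
    tbb::flow::graph g;
    // concurrency capped at 2, default buffering
    tbb::flow::function_node<int, int> square(g, 2, [](int v) { return v * v; });
    for (int i = 0; i < 100; ++i)
        square.try_put(i);  // always accepted: excess items queue inside the node
    g.wait_for_all();       // at most 2 copies of the body ran at any instant
}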
- harness_mapped_receiver<OutputType> *receivers = new harness_mapped_receiver<OutputType>[num_receivers]; - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( exe_vec[node_idx], receivers[r] ); - } - - // Do the test with varying numbers of senders - harness_counting_sender<InputType> *senders = NULL; - for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) { - // Create num_senders senders, set their message limit each to N, and connect them to pass_thru_vec[node_idx] - senders = new harness_counting_sender<InputType>[num_senders]; - for (size_t s = 0; s < num_senders; ++s ) { - senders[s].my_limit = N; - senders[s].register_successor(pass_thru_vec[node_idx] ); - } - - // Initialize the receivers so they know how many senders and messages to check for - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].initialize_map( N, num_senders ); - } - - // Do the test - NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) ); - g.wait_for_all(); - - // confirm that N items were requested from each sender - for (size_t s = 0; s < num_senders; ++s ) { - size_t n = senders[s].my_received; - ASSERT( n == N, NULL ); - ASSERT( senders[s].my_receiver == &pass_thru_vec[node_idx], NULL ); - } - // validate the receivers - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].validate(); - } - delete [] senders; - } - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::remove_edge( exe_vec[node_idx], receivers[r] ); - } - ASSERT( exe_vec[node_idx].try_put( InputType() ) == true, NULL ); - g.wait_for_all(); - for (size_t r = 0; r < num_receivers; ++r ) { - // since it's detached, nothing should have changed - receivers[r].validate(); - } - delete [] receivers; - } // for num_receivers - } // for node_idx - } // for concurrency level lc -} - -const size_t Offset = 123; -tbb::atomic<size_t> global_execute_count; - -struct inc_functor { - - tbb::atomic<size_t> local_execute_count; - inc_functor( ) { local_execute_count = 0; } - inc_functor( const inc_functor &f ) { local_execute_count = f.local_execute_count; } - void operator=( const inc_functor &f ) { local_execute_count = f.local_execute_count; } - - int operator()( int i ) { - ++global_execute_count; - ++local_execute_count; - return i; - } - -}; - -template< typename InputType, typename OutputType > -void buffered_levels_with_copy( size_t concurrency ) { - - // Do for lc = 1 to concurrency level - for ( size_t lc = 1; lc <= concurrency; ++lc ) { - tbb::flow::graph g; - - inc_functor cf; - cf.local_execute_count = Offset; - global_execute_count = Offset; - - tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, cf ); - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - harness_mapped_receiver<OutputType> *receivers = new harness_mapped_receiver<OutputType>[num_receivers]; - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( exe_node, receivers[r] ); - } - - harness_counting_sender<InputType> *senders = NULL; - for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) { - senders = new harness_counting_sender<InputType>[num_senders]; - for (size_t s = 0; s < num_senders; ++s ) { - senders[s].my_limit = N; - tbb::flow::make_edge( senders[s], exe_node ); - } - - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].initialize_map( N, num_senders ); - } - - NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) ); - g.wait_for_all(); - - for (size_t s = 0; s < 
num_senders; ++s ) { - size_t n = senders[s].my_received; - ASSERT( n == N, NULL ); - ASSERT( senders[s].my_receiver == &exe_node, NULL ); - } - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].validate(); - } - delete [] senders; - } - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::remove_edge( exe_node, receivers[r] ); - } - ASSERT( exe_node.try_put( InputType() ) == true, NULL ); - g.wait_for_all(); - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].validate(); - } - delete [] receivers; - } - - // validate that the local body matches the global execute_count and both are correct - inc_functor body_copy = tbb::flow::copy_body<inc_functor>( exe_node ); - const size_t expected_count = N/2 * MAX_NODES * MAX_NODES * ( MAX_NODES + 1 ) + MAX_NODES + Offset; - size_t global_count = global_execute_count; - size_t inc_count = body_copy.local_execute_count; - ASSERT( global_count == expected_count && global_count == inc_count, NULL ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - g.reset(tbb::flow::rf_reset_bodies); - body_copy = tbb::flow::copy_body<inc_functor>( exe_node ); - inc_count = body_copy.local_execute_count; - ASSERT( Offset == inc_count, "reset(rf_reset_bodies) did not reset functor" ); -#endif - } -} - -template< typename InputType, typename OutputType > -void run_buffered_levels( int c ) { - #if __TBB_LAMBDAS_PRESENT - buffered_levels<InputType,OutputType>( c, []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::func(i); } ); - #endif - buffered_levels<InputType,OutputType>( c, &harness_graph_executor<InputType, OutputType>::func ); - buffered_levels<InputType,OutputType>( c, typename harness_graph_executor<InputType, OutputType>::functor() ); - buffered_levels_with_copy<InputType,OutputType>( c ); -} - - -//! Performs test on executable nodes with limited concurrency -/** These tests check: - 1) that the nodes will accept puts up to the concurrency limit, - 2) the nodes do not exceed the concurrency limit even when run with more threads (this is checked in the harness_graph_executor), - 3) the nodes will receive puts from multiple predecessors simultaneously, - and 4) the nodes will send to multiple successors. - There is no checking of the contents of the messages for corruption. -*/ - -template< typename InputType, typename OutputType, typename Body > -void concurrency_levels( size_t concurrency, Body body ) { - - for ( size_t lc = 1; lc <= concurrency; ++lc ) { - tbb::flow::graph g; - - // Set the execute_counter back to zero in the harness - harness_graph_executor<InputType, OutputType>::execute_count = 0; - // Set the number of current executors to zero. - harness_graph_executor<InputType, OutputType>::current_executors = 0; - // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded.
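concurrency_levels() pins down the rejecting counterpart of the behavior sketched earlier: with the tbb::flow::rejecting policy, once lc bodies are in flight a further try_put() returns false instead of queueing, which the test makes observable by blocking the bodies on a lock. In sketch form, under the same assumptions as the earlier snippet:

#include "tbb/flow_graph.h"

void rejecting_node_sketch() {
    tbb::flow::graph g;
    tbb::flow::function_node<int, int, tbb::flow::rejecting>
        node(g, /*concurrency*/ 1, [](int v) { return v + 1; });
    node.try_put(0);  // accepted: one body may run
    // if that body were still blocked (as the test arranges with a
    // spin_rw_mutex), a second try_put(1) here would return false
    g.wait_for_all();
}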
- harness_graph_executor<InputType, OutputType>::max_executors = lc; - - typedef tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > fnode_type; - fnode_type exe_node( g, lc, body ); - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - - harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers]; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(exe_node.successor_count() == 0, NULL); - ASSERT(exe_node.predecessor_count() == 0, NULL); -#endif - - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( exe_node, receivers[r] ); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(exe_node.successor_count() == num_receivers, NULL); - typename fnode_type::successor_vector_type my_succs; - exe_node.copy_successors(my_succs); - ASSERT(my_succs.size() == num_receivers, NULL); - typename fnode_type::predecessor_vector_type my_preds; - exe_node.copy_predecessors(my_preds); - ASSERT(my_preds.size() == 0, NULL); -#endif - - harness_counting_sender<InputType> *senders = NULL; - - for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) { - senders = new harness_counting_sender<InputType>[num_senders]; - { - // Exclusively lock m to prevent exe_node from finishing - tbb::spin_rw_mutex::scoped_lock l( harness_graph_executor<InputType, OutputType>::template mutex_holder<tbb::spin_rw_mutex>::mutex ); - - // put to lc level, it will accept and then block at m - for ( size_t c = 0 ; c < lc ; ++c ) { - ASSERT( exe_node.try_put( InputType() ) == true, NULL ); - } - // it only accepts to lc level - ASSERT( exe_node.try_put( InputType() ) == false, NULL ); - - for (size_t s = 0; s < num_senders; ++s ) { - // register a sender - senders[s].my_limit = N; - exe_node.register_predecessor( senders[s] ); - } - - } // release lock at end of scope, setting the exe node free to continue - // wait for graph to settle down - g.wait_for_all(); - - // confirm that N items were requested from each sender - for (size_t s = 0; s < num_senders; ++s ) { - size_t n = senders[s].my_received; - ASSERT( n == N, NULL ); - ASSERT( senders[s].my_receiver == &exe_node, NULL ); - } - // confirm that each receiver got N * num_senders + the initial lc puts - for (size_t r = 0; r < num_receivers; ++r ) { - size_t n = receivers[r].my_count; - ASSERT( n == num_senders*N+lc, NULL ); - receivers[r].my_count = 0; - } - delete [] senders; - } - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::remove_edge( exe_node, receivers[r] ); - } - ASSERT( exe_node.try_put( InputType() ) == true, NULL ); - g.wait_for_all(); - for (size_t r = 0; r < num_receivers; ++r ) { - ASSERT( int(receivers[r].my_count) == 0, NULL ); - } - delete [] receivers; - } - - } -} - -template< typename InputType, typename OutputType > -void run_concurrency_levels( int c ) { - #if __TBB_LAMBDAS_PRESENT - concurrency_levels<InputType,OutputType>( c, []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::template tfunc<tbb::spin_rw_mutex>(i); } ); - #endif - concurrency_levels<InputType,OutputType>( c, &harness_graph_executor<InputType, OutputType>::template tfunc<tbb::spin_rw_mutex> ); - concurrency_levels<InputType,OutputType>( c, typename harness_graph_executor<InputType, OutputType>::template tfunctor<tbb::spin_rw_mutex>() ); -} - - -struct empty_no_assign { - empty_no_assign() {} - empty_no_assign( int ) {} - operator int() { return 0; } -}; - -template< typename InputType > -struct parallel_puts : private 
NoAssign { - - tbb::flow::receiver< InputType > * const my_exe_node; - - parallel_puts( tbb::flow::receiver< InputType > &exe_node ) : my_exe_node(&exe_node) {} - - void operator()( int ) const { - for ( int i = 0; i < N; ++i ) { - // the nodes will accept all puts - ASSERT( my_exe_node->try_put( InputType() ) == true, NULL ); - } - } - -}; - -//! Performs test on executable nodes with unlimited concurrency -/** These tests check: - 1) that the nodes will accept all puts - 2) the nodes will receive puts from multiple predecessors simultaneously, - and 3) the nodes will send to multiple successors. - There is no checking of the contents of the messages for corruption. -*/ - -template< typename InputType, typename OutputType, typename Body > -void unlimited_concurrency( Body body ) { - - for (int p = 1; p < 2*MaxThread; ++p) { - tbb::flow::graph g; - tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > exe_node( g, tbb::flow::unlimited, body ); - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - - harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers]; - harness_graph_executor<InputType, OutputType>::execute_count = 0; - - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( exe_node, receivers[r] ); - } - - NativeParallelFor( p, parallel_puts<InputType>(exe_node) ); - g.wait_for_all(); - - // 2) the nodes will receive puts from multiple predecessors simultaneously, - size_t ec = harness_graph_executor<InputType, OutputType>::execute_count; - ASSERT( (int)ec == p*N, NULL ); - for (size_t r = 0; r < num_receivers; ++r ) { - size_t c = receivers[r].my_count; - // 3) the nodes will send to multiple successors. - ASSERT( (int)c == p*N, NULL ); - } - } - } -} - -template< typename InputType, typename OutputType > -void run_unlimited_concurrency() { - harness_graph_executor<InputType, OutputType>::max_executors = 0; - #if __TBB_LAMBDAS_PRESENT - unlimited_concurrency<InputType,OutputType>( []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::func(i); } ); - #endif - unlimited_concurrency<InputType,OutputType>( &harness_graph_executor<InputType, OutputType>::func ); - unlimited_concurrency<InputType,OutputType>( typename harness_graph_executor<InputType, OutputType>::functor() ); -} - -struct continue_msg_to_int { - int my_int; - continue_msg_to_int(int x) : my_int(x) {} - int operator()(tbb::flow::continue_msg) { return my_int; } -}; - -void test_function_node_with_continue_msg_as_input() { - // If this function terminates, then this test is successful - tbb::flow::graph g; - - tbb::flow::broadcast_node<tbb::flow::continue_msg> Start(g); - - tbb::flow::function_node<tbb::flow::continue_msg, int, tbb::flow::rejecting> FN1( g, tbb::flow::serial, continue_msg_to_int(42)); - tbb::flow::function_node<tbb::flow::continue_msg, int, tbb::flow::rejecting> FN2( g, tbb::flow::serial, continue_msg_to_int(43)); - - tbb::flow::make_edge( Start, FN1 ); - tbb::flow::make_edge( Start, FN2 ); - - Start.try_put( tbb::flow::continue_msg() ); - g.wait_for_all(); -} - -//!
Tests limited concurrency cases for nodes that accept data messages -void test_concurrency(int num_threads) { - tbb::task_scheduler_init init(num_threads); - run_concurrency_levels<int,int>(num_threads); - run_concurrency_levels<int,tbb::flow::continue_msg>(num_threads); - run_buffered_levels<int, int>(num_threads); - run_unlimited_concurrency<int,int>(); - run_unlimited_concurrency<int,empty_no_assign>(); - run_unlimited_concurrency<empty_no_assign,int>(); - run_unlimited_concurrency<empty_no_assign,empty_no_assign>(); - run_unlimited_concurrency<int,tbb::flow::continue_msg>(); - run_unlimited_concurrency<empty_no_assign,tbb::flow::continue_msg>(); - test_function_node_with_continue_msg_as_input(); -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -struct add_to_counter { - int* counter; - add_to_counter(int& var):counter(&var){} - int operator()(int i){*counter+=1; return i + 1;} -}; - -template<tbb::flow::graph_buffer_policy FTYPE> -void test_extract() { - int my_count = 0; - int cm; - tbb::flow::graph g; - tbb::flow::broadcast_node<int> b0(g); - tbb::flow::broadcast_node<int> b1(g); - tbb::flow::function_node<int, int, FTYPE> f0(g, tbb::flow::unlimited, add_to_counter(my_count)); - tbb::flow::queue_node<int> q0(g); - - tbb::flow::make_edge(b0, f0); - tbb::flow::make_edge(b1, f0); - tbb::flow::make_edge(f0, q0); - for( int i = 0; i < 2; ++i ) { - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 1, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(f0.predecessor_count() == 2 && f0.successor_count() == 1, "f0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 1 && q0.successor_count() == 0, "q0 has incorrect counts"); - - /* b0 */ - /* \ */ - /* f0 - q0 */ - /* / */ - /* b1 */ - - b0.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 1, "function_node didn't fire"); - ASSERT(q0.try_get(cm), "function_node didn't forward"); - b1.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 2, "function_node didn't fire"); - ASSERT(q0.try_get(cm), "function_node didn't forward"); - - b0.extract(); - - /* b0 */ - /* */ - /* f0 - q0 */ - /* / */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(f0.predecessor_count() == 1 && f0.successor_count() == 1, "f0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 1 && q0.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(1); - b0.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 2, "b0 messages being forwarded to function_node even though it is disconnected"); - b1.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 3, "function_node didn't fire though it has only one predecessor"); - ASSERT(q0.try_get(cm), "function_node didn't forward second time"); - - f0.extract(); - - /* b0 */ - /* */ - /* f0 q0 */ - /* */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 0, "b1 has incorrect counts"); - ASSERT(f0.predecessor_count() == 0 && f0.successor_count() == 0, "f0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(1); - b0.try_put(1); - b1.try_put(1); - b1.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 3, "function_node didn't fire though it has only one predecessor"); - 
ASSERT(!q0.try_get(cm), "function_node forwarded though it shouldn't"); - make_edge(b0, f0); - - /* b0 */ - /* \ */ - /* f0 q0 */ - /* */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 1, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 0, "b1 has incorrect counts"); - ASSERT(f0.predecessor_count() == 1 && f0.successor_count() == 0, "f0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - - b0.try_put(int()); - g.wait_for_all(); - - ASSERT(my_count == 4, "function_node didn't fire though it has only one predecessor"); - ASSERT(!q0.try_get(cm), "function_node forwarded though it shouldn't"); - - tbb::flow::make_edge(b1, f0); - tbb::flow::make_edge(f0, q0); - my_count = 0; - } -} -#endif - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - test_concurrency(p); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_extract<tbb::flow::rejecting>(); - test_extract<tbb::flow::queueing>(); -#endif - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_halt.cpp b/src/tbb/src/test/test_halt.cpp deleted file mode 100644 index 173fcff8e..000000000 --- a/src/tbb/src/test/test_halt.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" -#include <cstdio> -#include <cstdlib> -#include <cassert> -#include <utility> -#include "tbb/task.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "tbb/mutex.h" -#include "tbb/spin_mutex.h" -#include "tbb/queuing_mutex.h" -#include "harness.h" - -using namespace std; -using namespace tbb; - -///////////////////// Parallel methods //////////////////////// - -// *** Serial shared by mutexes *** // -int SharedI = 1, SharedN; -template<typename M> -class SharedSerialFibBody: NoAssign { - M &mutex; -public: - SharedSerialFibBody( M &m ) : mutex( m ) {} - //! 
main loop - void operator()( const blocked_range<int>& /*range*/ ) const { - for(;;) { - typename M::scoped_lock lock( mutex ); - if(SharedI >= SharedN) break; - volatile double sum = 7.3; - sum *= 11.17; - ++SharedI; - } - } -}; - -//! Root function -template<class M> -void SharedSerialFib(int n) -{ - SharedI = 1; - SharedN = n; - M mutex; - parallel_for( blocked_range<int>(0,4,1), SharedSerialFibBody<M>( mutex ) ); -} - -/////////////////////////// Main //////////////////////////////////////////////////// - -double Tsum = 0; int Tnum = 0; - -typedef void (*MeasureFunc)(int); -//! Measure ticks count in loop [2..n] -void Measure(const char *name, MeasureFunc func, int n) -{ - tick_count t0; - tick_count::interval_t T; - REMARK("%s",name); - t0 = tick_count::now(); - for(int number = 2; number <= n; number++) - func(number); - T = tick_count::now() - t0; - double avg = Tnum? Tsum/Tnum : 1; - if (avg == 0.0) avg = 1; - if(avg * 100 < T.seconds()) { - REPORT("Warning: halting detected (%g sec, av: %g)\n", T.seconds(), avg); - ASSERT(avg * 1000 > T.seconds(), "Too long halting period"); - } else { - Tsum += T.seconds(); Tnum++; - } - REMARK("\t- in %f msec\n", T.seconds()*1000); -} - -int TestMain () { - MinThread = max(2, MinThread); - int NumbersCount = 100; - short recycle = 100; - do { - for(int threads = MinThread; threads <= MaxThread; threads++) { - task_scheduler_init scheduler_init(threads); - REMARK("Threads number is %d\t", threads); - Measure("Shared serial (wrapper mutex)\t", SharedSerialFib<mutex>, NumbersCount); - //sum = Measure("Shared serial (spin_mutex)", SharedSerialFib<tbb::spin_mutex>, NumbersCount); - //sum = Measure("Shared serial (queuing_mutex)", SharedSerialFib<tbb::queuing_mutex>, NumbersCount); - } - } while(--recycle); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_handle_perror.cpp b/src/tbb/src/test/test_handle_perror.cpp deleted file mode 100644 index 5fc91c119..000000000 --- a/src/tbb/src/test/test_handle_perror.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Program for basic correctness of handle_perror, which is internal -// to the TBB shared library. 
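handle_perror itself is not exported, but its contract is visible from the assertions that follow: it composes "<prefix>: <strerror(code)>" and throws the result as std::runtime_error. A rough stand-in for illustration only; the real implementation lives in src/tbb/tbb_misc.h and may differ in detail:

#include <cstring>
#include <stdexcept>
#include <string>

void handle_perror_sketch(int error_code, const char* prefix) {
    std::string what(prefix);
    what += ": ";
    what += std::strerror(error_code);
    // for prefix "apple", what() begins "apple: " and contains strerror(EAGAIN),
    // which is exactly what the test below checks
    throw std::runtime_error(what);
}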
- -#include <cerrno> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <stdexcept> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "../tbb/tbb_misc.h" -#include "harness.h" - -#if TBB_USE_EXCEPTIONS - -static void TestHandlePerror() { - bool caught = false; - try { - tbb::internal::handle_perror( EAGAIN, "apple" ); - } catch( std::runtime_error& e ) { -#if TBB_USE_EXCEPTIONS - REMARK("caught runtime_exception('%s')\n",e.what()); - ASSERT( memcmp(e.what(),"apple: ",7)==0, NULL ); - ASSERT( strlen(strstr(e.what(), strerror(EAGAIN))), "bad error message?" ); -#endif /* TBB_USE_EXCEPTIONS */ - caught = true; - } - ASSERT( caught, NULL ); -} - -int TestMain () { - TestHandlePerror(); - return Harness::Done; -} - -#else /* !TBB_USE_EXCEPTIONS */ - -int TestMain () { - return Harness::Skipped; -} - -#endif /* TBB_USE_EXCEPTIONS */ diff --git a/src/tbb/src/test/test_hw_concurrency.cpp b/src/tbb/src/test/test_hw_concurrency.cpp deleted file mode 100644 index b82ee33ec..000000000 --- a/src/tbb/src/test/test_hw_concurrency.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" - -#if __TBB_TEST_SKIP_AFFINITY -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -int TestMain() { - return Harness::Skipped; -} -#else /* affinity mask can be set and used by TBB */ - -#include "harness.h" -#include "harness_concurrency.h" - -#include "tbb/task_scheduler_init.h" -#include "tbb/tbb_thread.h" -#include "tbb/enumerable_thread_specific.h" - -// The declaration of a global ETS object is needed to check that -// it does not initialize the task scheduler, and in particular -// does not set the default thread number. TODO: add other objects -// that should not initialize the scheduler. 
-tbb::enumerable_thread_specific<std::size_t> ets; - -int TestMain () { - int maxProcs = Harness::GetMaxProcs(); - - if ( maxProcs < 2 ) - return Harness::Skipped; - - int availableProcs = maxProcs/2; - ASSERT( Harness::LimitNumberOfThreads( availableProcs ) == availableProcs, "LimitNumberOfThreads has not set the requested limitation." ); - ASSERT( tbb::task_scheduler_init::default_num_threads() == availableProcs, NULL ); - ASSERT( (int)tbb::tbb_thread::hardware_concurrency() == availableProcs, NULL ); - return Harness::Done; -} -#endif /* __TBB_TEST_SKIP_AFFINITY */ diff --git a/src/tbb/src/test/test_indexer_node.cpp b/src/tbb/src/test/test_indexer_node.cpp deleted file mode 100644 index 89cb4efc6..000000000 --- a/src/tbb/src/test/test_indexer_node.cpp +++ /dev/null @@ -1,880 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "harness.h" -#include "tbb/flow_graph.h" - -// -// Tests -// - - #if defined(_MSC_VER) && _MSC_VER < 1600 - #pragma warning (disable : 4503) //disabling the "decorated name length exceeded" warning for VS2008 and earlier -#endif - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -template< typename T > -class test_indexer_extract { -protected: - typedef tbb::flow::indexer_node<T, T> my_node_t; - typedef tbb::flow::queue_node<T> in_node_t; - typedef tbb::flow::queue_node<typename my_node_t::output_type> out_node_t; - - tbb::flow::graph g; - in_node_t in0; - in_node_t in1; - in_node_t in2; - my_node_t middle; - out_node_t out0; - out_node_t out1; - in_node_t *ins[3]; - out_node_t *outs[2]; - typename in_node_t::successor_type *ms_p0_ptr; - typename in_node_t::successor_type *ms_p1_ptr; - typename out_node_t::predecessor_type *mp_ptr; - typename in_node_t::predecessor_vector_type in0_p_vec; - typename in_node_t::successor_vector_type in0_s_vec; - typename in_node_t::predecessor_vector_type in1_p_vec; - typename in_node_t::successor_vector_type in1_s_vec; - typename in_node_t::predecessor_vector_type in2_p_vec; - typename in_node_t::successor_vector_type in2_s_vec; - typename out_node_t::predecessor_vector_type out0_p_vec; - typename out_node_t::successor_vector_type out0_s_vec; - typename out_node_t::predecessor_vector_type out1_p_vec; - typename out_node_t::successor_vector_type out1_s_vec; - typename in_node_t::predecessor_vector_type mp0_vec; - typename in_node_t::predecessor_vector_type mp1_vec; - typename out_node_t::successor_vector_type ms_vec; - - virtual void set_up_vectors() { - in0_p_vec.clear(); - in0_s_vec.clear(); - in1_p_vec.clear(); - in1_s_vec.clear(); - in2_p_vec.clear(); - in2_s_vec.clear(); - out0_p_vec.clear(); - out0_s_vec.clear(); - out1_p_vec.clear(); - out1_s_vec.clear(); - mp0_vec.clear(); - mp1_vec.clear(); - ms_vec.clear(); - - in0.copy_predecessors(in0_p_vec); - in0.copy_successors(in0_s_vec); - in1.copy_predecessors(in1_p_vec); - in1.copy_successors(in1_s_vec); - in2.copy_predecessors(in2_p_vec); - in2.copy_successors(in2_s_vec); - tbb::flow::input_port<0>(middle).copy_predecessors(mp0_vec); - tbb::flow::input_port<1>(middle).copy_predecessors(mp1_vec); - middle.copy_successors(ms_vec); - out0.copy_predecessors(out0_p_vec); - out0.copy_successors(out0_s_vec); - out1.copy_predecessors(out1_p_vec); - out1.copy_successors(out1_s_vec); - } - - void check_output(int &r, typename my_node_t::output_type &v) { - T t = tbb::flow::cast_to<T>(v); - if ( t == 1 || t == 2 ) { - ASSERT( v.tag() == 0, "value came in on wrong port" ); - } else if ( t == 4 || t == 8 ) { - ASSERT( v.tag() == 1, "value came in on wrong port" ); - } else { - ASSERT( false, "incorrect value passed through indexer_node" ); - } - ASSERT( (r&t) == 0, "duplicate value passed through indexer_node" ); - r |= t; - } - - void make_and_validate_full_graph() { - /* in0 */ - /* \ */ - /* port0 out0 */ - /* / | / */ - /* in1 middle */ - /* | \ */ - /* in2 - port1 out1 */ - tbb::flow::make_edge( in0, tbb::flow::input_port<0>(middle) ); - tbb::flow::make_edge( in1, tbb::flow::input_port<0>(middle) ); - tbb::flow::make_edge( in2, tbb::flow::input_port<1>(middle) ); - tbb::flow::make_edge( middle, out0 ); - tbb::flow::make_edge( middle, out1 ); - - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 1 && in0_s_vec.size() == 1 && in0_s_vec[0] == ms_p0_ptr, "expected 1 successor" ); - ASSERT( in1.predecessor_count() == 
0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 1 && in1_s_vec.size() == 1 && in1_s_vec[0] == ms_p0_ptr, "expected 1 successor" ); - ASSERT( in2.predecessor_count() == 0 && in2_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in2.successor_count() == 1 && in2_s_vec.size() == 1 && in2_s_vec[0] == ms_p1_ptr, "expected 1 successor" ); - ASSERT( tbb::flow::input_port<0>(middle).predecessor_count() == 2 && mp0_vec.size() == 2, "expected 2 predecessors" ); - ASSERT( tbb::flow::input_port<1>(middle).predecessor_count() == 1 && mp1_vec.size() == 1, "expected 1 predecessors" ); - ASSERT( middle.successor_count() == 2 && ms_vec.size() == 2, "expected 2 successors" ); - ASSERT( out0.predecessor_count() == 1 && out0_p_vec.size() == 1 && out0_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 1 && out1_p_vec.size() == 1 && out1_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - int first_pred = mp0_vec[0] == ins[0] ? 0 : ( mp0_vec[0] == ins[1] ? 1 : -1 ); - int second_pred = mp0_vec[1] == ins[0] ? 0 : ( mp0_vec[1] == ins[1] ? 1 : -1 ); - ASSERT( first_pred != -1 && second_pred != -1 && first_pred != second_pred, "bad predecessor(s) for middle port 0" ); - - ASSERT( mp1_vec[0] == ins[2], "bad predecessor for middle port 1" ); - - int first_succ = ms_vec[0] == outs[0] ? 0 : ( ms_vec[0] == outs[1] ? 1 : -1 ); - int second_succ = ms_vec[1] == outs[0] ? 0 : ( ms_vec[1] == outs[1] ? 1 : -1 ); - ASSERT( first_succ != -1 && second_succ != -1 && first_succ != second_succ, "bad successor(s) for middle" ); - - in0.try_put(1); - in1.try_put(2); - in2.try_put(8); - in2.try_put(4); - g.wait_for_all(); - - T v_in; - - ASSERT( in0.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in1.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in1.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in2.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in2.try_get(v_in) == false, "buffer should not have a value" ); - - typename my_node_t::output_type v; - T r = 0; - while ( out0.try_get(v) ) { - check_output(r,v); - g.wait_for_all(); - } - ASSERT( r == 15, "not all values received" ); - - r = 0; - while ( out1.try_get(v) ) { - check_output(r,v); - g.wait_for_all(); - } - ASSERT( r == 15, "not all values received" ); - g.wait_for_all(); - } - - void validate_partial_graph() { - /* in0 */ - /* */ - /* port0 out0 */ - /* / | */ - /* in1 middle */ - /* | \ */ - /* in2 - port1 out1 */ - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 0 && in0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 1 && in1_s_vec.size() == 1 && in1_s_vec[0] == ms_p0_ptr, "expected 1 successor" ); - ASSERT( in2.predecessor_count() == 0 && in2_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in2.successor_count() == 1 && in2_s_vec.size() == 1 && in2_s_vec[0] == ms_p1_ptr, "expected 1 successor" ); - ASSERT( tbb::flow::input_port<0>(middle).predecessor_count() == 1 && mp0_vec.size() == 1 && mp0_vec[0] == ins[1], "expected 1 predecessor" ); - ASSERT( 
tbb::flow::input_port<1>(middle).predecessor_count() == 1 && mp1_vec.size() == 1 && mp1_vec[0] == ins[2], "expected 1 predecessor" ); - ASSERT( middle.successor_count() == 1 && ms_vec.size() == 1 && ms_vec[0] == outs[1], "expected 1 successor" ); - ASSERT( out0.predecessor_count() == 0 && out0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 1 && out1_p_vec.size() == 1 && out1_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - in0.try_put(1); - in1.try_put(2); - in2.try_put(8); - in2.try_put(4); - g.wait_for_all(); - - T v_in; - typename my_node_t::output_type v; - - ASSERT( in0.try_get(v_in) == true && v_in == 1, "buffer should have a value of 1" ); - ASSERT( in1.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( out0.try_get(v) == false, "buffer should not have a value" ); - ASSERT( in0.try_get(v_in) == false, "buffer should not have a value" ); - - T r = 0; - while ( out1.try_get(v) ) { - check_output(r,v); - g.wait_for_all(); - } - ASSERT( r == 14, "not all values received" ); - g.wait_for_all(); - } - - void validate_empty_graph() { - /* in0 */ - /* */ - /* port0 out0 */ - /* | */ - /* in1 middle */ - /* | */ - /* in2 port1 out1 */ - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 0 && in0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 0 && in1_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in2.predecessor_count() == 0 && in2_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in2.successor_count() == 0 && in2_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( tbb::flow::input_port<0>(middle).predecessor_count() == 0 && mp0_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( tbb::flow::input_port<1>(middle).predecessor_count() == 0 && mp1_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( middle.successor_count() == 0 && ms_vec.size() == 0, "expected 0 successors" ); - ASSERT( out0.predecessor_count() == 0 && out0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 0 && out1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - in0.try_put(1); - in1.try_put(2); - in2.try_put(8); - in2.try_put(4); - g.wait_for_all(); - - T v_in; - typename my_node_t::output_type v; - - ASSERT( in0.try_get(v_in) == true && v_in == 1, "buffer should have a value of 1" ); - ASSERT( in1.try_get(v_in) == true && v_in == 2, "buffer should have a value of 2" ); - ASSERT( in2.try_get(v_in) == true && v_in == 8, "buffer should have a value of 8" ); - ASSERT( in2.try_get(v_in) == true && v_in == 4, "buffer should have a value of 4" ); - ASSERT( out0.try_get(v) == false, "buffer should not have a value" ); - ASSERT( out1.try_get(v) == false, "buffer should not have a value" ); - g.wait_for_all(); - g.reset(); // NOTE: this should not be necessary!!!!! But it is!!!! 
- } - -public: - - test_indexer_extract() : in0(g), in1(g), in2(g), middle(g), out0(g), out1(g) { - ins[0] = &in0; - ins[1] = &in1; - ins[2] = &in2; - outs[0] = &out0; - outs[1] = &out1; - ms_p0_ptr = static_cast< typename in_node_t::successor_type * >(&tbb::flow::input_port<0>(middle)); - ms_p1_ptr = static_cast< typename in_node_t::successor_type * >(&tbb::flow::input_port<1>(middle)); - mp_ptr = static_cast< typename out_node_t::predecessor_type *>(&middle); - } - - virtual ~test_indexer_extract() {} - - void run_tests() { - REMARK("full graph\n"); - make_and_validate_full_graph(); - - in0.extract(); - out0.extract(); - REMARK("partial graph\n"); - validate_partial_graph(); - - in1.extract(); - in2.extract(); - out1.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - - middle.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - - in0.extract(); - in1.extract(); - in2.extract(); - middle.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - - out0.extract(); - out1.extract(); - middle.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - } -}; -#endif - -const int Count = 150; -const int MaxPorts = 10; -const int MaxNSources = 5; // max # of source_nodes to register for each indexer_node input in parallel test -bool outputCheck[MaxPorts][Count]; // for checking output - -void -check_outputCheck( int nUsed, int maxCnt) { - for(int i=0; i < nUsed; ++i) { - for( int j = 0; j < maxCnt; ++j) { - ASSERT(outputCheck[i][j], NULL); - } - } -} - -void -reset_outputCheck( int nUsed, int maxCnt) { - for(int i=0; i < nUsed; ++i) { - for( int j = 0; j < maxCnt; ++j) { - outputCheck[i][j] = false; - } - } -} - -class test_class { - public: - test_class() { my_val = 0; } - test_class(int i) { my_val = i; } - operator int() { return my_val; } - private: - int my_val; -}; - -template<typename T> -class name_of { -public: - static const char* name() { return "Unknown"; } -}; -template<> -class name_of<int> { -public: - static const char* name() { return "int"; } -}; -template<> -class name_of<float> { -public: - static const char* name() { return "float"; } -}; -template<> -class name_of<double> { -public: - static const char* name() { return "double"; } -}; -template<> -class name_of<long> { -public: - static const char* name() { return "long"; } -}; -template<> -class name_of<short> { -public: - static const char* name() { return "short"; } -}; -template<> -class name_of<test_class> { -public: - static const char* name() { return "test_class"; } -}; - -// TT must be arithmetic, and shouldn't wrap around for reasonable sizes of Count (which is now 150, and MaxPorts is 10, -// so the max number generated right now is 1500 or so.) Source will generate a series of TT with value -// (init_val + (i-1)*addend) * my_mult, where i is the i-th invocation of the body. We are attaching addend -// source nodes to an indexer_port, and each will generate part of the numerical series the port is expecting -// to receive. If there is only one source node, the series order will be maintained; if more than one, -// this is not guaranteed. -// The manual specifies bodies can be assigned, so we can't hide the operator=.
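Concretely: with my_mult = 2, init_val = 1, addend = 3 the body yields 2, 8, 14, 20, 26, ...; three nodes started at init_val 0, 1, 2 therefore jointly cover 2*{0, 1, 2, ...}. A plain C++ illustration of the series (no TBB required; values are just the example above):

#include <cstdio>

int main() {
    const int my_mult = 2, init_val = 1, addend = 3;
    int my_count = init_val;                 // mirrors source_body's state
    for (int i = 1; i <= 5; ++i) {
        std::printf("call %d -> %d\n", i, my_mult * my_count);  // 2 8 14 20 26
        my_count += addend;                  // stride equals the number of attached nodes
    }
}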
-template<typename TT> -class source_body { - TT my_mult; - int my_count; - int addend; -public: - source_body(TT multiplier, int init_val, int addto) : my_mult(multiplier), my_count(init_val), addend(addto) { } - bool operator()( TT &v) { - int lc = my_count; - v = my_mult * (TT)my_count; - my_count += addend; - return lc < Count; - } -}; - -// allocator for indexer_node. - -template<typename IType> -class makeIndexer { -public: - static IType *create() { - IType *temp = new IType(); - return temp; - } - static void destroy(IType *p) { delete p; } -}; - -template<int ELEM, typename INT> -struct getval_helper { - - typedef typename INT::output_type OT; - typedef typename tbb::flow::tuple_element<ELEM-1, typename INT::tuple_types>::type stored_type; - - static int get_integer_val(OT const &o) { - stored_type res = tbb::flow::cast_to<stored_type>(o); - return (int)res; - } -}; - -// holder for source_node pointers for eventual deletion - -static void* all_source_nodes[MaxPorts][MaxNSources]; - -template<int ELEM, typename INT> -class source_node_helper { -public: - typedef INT indexer_node_type; - typedef typename indexer_node_type::output_type TT; - typedef typename tbb::flow::tuple_element<ELEM-1,typename INT::tuple_types>::type IT; - typedef typename tbb::flow::source_node<IT> my_source_node_type; - static void print_remark() { - source_node_helper<ELEM-1,INT>::print_remark(); - REMARK(", %s", name_of<IT>::name()); - } - static void add_source_nodes(indexer_node_type &my_indexer, tbb::flow::graph &g, int nInputs) { - for(int i=0; i < nInputs; ++i) { - my_source_node_type *new_node = new my_source_node_type(g, source_body<IT>((IT)(ELEM+1), i, nInputs)); - tbb::flow::make_edge(*new_node, tbb::flow::input_port<ELEM-1>(my_indexer)); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(new_node->successor_count() == 1, NULL); -#endif - all_source_nodes[ELEM-1][i] = (void *)new_node; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(tbb::flow::input_port<ELEM-1>(my_indexer).predecessor_count() == (size_t)nInputs, NULL); -#endif - // add the next source_node - source_node_helper<ELEM-1, INT>::add_source_nodes(my_indexer, g, nInputs); - } - static void check_value(TT &v) { - if(v.tag() == ELEM-1) { - int ival = getval_helper<ELEM,INT>::get_integer_val(v); - ASSERT(!(ival%(ELEM+1)), NULL); - ival /= (ELEM+1); - ASSERT(!outputCheck[ELEM-1][ival], NULL); - outputCheck[ELEM-1][ival] = true; - } - else { - source_node_helper<ELEM-1,INT>::check_value(v); - } - } - - static void remove_source_nodes(indexer_node_type& my_indexer, int nInputs) { - for(int i=0; i< nInputs; ++i) { - my_source_node_type *dp = reinterpret_cast<my_source_node_type *>(all_source_nodes[ELEM-1][i]); - tbb::flow::remove_edge(*dp, tbb::flow::input_port<ELEM-1>(my_indexer)); - delete dp; - } - source_node_helper<ELEM-1, INT>::remove_source_nodes(my_indexer, nInputs); - } -}; - -template<typename INT> -class source_node_helper<1, INT> { - typedef INT indexer_node_type; - typedef typename indexer_node_type::output_type TT; - typedef typename tbb::flow::tuple_element<0, typename INT::tuple_types>::type IT; - typedef typename tbb::flow::source_node<IT> my_source_node_type; -public: - static void print_remark() { - REMARK("Parallel test of indexer_node< %s", name_of<IT>::name()); - } - static void add_source_nodes(indexer_node_type &my_indexer, tbb::flow::graph &g, int nInputs) { - for(int i=0; i < nInputs; ++i) { - my_source_node_type *new_node = new my_source_node_type(g, source_body<IT>((IT)2, i, nInputs)); - tbb::flow::make_edge(*new_node, 
tbb::flow::input_port<0>(my_indexer)); - all_source_nodes[0][i] = (void *)new_node; - } - } - static void check_value(TT &v) { - int ival = getval_helper<1,INT>::get_integer_val(v); - ASSERT(!(ival%2), NULL); - ival /= 2; - ASSERT(!outputCheck[0][ival], NULL); - outputCheck[0][ival] = true; - } - static void remove_source_nodes(indexer_node_type& my_indexer, int nInputs) { - for(int i=0; i < nInputs; ++i) { - my_source_node_type *dp = reinterpret_cast<my_source_node_type *>(all_source_nodes[0][i]); - tbb::flow::remove_edge(*dp, tbb::flow::input_port<0>(my_indexer)); - delete dp; - } - } -}; - -template<typename IType> -class parallel_test { -public: - typedef typename IType::output_type TType; - typedef typename IType::tuple_types union_types; - static const int SIZE = tbb::flow::tuple_size<union_types>::value; - static void test() { - TType v; - source_node_helper<SIZE,IType>::print_remark(); - REMARK(" >\n"); - for(int i=0; i < MaxPorts; ++i) { - for(int j=0; j < MaxNSources; ++j) { - all_source_nodes[i][j] = NULL; - } - } - for(int nInputs = 1; nInputs <= MaxNSources; ++nInputs) { - tbb::flow::graph g; - IType* my_indexer = new IType(g); //makeIndexer<IType>::create(); - tbb::flow::queue_node<TType> outq1(g); - tbb::flow::queue_node<TType> outq2(g); - - tbb::flow::make_edge(*my_indexer, outq1); - tbb::flow::make_edge(*my_indexer, outq2); - - source_node_helper<SIZE, IType>::add_source_nodes((*my_indexer), g, nInputs); - - g.wait_for_all(); - - reset_outputCheck(SIZE, Count); - for(int i=0; i < Count*SIZE; ++i) { - ASSERT(outq1.try_get(v), NULL); - source_node_helper<SIZE, IType>::check_value(v); - } - - check_outputCheck(SIZE, Count); - reset_outputCheck(SIZE, Count); - - for(int i=0; i < Count*SIZE; i++) { - ASSERT(outq2.try_get(v), NULL); - source_node_helper<SIZE, IType>::check_value(v); - } - check_outputCheck(SIZE, Count); - - ASSERT(!outq1.try_get(v), NULL); - ASSERT(!outq2.try_get(v), NULL); - - source_node_helper<SIZE, IType>::remove_source_nodes((*my_indexer), nInputs); - tbb::flow::remove_edge(*my_indexer, outq1); - tbb::flow::remove_edge(*my_indexer, outq2); - makeIndexer<IType>::destroy(my_indexer); - } - } -}; - -std::vector<int> last_index_seen; - -template<int ELEM, typename IType> -class serial_queue_helper { -public: - typedef typename IType::output_type OT; - typedef typename IType::tuple_types TT; - typedef typename tbb::flow::tuple_element<ELEM-1,TT>::type IT; - static void print_remark() { - serial_queue_helper<ELEM-1,IType>::print_remark(); - REMARK(", %s", name_of<IT>::name()); - } - static void fill_one_queue(int maxVal, IType &my_indexer) { - // fill queue to "left" of me - serial_queue_helper<ELEM-1,IType>::fill_one_queue(maxVal,my_indexer); - for(int i = 0; i < maxVal; ++i) { - ASSERT(tbb::flow::input_port<ELEM-1>(my_indexer).try_put((IT)(i*(ELEM+1))), NULL); - } - } - static void put_one_queue_val(int myVal, IType &my_indexer) { - // put this val to my "left". - serial_queue_helper<ELEM-1,IType>::put_one_queue_val(myVal, my_indexer); - ASSERT(tbb::flow::input_port<ELEM-1>(my_indexer).try_put((IT)(myVal*(ELEM+1))), NULL); - } - static void check_queue_value(OT &v) { - if(ELEM - 1 == v.tag()) { - // this assumes each indexer node input is queueing.
- int rval = getval_helper<ELEM,IType>::get_integer_val(v); - ASSERT( rval == (last_index_seen[ELEM-1]+1)*(ELEM+1), NULL); - last_index_seen[ELEM-1] = rval / (ELEM+1); - } - else { - serial_queue_helper<ELEM-1,IType>::check_queue_value(v); - } - } -}; - -template<typename IType> -class serial_queue_helper<1, IType> { -public: - typedef typename IType::output_type OT; - typedef typename IType::tuple_types TT; - typedef typename tbb::flow::tuple_element<0,TT>::type IT; - static void print_remark() { - REMARK("Serial test of indexer_node< %s", name_of<IT>::name()); - } - static void fill_one_queue(int maxVal, IType &my_indexer) { - for(int i = 0; i < maxVal; ++i) { - ASSERT(tbb::flow::input_port<0>(my_indexer).try_put((IT)(i*2)), NULL); - } - } - static void put_one_queue_val(int myVal, IType &my_indexer) { - ASSERT(tbb::flow::input_port<0>(my_indexer).try_put((IT)(myVal*2)), NULL); - } - static void check_queue_value(OT &v) { - ASSERT(v.tag() == 0, NULL); // won't get here unless true - int rval = getval_helper<1,IType>::get_integer_val(v); - ASSERT( rval == (last_index_seen[0]+1)*2, NULL); - last_index_seen[0] = rval / 2; - } -}; - -template<typename IType, typename TType, int SIZE> -void test_one_serial( IType &my_indexer, tbb::flow::graph &g) { - last_index_seen.clear(); - for(int ii=0; ii < SIZE; ++ii) last_index_seen.push_back(-1); - - typedef TType q3_input_type; - tbb::flow::queue_node< q3_input_type > q3(g); - q3_input_type v; - - tbb::flow::make_edge(my_indexer, q3); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(my_indexer.successor_count() == 1, NULL); - ASSERT(tbb::flow::input_port<0>(my_indexer).predecessor_count() == 0, NULL); -#endif - - // fill each queue with its value one-at-a-time - for (int i = 0; i < Count; ++i ) { - serial_queue_helper<SIZE,IType>::put_one_queue_val(i,my_indexer); - } - - g.wait_for_all(); - for (int i = 0; i < Count * SIZE; ++i ) { - g.wait_for_all(); - ASSERT(q3.try_get( v ), "Error in try_get()"); - { - serial_queue_helper<SIZE,IType>::check_queue_value(v); - } - } - ASSERT(!q3.try_get( v ), "extra values in output queue"); - for(int ii=0; ii < SIZE; ++ii) last_index_seen[ii] = -1; - - // fill each queue completely before filling the next. - serial_queue_helper<SIZE, IType>::fill_one_queue(Count,my_indexer); - - g.wait_for_all(); - for (int i = 0; i < Count*SIZE; ++i ) { - g.wait_for_all(); - ASSERT(q3.try_get( v ), "Error in try_get()"); - { - serial_queue_helper<SIZE,IType>::check_queue_value(v); - } - } - ASSERT(!q3.try_get( v ), "extra values in output queue"); -} - -// -// Single predecessor at each port, single accepting successor -// * put to buffer before port0, then put to buffer before port1, ... -// * fill buffer before port0 then fill buffer before port1, ... 
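The serial drivers below reduce to this basic round trip: push into the indexer's input ports, pull tagged messages from a downstream queue, and branch on tag() before cast_to(). A minimal sketch with illustrative types and values, assuming this TBB generation's indexer_node:

#include "tbb/flow_graph.h"

void indexer_sketch() {
    typedef tbb::flow::indexer_node<int, float> idx_t;
    tbb::flow::graph g;
    idx_t idx(g);
    tbb::flow::queue_node<idx_t::output_type> q(g);
    tbb::flow::make_edge(idx, q);

    tbb::flow::input_port<0>(idx).try_put(42);    // arrives with tag() == 0
    tbb::flow::input_port<1>(idx).try_put(3.5f);  // arrives with tag() == 1
    g.wait_for_all();

    idx_t::output_type msg;
    while (q.try_get(msg)) {
        if (msg.tag() == 0) {
            int v = tbb::flow::cast_to<int>(msg);      // recover the int payload
            (void) v;
        } else {
            float f = tbb::flow::cast_to<float>(msg);  // recover the float payload
            (void) f;
        }
    }
}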
- -template<typename IType> -class serial_test { - typedef typename IType::output_type TType; // this is the union - typedef typename IType::tuple_types union_types; - static const int SIZE = tbb::flow::tuple_size<union_types>::value; -public: -static void test() { - tbb::flow::graph g; - static const int ELEMS = 3; - IType* my_indexer = new IType(g); //makeIndexer<IType>::create(g); - - serial_queue_helper<SIZE, IType>::print_remark(); REMARK(" >\n"); - - test_one_serial<IType,TType,SIZE>(*my_indexer, g); - - std::vector<IType> indexer_vector(ELEMS,*my_indexer); - - makeIndexer<IType>::destroy(my_indexer); - - for(int e = 0; e < ELEMS; ++e) { - test_one_serial<IType,TType,SIZE>(indexer_vector[e], g); - } -} - -}; // serial_test - -template< - template<typename> class TestType, // serial_test or parallel_test - typename T0, typename T1=void, typename T2=void, typename T3=void, typename T4=void, - typename T5=void, typename T6=void, typename T7=void, typename T8=void, typename T9=void> // type of the inputs to the indexer_node -class generate_test { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -//specializations for indexer node inputs -template< - template<typename> class TestType, - typename T0, typename T1, typename T2, typename T3, typename T4, - typename T5, typename T6, typename T7, typename T8> -class generate_test<TestType, T0, T1, T2, T3, T4, T5, T6, T7, T8> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3, T4, T5, T6, T7, T8> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1, typename T2, typename T3, typename T4, - typename T5, typename T6, typename T7> -class generate_test<TestType, T0, T1, T2, T3, T4, T5, T6, T7> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3, T4, T5, T6, T7> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1, typename T2, typename T3, typename T4, - typename T5, typename T6> -class generate_test<TestType, T0, T1, T2, T3, T4, T5, T6> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3, T4, T5, T6> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1, typename T2, typename T3, typename T4, - typename T5> -class generate_test<TestType, T0, T1, T2, T3, T4, T5> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3, T4, T5> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1, typename T2, typename T3, typename T4> -class generate_test<TestType, T0, T1, T2, T3, T4> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3, T4> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1, typename T2, typename T3> -class generate_test<TestType, T0, T1, T2, T3> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2, T3> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1, typename T2> -class generate_test<TestType, 
T0, T1, T2> { -public: - typedef tbb::flow::indexer_node<T0, T1, T2> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0, typename T1> -class generate_test<TestType, T0, T1> { -public: - typedef tbb::flow::indexer_node<T0, T1> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -template< - template<typename> class TestType, - typename T0> -class generate_test<TestType, T0> { -public: - typedef tbb::flow::indexer_node<T0> indexer_node_type; - static void do_test() { - TestType<indexer_node_type>::test(); - } -}; - -int TestMain() { - REMARK("Testing indexer_node, "); -#if __TBB_USE_TBB_TUPLE - REMARK("using TBB tuple\n"); -#else - REMARK("using platform tuple\n"); -#endif - - for (int p = 0; p < 2; ++p) { - generate_test<serial_test, float>::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 4 - generate_test<serial_test, float, double, int>::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 6 - generate_test<serial_test, double, double, int, long, int, short>::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 8 - generate_test<serial_test, float, double, double, double, float, int, float, long>::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 10 - generate_test<serial_test, float, double, int, double, double, float, long, int, float, long>::do_test(); -#endif - generate_test<parallel_test, float, double>::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 3 - generate_test<parallel_test, float, int, long>::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 5 - generate_test<parallel_test, double, double, int, int, short>::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 7 - generate_test<parallel_test, float, int, double, float, long, float, long>::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 9 - generate_test<parallel_test, float, double, int, double, double, long, int, float, long>::do_test(); -#endif - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_indexer_extract<int>().run_tests(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_initializer_list.h b/src/tbb/src/test/test_initializer_list.h deleted file mode 100644 index bd5295b8b..000000000 --- a/src/tbb/src/test/test_initializer_list.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifndef __TBB_test_initializer_list_H -#define __TBB_test_initializer_list_H -#include "tbb/tbb_config.h" - -#if __TBB_INITIALIZER_LISTS_PRESENT -#include <initializer_list> -#include <vector> -#include "harness_defs.h" //for int_to_type - -namespace initializer_list_support_tests{ - template<typename container_type, typename element_type> - void test_constructor(std::initializer_list<element_type> il, container_type const& expected){ - container_type vd (il); - ASSERT(vd == expected,"initialization via explicit constructor call with init list failed"); - } - - - template<typename container_type, typename element_type> - void test_assignment_operator(std::initializer_list<element_type> il, container_type const& expected){ - container_type va; - va = il; - ASSERT(va == expected,"init list operator= failed"); - } - - struct skip_test { - template<typename container_type, typename element_type> - static void do_test(std::initializer_list<element_type>, container_type const&) { /* do nothing */ } - }; - - struct test_assign { - template<typename container_type, typename element_type> - static void do_test( std::initializer_list<element_type> il, container_type const& expected ) { - container_type vae; - vae.assign( il ); - ASSERT( vae == expected, "init list assign(begin,end) failed" ); - } - }; - - struct test_special_insert { - template<typename container_type, typename element_type> - static void do_test( std::initializer_list<element_type> il, container_type const& expected ) { - container_type vd; - vd.insert( il ); - ASSERT( vd == expected, "inserting with an initializer list failed" ); - } - }; - - template <typename container_type, typename test_assign, typename test_special> - void TestInitListSupport(std::initializer_list<typename container_type::value_type> il){ - typedef typename container_type::value_type element_type; - std::vector<element_type> test_seq(il.begin(),il.end()); - container_type expected(test_seq.begin(), test_seq.end()); - - test_constructor<container_type,element_type>(il, expected); - test_assignment_operator<container_type,element_type>(il, expected); - test_assign::do_test(il, expected); - test_special::do_test(il, expected); - } - - template <typename container_type, typename test_special = skip_test> - void TestInitListSupport(std::initializer_list<typename container_type::value_type> il) { - TestInitListSupport<container_type, test_assign, test_special>(il); - } - - template <typename container_type, typename test_special = skip_test> - void TestInitListSupportWithoutAssign(std::initializer_list<typename container_type::value_type> il){ - TestInitListSupport<container_type, skip_test, test_special>(il); - } - - //TODO: add test for no leaks, and correct element lifetime - //the need for macro comes from desire to test different scenarios where initializer sequence is compile time constant - #define __TBB_TEST_INIT_LIST_SUITE_SINGLE(FUNC_NAME, CONTAINER, ELEMENT_TYPE, INIT_SEQ) \ - void FUNC_NAME(){ \ - typedef ELEMENT_TYPE element_type; \ - typedef CONTAINER<element_type> container_type; \ - element_type test_seq[] = INIT_SEQ; \ - container_type expected(test_seq,test_seq + Harness::array_length(test_seq)); \ - \ - /*test for explicit constructor call*/ \ - container_type vd INIT_SEQ; \ - ASSERT(vd == expected,"initialization via explicit constructor call with init list failed"); \ - /*test for explicit constructor 
call with std::initializer_list*/ \ - \ - std::initializer_list<element_type> init_list = INIT_SEQ; \ - container_type v1 (init_list); \ - ASSERT(v1 == expected,"initialization via explicit constructor call with std::initializer_list failed"); \ - \ - /*implicit constructor call test*/ \ - container_type v = INIT_SEQ; \ - ASSERT(v == expected,"init list constructor failed"); \ - \ - /*assignment operator test*/ \ - /*TODO: count created and destroyed injects to assert that no extra copy of vector was created implicitly*/ \ - container_type va; \ - va = INIT_SEQ; \ - ASSERT(va == expected,"init list operator= failed"); \ - /*assign(begin,end) test*/ \ - container_type vae; \ - vae.assign(INIT_SEQ); \ - ASSERT(vae == expected,"init list assign(begin,end) failed"); \ - } \ - - namespace initializer_list_helpers{ - template<typename T> - class ad_hoc_container{ - std::vector<T> vec; - public: - ad_hoc_container(){} - template<typename InputIterator> - ad_hoc_container(InputIterator begin, InputIterator end) : vec(begin,end) {} - ad_hoc_container(std::initializer_list<T> il) : vec(il.begin(),il.end()) {} - ad_hoc_container(ad_hoc_container const& other) : vec(other.vec) {} - ad_hoc_container& operator=(ad_hoc_container const& rhs){ vec=rhs.vec; return *this;} - ad_hoc_container& operator=(std::initializer_list<T> il){ vec.assign(il.begin(),il.end()); return *this;} - template<typename InputIterator> - void assign(InputIterator begin, InputIterator end){ vec.assign(begin,end);} - void assign(std::initializer_list<T> il){ vec.assign(il.begin(),il.end());} - friend bool operator==(ad_hoc_container<T> const& lhs, ad_hoc_container<T> const& rhs){ return lhs.vec==rhs.vec;} - }; - } - - #define AD_HOC_INIT_SEQ {1,2,3,4} - __TBB_TEST_INIT_LIST_SUITE_SINGLE(TestCompilerSupportInt, initializer_list_helpers::ad_hoc_container, int, AD_HOC_INIT_SEQ ) - #undef AD_HOC_INIT_SEQ - - #if __TBB_CPP11_INIT_LIST_ASSIGN_OP_RESOLUTION_BROKEN - void TestCompilerSupportIntPair(){ - REPORT("Known issue: skip initializer_list compiler test for std::pair list elements.\n"); - } - #else - #define AD_HOC_PAIR_INIT_SEQ {{1,1}, {2,2},{3,3}, {4,4}} - #define AD_HOC_INIT_SEQ_PAIR_TYPE std::pair<int,int> - __TBB_TEST_INIT_LIST_SUITE_SINGLE(TestCompilerSupportIntPair, initializer_list_helpers::ad_hoc_container, AD_HOC_INIT_SEQ_PAIR_TYPE, AD_HOC_PAIR_INIT_SEQ ) - #undef AD_HOC_INIT_SEQ_PAIR_TYPE - #undef AD_HOC_PAIR_INIT_SEQ - #endif - - bool TestCompilerForInitializerList(); - namespace { - const bool compiler_init_list_tests_are_run = TestCompilerForInitializerList(); - } - - //TODO: move this to test_compiler - bool TestCompilerForInitializerList(){ - TestCompilerSupportInt(); - TestCompilerSupportIntPair(); - tbb::internal::suppress_unused_warning(compiler_init_list_tests_are_run); - return true; - } -} // namespace initializer_list_support_tests - -#endif //__TBB_INITIALIZER_LISTS_PRESENT -#endif //__TBB_test_initializer_list_H diff --git a/src/tbb/src/test/test_inits_loop.cpp b/src/tbb/src/test/test_inits_loop.cpp deleted file mode 100644 index b83296a40..000000000 --- a/src/tbb/src/test/test_inits_loop.cpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if __APPLE__ - -#define HARNESS_CUSTOM_MAIN 1 -#include "harness.h" -#include <cstdlib> -#include "tbb/task_scheduler_init.h" - -#include <sys/types.h> -#include <sys/wait.h> -#include <unistd.h> -#include <signal.h> -#include <errno.h> - -bool exec_test(const char *self) { - int status = 1; - pid_t p = fork(); - if(p < 0) { - REPORT("fork error: errno=%d: %s\n", errno, strerror(errno)); - return true; - } - else if(p) { // parent - if(waitpid(p, &status, 0) != p) { - REPORT("wait error: errno=%d: %s\n", errno, strerror(errno)); - return true; - } - if(WIFEXITED(status)) { - if(!WEXITSTATUS(status)) return false; // ok - else REPORT("child has exited with return code 0x%x\n", WEXITSTATUS(status)); - } else { - REPORT("child error 0x%x:%s%s ", status, WIFSIGNALED(status)?" signalled":"", - WIFSTOPPED(status)?" stopped":""); - if(WIFSIGNALED(status)) - REPORT("%s%s", sys_siglist[WTERMSIG(status)], WCOREDUMP(status)?" core dumped":""); - if(WIFSTOPPED(status)) - REPORT("with %d stop-code", WSTOPSIG(status)); - REPORT("\n"); - } - } - else { // child - // reproduces error much often - execl(self, self, "0", NULL); - REPORT("exec fails %s: %d: %s\n", self, errno, strerror(errno)); - exit(2); - } - return true; -} - -HARNESS_EXPORT -int main( int argc, char * argv[] ) { - MinThread = 3000; - ParseCommandLine( argc, argv ); - if( MinThread <= 0 ) { - tbb::task_scheduler_init init( 2 ); // even number required for an error - } else { - for(int i = 0; i<MinThread; i++) { - if(exec_test(argv[0])) { - REPORT("ERROR: execution fails at %d-th iteration!\n", i); - exit(1); - } - } - REPORT("done\n"); - } -} - -#else /* !__APPLE__ */ - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__APPLE__ */ diff --git a/src/tbb/src/test/test_intrusive_list.cpp b/src/tbb/src/test/test_intrusive_list.cpp deleted file mode 100644 index 194cf55ca..000000000 --- a/src/tbb/src/test/test_intrusive_list.cpp +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" - -#include "../tbb/intrusive_list.h" - -using tbb::internal::intrusive_list_node; - -// Machine word filled with repeated pattern of FC bits -const uintptr_t NoliMeTangere = ~uintptr_t(0)/0xFF*0xFC; - -struct VerificationBase : Harness::NoAfterlife { - uintptr_t m_Canary; - VerificationBase () : m_Canary(NoliMeTangere) {} -}; - -struct DataItemWithInheritedNodeBase : intrusive_list_node { - int m_Data; -public: - DataItemWithInheritedNodeBase ( int value ) : m_Data(value) {} - - int Data() const { return m_Data; } -}; - -class DataItemWithInheritedNode : public VerificationBase, public DataItemWithInheritedNodeBase { - friend class tbb::internal::intrusive_list<DataItemWithInheritedNode>; -public: - DataItemWithInheritedNode ( int value ) : DataItemWithInheritedNodeBase(value) {} -}; - -struct DataItemWithMemberNodeBase { - int m_Data; -public: - // Cannot be used by member_intrusive_list to form lists of objects derived from DataItemBase - intrusive_list_node m_BaseNode; - - DataItemWithMemberNodeBase ( int value ) : m_Data(value) {} - - int Data() const { return m_Data; } -}; - -class DataItemWithMemberNodes : public VerificationBase, public DataItemWithMemberNodeBase { -public: - intrusive_list_node m_Node; - - DataItemWithMemberNodes ( int value ) : DataItemWithMemberNodeBase(value) {} -}; - -typedef tbb::internal::intrusive_list<DataItemWithInheritedNode> IntrusiveList1; -typedef tbb::internal::memptr_intrusive_list<DataItemWithMemberNodes, - DataItemWithMemberNodeBase, &DataItemWithMemberNodeBase::m_BaseNode> IntrusiveList2; -typedef tbb::internal::memptr_intrusive_list<DataItemWithMemberNodes, - DataItemWithMemberNodes, &DataItemWithMemberNodes::m_Node> IntrusiveList3; - -const int NumElements = 256 * 1024; - -//! 
Iterates through the list forward and backward checking the validity of values stored by the list nodes -template<class List, class Iterator> -void CheckListNodes ( List& il, int valueStep ) { - int i; - Iterator it = il.begin(); - for ( i = valueStep - 1; it != il.end(); ++it, i += valueStep ) { - ASSERT( it->Data() == i, "Unexpected node value while iterating forward" ); - ASSERT( (*it).m_Canary == NoliMeTangere, "Memory corruption" ); - } - ASSERT( i == NumElements + valueStep - 1, "Wrong number of list elements while iterating forward" ); - it = il.end(); - for ( i = NumElements - 1, it--; it != il.end(); --it, i -= valueStep ) { - ASSERT( (*it).Data() == i, "Unexpected node value while iterating backward" ); - ASSERT( it->m_Canary == NoliMeTangere, "Memory corruption" ); - } - ASSERT( i == -1, "Wrong number of list elements while iterating backward" ); -} - -template<class List, class Item> -void TestListOperations () { - typedef typename List::iterator iterator; - List il; - for ( int i = NumElements - 1; i >= 0; --i ) - il.push_front( *new Item(i) ); - CheckListNodes<const List, typename List::const_iterator>( il, 1 ); - iterator it = il.begin(); - for ( ; it != il.end(); ++it ) { - Item &item = *it; - it = il.erase( it ); - delete &item; - } - CheckListNodes<List, iterator>( il, 2 ); - for ( it = il.begin(); it != il.end(); ++it ) { - Item &item = *it; - il.remove( *it++ ); - delete &item; - } - CheckListNodes<List, iterator>( il, 4 ); -} - -#include "harness_bad_expr.h" - -template<class List, class Item> -void TestListAssertions () { -#if TRY_BAD_EXPR_ENABLED - tbb::set_assertion_handler( AssertionFailureHandler ); - List il1, il2; - Item n1(1), n2(2), n3(3); - il1.push_front(n1); - TRY_BAD_EXPR( il2.push_front(n1), "only one intrusive list" ); - TRY_BAD_EXPR( il1.push_front(n1), "only one intrusive list" ); - il2.push_front(n2); - TRY_BAD_EXPR( il1.remove(n3), "not in the list" ); - tbb::set_assertion_handler( ReportError ); -#endif /* TRY_BAD_EXPR_ENABLED */ -} - -int TestMain () { - TestListOperations<IntrusiveList1, DataItemWithInheritedNode>(); - TestListOperations<IntrusiveList2, DataItemWithMemberNodes>(); - TestListOperations<IntrusiveList3, DataItemWithMemberNodes>(); - TestListAssertions<IntrusiveList1, DataItemWithInheritedNode>(); - TestListAssertions<IntrusiveList2, DataItemWithMemberNodes>(); - TestListAssertions<IntrusiveList3, DataItemWithMemberNodes>(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_ittnotify.cpp b/src/tbb/src/test/test_ittnotify.cpp deleted file mode 100644 index f03496b9f..000000000 --- a/src/tbb/src/test/test_ittnotify.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if !TBB_USE_THREADING_TOOLS - #define TBB_USE_THREADING_TOOLS 1 -#endif - -#include "harness.h" - -#if DO_ITT_NOTIFY - -#include "tbb/spin_mutex.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/queuing_rw_mutex.h" -#include "tbb/queuing_mutex.h" -#include "tbb/mutex.h" -#include "tbb/recursive_mutex.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "tbb/task_scheduler_init.h" - - -#include "../tbb/itt_notify.h" - - -template<typename M> -class WorkEmulator: NoAssign { - M& m_mutex; - static volatile size_t s_anchor; -public: - void operator()( tbb::blocked_range<size_t>& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - typename M::scoped_lock lock(m_mutex); - for ( size_t j = 0; j!=range.end(); ++j ) - s_anchor = (s_anchor - i) / 2 + (s_anchor + j) / 2; - } - } - WorkEmulator( M& mutex ) : m_mutex(mutex) {} -}; - -template<typename M> -volatile size_t WorkEmulator<M>::s_anchor = 0; - - -template<class M> -void Test( const char * name ) { - REMARK("Testing %s\n",name); - M mtx; - tbb::profiling::set_name(mtx, name); - - const int n = 10000; - tbb::parallel_for( tbb::blocked_range<size_t>(0,n,n/100), WorkEmulator<M>(mtx) ); -} - - #define TEST_MUTEX(type, name) Test<tbb::type>( name ) - -#endif /* !DO_ITT_NOTIFY */ - -int TestMain () { -#if DO_ITT_NOTIFY - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK( "testing with %d workers\n", p ); - tbb::task_scheduler_init init( p ); - TEST_MUTEX( spin_mutex, "Spin Mutex" ); - TEST_MUTEX( queuing_mutex, "Queuing Mutex" ); - TEST_MUTEX( queuing_rw_mutex, "Queuing RW Mutex" ); - TEST_MUTEX( spin_rw_mutex, "Spin RW Mutex" ); - } - return Harness::Done; -#else /* !DO_ITT_NOTIFY */ - return Harness::Skipped; -#endif /* !DO_ITT_NOTIFY */ -} diff --git a/src/tbb/src/test/test_join_node.cpp b/src/tbb/src/test/test_join_node.cpp deleted file mode 100644 index a713597b7..000000000 --- a/src/tbb/src/test/test_join_node.cpp +++ /dev/null @@ -1,1661 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" -#include "tbb/atomic.h" -#include "harness_checktype.h" - -#include "tbb/flow_graph.h" -#include "tbb/task_scheduler_init.h" - -#if defined(_MSC_VER) && _MSC_VER < 1600 - #pragma warning (disable : 4503) //disabling the "decorated name length exceeded" warning for VS2008 and earlier -#endif - -// -// Tests -// - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -template< typename T, typename NODE_TYPE > -class test_join_base_extract : NoAssign { -protected: - typedef typename NODE_TYPE::output_type tuple_t; - typedef tbb::flow::queue_node<T> in_queue_t; - typedef tbb::flow::queue_node<tuple_t> out_queue_t; - - tbb::flow::graph &g; - in_queue_t &in0; - in_queue_t &in1; - in_queue_t &in2; - NODE_TYPE &middle; - out_queue_t &out0; - out_queue_t &out1; - in_queue_t *ins[3]; - out_queue_t *outs[2]; - typename in_queue_t::successor_type *ms_p0_ptr; - typename in_queue_t::successor_type *ms_p1_ptr; - typename out_queue_t::predecessor_type *mp_ptr; - typename in_queue_t::predecessor_vector_type in0_p_vec; - typename in_queue_t::successor_vector_type in0_s_vec; - typename in_queue_t::predecessor_vector_type in1_p_vec; - typename in_queue_t::successor_vector_type in1_s_vec; - typename in_queue_t::predecessor_vector_type in2_p_vec; - typename in_queue_t::successor_vector_type in2_s_vec; - typename out_queue_t::predecessor_vector_type out0_p_vec; - typename out_queue_t::successor_vector_type out0_s_vec; - typename out_queue_t::predecessor_vector_type out1_p_vec; - typename out_queue_t::successor_vector_type out1_s_vec; - typename in_queue_t::predecessor_vector_type mp0_vec; - typename in_queue_t::predecessor_vector_type mp1_vec; - typename out_queue_t::successor_vector_type ms_vec; - - virtual void set_up_vectors() { - in0_p_vec.clear(); - in0_s_vec.clear(); - in1_p_vec.clear(); - in1_s_vec.clear(); - in2_p_vec.clear(); - in2_s_vec.clear(); - out0_p_vec.clear(); - out0_s_vec.clear(); - out1_p_vec.clear(); - out1_s_vec.clear(); - mp0_vec.clear(); - mp1_vec.clear(); - ms_vec.clear(); - - in0.copy_predecessors(in0_p_vec); - in0.copy_successors(in0_s_vec); - in1.copy_predecessors(in1_p_vec); - in1.copy_successors(in1_s_vec); - in2.copy_predecessors(in2_p_vec); - in2.copy_successors(in2_s_vec); - tbb::flow::input_port<0>(middle).copy_predecessors(mp0_vec); - tbb::flow::input_port<1>(middle).copy_predecessors(mp1_vec); - middle.copy_successors(ms_vec); - out0.copy_predecessors(out0_p_vec); - out0.copy_successors(out0_s_vec); - out1.copy_predecessors(out1_p_vec); - out1.copy_successors(out1_s_vec); - } - - void check_tuple( T &r, tuple_t &v ) { - T t0 = tbb::flow::get<0>(v); - T t1 = tbb::flow::get<1>(v); - ASSERT( (t0 == 1 || t0 == 2) && (t0&r) == 0, "duplicate value" ); - r |= t0; - ASSERT( (t1 == 4 || t1 == 8) && (t1&r) == 0, "duplicate value" ); - r |= t1; - } - - void make_and_validate_full_graph() { - /* in0 */ - /* \ */ - /* port0 out0 */ - /* / | / */ - /* in1 middle */ - /* | \ */ - /* in2 - port1 out1 */ - tbb::flow::make_edge( in0, tbb::flow::input_port<0>(middle) ); - tbb::flow::make_edge( in1, tbb::flow::input_port<0>(middle) ); - tbb::flow::make_edge( 
in2, tbb::flow::input_port<1>(middle) ); - tbb::flow::make_edge( middle, out0 ); - tbb::flow::make_edge( middle, out1 ); - - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 1 && in0_s_vec.size() == 1 && in0_s_vec[0] == ms_p0_ptr, "expected 1 successor" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 1 && in1_s_vec.size() == 1 && in1_s_vec[0] == ms_p0_ptr, "expected 1 successor" ); - ASSERT( in2.predecessor_count() == 0 && in2_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in2.successor_count() == 1 && in2_s_vec.size() == 1 && in2_s_vec[0] == ms_p1_ptr, "expected 1 successor" ); - ASSERT( tbb::flow::input_port<0>(middle).predecessor_count() == 2 && mp0_vec.size() == 2, "expected 2 predecessors" ); - ASSERT( tbb::flow::input_port<1>(middle).predecessor_count() == 1 && mp1_vec.size() == 1, "expected 1 predecessors" ); - ASSERT( middle.successor_count() == 2 && ms_vec.size() == 2, "expected 2 successors" ); - ASSERT( out0.predecessor_count() == 1 && out0_p_vec.size() == 1 && out0_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 1 && out1_p_vec.size() == 1 && out1_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - int first_pred = mp0_vec[0] == ins[0] ? 0 : ( mp0_vec[0] == ins[1] ? 1 : -1 ); - int second_pred = mp0_vec[1] == ins[0] ? 0 : ( mp0_vec[1] == ins[1] ? 1 : -1 ); - ASSERT( first_pred != -1 && second_pred != -1 && first_pred != second_pred, "bad predecessor(s) for middle port 0" ); - - ASSERT( mp1_vec[0] == ins[2], "bad predecessor for middle port 1" ); - - int first_succ = ms_vec[0] == outs[0] ? 0 : ( ms_vec[0] == outs[1] ? 1 : -1 ); - int second_succ = ms_vec[1] == outs[0] ? 0 : ( ms_vec[1] == outs[1] ? 
1 : -1 ); - ASSERT( first_succ != -1 && second_succ != -1 && first_succ != second_succ, "bad successor(s) for middle" ); - - in0.try_put(1); - in1.try_put(2); - in2.try_put(8); - in2.try_put(4); - g.wait_for_all(); - - T v_in; - tuple_t v; - - ASSERT( in0.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in1.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in1.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in2.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( in2.try_get(v_in) == false, "buffer should not have a value" ); - - T r = 0; - while ( out0.try_get(v) ) { - check_tuple(r,v); - g.wait_for_all(); - } - ASSERT( r == 15, "not all values received" ); - - r = 0; - while ( out1.try_get(v) ) { - check_tuple(r,v); - g.wait_for_all(); - } - ASSERT( r == 15, "not all values received" ); - g.wait_for_all(); - } - - void validate_partial_graph() { - /* in0 */ - /* */ - /* port0 out0 */ - /* / | */ - /* in1 middle */ - /* | \ */ - /* in2 - port1 out1 */ - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 0 && in0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in1.successor_count() == 1 && in1_s_vec.size() == 1 && in1_s_vec[0] == ms_p0_ptr, "expected 1 successor" ); - ASSERT( in2.predecessor_count() == 0 && in2_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in2.successor_count() == 1 && in2_s_vec.size() == 1 && in2_s_vec[0] == ms_p1_ptr, "expected 1 successor" ); - ASSERT( tbb::flow::input_port<0>(middle).predecessor_count() == 1 && mp0_vec.size() == 1 && mp0_vec[0] == ins[1], "expected 1 predecessor" ); - ASSERT( tbb::flow::input_port<1>(middle).predecessor_count() == 1 && mp1_vec.size() == 1 && mp1_vec[0] == ins[2], "expected 1 predecessor" ); - ASSERT( middle.successor_count() == 1 && ms_vec.size() == 1 && ms_vec[0] == outs[1], "expected 1 successor" ); - ASSERT( out0.predecessor_count() == 0 && out0_p_vec.size() == 0, "expected 1 predecessor" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 1 && out1_p_vec.size() == 1 && out1_p_vec[0] == mp_ptr, "expected 1 predecessor" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - in0.try_put(1); - in1.try_put(2); - in2.try_put(8); - in2.try_put(4); - g.wait_for_all(); - - T v_in; - tuple_t v; - - ASSERT( in0.try_get(v_in) == true && v_in == 1, "buffer should have a value of 1" ); - ASSERT( in1.try_get(v_in) == false, "buffer should not have a value" ); - ASSERT( out0.try_get(v) == false, "buffer should not have a value" ); - ASSERT( out1.try_get(v) == true && tbb::flow::get<0>(v) == 2 && tbb::flow::get<1>(v) == 8, "buffer should have a value of < 2, 8 >" ); - ASSERT( in0.try_get(v_in) == false, "buffer should not have a value" ); - g.wait_for_all(); - g.reset(); // for queueing and tag_matching the 4 is now in the join - } - - void validate_empty_graph() { - /* in0 */ - /* */ - /* port0 out0 */ - /* | */ - /* in1 middle */ - /* | */ - /* in2 port1 out1 */ - set_up_vectors(); - - ASSERT( in0.predecessor_count() == 0 && in0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in0.successor_count() == 0 && in0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in1.predecessor_count() == 0 && in1_p_vec.size() == 0, "expected 
0 predecessors" ); - ASSERT( in1.successor_count() == 0 && in1_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( in2.predecessor_count() == 0 && in2_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( in2.successor_count() == 0 && in2_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( tbb::flow::input_port<0>(middle).predecessor_count() == 0 && mp0_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( tbb::flow::input_port<1>(middle).predecessor_count() == 0 && mp1_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( middle.successor_count() == 0 && ms_vec.size() == 0, "expected 0 successors" ); - ASSERT( out0.predecessor_count() == 0 && out0_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out0.successor_count() == 0 && out0_s_vec.size() == 0, "expected 0 successors" ); - ASSERT( out1.predecessor_count() == 0 && out1_p_vec.size() == 0, "expected 0 predecessors" ); - ASSERT( out1.successor_count() == 0 && out1_s_vec.size() == 0, "expected 0 successors" ); - - in0.try_put(1); - in1.try_put(2); - in2.try_put(8); - in2.try_put(4); - g.wait_for_all(); - - T v_in; - tuple_t v; - - ASSERT( in0.try_get(v_in) == true && v_in == 1, "buffer should have a value of 1" ); - ASSERT( in1.try_get(v_in) == true && v_in == 2, "buffer should have a value of 2" ); - ASSERT( in2.try_get(v_in) == true && v_in == 8, "buffer should have a value of 8" ); - ASSERT( in2.try_get(v_in) == true && v_in == 4, "buffer should have a value of 4" ); - ASSERT( out0.try_get(v) == false, "buffer should not have a value" ); - ASSERT( out1.try_get(v) == false, "buffer should not have a value" ); - g.wait_for_all(); - g.reset(); // NOTE: this should not be necessary!!!!! But it is!!!! - } - -public: - - test_join_base_extract(tbb::flow::graph &_g, in_queue_t &_in0, in_queue_t &_in1, in_queue_t &_in2, NODE_TYPE &m, out_queue_t &_out0, out_queue_t &_out1) : - g(_g), in0(_in0), in1(_in1), in2(_in2), middle(m), out0(_out0), out1(_out1) { - ins[0] = &in0; - ins[1] = &in1; - ins[2] = &in2; - outs[0] = &out0; - outs[1] = &out1; - ms_p0_ptr = static_cast< typename in_queue_t::successor_type * >(&tbb::flow::input_port<0>(middle)); - ms_p1_ptr = static_cast< typename in_queue_t::successor_type * >(&tbb::flow::input_port<1>(middle)); - mp_ptr = static_cast< typename out_queue_t::predecessor_type *>(&middle); - } - - virtual ~test_join_base_extract() {} - - void run_tests() { - REMARK("full graph\n"); - make_and_validate_full_graph(); - - in0.extract(); - out0.extract(); - REMARK("partial graph\n"); - validate_partial_graph(); - - in1.extract(); - in2.extract(); - out1.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - - middle.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - - in0.extract(); - in1.extract(); - in2.extract(); - middle.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - - out0.extract(); - out1.extract(); - middle.extract(); - REMARK("empty graph\n"); - validate_empty_graph(); - - REMARK("full graph\n"); - make_and_validate_full_graph(); - } -}; - -template< typename T, typename NODE_TYPE > -class test_join_extract : public test_join_base_extract< T, NODE_TYPE > { -protected: - typedef typename NODE_TYPE::output_type tuple_t; - typedef tbb::flow::queue_node<T> in_queue_t; - typedef tbb::flow::queue_node<tuple_t> out_queue_t; - - tbb::flow::graph my_g; - in_queue_t 
my_in0; - in_queue_t my_in1; - in_queue_t my_in2; - NODE_TYPE my_middle; - out_queue_t my_out0; - out_queue_t my_out1; - -public: - test_join_extract() : test_join_base_extract<T, NODE_TYPE>( my_g, my_in0, my_in1, my_in2, my_middle, my_out0, my_out1 ), - my_in0(my_g), my_in1(my_g), my_in2(my_g), my_middle(my_g), my_out0(my_g), my_out1(my_g) { } -}; - -template< typename T > -class test_join_extract<T, tbb::flow::join_node< tbb::flow::tuple<T,T>, tbb::flow::tag_matching> > : - public test_join_base_extract< T, tbb::flow::join_node< tbb::flow::tuple<T,T>, tbb::flow::tag_matching> > { -protected: - typedef tbb::flow::join_node< tbb::flow::tuple<T,T>, tbb::flow::tag_matching> my_node_t; - - typedef typename my_node_t::output_type tuple_t; - typedef tbb::flow::queue_node<T> in_queue_t; - typedef tbb::flow::queue_node<tuple_t> out_queue_t; - - tbb::flow::graph my_g; - in_queue_t my_in0; - in_queue_t my_in1; - in_queue_t my_in2; - my_node_t my_middle; - out_queue_t my_out0; - out_queue_t my_out1; - struct tag_match_0 { size_t operator()(T v) { return v; } }; - struct tag_match_1 { size_t operator()(T v) { return v/4; } }; -public: - test_join_extract() : test_join_base_extract<T, my_node_t>( my_g, my_in0, my_in1, my_in2, my_middle, my_out0, my_out1 ), - my_in0(my_g), my_in1(my_g), my_in2(my_g), my_middle(my_g, tag_match_0(), tag_match_1()), my_out0(my_g), my_out1(my_g) { } -}; -#endif - -struct threebyte { - unsigned char b1; - unsigned char b2; - unsigned char b3; - threebyte(int i=0) { b1 = (unsigned char)i; } - threebyte(const threebyte &other) : b1(other.b1), b2(other.b2), b3(other.b3) { } - operator int() { return (int)b1; } -}; - -const int Count = 150; -const int Recirc_count = 1000; // number of tuples to be generated -const int MaxPorts = 10; -const int MaxNSources = 5; // max # of source_nodes to register for each join_node input in parallel test -bool outputCheck[MaxPorts][Count]; // for checking output - -using tbb::flow::NO_TAG; - -void -check_outputCheck( int nUsed, int maxCnt) { - for(int i=0; i < nUsed; ++i) { - for( int j = 0; j < maxCnt; ++j) { - ASSERT(outputCheck[i][j], NULL); - } - } -} - -void -reset_outputCheck( int nUsed, int maxCnt) { - for(int i=0; i < nUsed; ++i) { - for( int j = 0; j < maxCnt; ++j) { - outputCheck[i][j] = false; - } - } -} - -template<typename T> -class name_of { -public: - static const char* name() { return "Unknown"; } -}; -template<typename T> -class name_of<check_type<T> > { -public: - static const char* name() { return "checktype"; } -}; -template<> -class name_of<int> { -public: - static const char* name() { return "int"; } -}; -template<> -class name_of<float> { -public: - static const char* name() { return "float"; } -}; -template<> -class name_of<double> { -public: - static const char* name() { return "double"; } -}; -template<> -class name_of<long> { -public: - static const char* name() { return "long"; } -}; -template<> -class name_of<short> { -public: - static const char* name() { return "short"; } -}; -template<> -class name_of<threebyte> { -public: - static const char* name() {return "threebyte"; } -}; - -// for recirculating tags, input is tuple<index,continue_msg> -// output is index*my_mult cast to the right type -template<typename TT> -class recirc_func_body { - TT my_mult; -public: - typedef tbb::flow::tuple<int, tbb::flow::continue_msg> input_type; - recirc_func_body(TT multiplier ) : my_mult(multiplier) {} - recirc_func_body(const recirc_func_body &other) : my_mult(other.my_mult) { } - void operator=( const recirc_func_body &other) 
{ my_mult = other.my_mult; } - TT operator()(const input_type &v) { - return TT(tbb::flow::get<0>(v)) * my_mult; - } -}; - -static int input_count; // source_nodes are serial -static tbb::atomic<int> output_count; - -// emit input_count continue_msg -class recirc_source_node_body { -public: - bool operator()(tbb::flow::continue_msg &v ) { - --input_count; - v = tbb::flow::continue_msg(); - return 0 <= input_count; - } -}; - -// T must be arithmetic, and shouldn't wrap around for reasonable sizes of Count (which is now 150, and maxPorts is 10, -// so the max number generated right now is 1500 or so.) Source will generate a series of TT with value -// (init_val + (i-1)*addend) * my_mult, where i is the i-th invocation of the body. We are attaching addend -// source nodes to a join_port, and each will generate part of the numerical series the port is expecting -// to receive. If there is only one source node, the series order will be maintained; if more than one, -// this is not guaranteed. -template<typename TT> -class source_body { - TT my_mult; - int my_count; - int addend; -public: - source_body(TT multiplier, int init_val, int addto) : my_mult(multiplier), my_count(init_val), addend(addto) { } - void operator=( const source_body& other) {my_mult=other.my_mult; my_count=other.my_count; addend=other.addend;} - bool operator()( TT &v) { - int lc = my_count; - v = my_mult * (TT)my_count; - my_count += addend; - return lc < Count; - } -}; - -template<typename TT> -class tag_func { - TT my_mult; -public: - tag_func(TT multiplier) : my_mult(multiplier) { } - void operator=( const tag_func& other){my_mult = other.my_mult;} - // operator() will return [0 .. Count) - tbb::flow::tag_value operator()( TT v) { - tbb::flow::tag_value t = tbb::flow::tag_value(v / my_mult); - return t; - } -}; - -// allocator for join_node. 
This is specialized for tag_matching joins because they require a variable number -// of tag_value methods passed to the constructor - -template<int N, typename JType, tbb::flow::graph_buffer_policy JP> -class makeJoin { -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g); - return temp; - } - static void destroy(JType *p) { delete p; } -}; - - -template<typename JType> -class makeJoin<2,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; - -#if MAX_TUPLE_TEST_SIZE >= 3 -template<typename JType> -class makeJoin<3,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif -#if MAX_TUPLE_TEST_SIZE >= 4 -template<typename JType> -class makeJoin<4,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename tbb::flow::tuple_element<3, TType>::type T3; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif -#if MAX_TUPLE_TEST_SIZE >= 5 -template<typename JType> -class makeJoin<5,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename tbb::flow::tuple_element<3, TType>::type T3; - typedef typename tbb::flow::tuple_element<4, TType>::type T4; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)), - tag_func<T4>(T4(6)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif -#if MAX_TUPLE_TEST_SIZE >= 6 -template<typename JType> -class makeJoin<6,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename tbb::flow::tuple_element<3, TType>::type T3; - typedef typename tbb::flow::tuple_element<4, TType>::type T4; - typedef typename tbb::flow::tuple_element<5, TType>::type T5; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)), - 
tag_func<T4>(T4(6)), - tag_func<T5>(T5(7)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif - -#if MAX_TUPLE_TEST_SIZE >= 7 -template<typename JType> -class makeJoin<7,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename tbb::flow::tuple_element<3, TType>::type T3; - typedef typename tbb::flow::tuple_element<4, TType>::type T4; - typedef typename tbb::flow::tuple_element<5, TType>::type T5; - typedef typename tbb::flow::tuple_element<6, TType>::type T6; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)), - tag_func<T4>(T4(6)), - tag_func<T5>(T5(7)), - tag_func<T6>(T6(8)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif - -#if MAX_TUPLE_TEST_SIZE >= 8 -template<typename JType> -class makeJoin<8,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename tbb::flow::tuple_element<3, TType>::type T3; - typedef typename tbb::flow::tuple_element<4, TType>::type T4; - typedef typename tbb::flow::tuple_element<5, TType>::type T5; - typedef typename tbb::flow::tuple_element<6, TType>::type T6; - typedef typename tbb::flow::tuple_element<7, TType>::type T7; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)), - tag_func<T4>(T4(6)), - tag_func<T5>(T5(7)), - tag_func<T6>(T6(8)), - tag_func<T7>(T7(9)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif - -#if MAX_TUPLE_TEST_SIZE >= 9 -template<typename JType> -class makeJoin<9,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename tbb::flow::tuple_element<3, TType>::type T3; - typedef typename tbb::flow::tuple_element<4, TType>::type T4; - typedef typename tbb::flow::tuple_element<5, TType>::type T5; - typedef typename tbb::flow::tuple_element<6, TType>::type T6; - typedef typename tbb::flow::tuple_element<7, TType>::type T7; - typedef typename tbb::flow::tuple_element<8, TType>::type T8; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)), - tag_func<T4>(T4(6)), - tag_func<T5>(T5(7)), - tag_func<T6>(T6(8)), - tag_func<T7>(T7(9)), - tag_func<T8>(T8(10)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif - -#if MAX_TUPLE_TEST_SIZE >= 10 -template<typename JType> -class makeJoin<10,JType,tbb::flow::tag_matching> { - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple_element<0, TType>::type T0; - typedef typename tbb::flow::tuple_element<1, TType>::type T1; - typedef typename tbb::flow::tuple_element<2, TType>::type T2; - typedef typename 
tbb::flow::tuple_element<3, TType>::type T3; - typedef typename tbb::flow::tuple_element<4, TType>::type T4; - typedef typename tbb::flow::tuple_element<5, TType>::type T5; - typedef typename tbb::flow::tuple_element<6, TType>::type T6; - typedef typename tbb::flow::tuple_element<7, TType>::type T7; - typedef typename tbb::flow::tuple_element<8, TType>::type T8; - typedef typename tbb::flow::tuple_element<9, TType>::type T9; -public: - static JType *create(tbb::flow::graph& g) { - JType *temp = new JType(g, - tag_func<T0>(T0(2)), - tag_func<T1>(T1(3)), - tag_func<T2>(T2(4)), - tag_func<T3>(T3(5)), - tag_func<T4>(T4(6)), - tag_func<T5>(T5(7)), - tag_func<T6>(T6(8)), - tag_func<T7>(T7(9)), - tag_func<T8>(T8(10)), - tag_func<T9>(T9(11)) - ); - return temp; - } - static void destroy(JType *p) { delete p; } -}; -#endif - -// holder for source_node pointers for eventual deletion - -static void* all_source_nodes[MaxPorts][MaxNSources]; - -template<int ELEM, typename JNT> -class source_node_helper { -public: - typedef JNT join_node_type; - typedef tbb::flow::join_node<tbb::flow::tuple<int, tbb::flow::continue_msg>, tbb::flow::reserving> input_join_type; - typedef typename join_node_type::output_type TT; - typedef typename tbb::flow::tuple_element<ELEM-1,TT>::type IT; - typedef typename tbb::flow::source_node<IT> my_source_node_type; - typedef typename tbb::flow::function_node<tbb::flow::tuple<int,tbb::flow::continue_msg>, IT> my_recirc_function_type; - static void print_remark(const char * str) { - source_node_helper<ELEM-1,JNT>::print_remark(str); - REMARK(", %s", name_of<IT>::name()); - } - static void add_source_nodes(join_node_type &my_join, tbb::flow::graph &g, int nInputs) { - for(int i=0; i < nInputs; ++i) { - my_source_node_type *new_node = new my_source_node_type(g, source_body<IT>((IT)(ELEM+1), i, nInputs)); - tbb::flow::make_edge( *new_node, tbb::flow::input_port<ELEM-1>(my_join) ); - all_source_nodes[ELEM-1][i] = (void *)new_node; - } - // add the next source_node - source_node_helper<ELEM-1, JNT>::add_source_nodes(my_join, g, nInputs); - } - - static void add_recirc_func_nodes(join_node_type &my_join, input_join_type &my_input, tbb::flow::graph &g) { - my_recirc_function_type *new_node = new my_recirc_function_type(g, tbb::flow::unlimited, recirc_func_body<IT>((IT)(ELEM+1))); - tbb::flow::make_edge(*new_node, tbb::flow::input_port<ELEM-1>(my_join)); - tbb::flow::make_edge(my_input, *new_node); - all_source_nodes[ELEM-1][0] = (void *)new_node; - source_node_helper<ELEM-1, JNT>::add_recirc_func_nodes(my_join, my_input, g); - } - - static void only_check_value(const int i, const TT &v) { - ASSERT( tbb::flow::get<ELEM-1>(v) == (IT)(i*(ELEM+1)), NULL); - source_node_helper<ELEM-1,JNT>::only_check_value(i, v); - } - - static void check_value(int i, TT &v, bool is_serial) { - // the fetched value will match only if there is only one source_node. - ASSERT(!is_serial || tbb::flow::get<ELEM-1>(v) == (IT)(i*(ELEM+1)), NULL); - // tally the fetched value. 
- int ival = (int)tbb::flow::get<ELEM-1>(v); - ASSERT(!(ival%(ELEM+1)), NULL); - ival /= (ELEM+1); - ASSERT(!outputCheck[ELEM-1][ival], NULL); - outputCheck[ELEM-1][ival] = true; - source_node_helper<ELEM-1,JNT>::check_value(i, v, is_serial); - } - static void remove_source_nodes(join_node_type& my_join, int nInputs) { - for(int i=0; i< nInputs; ++i) { - my_source_node_type *dp = reinterpret_cast<my_source_node_type *>(all_source_nodes[ELEM-1][i]); - tbb::flow::remove_edge( *dp, tbb::flow::input_port<ELEM-1>(my_join) ); - delete dp; - } - source_node_helper<ELEM-1, JNT>::remove_source_nodes(my_join, nInputs); - } - - static void remove_recirc_func_nodes(join_node_type& my_join, input_join_type &my_input) { - my_recirc_function_type *fn = reinterpret_cast<my_recirc_function_type *>(all_source_nodes[ELEM-1][0]); - tbb::flow::remove_edge( *fn, tbb::flow::input_port<ELEM-1>(my_join) ); - tbb::flow::remove_edge( my_input, *fn); - delete fn; - source_node_helper<ELEM-1, JNT>::remove_recirc_func_nodes(my_join,my_input); - } -}; - -template<typename JNT> -class source_node_helper<1, JNT> { - typedef JNT join_node_type; - typedef tbb::flow::join_node<tbb::flow::tuple<int, tbb::flow::continue_msg>, tbb::flow::reserving> input_join_type; - typedef typename join_node_type::output_type TT; - typedef typename tbb::flow::tuple_element<0,TT>::type IT; - typedef typename tbb::flow::source_node<IT> my_source_node_type; - typedef typename tbb::flow::function_node<tbb::flow::tuple<int,tbb::flow::continue_msg>, IT> my_recirc_function_type; -public: - static void print_remark(const char * str) { - REMARK("%s< %s", str, name_of<IT>::name()); - } - static void add_source_nodes(join_node_type &my_join, tbb::flow::graph &g, int nInputs) { - for(int i=0; i < nInputs; ++i) { - my_source_node_type *new_node = new my_source_node_type(g, source_body<IT>((IT)2, i, nInputs)); - tbb::flow::make_edge( *new_node, tbb::flow::input_port<0>(my_join) ); - all_source_nodes[0][i] = (void *)new_node; - } - } - - static void add_recirc_func_nodes(join_node_type &my_join, input_join_type &my_input, tbb::flow::graph &g) { - my_recirc_function_type *new_node = new my_recirc_function_type(g, tbb::flow::unlimited, recirc_func_body<IT>((IT)(2))); - tbb::flow::make_edge(*new_node, tbb::flow::input_port<0>(my_join)); - tbb::flow::make_edge(my_input, *new_node); - all_source_nodes[0][0] = (void *)new_node; - } - - static void only_check_value(const int i, const TT &v) { - ASSERT( tbb::flow::get<0>(v) == (IT)(i*2), NULL); - } - - static void check_value(int i, TT &v, bool is_serial) { - ASSERT(!is_serial || tbb::flow::get<0>(v) == (IT)(i*(2)), NULL); - int ival = (int)tbb::flow::get<0>(v); - ASSERT(!(ival%2), NULL); - ival /= 2; - ASSERT(!outputCheck[0][ival], NULL); - outputCheck[0][ival] = true; - } - static void remove_source_nodes(join_node_type& my_join, int nInputs) { - for(int i=0; i < nInputs; ++i) { - my_source_node_type *dp = reinterpret_cast<my_source_node_type *>(all_source_nodes[0][i]); - tbb::flow::remove_edge( *dp, tbb::flow::input_port<0>(my_join) ); - delete dp; - } - } - - static void remove_recirc_func_nodes(join_node_type& my_join, input_join_type &my_input) { - my_recirc_function_type *fn = reinterpret_cast<my_recirc_function_type *>(all_source_nodes[0][0]); - tbb::flow::remove_edge( *fn, tbb::flow::input_port<0>(my_join) ); - tbb::flow::remove_edge( my_input, *fn); - delete fn; - } -}; - -// get the tag from the output tuple and emit it. 
-// the first tuple component is tag * 2 cast to the type -template<typename OutputTupleType> -class recirc_output_func_body { -public: - // we only need this to use source_node_helper - typedef typename tbb::flow::join_node<OutputTupleType, tbb::flow::tag_matching> join_node_type; - static const int N = tbb::flow::tuple_size<OutputTupleType>::value; - int operator()(const OutputTupleType &v) { - int out = int(tbb::flow::get<0>(v)) / 2; - source_node_helper<N,join_node_type>::only_check_value(out,v); - ++output_count; - return out; - } -}; - -template<typename JType> -class tag_recirculation_test { -public: - typedef typename JType::output_type TType; - typedef typename tbb::flow::tuple<int, tbb::flow::continue_msg> input_tuple_type; - typedef tbb::flow::join_node<input_tuple_type,tbb::flow::reserving> input_join_type; - static const int N = tbb::flow::tuple_size<TType>::value; - static void test() { - source_node_helper<N,JType>::print_remark("Recirculation test of tag-matching join"); - REMARK(" >\n"); - for(int maxTag = 1; maxTag <10; maxTag *= 3) { - for(int i=0; i < N; ++i) all_source_nodes[i][0] = NULL; - - tbb::flow::graph g; - // this is the tag-matching join we're testing - JType * my_join = makeJoin<N,JType, tbb::flow::tag_matching>::create(g); - // source_node for continue messages - tbb::flow::source_node<tbb::flow::continue_msg> snode(g, recirc_source_node_body(), false); - // reserving join that matches recirculating tags with continue messages. - input_join_type * my_input_join = makeJoin<2,input_join_type,tbb::flow::reserving>::create(g); - // tbb::flow::make_edge(snode, tbb::flow::input_port<1>(*my_input_join)); - tbb::flow::make_edge(snode, tbb::flow::get<1>(my_input_join->input_ports())); - // queue to hold the tags - tbb::flow::queue_node<int> tag_queue(g); - tbb::flow::make_edge(tag_queue, tbb::flow::input_port<0>(*my_input_join)); - // add all the function_nodes that are inputs to the tag-matching join - source_node_helper<N,JType>::add_recirc_func_nodes(*my_join, *my_input_join, g); - // add the function_node that accepts the output of the join and emits the int tag it was based on - tbb::flow::function_node<TType, int> recreate_tag(g, tbb::flow::unlimited, recirc_output_func_body<TType>()); - tbb::flow::make_edge(*my_join, recreate_tag); - // now the recirculating part (output back to the queue) - tbb::flow::make_edge(recreate_tag, tag_queue); - - // put the tags into the queue - for(int t = 1; t <= maxTag; ++t) tag_queue.try_put(t); - - input_count = Recirc_count; - output_count = 0; - - // start up the source node to get things going - snode.activate(); - - // wait for everything to stop - g.wait_for_all(); - - ASSERT(output_count == Recirc_count, "not all instances were received"); - - int j; - // grab the tags from the queue, record them - std::vector<bool> out_tally(maxTag, false); - for(int i = 0; i < maxTag; ++i) { - ASSERT(tag_queue.try_get(j), "not enough tags in queue"); - ASSERT(!out_tally.at(j-1), "duplicate tag from queue"); - out_tally[j-1] = true; - } - ASSERT(!tag_queue.try_get(j), "Extra tags in recirculation queue"); - - // deconstruct graph - source_node_helper<N, JType>::remove_recirc_func_nodes(*my_join, *my_input_join); - tbb::flow::remove_edge(*my_join, recreate_tag); - makeJoin<N,JType,tbb::flow::tag_matching>::destroy(my_join); - tbb::flow::remove_edge(tag_queue, tbb::flow::input_port<0>(*my_input_join)); - tbb::flow::remove_edge(snode, tbb::flow::input_port<1>(*my_input_join)); - 
makeJoin<2,input_join_type,tbb::flow::reserving>::destroy(my_input_join); - } - } -}; - -template<typename JType, tbb::flow::graph_buffer_policy JP> -class parallel_test { -public: - typedef typename JType::output_type TType; - static const int TUPLE_SIZE = tbb::flow::tuple_size<TType>::value; - static const tbb::flow::graph_buffer_policy jp = JP; - static void test() { - TType v; - source_node_helper<TUPLE_SIZE,JType>::print_remark("Parallel test of join_node"); - REMARK(" >\n"); - for(int i=0; i < MaxPorts; ++i) { - for(int j=0; j < MaxNSources; ++j) { - all_source_nodes[i][j] = NULL; - } - } - for(int nInputs = 1; nInputs <= MaxNSources; ++nInputs) { - tbb::flow::graph g; - // JType my_join(g); - bool not_out_of_order = (nInputs == 1) && (jp != tbb::flow::tag_matching); - JType* my_join = makeJoin<TUPLE_SIZE,JType,JP>::create(g); - tbb::flow::queue_node<TType> outq1(g); - tbb::flow::queue_node<TType> outq2(g); - - tbb::flow::make_edge( *my_join, outq1 ); - tbb::flow::make_edge( *my_join, outq2 ); - - source_node_helper<TUPLE_SIZE, JType>::add_source_nodes((*my_join), g, nInputs); - - g.wait_for_all(); - - reset_outputCheck(TUPLE_SIZE, Count); - for(int i=0; i < Count; ++i) { - ASSERT(outq1.try_get(v), NULL); - source_node_helper<TUPLE_SIZE, JType>::check_value(i, v, not_out_of_order); - } - - check_outputCheck(TUPLE_SIZE, Count); - reset_outputCheck(TUPLE_SIZE, Count); - - for(int i=0; i < Count; i++) { - ASSERT(outq2.try_get(v), NULL);; - source_node_helper<TUPLE_SIZE, JType>::check_value(i, v, not_out_of_order); - } - check_outputCheck(TUPLE_SIZE, Count); - - ASSERT(!outq1.try_get(v), NULL); - ASSERT(!outq2.try_get(v), NULL); - - source_node_helper<TUPLE_SIZE, JType>::remove_source_nodes((*my_join), nInputs); - tbb::flow::remove_edge( *my_join, outq1 ); - tbb::flow::remove_edge( *my_join, outq2 ); - makeJoin<TUPLE_SIZE,JType,JP>::destroy(my_join); - } - } -}; - - -template<int ELEM, typename JType> -class serial_queue_helper { -public: - typedef typename JType::output_type TT; - typedef typename tbb::flow::tuple_element<ELEM-1,TT>::type IT; - typedef typename tbb::flow::queue_node<IT> my_queue_node_type; - static void print_remark() { - serial_queue_helper<ELEM-1,JType>::print_remark(); - REMARK(", %s", name_of<IT>::name()); - } - static void add_queue_nodes(tbb::flow::graph &g, JType &my_join) { - serial_queue_helper<ELEM-1,JType>::add_queue_nodes(g, my_join); - my_queue_node_type *new_node = new my_queue_node_type(g); - tbb::flow::make_edge( *new_node, tbb::flow::get<ELEM-1>(my_join.input_ports()) ); - all_source_nodes[ELEM-1][0] = (void *)new_node; - } - static void fill_one_queue(int maxVal) { - // fill queue to "left" of me - my_queue_node_type *qptr = reinterpret_cast<my_queue_node_type *>(all_source_nodes[ELEM-1][0]); - serial_queue_helper<ELEM-1,JType>::fill_one_queue(maxVal); - for(int i = 0; i < maxVal; ++i) { - ASSERT(qptr->try_put((IT)(i*(ELEM+1))), NULL); - } - } - static void put_one_queue_val(int myVal) { - // put this val to my "left". 
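The helper being defined here follows the same compile-time recursion idiom as source_node_helper above, and its recursion step resumes immediately below: a primary template handles tuple element ELEM-1 and delegates the rest to the ELEM-1 instantiation, with a specialization at 1 as the base case. A minimal sketch of the idiom over std::tuple, where tuple_printer is a hypothetical name:

    #include <iostream>
    #include <tuple>

    // primary template: recurse over elements [0, N-1), then handle element N-1
    template<int N, typename Tuple>
    struct tuple_printer {
        static void print(const Tuple &t) {
            tuple_printer<N - 1, Tuple>::print(t);
            std::cout << std::get<N - 1>(t) << ' ';
        }
    };

    // base case at 1 terminates the recursion, mirroring the <1, JNT> specializations
    template<typename Tuple>
    struct tuple_printer<1, Tuple> {
        static void print(const Tuple &t) { std::cout << std::get<0>(t) << ' '; }
    };

    int main() {
        typedef std::tuple<int, double, char> T;
        T t(1, 2.5, 'x');
        tuple_printer<std::tuple_size<T>::value, T>::print(t);  // prints: 1 2.5 x
        std::cout << '\n';
        return 0;
    }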
- serial_queue_helper<ELEM-1,JType>::put_one_queue_val(myVal); - my_queue_node_type *qptr = reinterpret_cast<my_queue_node_type *>(all_source_nodes[ELEM-1][0]); - ASSERT(qptr->try_put((IT)(myVal*(ELEM+1))), NULL); - } - static void check_queue_value(int i, TT &v) { - serial_queue_helper<ELEM-1,JType>::check_queue_value(i, v); - ASSERT( tbb::flow::get<ELEM-1>(v) == (IT)(i * (ELEM+1)), NULL); - } - static void remove_queue_nodes(JType &my_join) { - my_queue_node_type *vptr = reinterpret_cast<my_queue_node_type *>(all_source_nodes[ELEM-1][0]); - tbb::flow::remove_edge( *vptr, tbb::flow::get<ELEM-1>(my_join.input_ports()) ); - serial_queue_helper<ELEM-1, JType>::remove_queue_nodes(my_join); - delete vptr; - } -}; - -template<typename JType> -class serial_queue_helper<1, JType> { -public: - typedef typename JType::output_type TT; - typedef typename tbb::flow::tuple_element<0,TT>::type IT; - typedef typename tbb::flow::queue_node<IT> my_queue_node_type; - static void print_remark() { - REMARK("Serial test of join_node< %s", name_of<IT>::name()); - } - static void add_queue_nodes(tbb::flow::graph &g, JType &my_join) { - my_queue_node_type *new_node = new my_queue_node_type(g); - tbb::flow::make_edge( *new_node, tbb::flow::input_port<0>(my_join) ); - all_source_nodes[0][0] = (void *)new_node; - } - static void fill_one_queue(int maxVal) { - my_queue_node_type *qptr = reinterpret_cast<my_queue_node_type *>(all_source_nodes[0][0]); - for(int i = 0; i < maxVal; ++i) { - ASSERT(qptr->try_put((IT)(i*2)), NULL); - } - } - static void put_one_queue_val(int myVal) { - my_queue_node_type *qptr = reinterpret_cast<my_queue_node_type *>(all_source_nodes[0][0]); - ASSERT(qptr->try_put((IT)(myVal*2)), NULL); - } - static void check_queue_value(int i, TT &v) { - ASSERT( tbb::flow::get<0>(v) == (IT)(i*2), NULL); - } - static void remove_queue_nodes(JType &my_join) { - my_queue_node_type *vptr = reinterpret_cast<my_queue_node_type *>(all_source_nodes[0][0]); - tbb::flow::remove_edge( *vptr, tbb::flow::get<0>(my_join.input_ports()) ); - delete vptr; - } -}; - -// -// Single reservable predecessor at each port, single accepting successor -// * put to buffer before port0, then put to buffer before port1, ... -// * fill buffer before port0 then fill buffer before port1, ... - -template<typename JType, tbb::flow::graph_buffer_policy JP> -void test_one_serial( JType &my_join, tbb::flow::graph &g) { - typedef typename JType::output_type TType; - static const int TUPLE_SIZE = tbb::flow::tuple_size<TType>::value; - std::vector<bool> flags; - serial_queue_helper<TUPLE_SIZE, JType>::add_queue_nodes(g,my_join); - typedef TType q3_input_type; - tbb::flow::queue_node< q3_input_type > q3(g); - - tbb::flow::make_edge( my_join, q3 ); - - // fill each queue with its value one-at-a-time - flags.clear(); - for (int i = 0; i < Count; ++i ) { - serial_queue_helper<TUPLE_SIZE,JType>::put_one_queue_val(i); - flags.push_back(false); - } - - g.wait_for_all(); - tbb::flow::graph_buffer_policy jp = JP; - for (int i = 0; i < Count; ++i ) { - q3_input_type v; - g.wait_for_all(); - ASSERT(q3.try_get( v ), "Error in try_get()"); - if(jp == tbb::flow::tag_matching) { - // because we look up tags in the hash table, the output may be out of order. 
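The division by 2 on the next line recovers the logical index from the first tuple component; it is needed precisely because a tag_matching join may complete tuples in any order. A standalone sketch of that behavior, assuming only the classic TBB 4.x flow-graph API this file targets:

    #include "tbb/flow_graph.h"
    #include <cstdio>

    int main() {
        tbb::flow::graph g;
        typedef tbb::flow::tuple<int, int> pair_t;
        // each port gets a functor mapping an incoming item to its matching tag
        tbb::flow::join_node<pair_t, tbb::flow::tag_matching> j(g,
            [](int v) { return tbb::flow::tag_value(v); },
            [](int v) { return tbb::flow::tag_value(v); });
        tbb::flow::queue_node<pair_t> out(g);
        tbb::flow::make_edge(j, out);

        for (int i = 0; i < 4; ++i) tbb::flow::input_port<0>(j).try_put(i);
        for (int i = 3; i >= 0; --i) tbb::flow::input_port<1>(j).try_put(i);
        g.wait_for_all();

        // tuples complete as tags match, so the output typically follows the
        // second (reversed) stream, not the first port's insertion order
        pair_t t;
        while (out.try_get(t))
            std::printf("matched tag %d\n", tbb::flow::get<0>(t));
        return 0;
    }

With a single serial source per port, as in the non-tag policies above, output order is deterministic and no such decoding is required.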
- int j = int(tbb::flow::get<0>(v)) / 2; // figure what the index should be - serial_queue_helper<TUPLE_SIZE,JType>::check_queue_value(j, v); - flags[j] = true; - } - else { - serial_queue_helper<TUPLE_SIZE,JType>::check_queue_value(i, v); - } - } - - if(jp == tbb::flow::tag_matching) { - for(int i = 0; i < Count; ++i) { - ASSERT(flags[i], NULL); - flags[i] = false; - } - } - - // fill each queue completely before filling the next. - serial_queue_helper<TUPLE_SIZE, JType>::fill_one_queue(Count); - - g.wait_for_all(); - for (int i = 0; i < Count; ++i ) { - q3_input_type v; - g.wait_for_all(); - ASSERT(q3.try_get( v ), "Error in try_get()"); - if(jp == tbb::flow::tag_matching) { - int j = int(tbb::flow::get<0>(v)) / 2; - serial_queue_helper<TUPLE_SIZE,JType>::check_queue_value(j, v); - flags[i] = true; - } - else { - serial_queue_helper<TUPLE_SIZE,JType>::check_queue_value(i, v); - } - } - - if(jp == tbb::flow::tag_matching) { - for(int i = 0; i < Count; ++i) { - ASSERT(flags[i], NULL); - } - } - - serial_queue_helper<TUPLE_SIZE, JType>::remove_queue_nodes(my_join); - -} - -template<typename JType, tbb::flow::graph_buffer_policy JP> -class serial_test { - typedef typename JType::output_type TType; - static const int TUPLE_SIZE = tbb::flow::tuple_size<TType>::value; - static const int ELEMS = 3; -public: -static void test() { - tbb::flow::graph g; - std::vector<bool> flags; - flags.reserve(Count); - JType* my_join = makeJoin<TUPLE_SIZE,JType,JP>::create(g); - serial_queue_helper<TUPLE_SIZE, JType>::print_remark(); REMARK(" >\n"); - - test_one_serial<JType,JP>( *my_join, g); - // build the vector with copy construction from the used join node. - std::vector<JType>join_vector(ELEMS, *my_join); - // destroy the tired old join_node in case we're accidentally reusing pieces of it. - makeJoin<TUPLE_SIZE,JType,JP>::destroy(my_join); - - - for(int e = 0; e < ELEMS; ++e) { // exercise each of the vector elements - test_one_serial<JType,JP>( join_vector[e], g); - } -} - -}; // serial_test - -template< - template<typename, tbb::flow::graph_buffer_policy> class TestType, // serial_test or parallel_test - typename OutputTupleType, // type of the output of the join - tbb::flow::graph_buffer_policy J> // graph_buffer_policy (reserving, queueing or tag_matching) -class generate_test { -public: - typedef tbb::flow::join_node<OutputTupleType,J> join_node_type; - static void do_test() { - TestType<join_node_type,J>::test(); - } -}; - -template<typename JType> -class generate_recirc_test { -public: - typedef tbb::flow::join_node<JType, tbb::flow::tag_matching> join_node_type; - static void do_test() { - tag_recirculation_test<join_node_type>::test(); - } -}; - -template<tbb::flow::graph_buffer_policy JP> -void test_input_port_policies(); - -// join_node (reserving) does not consume inputs until an item is available at -// every input. It tries to reserve each input, and if any fails it releases the -// reservation. When it builds a tuple it broadcasts to all its successors and -// consumes all the inputs. -// -// So our test will put an item at one input port, then attach another node to the -// same node (a queue node in this case). The second successor should receive the -// item in the queue, emptying it. -// -// We then place an item in the second input queue, and check the output queues; they -// should still be empty. Then we place an item in the first queue; the output queues -// should then receive a tuple. -// -// we then attach another function node to the second input. 
It should not receive -// an item, verifying that the item in the queue is consumed. -template<> -void test_input_port_policies<tbb::flow::reserving>() { - tbb::flow::graph g; - typedef tbb::flow::join_node<tbb::flow::tuple<int, int>, tbb::flow::reserving > JType; // two-phase is the default policy - // create join_node<type0,type1> jn - JType jn(g); - // create output_queue oq0, oq1 - typedef JType::output_type OQType; - tbb::flow::queue_node<OQType> oq0(g); - tbb::flow::queue_node<OQType> oq1(g); - // create iq0, iq1 - typedef tbb::flow::queue_node<int> IQType; - IQType iq0(g); - IQType iq1(g); - // create qnp, qnq - IQType qnp(g); - IQType qnq(g); - REMARK("Testing policies of join_node<reserving> input ports\n"); - // attach jn to oq0, oq1 - tbb::flow::make_edge( jn, oq0 ); - tbb::flow::make_edge( jn, oq1 ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(jn.successor_count() == 2, NULL); - JType::successor_vector_type my_succs; - jn.copy_successors(my_succs); - ASSERT(my_succs.size() == 2, NULL); -#endif - // attach iq0, iq1 to jn - tbb::flow::make_edge( iq0, tbb::flow::get<0>(jn.input_ports()) ); - tbb::flow::make_edge( iq1, tbb::flow::get<1>(jn.input_ports()) ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(tbb::flow::get<0>(jn.input_ports()).predecessor_count() == 1, NULL); - std::vector<tbb::flow::sender<int> *> my_0preds; - tbb::flow::input_port<0>(jn).copy_predecessors(my_0preds); - ASSERT(my_0preds.size() == 1, NULL); -#endif - for(int loop = 0; loop < 3; ++loop) { - // place one item in iq0 - ASSERT(iq0.try_put(1), "Error putting to iq1"); - // attach iq0 to qnp - tbb::flow::make_edge( iq0, qnp ); - // qnp should have an item in it. - g.wait_for_all(); - { - int i; - ASSERT(qnp.try_get(i) && i == 1, "Error in item fetched by qnp"); - } - // place item in iq1 - ASSERT(iq1.try_put(2), "Error putting to iq1"); - // oq0, oq1 should be empty - g.wait_for_all(); - { - OQType t1; - ASSERT(!oq0.try_get(t1) && !oq1.try_get(t1), "oq0 and oq1 not empty"); - } - // detach qnp from iq0 - tbb::flow::remove_edge( iq0, qnp); // if we don't remove qnp it will gobble any values we put in iq0 - // place item in iq0 - ASSERT(iq0.try_put(3), "Error on second put to iq0"); - // oq0, oq1 should have items in them - g.wait_for_all(); - { - OQType t0; - OQType t1; - ASSERT(oq0.try_get(t0) && tbb::flow::get<0>(t0) == 3 && tbb::flow::get<1>(t0) == 2, "Error in oq0 output"); - ASSERT(oq1.try_get(t1) && tbb::flow::get<0>(t1) == 3 && tbb::flow::get<1>(t1) == 2, "Error in oq1 output"); - } - // attach qnp to iq0, qnq to iq1 - // qnp and qnq should be empty - tbb::flow::make_edge( iq0, qnp ); - tbb::flow::make_edge( iq1, qnq ); - g.wait_for_all(); - { - int i; - ASSERT(!qnp.try_get(i), "iq0 still had value in it"); - ASSERT(!qnq.try_get(i), "iq1 still had value in it"); - } - tbb::flow::remove_edge( iq0, qnp ); - tbb::flow::remove_edge( iq1, qnq ); - } // for ( int loop ... -} - -// join_node (queueing) consumes inputs as soon as they are available at -// any input. When it builds a tuple it broadcasts to all its successors and -// discards the broadcast values. -// -// So our test will put an item at one input port, then attach another node to the -// same node (a queue node in this case). The second successor should not receive -// an item (because the join consumed it). -// -// We then place an item in the second input queue, and check the output queues; they -// should each have a tuple. -// -// we then attach another function node to the second input. 
It should not receive -// an item, verifying that the item in the queue is consumed. -template<> -void test_input_port_policies<tbb::flow::queueing>() { - tbb::flow::graph g; - typedef tbb::flow::join_node<tbb::flow::tuple<int, int>, tbb::flow::queueing > JType; - // create join_node<type0,type1> jn - JType jn(g); - // create output_queue oq0, oq1 - typedef JType::output_type OQType; - tbb::flow::queue_node<OQType> oq0(g); - tbb::flow::queue_node<OQType> oq1(g); - // create iq0, iq1 - typedef tbb::flow::queue_node<int> IQType; - IQType iq0(g); - IQType iq1(g); - // create qnp, qnq - IQType qnp(g); - IQType qnq(g); - REMARK("Testing policies of join_node<queueing> input ports\n"); - // attach jn to oq0, oq1 - tbb::flow::make_edge( jn, oq0 ); - tbb::flow::make_edge( jn, oq1 ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(jn.successor_count() == 2, NULL); - JType::successor_vector_type my_succs; - jn.copy_successors(my_succs); - ASSERT(my_succs.size() == 2, NULL); -#endif - // attach iq0, iq1 to jn - tbb::flow::make_edge( iq0, tbb::flow::get<0>(jn.input_ports()) ); - tbb::flow::make_edge( iq1, tbb::flow::get<1>(jn.input_ports()) ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(tbb::flow::get<0>(jn.input_ports()).predecessor_count() == 1, NULL); - std::vector<tbb::flow::sender<int> *> my_0preds; - tbb::flow::input_port<0>(jn).copy_predecessors(my_0preds); - ASSERT(my_0preds.size() == 1, NULL); -#endif - for(int loop = 0; loop < 3; ++loop) { - // place one item in iq0 - ASSERT(iq0.try_put(1), "Error putting to iq1"); - // attach iq0 to qnp - tbb::flow::make_edge( iq0, qnp ); - // qnp should have an item in it. - g.wait_for_all(); - { - int i; - ASSERT(!qnp.try_get(i), "Item was received by qnp"); - } - // place item in iq1 - ASSERT(iq1.try_put(2), "Error putting to iq1"); - // oq0, oq1 should have items - g.wait_for_all(); - { - OQType t0; - OQType t1; - ASSERT(oq0.try_get(t0) && tbb::flow::get<0>(t0) == 1 && tbb::flow::get<1>(t0) == 2, "Error in oq0 output"); - ASSERT(oq1.try_get(t1) && tbb::flow::get<0>(t1) == 1 && tbb::flow::get<1>(t1) == 2, "Error in oq1 output"); - } - // attach qnq to iq1 - // qnp and qnq should be empty - tbb::flow::make_edge( iq1, qnq ); - g.wait_for_all(); - { - int i; - ASSERT(!qnp.try_get(i), "iq0 still had value in it"); - ASSERT(!qnq.try_get(i), "iq1 still had value in it"); - } - tbb::flow::remove_edge( iq0, qnp ); - tbb::flow::remove_edge( iq1, qnq ); - } // for ( int loop ... -} - -template<typename T> -struct myTagValue { - tbb::flow::tag_value operator()(T i) { return tbb::flow::tag_value(i); } -}; - -template<> -struct myTagValue<check_type<int> > { - tbb::flow::tag_value operator()(check_type<int> i) { return tbb::flow::tag_value((int)i); } -}; - -// join_node (tag_matching) consumes inputs as soon as they are available at -// any input. When it builds a tuple it broadcasts to all its successors and -// discards the broadcast values. -// -// It chooses the tuple it broadcasts by matching the tag values returned by the -// methods given the constructor of the join, in this case the method just casts -// the value in each port to tag_value. -// -// So our test will put an item at one input port, then attach another node to the -// same node (a queue node in this case). The second successor should not receive -// an item (because the join consumed it). -// -// We then place an item in the second input queue, and check the output queues; they -// should each have a tuple. -// -// we then attach another queue node to the second input. 
It should not receive -// an item, verifying that the item in the queue is consumed. -// -// We will then exercise the join with a bunch of values, and the output order should -// be determined by the order we insert items into the second queue. (Each tuple set -// corresponding to a tag will be complete when the second item is inserted.) -template<> -void test_input_port_policies<tbb::flow::tag_matching>() { - tbb::flow::graph g; - typedef tbb::flow::join_node<tbb::flow::tuple<int, check_type<int> >, tbb::flow::tag_matching > JoinNodeType; - typedef JoinNodeType::output_type CheckTupleType; - JoinNodeType testJoinNode(g, myTagValue<int>(), myTagValue<check_type<int> >()); - tbb::flow::queue_node<CheckTupleType> checkTupleQueue0(g); - tbb::flow::queue_node<CheckTupleType> checkTupleQueue1(g); - { - Check<check_type<int> > my_check; - - - typedef tbb::flow::queue_node<int> IntQueueType; - typedef tbb::flow::queue_node<check_type<int> > CheckQueueType; - IntQueueType intInputQueue(g); - CheckQueueType checkInputQueue(g); - IntQueueType intEmptyTestQueue(g); - CheckQueueType checkEmptyTestQueue(g); - REMARK("Testing policies of join_node<tag_matching> input ports\n"); - // attach testJoinNode to checkTupleQueue0, checkTupleQueue1 - tbb::flow::make_edge( testJoinNode, checkTupleQueue0 ); - tbb::flow::make_edge( testJoinNode, checkTupleQueue1 ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(testJoinNode.successor_count() == 2, NULL); - JoinNodeType::successor_vector_type my_succs; - testJoinNode.copy_successors(my_succs); - ASSERT(my_succs.size() == 2, NULL); -#endif - // attach intInputQueue, checkInputQueue to testJoinNode - tbb::flow::make_edge( intInputQueue, tbb::flow::input_port<0>(testJoinNode) ); - tbb::flow::make_edge( checkInputQueue, tbb::flow::input_port<1>(testJoinNode) ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(tbb::flow::input_port<0>(testJoinNode).predecessor_count() == 1, NULL); - std::vector<tbb::flow::sender<int> *> my_0preds; - tbb::flow::input_port<0>(testJoinNode).copy_predecessors(my_0preds); - ASSERT(my_0preds.size() == 1, NULL); -#endif - - // we'll put four discrete values in the inputs to the join_node. Each - // set of inputs should result in one output. (NO_TAG is currently defined - // to be tag_value(-1), so zero is an allowed tag_value.) - for(int loop = 0; loop < 4; ++loop) { - // place one item in intInputQueue - ASSERT(intInputQueue.try_put(loop), "Error putting to intInputQueue"); - // attach intInputQueue to intEmptyTestQueue - tbb::flow::make_edge( intInputQueue, intEmptyTestQueue ); - // intEmptyTestQueue should not have an item in it. (the join consumed it.) 
- g.wait_for_all(); - { - int intVal0; - ASSERT(!intEmptyTestQueue.try_get(intVal0), "Item was received by intEmptyTestQueue"); - } - // place item in checkInputQueue - check_type<int> checkVal0(loop); - ASSERT(checkInputQueue.try_put(checkVal0), "Error putting to checkInputQueue"); - // checkTupleQueue0, checkTupleQueue1 should have items - g.wait_for_all(); - { - CheckTupleType t0; - CheckTupleType t1; - ASSERT(checkTupleQueue0.try_get(t0) && tbb::flow::get<0>(t0) == loop && (int)tbb::flow::get<1>(t0) == loop, "Error in checkTupleQueue0 output"); - ASSERT(checkTupleQueue1.try_get(t1) && tbb::flow::get<0>(t1) == loop && (int)tbb::flow::get<1>(t1) == loop, "Error in checkTupleQueue1 output"); - ASSERT(!checkTupleQueue0.try_get(t0), "extra object in output queue checkTupleQueue0"); - ASSERT(!checkTupleQueue1.try_get(t0), "extra object in output queue checkTupleQueue1"); - } - // attach checkEmptyTestQueue to checkInputQueue - // intEmptyTestQueue and checkEmptyTestQueue should be empty - tbb::flow::make_edge( checkInputQueue, checkEmptyTestQueue ); - g.wait_for_all(); - { - int intVal1; - check_type<int> checkVal1; - //REMARK("loop == %d point 4.7 count is %d %d\n", loop, my_check(), my_check(1) ); // +1 - ASSERT(!intEmptyTestQueue.try_get(intVal1), "intInputQueue still had value in it"); - ASSERT(!checkEmptyTestQueue.try_get(checkVal1), "checkInputQueue still had value in it"); - } - tbb::flow::remove_edge( intInputQueue, intEmptyTestQueue ); - tbb::flow::remove_edge( checkInputQueue, checkEmptyTestQueue ); - } // for ( int loop ... - - // Now we'll put [4 .. nValues - 1] in intInputQueue, and then put [4 .. nValues - 1] in checkInputQueue in - // a different order. We should see tuples in the output queues in the order we inserted - // the integers into checkInputQueue. - const int nValues = 100; - const int nIncr = 31; // relatively prime to nValues - - for(int loop = 4; loop < 4+nValues; ++loop) { - // place one item in intInputQueue - ASSERT(intInputQueue.try_put(loop), "Error putting to intInputQueue"); - g.wait_for_all(); - { - CheckTupleType t3; - ASSERT(!checkTupleQueue0.try_get(t3), "Object in output queue"); - ASSERT(!checkTupleQueue1.try_get(t3), "Object in output queue"); - } - } // for ( int loop ... - - for(int loop = 1; loop <= nValues; ++loop) { - int lp1 = 4 + (loop * nIncr)%nValues; - // place item in checkInputQueue - ASSERT(checkInputQueue.try_put(lp1), "Error putting to checkInputQueue"); - // checkTupleQueue0, checkTupleQueue1 should have items - g.wait_for_all(); - { - CheckTupleType t0; - CheckTupleType t1; - ASSERT(checkTupleQueue0.try_get(t0) && tbb::flow::get<0>(t0) == lp1 && tbb::flow::get<1>(t0) == lp1, "Error in checkTupleQueue0 output"); - ASSERT(checkTupleQueue1.try_get(t1) && tbb::flow::get<0>(t1) == lp1 && tbb::flow::get<1>(t1) == lp1, "Error in checkTupleQueue1 output"); - ASSERT(!checkTupleQueue0.try_get(t0), "extra object in output queue checkTupleQueue0"); - ASSERT(!checkTupleQueue1.try_get(t0), "extra object in output queue checkTupleQueue1"); - } - } // for ( int loop ... 
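A note on the stride in the loop just closed: because nIncr = 31 and nValues = 100 are coprime, (loop * nIncr) % nValues visits every residue of 0..99 exactly once, so the second queue is fed a deterministic permutation rather than ascending order. A quick check of that claim:

    #include <cassert>
    #include <set>

    int main() {
        std::set<int> seen;
        for (int loop = 1; loop <= 100; ++loop)
            seen.insert((loop * 31) % 100);  // gcd(31, 100) == 1
        assert(seen.size() == 100);          // all of 0..99, each exactly once
        return 0;
    }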
- } // Check -} - -int TestMain() { -#if __TBB_USE_TBB_TUPLE - REMARK(" Using TBB tuple\n"); -#else - REMARK(" Using platform tuple\n"); -#endif - - - test_input_port_policies<tbb::flow::reserving>(); - test_input_port_policies<tbb::flow::queueing>(); - test_input_port_policies<tbb::flow::tag_matching>(); - for (int p = 0; p < 2; ++p) { - REMARK("reserving\n"); - generate_test<serial_test, tbb::flow::tuple<threebyte, double>, tbb::flow::reserving >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 4 - { - Check<check_type<int> > my_check; - generate_test<serial_test, tbb::flow::tuple<float, double, check_type<int>, long>, tbb::flow::reserving >::do_test(); - } -#endif -#if MAX_TUPLE_TEST_SIZE >= 6 - generate_test<serial_test, tbb::flow::tuple<double, double, int, long, int, short>, tbb::flow::reserving >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 8 - generate_test<serial_test, tbb::flow::tuple<float, double, double, double, float, int, float, long>, tbb::flow::reserving >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 10 - generate_test<serial_test, tbb::flow::tuple<float, double, int, double, double, float, long, int, float, long>, tbb::flow::reserving >::do_test(); -#endif - { - Check<check_type<int> > my_check1; - generate_test<parallel_test, tbb::flow::tuple<float, check_type<int> >, tbb::flow::reserving >::do_test(); - } -#if MAX_TUPLE_TEST_SIZE >= 3 - generate_test<parallel_test, tbb::flow::tuple<float, int, long>, tbb::flow::reserving >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 5 - generate_test<parallel_test, tbb::flow::tuple<double, double, int, int, short>, tbb::flow::reserving >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 7 - generate_test<parallel_test, tbb::flow::tuple<float, int, double, float, long, float, long>, tbb::flow::reserving >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 9 - generate_test<parallel_test, tbb::flow::tuple<float, double, int, double, double, long, int, float, long>, tbb::flow::reserving >::do_test(); -#endif - REMARK("queueing\n"); - generate_test<serial_test, tbb::flow::tuple<float, double>, tbb::flow::queueing >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 4 - generate_test<serial_test, tbb::flow::tuple<float, double, int, long>, tbb::flow::queueing >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 6 - generate_test<serial_test, tbb::flow::tuple<double, double, int, long, int, short>, tbb::flow::queueing >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 8 - generate_test<serial_test, tbb::flow::tuple<float, double, double, double, float, int, float, long>, tbb::flow::queueing >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 10 - generate_test<serial_test, tbb::flow::tuple<float, double, int, double, double, float, long, int, float, long>, tbb::flow::queueing >::do_test(); -#endif - generate_test<parallel_test, tbb::flow::tuple<float, double>, tbb::flow::queueing >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 3 - generate_test<parallel_test, tbb::flow::tuple<float, int, long>, tbb::flow::queueing >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 5 - generate_test<parallel_test, tbb::flow::tuple<double, double, int, int, short>, tbb::flow::queueing >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 7 - generate_test<parallel_test, tbb::flow::tuple<float, int, double, float, long, float, long>, tbb::flow::queueing >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 9 - generate_test<parallel_test, tbb::flow::tuple<float, double, int, double, double, long, int, float, long>, tbb::flow::queueing >::do_test(); -#endif - REMARK("tag_matching\n"); - 
generate_test<serial_test, tbb::flow::tuple<float, double>, tbb::flow::tag_matching >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 4 - generate_test<serial_test, tbb::flow::tuple<float, double, int, long>, tbb::flow::tag_matching >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 6 - generate_test<serial_test, tbb::flow::tuple<double, double, int, long, int, short>, tbb::flow::tag_matching >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 8 - generate_test<serial_test, tbb::flow::tuple<float, double, double, double, float, int, float, long>, tbb::flow::tag_matching >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 10 - generate_test<serial_test, tbb::flow::tuple<float, double, int, double, double, float, long, int, float, long>, tbb::flow::tag_matching >::do_test(); -#endif - generate_test<parallel_test, tbb::flow::tuple<float, double>, tbb::flow::tag_matching >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 3 - generate_test<parallel_test, tbb::flow::tuple<float, int, long>, tbb::flow::tag_matching >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 5 - generate_test<parallel_test, tbb::flow::tuple<double, double, int, int, short>, tbb::flow::tag_matching >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 7 - generate_test<parallel_test, tbb::flow::tuple<float, int, double, float, long, float, long>, tbb::flow::tag_matching >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 9 - generate_test<parallel_test, tbb::flow::tuple<float, double, int, double, double, long, int, float, long>, tbb::flow::tag_matching >::do_test(); -#endif - generate_recirc_test<tbb::flow::tuple<float,double> >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 5 - generate_recirc_test<tbb::flow::tuple<double, double, int, int, short> >::do_test(); -#endif - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - REMARK("test queueing extract\n"); - test_join_extract<int, tbb::flow::join_node< tbb::flow::tuple<int,int>, tbb::flow::queueing> >().run_tests(); - REMARK("test tag_matching extract\n"); - test_join_extract<int, tbb::flow::join_node< tbb::flow::tuple<int,int>, tbb::flow::tag_matching> >().run_tests(); - REMARK("test reserving extract\n"); - test_join_extract<int, tbb::flow::join_node< tbb::flow::tuple<int,int>, tbb::flow::reserving> >().run_tests(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_lambda.cpp b/src/tbb/src/test/test_lambda.cpp deleted file mode 100644 index 1f2ed04f6..000000000 --- a/src/tbb/src/test/test_lambda.cpp +++ /dev/null @@ -1,249 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" -#if __TBB_TEST_SKIP_LAMBDA - -#include "harness.h" -int TestMain() { - REPORT("Known issue: lambdas are not properly supported on the platform \n"); - return Harness::Skipped; -} - -#else /*__TBB_TEST_SKIP_LAMBDA*/ - -#define NOMINMAX -#include "tbb/tbb.h" -#include "tbb/combinable.h" -#include <cstdio> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <list> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -using namespace std; -using namespace tbb; - -typedef pair<int,int> max_element_t; - -void f(int val, int *arr, int start, int stop) { - for (int i=start; i<=stop; ++i) { - arr[i] = val; - } -} - -#include "harness.h" - -#if __TBB_TASK_GROUP_CONTEXT -int Fib(int n) { - if( n<2 ) { - return n; - } else { - int x=0, y=0; - task_group g; - g.run( [&]{x=Fib(n-1);} ); // spawn a task - g.run( [&]{y=Fib(n-2);} ); // spawn another task - g.wait(); // wait for both tasks to complete - return x+y; - } -} -#endif /* !__TBB_TASK_GROUP_CONTEXT */ - -#include "harness_report.h" -#include "harness_assert.h" - -int TestMain () { - const int N = 1000; - const int Grainsize = N/1000; - int a[N]; - int max_sum; - ASSERT( MinThread>=1, "Error: Number of threads must be positive.\n"); - - for(int p=MinThread; p<=MaxThread; ++p) { - task_scheduler_init init(p); - - REMARK("Running lambda expression tests on %d threads...\n", p); - - //test parallel_for - REMARK("Testing parallel_for... "); - parallel_for(blocked_range<int>(0,N,Grainsize), - [&] (blocked_range<int>& r) { - for (int i=r.begin(); i!=r.end(); ++i) a[i] = i; - }); - ASSERT(a[0]==0 && a[N-1]==N-1, "parallel_for w/lambdas failed.\n"); - REMARK("passed.\n"); - - //test parallel_reduce - REMARK("Testing parallel_reduce... "); - int sum = parallel_reduce(blocked_range<int>(0,N,Grainsize), int(0), - [&] (blocked_range<int>& r, int current_sum) -> int { - for (int i=r.begin(); i!=r.end(); ++i) - current_sum += a[i]*(1000-i); - return current_sum; - }, - [] (const int x1, const int x2) { - return x1+x2; - } ); - - max_element_t max_el = - parallel_reduce(blocked_range<int>(0,N,Grainsize), make_pair(a[0], 0), - [&] (blocked_range<int>& r, max_element_t current_max) - -> max_element_t { - for (int i=r.begin(); i!=r.end(); ++i) - if (a[i]>current_max.first) - current_max = make_pair(a[i], i); - return current_max; - }, - [] (const max_element_t x1, const max_element_t x2) { - return (x1.first>x2.first)?x1:x2; - }); - ASSERT(sum==166666500 && max_el.first==999 && max_el.second==999, - "parallel_reduce w/lambdas failed.\n"); - REMARK("passed.\n"); - - //test parallel_do - REMARK("Testing parallel_do... 
"); - list<int> s; - s.push_back(0); - - parallel_do(s.begin(), s.end(), - [&](int foo, parallel_do_feeder<int>& feeder) { - if (foo == 42) return; - else if (foo>42) { - s.push_back(foo-3); - feeder.add(foo-3); - } else { - s.push_back(foo+5); - feeder.add(foo+5); - } - }); - ASSERT(s.back()==42, "parallel_do w/lambda failed.\n"); - REMARK("passed.\n"); - - //test parallel_invoke - REMARK("Testing parallel_invoke... "); - parallel_invoke([&]{ f(2, a, 0, N/3); }, - [&]{ f(1, a, N/3+1, 2*(N/3)); }, - [&]{ f(0, a, 2*(N/3)+1, N-1); }); - ASSERT(a[0]==2.0 && a[N-1]==0.0, "parallel_invoke w/lambda failed.\n"); - REMARK("passed.\n"); - - //test tbb_thread - REMARK("Testing tbb_thread... "); - tbb_thread::id myId; - tbb_thread myThread([](int x, int y) { - ASSERT(x==42 && y==64, "tbb_thread w/lambda failed.\n"); - REMARK("passed.\n"); - }, 42, 64); - myThread.join(); - -#if __TBB_TASK_GROUP_CONTEXT - // test task_group - REMARK("Testing task_group... "); - int result; - result = Fib(32); - ASSERT(result==2178309, "task_group w/lambda failed.\n"); - REMARK("passed.\n"); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - // Reset array a to index values - parallel_for(blocked_range<int>(0,N,Grainsize), - [&] (blocked_range<int>& r) { - for (int i=r.begin(); i!=r.end(); ++i) a[i] = i; - }); - // test parallel_sort - REMARK("Testing parallel_sort... "); - int pivot = 42; - - // sort nearest by increasing distance from pivot - parallel_sort(a, a+N, - [&](int x, int y) { return(abs(pivot-x) < abs(pivot-y)); }); - ASSERT(a[0]==42 && a[N-1]==N-1, "parallel_sort w/lambda failed.\n"); - REMARK("passed.\n"); - - //test combinable - REMARK("Testing combinable... "); - combinable<std::pair<int,int> > minmax_c([&]() { return std::make_pair(a[0], a[0]); } ); - - parallel_for(blocked_range<int>(0,N), - [&] (const blocked_range<int> &r) { - std::pair<int,int>& mmr = minmax_c.local(); - for(int i=r.begin(); i!=r.end(); ++i) { - if (mmr.first > a[i]) mmr.first = a[i]; - if (mmr.second < a[i]) mmr.second = a[i]; - } - }); - max_sum = 0; - minmax_c.combine_each([&max_sum](std::pair<int,int> x) { - int tsum = x.first + x.second; - if( tsum>max_sum ) max_sum = tsum; - }); - ASSERT( (N-1)<=max_sum && max_sum<=a[0]+N-1, "combinable::combine_each /w lambda failed." ); - - std::pair<int,int> minmax_result_c; - minmax_result_c = - minmax_c.combine([](std::pair<int,int> x, std::pair<int,int> y) { - return std::make_pair(x.first<y.first?x.first:y.first, - x.second>y.second?x.second:y.second); - }); - ASSERT(minmax_result_c.first==0 && minmax_result_c.second==999, - "combinable w/lambda failed.\n"); - REMARK("passed.\n"); - - //test enumerable_thread_specific - REMARK("Testing enumerable_thread_specific... "); - enumerable_thread_specific< std::pair<int,int> > minmax_ets([&]() { return std::make_pair(a[0], a[0]); } ); - - max_sum = 0; - parallel_for(blocked_range<int>(0,N), - [&] (const blocked_range<int> &r) { - std::pair<int,int>& mmr = minmax_ets.local(); - for(int i=r.begin(); i!=r.end(); ++i) { - if (mmr.first > a[i]) mmr.first = a[i]; - if (mmr.second < a[i]) mmr.second = a[i]; - } - }); - minmax_ets.combine_each([&max_sum](std::pair<int,int> x) { - int tsum = x.first + x.second; - if( tsum>max_sum ) max_sum = tsum; - }); - ASSERT( (N-1)<=max_sum && max_sum<=a[0]+N-1, "enumerable_thread_specific::combine_each /w lambda failed." 
); - - std::pair<int,int> minmax_result_ets; - minmax_result_ets = - minmax_ets.combine([](std::pair<int,int> x, std::pair<int,int> y) { - return std::make_pair(x.first<y.first?x.first:y.first, - x.second>y.second?x.second:y.second); - }); - ASSERT(minmax_result_ets.first==0 && minmax_result_ets.second==999, - "enumerable_thread_specific w/lambda failed.\n"); - REMARK("passed.\n"); - } - return Harness::Done; -} -#endif /* __TBB_TEST_SKIP_LAMBDA */ diff --git a/src/tbb/src/test/test_limiter_node.cpp b/src/tbb/src/test/test_limiter_node.cpp deleted file mode 100644 index 1df332996..000000000 --- a/src/tbb/src/test/test_limiter_node.cpp +++ /dev/null @@ -1,521 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "harness.h" -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -#include "harness_graph.h" -#endif -#include "tbb/flow_graph.h" -#include "tbb/atomic.h" -#include "tbb/task_scheduler_init.h" - -const int L = 10; -const int N = 1000; - -template< typename T > -struct serial_receiver : public tbb::flow::receiver<T> { - T next_value; - - serial_receiver() : next_value(T(0)) {} - - /* override */ tbb::task *try_put_task( const T &v ) { - ASSERT( next_value++ == v, NULL ); - return const_cast<tbb::task *>(tbb::flow::interface7::SUCCESSFULLY_ENQUEUED); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void internal_add_built_predecessor( tbb::flow::sender<T> & ) { } - void internal_delete_built_predecessor( tbb::flow::sender<T> & ) { } - void copy_predecessors( std::vector<tbb::flow::sender<T>*> & ) { } - size_t predecessor_count() { return 0; } -#endif - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void reset_receiver(tbb::flow::reset_flags /*f*/) {next_value = T(0);} -#else - /*override*/void reset_receiver() {next_value = T(0);} -#endif -}; - -template< typename T > -struct parallel_receiver : public tbb::flow::receiver<T> { - - tbb::atomic<int> my_count; - - parallel_receiver() { my_count = 0; } - - /* override */ tbb::task *try_put_task( const T &/*v*/ ) { - ++my_count; - return const_cast<tbb::task *>(tbb::flow::interface7::SUCCESSFULLY_ENQUEUED); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void internal_add_built_predecessor( tbb::flow::sender<T> & ) { } - void internal_delete_built_predecessor( tbb::flow::sender<T> & ) { } - void copy_predecessors( std::vector<tbb::flow::sender<T>*> & ) { } - size_t predecessor_count( ) { return 0; } - /*override*/void reset_receiver(tbb::flow::reset_flags /*f*/) {my_count = 0;} -#else - /*override*/void reset_receiver() {my_count = 0;} -#endif -}; - -template< typename T > -struct empty_sender : public tbb::flow::sender<T> { - /* override */ bool register_successor( tbb::flow::receiver<T> & ) { return false; } - /* override */ bool remove_successor( tbb::flow::receiver<T> & ) { return false; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void internal_add_built_successor( tbb::flow::receiver<T> & ) { } - void internal_delete_built_successor( tbb::flow::receiver<T> & ) { } - void copy_successors( std::vector<tbb::flow::receiver<T>*> & ) { } - size_t successor_count() { return 0; } -#endif - -}; - - -template< typename T > -struct put_body : NoAssign { - - tbb::flow::limiter_node<T> &my_lim; - tbb::atomic<int> &my_accept_count; - - put_body( tbb::flow::limiter_node<T> &lim, tbb::atomic<int> &accept_count ) : - my_lim(lim), my_accept_count(accept_count) {} - - void operator()( int ) const { - for ( int i = 0; i < L; ++i ) { - bool msg = my_lim.try_put( T(i) ); - if ( msg == true ) - ++my_accept_count; - } - } -}; - -template< typename T > -struct put_dec_body : NoAssign { - - tbb::flow::limiter_node<T> &my_lim; - tbb::atomic<int> &my_accept_count; - - put_dec_body( tbb::flow::limiter_node<T> &lim, tbb::atomic<int> &accept_count ) : - my_lim(lim), my_accept_count(accept_count) {} - - void operator()( int ) const { - int local_accept_count = 0; - while ( local_accept_count < N ) { - bool msg = my_lim.try_put( T(local_accept_count) ); - if ( msg == true ) { - ++local_accept_count; - ++my_accept_count; - my_lim.decrement.try_put( tbb::flow::continue_msg() ); - } - } - } - -}; - -template< typename T > -void test_puts_with_decrements( int num_threads, tbb::flow::limiter_node< T >& lim ) { - parallel_receiver<T> r; - empty_sender< tbb::flow::continue_msg > s; - 
tbb::atomic<int> accept_count; - accept_count = 0; - tbb::flow::make_edge( lim, r ); - tbb::flow::make_edge(s, lim.decrement); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(lim.decrement.predecessor_count() == 1, NULL); - ASSERT(lim.successor_count() == 1, NULL); - ASSERT(lim.predecessor_count() == 0, NULL); - std::vector<tbb::flow::sender<tbb::flow::continue_msg> *> dec_preds; - lim.decrement.copy_predecessors(dec_preds); - ASSERT(dec_preds.size() == 1, NULL); -#endif - // test puts with decrements - NativeParallelFor( num_threads, put_dec_body<T>(lim, accept_count) ); - int c = accept_count; - ASSERT( c == N*num_threads, NULL ); - ASSERT( r.my_count == N*num_threads, NULL ); -} - -// -// Tests -// -// limiter only forwards below the limit, multiple parallel senders / single receiver -// mutiple parallel senders that put to decrement at each accept, limiter accepts new messages -// -// -template< typename T > -int test_parallel(int num_threads) { - - // test puts with no decrements - for ( int i = 0; i < L; ++i ) { - tbb::flow::graph g; - tbb::flow::limiter_node< T > lim(g, i); - parallel_receiver<T> r; - tbb::atomic<int> accept_count; - accept_count = 0; - tbb::flow::make_edge( lim, r ); - // test puts with no decrements - NativeParallelFor( num_threads, put_body<T>(lim, accept_count) ); - g.wait_for_all(); - int c = accept_count; - ASSERT( c == i, NULL ); - } - - // test puts with decrements - for ( int i = 1; i < L; ++i ) { - tbb::flow::graph g; - tbb::flow::limiter_node< T > lim(g, i); - test_puts_with_decrements(num_threads, lim); - tbb::flow::limiter_node< T > lim_copy( lim ); - test_puts_with_decrements(num_threads, lim_copy); - } - - return 0; -} - -// -// Tests -// -// limiter only forwards below the limit, single sender / single receiver -// at reject, a put to decrement, will cause next message to be accepted -// -template< typename T > -int test_serial() { - - // test puts with no decrements - for ( int i = 0; i < L; ++i ) { - tbb::flow::graph g; - tbb::flow::limiter_node< T > lim(g, i); - serial_receiver<T> r; - tbb::flow::make_edge( lim, r ); - for ( int j = 0; j < L; ++j ) { - bool msg = lim.try_put( T(j) ); - ASSERT( ( j < i && msg == true ) || ( j >= i && msg == false ), NULL ); - } - g.wait_for_all(); - } - - // test puts with decrements - for ( int i = 1; i < L; ++i ) { - tbb::flow::graph g; - tbb::flow::limiter_node< T > lim(g, i); - serial_receiver<T> r; - empty_sender< tbb::flow::continue_msg > s; - tbb::flow::make_edge( lim, r ); - tbb::flow::make_edge(s, lim.decrement); - for ( int j = 0; j < N; ++j ) { - bool msg = lim.try_put( T(j) ); - ASSERT( ( j < i && msg == true ) || ( j >= i && msg == false ), NULL ); - if ( msg == false ) { - lim.decrement.try_put( tbb::flow::continue_msg() ); - msg = lim.try_put( T(j) ); - ASSERT( msg == true, NULL ); - } - } - } - return 0; -} - -// reported bug in limiter (http://software.intel.com/en-us/comment/1752355) -#define DECREMENT_OUTPUT 1 // the port number of the decrement output of the multifunction_node -#define LIMITER_OUTPUT 0 // port number of the integer output - -typedef tbb::flow::multifunction_node<int, tbb::flow::tuple<int,tbb::flow::continue_msg> > mfnode_type; - -tbb::atomic<size_t> emit_count; -tbb::atomic<size_t> emit_sum; -tbb::atomic<size_t> receive_count; -tbb::atomic<size_t> receive_sum; - -struct mfnode_body { - int max_cnt; - tbb::atomic<int>* my_cnt; - mfnode_body(const int& _max, tbb::atomic<int> &_my) : max_cnt(_max), my_cnt(&_my) { } - void operator()(const int &/*in*/, mfnode_type::output_ports_type 
                                                                 &out) {
-        int lcnt = ++(*my_cnt);
-        if(lcnt > max_cnt) {
-            return;
-        }
-        // put one continue_msg to the decrement of the limiter.
-        if(!tbb::flow::get<DECREMENT_OUTPUT>(out).try_put(tbb::flow::continue_msg())) {
-            ASSERT(false,"Unexpected rejection of decrement");
-        }
-        {
-            // put messages to the input of the limiter_node until it rejects.
-            while( tbb::flow::get<LIMITER_OUTPUT>(out).try_put(lcnt) ) {
-                emit_sum += lcnt;
-                ++emit_count;
-            }
-        }
-    }
-};
-
-struct fn_body {
-    int operator()(const int &in) {
-        receive_sum += in;
-        ++receive_count;
-        return in;
-    }
-};
-
-//                       +------------+
-//      +---------+      |            v
-//      | mf_node |0-----+     +----------+          +----------+
-//   +->|         |1---------->| lim_node |--------->| fn_node  |--+
-//   |  +---------+            +----------+          +----------+  |
-//   |                                                             |
-//   |                                                             |
-//   +-------------------------------------------------------------+
-//
-void
-test_multifunction_to_limiter(int _max, int _nparallel) {
-    tbb::flow::graph g;
-    emit_count = 0;
-    emit_sum = 0;
-    receive_count = 0;
-    receive_sum = 0;
-    tbb::atomic<int> local_cnt;
-    local_cnt = 0;
-    mfnode_type mf_node(g, tbb::flow::unlimited, mfnode_body(_max, local_cnt));
-    tbb::flow::function_node<int, int> fn_node(g, tbb::flow::unlimited, fn_body());
-    tbb::flow::limiter_node<int> lim_node(g, _nparallel);
-    tbb::flow::make_edge(tbb::flow::output_port<LIMITER_OUTPUT>(mf_node), lim_node);
-    tbb::flow::make_edge(tbb::flow::output_port<DECREMENT_OUTPUT>(mf_node), lim_node.decrement);
-    tbb::flow::make_edge(lim_node, fn_node);
-    tbb::flow::make_edge(fn_node, mf_node);
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-    REMARK("pred cnt == %d\n",(int)(lim_node.predecessor_count()));
-    REMARK("succ cnt == %d\n",(int)(lim_node.successor_count()));
-    tbb::flow::limiter_node<int>::successor_vector_type my_succs;
-    lim_node.copy_successors(my_succs);
-    REMARK("succ cnt from vector == %d\n",(int)(my_succs.size()));
-    tbb::flow::limiter_node<int>::predecessor_vector_type my_preds;
-    lim_node.copy_predecessors(my_preds);
-    REMARK("pred cnt from vector == %d\n",(int)(my_preds.size()));
-#endif
-    mf_node.try_put(1);
-    g.wait_for_all();
-    ASSERT(emit_count == receive_count, "counts do not match");
-    ASSERT(emit_sum == receive_sum, "sums do not match");
-
-    // reset, test again
-    g.reset();
-    emit_count = 0;
-    emit_sum = 0;
-    receive_count = 0;
-    receive_sum = 0;
-    local_cnt = 0;
-    mf_node.try_put(1);
-    g.wait_for_all();
-    ASSERT(emit_count == receive_count, "counts do not match");
-    ASSERT(emit_sum == receive_sum, "sums do not match");
-}
-
-
-void
-test_continue_msg_reception() {
-    tbb::flow::graph g;
-    tbb::flow::limiter_node<int> ln(g,2);
-    tbb::flow::queue_node<int> qn(g);
-    tbb::flow::make_edge(ln, qn);
-    ln.decrement.try_put(tbb::flow::continue_msg());
-    ln.try_put(42);
-    g.wait_for_all();
-    int outint;
-    ASSERT(qn.try_get(outint) && outint == 42, "initial put to decrement stops node");
-}
-
-
-//
-// This test ascertains that if a message is not successfully put
-// to a successor, the message is not dropped but released.
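The deleted test that follows, test_reserve_release_messages, leans on two limiter_node behaviors already exercised above: the constructor's threshold bounds how many items may be forwarded, and a continue_msg sent to the decrement port frees one slot. A minimal sketch of just those mechanics against the same classic tbb::flow API:

    #include "tbb/flow_graph.h"
    #include <cassert>

    int main() {
        tbb::flow::graph g;
        tbb::flow::limiter_node<int> lim(g, 2);   // forward at most 2 items until decremented
        tbb::flow::queue_node<int> out(g);
        tbb::flow::make_edge(lim, out);

        assert(lim.try_put(10));                  // slot 1 of 2
        assert(lim.try_put(11));                  // slot 2 of 2
        assert(!lim.try_put(12));                 // threshold reached: rejected, not buffered
        lim.decrement.try_put(tbb::flow::continue_msg()); // release one slot
        assert(lim.try_put(12));                  // accepted again
        g.wait_for_all();
        return 0;
    }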
-// - -using namespace tbb::flow; -void test_reserve_release_messages() { - graph g; - - //making two queue_nodes: one broadcast_node and one limiter_node - queue_node<int> input_queue(g); - queue_node<int> output_queue(g); - broadcast_node<continue_msg> broad(g); - limiter_node<int> limit(g,2,1); //threshold of 2 - - //edges - make_edge(input_queue, limit); - make_edge(limit, output_queue); - make_edge(broad,limit.decrement); - - int list[4] = {19, 33, 72, 98}; //list to be put to the input queue - - input_queue.try_put(list[0]); // succeeds - input_queue.try_put(list[1]); // succeeds - input_queue.try_put(list[2]); // fails, stored in upstream buffer - g.wait_for_all(); - - remove_edge(limit, output_queue); //remove successor - - //sending continue messages to the decrement port of the limiter - broad.try_put(continue_msg()); - broad.try_put(continue_msg()); //failed message retrieved. - g.wait_for_all(); - - make_edge(limit, output_queue); //putting the successor back - - broad.try_put(continue_msg()); - broad.try_put(continue_msg()); //drop the count - - input_queue.try_put(list[3]); //success - g.wait_for_all(); - - int var=0; - - for (int i=0; i<4; i++){ - output_queue.try_get(var); - ASSERT(var==list[i], "some data dropped, input does not match output"); - g.wait_for_all(); - } -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -void test_extract() { - tbb::flow::graph g; - int j; - tbb::flow::limiter_node<int> node0(g, /*threshold*/1); - tbb::flow::queue_node<int> q0(g); - tbb::flow::queue_node<int> q1(g); - tbb::flow::queue_node<int> q2(g); - tbb::flow::broadcast_node<tbb::flow::continue_msg> b0(g); - tbb::flow::broadcast_node<tbb::flow::continue_msg> b1(g); - - for( int i = 0; i < 2; ++i ) { - - ASSERT(node0.predecessor_count() == 0, "incorrect predecessor count at start"); - ASSERT(node0.successor_count() == 0, "incorrect successor count at start"); - ASSERT(node0.decrement.predecessor_count() == 0, "incorrect decrement pred count at start"); - - tbb::flow::make_edge(q0, node0); - tbb::flow::make_edge(q1, node0); - tbb::flow::make_edge(node0, q2); - tbb::flow::make_edge(b0, node0.decrement); - tbb::flow::make_edge(b1, node0.decrement); - g.wait_for_all(); - - /* b0 b1 */ - /* \ | */ - /* q0\ \ | */ - /* \ \| */ - /* +-node0---q2 */ - /* / */ - /* q1/ */ - - q0.try_put(i); - g.wait_for_all(); - ASSERT(node0.predecessor_count() == 2, "incorrect predecessor count after construction"); - ASSERT(node0.successor_count() == 1, "incorrect successor count after construction"); - ASSERT(node0.decrement.predecessor_count() == 2, "incorrect decrement pred count after construction"); - ASSERT(q2.try_get(j) && j == i, "improper value forwarded to output queue"); - q0.try_put(2*i); - g.wait_for_all(); - ASSERT(!q2.try_get(j), "limiter_node forwarded item improperly"); - b0.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(!q2.try_get(j), "limiter_node forwarded item improperly"); - b0.try_put(tbb::flow::continue_msg()); - g.wait_for_all(); - ASSERT(q2.try_get(j) && j == 2*i, "limiter_node failed to forward item"); - - tbb::flow::limiter_node<int>::successor_vector_type sv; - tbb::flow::limiter_node<int>::predecessor_vector_type pv; - tbb::flow::continue_receiver::predecessor_vector_type dv; - std::vector<tbb::flow::receiver<int>*> sv1; - std::vector<tbb::flow::sender<int>*> pv1; - std::vector<tbb::flow::sender<tbb::flow::continue_msg>*> dv1; - - node0.copy_predecessors(pv); - node0.copy_successors(sv); - node0.decrement.copy_predecessors(dv); - pv1.push_back(&(q0)); - 
pv1.push_back(&(q1));
-        sv1.push_back(&(q2));
-        dv1.push_back(&(b0));
-        dv1.push_back(&(b1));
-
-        ASSERT(pv.size() == 2, "improper size for predecessors");
-        ASSERT(sv.size() == 1, "improper size for successors");
-        ASSERT(lists_match(pv,pv1), "predecessor lists do not match");
-        ASSERT(lists_match(sv,sv1), "successor lists do not match");
-        ASSERT(lists_match(dv,dv1), "decrement predecessor lists do not match");
-
-        if(i == 0) {
-            node0.extract();
-        }
-        else {
-            q0.extract();
-            b0.extract();
-            q2.extract();
-
-            ASSERT(node0.predecessor_count() == 1, "incorrect predecessor count after extract second iter");
-            ASSERT(node0.successor_count() == 0, "incorrect successor count after extract second iter");
-            ASSERT(node0.decrement.predecessor_count() == 1, "incorrect decrement pred count after extract second iter");
-
-            node0.copy_predecessors(pv);
-            node0.copy_successors(sv);
-            node0.decrement.copy_predecessors(dv);
-            pv1.clear();
-            sv1.clear();
-            dv1.clear();
-            pv1.push_back(&(q1));
-            dv1.push_back(&(b1));
-
-            ASSERT(lists_match(pv,pv1), "predecessor lists do not match second iter");
-            ASSERT(lists_match(sv,sv1), "successor lists do not match second iter");
-            ASSERT(lists_match(dv,dv1), "decrement predecessor lists do not match second iter");
-
-            q1.extract();
-            b1.extract();
-        }
-        ASSERT(node0.predecessor_count() == 0, "incorrect predecessor count after extract");
-        ASSERT(node0.successor_count() == 0, "incorrect successor count after extract");
-        ASSERT(node0.decrement.predecessor_count() == 0, "incorrect decrement pred count after extract");
-
-    }
-
-}
-#endif // TBB_PREVIEW_FLOW_GRAPH_FEATURES
-
-int TestMain() {
-    for (int i = 1; i <= 8; ++i) {
-        tbb::task_scheduler_init init(i);
-        test_serial<int>();
-        test_parallel<int>(i);
-    }
-    test_continue_msg_reception();
-    test_multifunction_to_limiter(30,3);
-    test_multifunction_to_limiter(300,13);
-    test_multifunction_to_limiter(3000,1);
-    test_reserve_release_messages();
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-    test_extract();
-#endif
-    return Harness::Done;
-}
diff --git a/src/tbb/src/test/test_malloc_atexit.cpp b/src/tbb/src/test/test_malloc_atexit.cpp
deleted file mode 100644
index 56230104b..000000000
--- a/src/tbb/src/test/test_malloc_atexit.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-/* Regression test against a bug in the TBB allocator that manifested when a
-   dynamic library calls atexit() or registers dtors of static objects.
-   If the allocator is not initialized yet, we can get a deadlock,
-   because the allocator library has static object dtors as well; they are
-   registered during allocator initialization, and atexit() is protected
-   by a non-recursive mutex in some versions of GLIBC.
- */
-
-#include <stdlib.h>
-#include "../tbbmalloc/proxy.h" // __TBB_malloc_safer_msize
-#include "tbb/tbb_config.h" // for __TBB_WIN8UI_SUPPORT
-
-#if !(_WIN32||_WIN64 || MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED) || __TBB_WIN8UI_SUPPORT || __MINGW32__ || __MINGW64__
-#define HARNESS_SKIP_TEST 1
-#endif
-
-// __TBB_malloc_safer_msize() returns 0 for unknown objects,
-// thus we can detect ownership
-#if _USRDLL
-    #if _WIN32||_WIN64
-extern __declspec(dllexport)
-    #endif
-bool dll_isMallocOverloaded()
-#else
-bool exe_isMallocOverloaded()
-#endif
-{
-    const size_t reqSz = 8;
-    void *o = malloc(reqSz);
-    bool ret = __TBB_malloc_safer_msize(o, NULL) >= reqSz;
-    free(o);
-    return ret;
-}
-
-#if _USRDLL
-
-#if MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED
-
-#define HARNESS_CUSTOM_MAIN 1
-#include "harness.h"
-
-#include <dlfcn.h>
-#if __APPLE__
-#include <malloc/malloc.h>
-#define malloc_usable_size(p) malloc_size(p)
-#else
-#include <malloc.h>
-#endif
-#include <signal.h>
-
-#if __linux__ && !__ANDROID__
-extern "C" {
-void __libc_free(void *ptr);
-void *__libc_realloc(void *ptr, size_t size);
-
-// check that this kind of free/realloc overload works correctly
-void free(void *ptr)
-{
-    __libc_free(ptr);
-}
-
-void *realloc(void *ptr, size_t size)
-{
-    return __libc_realloc(ptr, size);
-}
-} // extern "C"
-#endif // __linux__ && !__ANDROID__
-
-#endif // MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED
-
-// Even when the test is skipped, the DLL source must not be empty, so that a .lib to link with is generated.
-
-#ifndef _PGO_INSTRUMENT
-void dummyFunction() {}
-
-// TODO: enable the check under Android
-#if (MALLOC_UNIXLIKE_OVERLOAD_ENABLED || MALLOC_ZONE_OVERLOAD_ENABLED) && !__ANDROID__
-typedef void *(malloc_type)(size_t);
-
-static void SigSegv(int)
-{
-    REPORT("Known issue: SIGSEGV while working with memory allocated by the replaced allocator.\n"
-           "skip\n");
-    exit(0);
-}
-
-// TODO: Use of SIGSEGV can be eliminated by parsing /proc/self/maps
-// and a series of system malloc calls.
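The ownership probe above (dll_isMallocOverloaded / exe_isMallocOverloaded) reduces to comparing the requested size with the size the active allocator reports back for the block. A minimal standalone sketch of the same idea, using glibc's malloc_usable_size() in place of __TBB_malloc_safer_msize(), which returns 0 for blocks it does not own (illustrative only, not part of the TBB sources):

    #include <malloc.h>   // malloc_usable_size() (glibc)
    #include <stdio.h>
    #include <stdlib.h>

    int main()
    {
        const size_t reqSz = 8;
        void *o = malloc(reqSz);
        // The allocator that owns the block reports a usable size >= the
        // request; a proxy probing a foreign block would report 0.
        printf("owned: %s\n", malloc_usable_size(o) >= reqSz ? "yes" : "no");
        free(o);
        return 0;
    }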
-void TestReplacedAllocFunc()
-{
-    struct sigaction sa, sa_default;
-    malloc_type *orig_malloc = (malloc_type*)dlsym(RTLD_NEXT, "malloc");
-    void *p = (*orig_malloc)(16);
-
-    // protect potentially unsafe actions
-    sigemptyset(&sa.sa_mask);
-    sa.sa_flags = 0;
-    sa.sa_handler = SigSegv;
-    if (sigaction(SIGSEGV, &sa, &sa_default))
-        ASSERT(0, "sigaction failed");
-
-    ASSERT(malloc_usable_size(p) >= 16, NULL);
-    free(p);
-    // no more unsafe actions, restore SIGSEGV
-    if (sigaction(SIGSEGV, &sa_default, NULL))
-        ASSERT(0, "sigaction failed");
-}
-#else
-void TestReplacedAllocFunc() { }
-#endif
-
-class Foo {
-public:
-    Foo() {
-        // add a lot of exit handlers to cause memory allocation
-        for (int i=0; i<1024; i++)
-            atexit(dummyFunction);
-        TestReplacedAllocFunc();
-    }
-};
-
-static Foo f;
-#endif
-
-#else // _USRDLL
-#include "harness.h"
-
-#if _WIN32||_WIN64
-#include "tbb/tbbmalloc_proxy.h"
-
-extern __declspec(dllimport)
-#endif
-bool dll_isMallocOverloaded();
-
-int TestMain () {
-#ifdef _PGO_INSTRUMENT
-    REPORT("Known issue: test_malloc_atexit hangs if compiled with -prof-genx\n");
-    return Harness::Skipped;
-#elif __TBB_MIC_OFFLOAD
-    REPORT("Known issue: libmalloc_proxy.so is loaded too late in the offload mode on the target when linked via -lmalloc_proxy\n");
-    return Harness::Skipped;
-#else
-    ASSERT( dll_isMallocOverloaded(), "malloc was not replaced" );
-    ASSERT( exe_isMallocOverloaded(), "malloc was not replaced" );
-    return Harness::Done;
-#endif
-}
-
-#endif // _USRDLL
diff --git a/src/tbb/src/test/test_malloc_compliance.cpp b/src/tbb/src/test/test_malloc_compliance.cpp
deleted file mode 100644
index 8f0c538d0..000000000
--- a/src/tbb/src/test/test_malloc_compliance.cpp
+++ /dev/null
@@ -1,1039 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-const unsigned MByte = 1024*1024;
-bool __tbb_test_errno = false;
-
-#include "tbb/tbb_config.h"
-
-#if __TBB_WIN8UI_SUPPORT
-// testing the allocator itself, not the interfaces,
-// so we can use desktop functions
-#define _CRT_USE_WINAPI_FAMILY_DESKTOP_APP !_M_ARM
-#define HARNESS_NO_PARSE_COMMAND_LINE 1
-#include "harness.h"
-// FIXME: fix the test to support New Windows *8 Store Apps mode.
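The Foo constructor in the deleted test_malloc_atexit.cpp above reduces to a small freestanding program: a static object floods atexit() during static initialization, which forces libc to allocate while its (historically non-recursive) exit-handler lock is held. A sketch of the pattern, assuming nothing beyond the C standard library (illustrative, not the original test):

    #include <stdlib.h>

    static void dummyHandler() {}

    struct AtExitStorm {
        AtExitStorm() {
            // Registering many handlers forces atexit() to grow its internal
            // table, i.e. to allocate, during static initialization -- the
            // ordering the deleted test guards against for tbbmalloc.
            for (int i = 0; i < 1024; ++i)
                atexit(dummyHandler);
        }
    };

    static AtExitStorm storm;  // runs before main()

    int main() { return 0; }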
-int TestMain() { - return Harness::Skipped; -} -#else /* __TBB_WIN8UI_SUPPORT */ - -/* _WIN32_WINNT should be defined at the very beginning, - because other headers might include <windows.h> -*/ - -#if _WIN32 || _WIN64 -#undef _WIN32_WINNT -#define _WIN32_WINNT 0x0501 -#include "tbb/machine/windows_api.h" -#include <stdio.h> -#include "harness_report.h" - -#if _MSC_VER && defined(_MT) && defined(_DLL) - #pragma comment(lib, "version.lib") // to use GetFileVersionInfo* -#endif - -void limitMem( size_t limit ) -{ - static HANDLE hJob = NULL; - JOBOBJECT_EXTENDED_LIMIT_INFORMATION jobInfo; - - jobInfo.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_PROCESS_MEMORY; - jobInfo.ProcessMemoryLimit = limit? limit*MByte : 2*MByte*1024; - if (NULL == hJob) { - if (NULL == (hJob = CreateJobObject(NULL, NULL))) { - REPORT("Can't assign create job object: %ld\n", GetLastError()); - exit(1); - } - if (0 == AssignProcessToJobObject(hJob, GetCurrentProcess())) { - REPORT("Can't assign process to job object: %ld\n", GetLastError()); - exit(1); - } - } - if (0 == SetInformationJobObject(hJob, JobObjectExtendedLimitInformation, - &jobInfo, sizeof(jobInfo))) { - REPORT("Can't set limits: %ld\n", GetLastError()); - exit(1); - } -} -// Do not test errno with static VC runtime -#else -#include <sys/resource.h> -#include <stdlib.h> -#include <stdio.h> -#include <errno.h> -#include <sys/types.h> // uint64_t on FreeBSD, needed for rlim_t -#include "harness_report.h" - -void limitMem( size_t limit ) -{ - rlimit rlim; - rlim.rlim_cur = limit? limit*MByte : (rlim_t)RLIM_INFINITY; - rlim.rlim_max = (rlim_t)RLIM_INFINITY; - int ret = setrlimit(RLIMIT_AS,&rlim); - if (0 != ret) { - REPORT("Can't set limits: errno %d\n", errno); - exit(1); - } -} -#endif - -#define ASSERT_ERRNO(cond, msg) ASSERT( !__tbb_test_errno || (cond), msg ) -#define CHECK_ERRNO(cond) (__tbb_test_errno && (cond)) - -#include <time.h> -#include <errno.h> -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#include "tbb/scalable_allocator.h" - -#define HARNESS_CUSTOM_MAIN 1 -#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1 -#include "harness.h" -#include "harness_barrier.h" -#if !__TBB_SOURCE_DIRECTLY_INCLUDED -#include "harness_tbb_independence.h" -#endif -#if __linux__ -#include <stdint.h> // uintptr_t -#endif -#if _WIN32 || _WIN64 -#include <malloc.h> // _aligned_(malloc|free|realloc) -#if __MINGW64__ -// Workaround a bug in MinGW64 headers with _aligned_(malloc|free) not declared by default -extern "C" void __cdecl _aligned_free(void *); -extern "C" void *__cdecl _aligned_malloc(size_t,size_t); -#endif -#endif - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <vector> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -const size_t COUNT_ELEM_CALLOC = 2; -const int COUNT_TESTS = 1000; -const int COUNT_ELEM = 25000; -const size_t MAX_SIZE = 1000; -const int COUNTEXPERIMENT = 10000; - -const char strError[]="failed"; -const char strOk[]="done"; - -typedef unsigned int UINT; -typedef unsigned char UCHAR; -typedef unsigned long DWORD; -typedef unsigned char BYTE; - - -typedef void* TestMalloc(size_t size); -typedef void* TestCalloc(size_t num, size_t size); -typedef void* TestRealloc(void* memblock, size_t size); -typedef void TestFree(void* memblock); -typedef int TestPosixMemalign(void **memptr, size_t alignment, size_t size); -typedef void* TestAlignedMalloc(size_t size, 
size_t alignment); -typedef void* TestAlignedRealloc(void* memblock, size_t size, size_t alignment); -typedef void TestAlignedFree(void* memblock); - -TestMalloc* Tmalloc; -TestCalloc* Tcalloc; -TestRealloc* Trealloc; -TestFree* Tfree; -TestAlignedFree* Taligned_free; -// call alignment-related function via pointer and check result's alignment -int Tposix_memalign(void **memptr, size_t alignment, size_t size); -void* Taligned_malloc(size_t size, size_t alignment); -void* Taligned_realloc(void* memblock, size_t size, size_t alignment); - -// pointers to alignment-related functions used while testing -TestPosixMemalign* Rposix_memalign; -TestAlignedMalloc* Raligned_malloc; -TestAlignedRealloc* Raligned_realloc; - -bool error_occurred = false; - -#if __APPLE__ -// Tests that use the variable are skipped on OS X* -#else -static bool perProcessLimits = true; -#endif - -const size_t POWERS_OF_2 = 20; - -struct MemStruct -{ - void* Pointer; - UINT Size; - - MemStruct() : Pointer(NULL), Size(0) {} - MemStruct(void* ptr, UINT sz) : Pointer(ptr), Size(sz) {} -}; - -class CMemTest: NoAssign -{ - UINT CountErrors; - bool FullLog; - Harness::SpinBarrier *limitBarrier; - static bool firstTime; - -public: - CMemTest(Harness::SpinBarrier *barrier, bool isVerbose=false) : - CountErrors(0), limitBarrier(barrier) - { - srand((UINT)time(NULL)); - FullLog=isVerbose; - rand(); - } - void InvariantDataRealloc(bool aligned); //realloc does not change data - void NULLReturn(UINT MinSize, UINT MaxSize, int total_threads); // NULL pointer + check errno - void UniquePointer(); // unique pointer - check with padding - void AddrArifm(); // unique pointer - check with pointer arithmetic - bool ShouldReportError(); - void Free_NULL(); // - void Zerofilling(); // check if arrays are zero-filled - void TestAlignedParameters(); - void RunAllTests(int total_threads); - ~CMemTest() {} -}; - -class Limit { - size_t limit; -public: - Limit(size_t a_limit) : limit(a_limit) {} - void operator() () const { - limitMem(limit); - } -}; - -int argC; -char** argV; - -struct RoundRobin: NoAssign { - const long number_of_threads; - mutable CMemTest test; - - RoundRobin( long p, Harness::SpinBarrier *limitBarrier, bool verbose ) : - number_of_threads(p), test(limitBarrier, verbose) {} - void operator()( int /*id*/ ) const - { - test.RunAllTests(number_of_threads); - } -}; - -bool CMemTest::firstTime = true; - -static void setSystemAllocs() -{ - Tmalloc=malloc; - Trealloc=realloc; - Tcalloc=calloc; - Tfree=free; -#if _WIN32 || _WIN64 - Raligned_malloc=_aligned_malloc; - Raligned_realloc=_aligned_realloc; - Taligned_free=_aligned_free; - Rposix_memalign=0; -#elif __APPLE__ || __sun || __ANDROID__ -// OS X*, Solaris, and Android don't have posix_memalign - Raligned_malloc=0; - Raligned_realloc=0; - Taligned_free=0; - Rposix_memalign=0; -#else - Raligned_malloc=0; - Raligned_realloc=0; - Taligned_free=0; - Rposix_memalign=posix_memalign; -#endif -} - -// check that realloc works as free and as malloc -void ReallocParam() -{ - const int ITERS = 1000; - int i; - void *bufs[ITERS]; - - bufs[0] = Trealloc(NULL, 30*MByte); - ASSERT(bufs[0], "Can't get memory to start the test."); - - for (i=1; i<ITERS; i++) - { - bufs[i] = Trealloc(NULL, 30*MByte); - if (NULL == bufs[i]) - break; - } - ASSERT(i<ITERS, "Limits should be decreased for the test to work."); - - Trealloc(bufs[0], 0); - /* There is a race for the free space between different threads at - this point. So, have to run the test sequentially. 
- */ - bufs[0] = Trealloc(NULL, 30*MByte); - ASSERT(bufs[0], NULL); - - for (int j=0; j<i; j++) - Trealloc(bufs[j], 0); -} - -HARNESS_EXPORT -int main(int argc, char* argv[]) { - argC=argc; - argV=argv; - MaxThread = MinThread = 1; - Tmalloc=scalable_malloc; - Trealloc=scalable_realloc; - Tcalloc=scalable_calloc; - Tfree=scalable_free; - Rposix_memalign=scalable_posix_memalign; - Raligned_malloc=scalable_aligned_malloc; - Raligned_realloc=scalable_aligned_realloc; - Taligned_free=scalable_aligned_free; - - // check if we were called to test standard behavior - for (int i=1; i< argc; i++) { - if (strcmp((char*)*(argv+i),"-s")==0) - { -#if __INTEL_COMPILER == 1400 && __linux__ - // Workaround for Intel(R) C++ Compiler XE, version 14.0.0.080: - // unable to call setSystemAllocs() in such configuration. - REPORT("Known issue: Standard allocator testing is not supported.\n"); - REPORT( "skip\n" ); - return 0; -#else - setSystemAllocs(); - argC--; - break; -#endif - } - } - - ParseCommandLine( argC, argV ); -#if __linux__ - /* According to man pthreads - "NPTL threads do not share resource limits (fixed in kernel 2.6.10)". - Use per-threads limits for affected systems. - */ - if ( LinuxKernelVersion() < 2*1000000 + 6*1000 + 10) - perProcessLimits = false; -#endif - //------------------------------------- -#if __APPLE__ - /* Skip due to lack of memory limit enforcing under OS X*. */ -#else - limitMem(200); - ReallocParam(); - limitMem(0); -#endif - -//for linux and dynamic runtime errno is used to check allocator functions -//check if library compiled with /MD(d) and we can use errno -#if _MSC_VER -#if defined(_MT) && defined(_DLL) //check errno if test itself compiled with /MD(d) only - char* version_info_block = NULL; - int version_info_block_size; - LPVOID comments_block = NULL; - UINT comments_block_size; -#ifdef _DEBUG -#define __TBBMALLOCDLL "tbbmalloc_debug.dll" -#else //_DEBUG -#define __TBBMALLOCDLL "tbbmalloc.dll" -#endif //_DEBUG - version_info_block_size = GetFileVersionInfoSize( __TBBMALLOCDLL, (LPDWORD)&version_info_block_size ); - if( version_info_block_size - && ((version_info_block = (char*)malloc(version_info_block_size)) != NULL) - && GetFileVersionInfo( __TBBMALLOCDLL, NULL, version_info_block_size, version_info_block ) - && VerQueryValue( version_info_block, "\\StringFileInfo\\000004b0\\Comments", &comments_block, &comments_block_size ) - && strstr( (char*)comments_block, "/MD" ) - ){ - __tbb_test_errno = true; - } - if( version_info_block ) free( version_info_block ); -#endif // defined(_MT) && defined(_DLL) -#else // _MSC_VER - __tbb_test_errno = true; -#endif // _MSC_VER - - for( int p=MaxThread; p>=MinThread; --p ) { - REMARK("testing with %d threads\n", p ); - for (int limit=0; limit<2; limit++) { - int ret = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, - 16*1024*limit); - ASSERT(ret==TBBMALLOC_OK, NULL); - Harness::SpinBarrier *barrier = new Harness::SpinBarrier(p); - NativeParallelFor( p, RoundRobin(p, barrier, Verbose) ); - delete barrier; - } - } - int ret = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 0); - ASSERT(ret==TBBMALLOC_OK, NULL); - if( !error_occurred ) - REPORT("done\n"); - return 0; -} - -// if non-zero byte found, returns bad value address plus 1 -size_t NonZero(void *ptr, size_t size) -{ - size_t words = size / sizeof(intptr_t); - size_t tailSz = size % sizeof(intptr_t); - intptr_t *buf =(intptr_t*)ptr; - char *bufTail =(char*)(buf+words); - - for (size_t i=0; i<words; i++) - if (buf[i]) { - for (unsigned b=0; b<sizeof(intptr_t); 
b++) - if (((char*)(buf+i))[b]) - return sizeof(intptr_t)*i + b + 1; - } - for (size_t i=0; i<tailSz; i++) - if (bufTail[i]) { - return words*sizeof(intptr_t)+i+1; - } - return 0; -} - -struct TestStruct -{ - DWORD field1:2; - DWORD field2:6; - double field3; - UCHAR field4[100]; - TestStruct* field5; - std::vector<int> field7; - double field8; -}; - -int Tposix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = Rposix_memalign(memptr, alignment, size); - if (0 == ret) - ASSERT(0==((uintptr_t)*memptr & (alignment-1)), - "allocation result should be aligned"); - return ret; -} -void* Taligned_malloc(size_t size, size_t alignment) -{ - void *ret = Raligned_malloc(size, alignment); - if (0 != ret) - ASSERT(0==((uintptr_t)ret & (alignment-1)), - "allocation result should be aligned"); - return ret; -} -void* Taligned_realloc(void* memblock, size_t size, size_t alignment) -{ - void *ret = Raligned_realloc(memblock, size, alignment); - if (0 != ret) - ASSERT(0==((uintptr_t)ret & (alignment-1)), - "allocation result should be aligned"); - return ret; -} - -inline size_t choose_random_alignment() { - return sizeof(void*)<<(rand() % POWERS_OF_2); -} - -void CMemTest::InvariantDataRealloc(bool aligned) -{ - size_t size, sizeMin; - CountErrors=0; - if (FullLog) REPORT("\nInvariant data by realloc...."); - UCHAR* pchar; - sizeMin=size=rand()%MAX_SIZE+10; - pchar = aligned? - (UCHAR*)Taligned_realloc(NULL,size,choose_random_alignment()) - : (UCHAR*)Trealloc(NULL,size); - if (NULL == pchar) - return; - for (size_t k=0; k<size; k++) - pchar[k]=(UCHAR)k%255+1; - for (int i=0; i<COUNTEXPERIMENT; i++) - { - size=rand()%MAX_SIZE+10; - UCHAR *pcharNew = aligned? - (UCHAR*)Taligned_realloc(pchar,size, choose_random_alignment()) - : (UCHAR*)Trealloc(pchar,size); - if (NULL == pcharNew) - continue; - pchar = pcharNew; - sizeMin=size<sizeMin ? size : sizeMin; - for (size_t k=0; k<sizeMin; k++) - if (pchar[k] != (UCHAR)k%255+1) - { - CountErrors++; - if (ShouldReportError()) - { - REPORT("stand '%c', must stand '%c'\n",pchar[k],(UCHAR)k%255+1); - REPORT("error: data changed (at %llu, SizeMin=%llu)\n", - (long long unsigned)k,(long long unsigned)sizeMin); - } - } - } - if (aligned) - Taligned_realloc(pchar,0,choose_random_alignment()); - else - Trealloc(pchar,0); - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - //REPORT("end check\n"); -} - -struct PtrSize { - void *ptr; - size_t size; -}; - -static int cmpAddrs(const void *p1, const void *p2) -{ - const PtrSize *a = (const PtrSize *)p1; - const PtrSize *b = (const PtrSize *)p2; - - return a->ptr < b->ptr ? -1 : ( a->ptr == b->ptr ? 
0 : 1);
-}
-
-void CMemTest::AddrArifm()
-{
-    PtrSize *arr = (PtrSize*)Tmalloc(COUNT_ELEM*sizeof(PtrSize));
-
-    if (FullLog) REPORT("\nUnique pointer using address arithmetic\n");
-    if (FullLog) REPORT("malloc....");
-    ASSERT(arr, NULL);
-    for (int i=0; i<COUNT_ELEM; i++)
-    {
-        arr[i].size=rand()%MAX_SIZE;
-        arr[i].ptr=Tmalloc(arr[i].size);
-    }
-    qsort(arr, COUNT_ELEM, sizeof(PtrSize), cmpAddrs);
-
-    for (int i=0; i<COUNT_ELEM-1; i++)
-    {
-        if (NULL!=arr[i].ptr && NULL!=arr[i+1].ptr)
-            ASSERT((uintptr_t)arr[i].ptr+arr[i].size <= (uintptr_t)arr[i+1].ptr,
-                   "intersection detected");
-    }
-    //----------------------------------------------------------------
-    if (FullLog) REPORT("realloc....");
-    for (int i=0; i<COUNT_ELEM; i++)
-    {
-        size_t count=arr[i].size*2;
-        void *tmpAddr=Trealloc(arr[i].ptr,count);
-        if (NULL!=tmpAddr) {
-            arr[i].ptr = tmpAddr;
-            arr[i].size = count;
-        } else if (count==0) { // because realloc(..., 0) works as free
-            arr[i].ptr = NULL;
-            arr[i].size = 0;
-        }
-    }
-    qsort(arr, COUNT_ELEM, sizeof(PtrSize), cmpAddrs);
-
-    for (int i=0; i<COUNT_ELEM-1; i++)
-    {
-        if (NULL!=arr[i].ptr && NULL!=arr[i+1].ptr)
-            ASSERT((uintptr_t)arr[i].ptr+arr[i].size <= (uintptr_t)arr[i+1].ptr,
-                   "intersection detected");
-    }
-    for (int i=0; i<COUNT_ELEM; i++)
-    {
-        Tfree(arr[i].ptr);
-    }
-    //-------------------------------------------
-    if (FullLog) REPORT("calloc....");
-    for (int i=0; i<COUNT_ELEM; i++)
-    {
-        arr[i].size=rand()%MAX_SIZE;
-        arr[i].ptr=Tcalloc(arr[i].size,1);
-    }
-    qsort(arr, COUNT_ELEM, sizeof(PtrSize), cmpAddrs);
-
-    for (int i=0; i<COUNT_ELEM-1; i++)
-    {
-        if (NULL!=arr[i].ptr && NULL!=arr[i+1].ptr)
-            ASSERT((uintptr_t)arr[i].ptr+arr[i].size <= (uintptr_t)arr[i+1].ptr,
-                   "intersection detected");
-    }
-    for (int i=0; i<COUNT_ELEM; i++)
-    {
-        Tfree(arr[i].ptr);
-    }
-    Tfree(arr);
-}
-
-void CMemTest::Zerofilling()
-{
-    TestStruct* TSMas;
-    size_t CountElement;
-    CountErrors=0;
-    if (FullLog) REPORT("\nzeroing elements of array....");
-    //test struct
-    for (int i=0; i<COUNTEXPERIMENT; i++)
-    {
-        CountElement=rand()%MAX_SIZE;
-        TSMas=(TestStruct*)Tcalloc(CountElement,sizeof(TestStruct));
-        if (NULL == TSMas)
-            continue;
-        for (size_t j=0; j<CountElement; j++)
-        {
-            if (NonZero(TSMas+j, sizeof(TestStruct)))
-            {
-                CountErrors++;
-                if (ShouldReportError()) REPORT("detected nonzero element in TestStruct\n");
-            }
-        }
-        Tfree(TSMas);
-    }
-    if (CountErrors) REPORT("%s\n",strError);
-    else if (FullLog) REPORT("%s\n",strOk);
-    error_occurred |= ( CountErrors>0 ) ;
-}
-
-#if !__APPLE__
-
-void myMemset(void *ptr, int c, size_t n)
-{
-#if __linux__ && __i386__
-// memset in Fedora 13 does not always set memory to the required values.
-    char *p = (char*)ptr;
-    for (size_t i=0; i<n; i++)
-        p[i] = c;
-#else
-    memset(ptr, c, n);
-#endif
-}
-
-// This test requires more than TOTAL_MB_ALLOC MB of RAM.
-#if __ANDROID__
-// Android requires a lower limit due to lack of virtual memory.
-#define TOTAL_MB_ALLOC 200 -#else -#define TOTAL_MB_ALLOC 800 -#endif -void CMemTest::NULLReturn(UINT MinSize, UINT MaxSize, int total_threads) -{ - const int MB_PER_THREAD = TOTAL_MB_ALLOC / total_threads; - // find size to guarantee getting NULL for 1024 B allocations - const int MAXNUM_1024 = (MB_PER_THREAD + (MB_PER_THREAD>>2)) * 1024; - - std::vector<MemStruct> PointerList; - void *tmp; - CountErrors=0; - int CountNULL, num_1024; - if (FullLog) REPORT("\nNULL return & check errno:\n"); - UINT Size; - Limit limit_total(TOTAL_MB_ALLOC), no_limit(0); - void **buf_1024 = (void**)Tmalloc(MAXNUM_1024*sizeof(void*)); - - ASSERT(buf_1024, NULL); - /* We must have space for pointers when memory limit is hit. - Reserve enough for the worst case, taking into account race for - limited space between threads. - */ - PointerList.reserve(TOTAL_MB_ALLOC*MByte/MinSize); - - /* There is a bug in the specific version of GLIBC (2.5-12) shipped - with RHEL5 that leads to erroneous working of the test - on Intel64 and IPF systems when setrlimit-related part is enabled. - Switching to GLIBC 2.5-18 from RHEL5.1 resolved the issue. - */ - if (perProcessLimits) - limitBarrier->wait(limit_total); - else - limitMem(MB_PER_THREAD); - - /* regression test against the bug in allocator when it dereference NULL - while lack of memory - */ - for (num_1024=0; num_1024<MAXNUM_1024; num_1024++) { - buf_1024[num_1024] = Tcalloc(1024, 1); - if (! buf_1024[num_1024]) { - ASSERT_ERRNO(errno == ENOMEM, NULL); - break; - } - } - for (int i=0; i<num_1024; i++) - Tfree(buf_1024[i]); - Tfree(buf_1024); - - do { - Size=rand()%(MaxSize-MinSize)+MinSize; - tmp=Tmalloc(Size); - if (tmp != NULL) - { - myMemset(tmp, 0, Size); - PointerList.push_back(MemStruct(tmp, Size)); - } - } while(tmp != NULL); - ASSERT_ERRNO(errno == ENOMEM, NULL); - if (FullLog) REPORT("\n"); - - // preparation complete, now running tests - // malloc - if (FullLog) REPORT("malloc...."); - CountNULL = 0; - while (CountNULL==0) - for (int j=0; j<COUNT_TESTS; j++) - { - Size=rand()%(MaxSize-MinSize)+MinSize; - errno = ENOMEM+j+1; - tmp=Tmalloc(Size); - if (tmp == NULL) - { - CountNULL++; - if ( CHECK_ERRNO(errno != ENOMEM) ) { - CountErrors++; - if (ShouldReportError()) REPORT("NULL returned, error: errno (%d) != ENOMEM\n", errno); - } - } - else - { - // Technically, if malloc returns a non-NULL pointer, it is allowed to set errno anyway. - // However, on most systems it does not set errno. 
- bool known_issue = false; -#if __linux__ - if( CHECK_ERRNO(errno==ENOMEM) ) known_issue = true; -#endif /* __linux__ */ - if ( CHECK_ERRNO(errno != ENOMEM+j+1) && !known_issue) { - CountErrors++; - if (ShouldReportError()) REPORT("error: errno changed to %d though valid pointer was returned\n", errno); - } - myMemset(tmp, 0, Size); - PointerList.push_back(MemStruct(tmp, Size)); - } - } - if (FullLog) REPORT("end malloc\n"); - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - - CountErrors=0; - //calloc - if (FullLog) REPORT("calloc...."); - CountNULL = 0; - while (CountNULL==0) - for (int j=0; j<COUNT_TESTS; j++) - { - Size=rand()%(MaxSize-MinSize)+MinSize; - errno = ENOMEM+j+1; - tmp=Tcalloc(COUNT_ELEM_CALLOC,Size); - if (tmp == NULL) - { - CountNULL++; - if ( CHECK_ERRNO(errno != ENOMEM) ){ - CountErrors++; - if (ShouldReportError()) REPORT("NULL returned, error: errno(%d) != ENOMEM\n", errno); - } - } - else - { - // Technically, if calloc returns a non-NULL pointer, it is allowed to set errno anyway. - // However, on most systems it does not set errno. - bool known_issue = false; -#if __linux__ - if( CHECK_ERRNO(errno==ENOMEM) ) known_issue = true; -#endif /* __linux__ */ - if ( CHECK_ERRNO(errno != ENOMEM+j+1) && !known_issue ) { - CountErrors++; - if (ShouldReportError()) REPORT("error: errno changed to %d though valid pointer was returned\n", errno); - } - PointerList.push_back(MemStruct(tmp, Size)); - } - } - if (FullLog) REPORT("end calloc\n"); - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - CountErrors=0; - if (FullLog) REPORT("realloc...."); - CountNULL = 0; - if (PointerList.size() > 0) - while (CountNULL==0) - for (size_t i=0; i<(size_t)COUNT_TESTS && i<PointerList.size(); i++) - { - errno = 0; - tmp=Trealloc(PointerList[i].Pointer,PointerList[i].Size*2); - if (PointerList[i].Pointer == tmp) // the same place - { - bool known_issue = false; -#if __linux__ - if( errno==ENOMEM ) known_issue = true; -#endif /* __linux__ */ - if (errno != 0 && !known_issue) { - CountErrors++; - if (ShouldReportError()) REPORT("valid pointer returned, error: errno not kept\n"); - } - PointerList[i].Size *= 2; - } - else if (tmp != PointerList[i].Pointer && tmp != NULL) // another place - { - bool known_issue = false; -#if __linux__ - if( errno==ENOMEM ) known_issue = true; -#endif /* __linux__ */ - if (errno != 0 && !known_issue) { - CountErrors++; - if (ShouldReportError()) REPORT("valid pointer returned, error: errno not kept\n"); - } - // newly allocated area have to be zeroed - myMemset((char*)tmp + PointerList[i].Size, 0, PointerList[i].Size); - PointerList[i].Pointer = tmp; - PointerList[i].Size *= 2; - } - else if (tmp == NULL) - { - CountNULL++; - if ( CHECK_ERRNO(errno != ENOMEM) ) - { - CountErrors++; - if (ShouldReportError()) REPORT("NULL returned, error: errno(%d) != ENOMEM\n", errno); - } - // check data integrity - if (NonZero(PointerList[i].Pointer, PointerList[i].Size)) { - CountErrors++; - if (ShouldReportError()) REPORT("NULL returned, error: data changed\n"); - } - } - } - if (FullLog) REPORT("realloc end\n"); - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - for (UINT i=0; i<PointerList.size(); i++) - { - Tfree(PointerList[i].Pointer); - } - - if (perProcessLimits) - limitBarrier->wait(no_limit); - else - limitMem(0); -} -#endif /* #if __APPLE__ 
*/ - -void CMemTest::UniquePointer() -{ - CountErrors=0; - int **MasPointer = (int **)Tmalloc(sizeof(int*)*COUNT_ELEM); - size_t *MasCountElem = (size_t*)Tmalloc(sizeof(size_t)*COUNT_ELEM); - if (FullLog) REPORT("\nUnique pointer using 0\n"); - ASSERT(MasCountElem && MasPointer, NULL); - // - //------------------------------------------------------- - //malloc - for (int i=0; i<COUNT_ELEM; i++) - { - MasCountElem[i]=rand()%MAX_SIZE; - MasPointer[i]=(int*)Tmalloc(MasCountElem[i]*sizeof(int)); - if (NULL == MasPointer[i]) - MasCountElem[i]=0; - memset(MasPointer[i], 0, sizeof(int)*MasCountElem[i]); - } - if (FullLog) REPORT("malloc...."); - for (UINT i=0; i<COUNT_ELEM-1; i++) - { - if (size_t badOff = NonZero(MasPointer[i], sizeof(int)*MasCountElem[i])) { - CountErrors++; - if (ShouldReportError()) - REPORT("error, detect non-zero at %p\n", (char*)MasPointer[i]+badOff-1); - } - memset(MasPointer[i], 1, sizeof(int)*MasCountElem[i]); - } - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - //---------------------------------------------------------- - //calloc - for (int i=0; i<COUNT_ELEM; i++) - Tfree(MasPointer[i]); - CountErrors=0; - for (long i=0; i<COUNT_ELEM; i++) - { - MasPointer[i]=(int*)Tcalloc(MasCountElem[i]*sizeof(int),2); - if (NULL == MasPointer[i]) - MasCountElem[i]=0; - } - if (FullLog) REPORT("calloc...."); - for (int i=0; i<COUNT_ELEM-1; i++) - { - if (size_t badOff = NonZero(MasPointer[i], sizeof(int)*MasCountElem[i])) { - CountErrors++; - if (ShouldReportError()) - REPORT("error, detect non-zero at %p\n", (char*)MasPointer[i]+badOff-1); - } - memset(MasPointer[i], 1, sizeof(int)*MasCountElem[i]); - } - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - //--------------------------------------------------------- - //realloc - CountErrors=0; - for (int i=0; i<COUNT_ELEM; i++) - { - MasCountElem[i]*=2; - *(MasPointer+i)= - (int*)Trealloc(*(MasPointer+i),MasCountElem[i]*sizeof(int)); - if (NULL == MasPointer[i]) - MasCountElem[i]=0; - memset(MasPointer[i], 0, sizeof(int)*MasCountElem[i]); - } - if (FullLog) REPORT("realloc...."); - for (int i=0; i<COUNT_ELEM-1; i++) - { - if (NonZero(MasPointer[i], sizeof(int)*MasCountElem[i])) - CountErrors++; - memset(MasPointer[i], 1, sizeof(int)*MasCountElem[i]); - } - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; - for (int i=0; i<COUNT_ELEM; i++) - Tfree(MasPointer[i]); - Tfree(MasCountElem); - Tfree(MasPointer); -} - -bool CMemTest::ShouldReportError() -{ - if (FullLog) - return true; - else - if (firstTime) { - firstTime = false; - return true; - } else - return false; -} - -void CMemTest::Free_NULL() -{ - CountErrors=0; - if (FullLog) REPORT("\ncall free with parameter NULL...."); - errno = 0; - for (int i=0; i<COUNTEXPERIMENT; i++) - { - Tfree(NULL); - if (errno != 0) - { - CountErrors++; - if (ShouldReportError()) REPORT("error is found by a call free with parameter NULL\n"); - } - } - if (CountErrors) REPORT("%s\n",strError); - else if (FullLog) REPORT("%s\n",strOk); - error_occurred |= ( CountErrors>0 ) ; -} - -void CMemTest::TestAlignedParameters() -{ - void *memptr; - int ret; - - if (Rposix_memalign) { - // alignment isn't power of 2 - for (int bad_align=3; bad_align<16; bad_align++) - if (bad_align&(bad_align-1)) { - ret = Tposix_memalign(NULL, bad_align, 100); - ASSERT(EINVAL==ret, NULL); - } - - memptr 
= &ret; - ret = Tposix_memalign(&memptr, 5*sizeof(void*), 100); - ASSERT(memptr == &ret, - "memptr should not be changed after unsuccessful call"); - ASSERT(EINVAL==ret, NULL); - - // alignment is power of 2, but not a multiple of sizeof(void *), - // we expect that sizeof(void*) > 2 - ret = Tposix_memalign(NULL, 2, 100); - ASSERT(EINVAL==ret, NULL); - } - if (Raligned_malloc) { - // alignment isn't power of 2 - for (int bad_align=3; bad_align<16; bad_align++) - if (bad_align&(bad_align-1)) { - memptr = Taligned_malloc(100, bad_align); - ASSERT(NULL==memptr, NULL); - ASSERT_ERRNO(EINVAL==errno, NULL); - } - - // size is zero - memptr = Taligned_malloc(0, 16); - ASSERT(NULL==memptr, "size is zero, so must return NULL"); - ASSERT_ERRNO(EINVAL==errno, NULL); - } - if (Taligned_free) { - // NULL pointer is OK to free - errno = 0; - Taligned_free(NULL); - /* As there is no return value for free, strictly speaking we can't - check errno here. But checked implementations obey the assertion. - */ - ASSERT_ERRNO(0==errno, NULL); - } - if (Raligned_realloc) { - for (int i=1; i<20; i++) { - // checks that calls work correctly in presence of non-zero errno - errno = i; - void *ptr = Taligned_malloc(i*10, 128); - ASSERT(NULL!=ptr, NULL); - ASSERT_ERRNO(0!=errno, NULL); - // if size is zero and pointer is not NULL, works like free - memptr = Taligned_realloc(ptr, 0, 64); - ASSERT(NULL==memptr, NULL); - ASSERT_ERRNO(0!=errno, NULL); - } - // alignment isn't power of 2 - for (int bad_align=3; bad_align<16; bad_align++) - if (bad_align&(bad_align-1)) { - void *ptr = &bad_align; - memptr = Taligned_realloc(&ptr, 100, bad_align); - ASSERT(NULL==memptr, NULL); - ASSERT(&bad_align==ptr, NULL); - ASSERT_ERRNO(EINVAL==errno, NULL); - } - } -} - -void CMemTest::RunAllTests(int total_threads) -{ - Zerofilling(); - Free_NULL(); - InvariantDataRealloc(/*aligned=*/false); - if (Raligned_realloc) - InvariantDataRealloc(/*aligned=*/true); - TestAlignedParameters(); - UniquePointer(); - AddrArifm(); -#if __APPLE__ - REPORT("Known issue: some tests are skipped on OS X*\n"); -#else - NULLReturn(1*MByte,100*MByte,total_threads); -#endif - if (FullLog) REPORT("Tests for %d threads ended\n", total_threads); -} - -#endif /* __TBB_WIN8UI_SUPPORT */ diff --git a/src/tbb/src/test/test_malloc_init_shutdown.cpp b/src/tbb/src/test/test_malloc_init_shutdown.cpp deleted file mode 100644 index 43cc6948f..000000000 --- a/src/tbb/src/test/test_malloc_init_shutdown.cpp +++ /dev/null @@ -1,176 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/scalable_allocator.h" -#include "tbb/atomic.h" -#include "tbb/aligned_space.h" - -#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1 -#include "harness.h" -#include "harness_barrier.h" -#if !__TBB_SOURCE_DIRECTLY_INCLUDED -#include "harness_tbb_independence.h" -#endif - -tbb::atomic<int> FinishedTasks; -const int MaxTasks = 16; - -/*--------------------------------------------------------------------*/ -// The regression test against a bug triggered when malloc initialization -// and thread shutdown were called simultaneously, in which case -// Windows dynamic loader lock and allocator initialization/termination lock -// were taken in different order. - -class TestFunc1 { - Harness::SpinBarrier* my_barr; -public: - TestFunc1 (Harness::SpinBarrier& barr) : my_barr(&barr) {} - void operator() (bool do_malloc) const { - my_barr->wait(); - if (do_malloc) scalable_malloc(10); - ++FinishedTasks; - } -}; - -typedef NativeParallelForTask<bool,TestFunc1> TestTask1; - -void Test1 () { - int NTasks = min(MaxTasks, max(2, MaxThread)); - Harness::SpinBarrier barr(NTasks); - TestFunc1 tf(barr); - FinishedTasks = 0; - tbb::aligned_space<TestTask1,MaxTasks> tasks; - - for(int i=0; i<NTasks; ++i) { - TestTask1* t = tasks.begin()+i; - new(t) TestTask1(i%2==0, tf); - t->start(); - } - - Harness::Sleep(1000); // wait a second :) - ASSERT( FinishedTasks==NTasks, "Some threads appear to deadlock" ); - - for(int i=0; i<NTasks; ++i) { - TestTask1* t = tasks.begin()+i; - t->wait_to_finish(); - t->~TestTask1(); - } -} - -/*--------------------------------------------------------------------*/ -// The regression test against a bug when cross-thread deallocation -// caused livelock at thread shutdown. 
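Stripped of the harness machinery, the scenario is: one thread allocates a block, a different thread frees it after the allocating thread has exited, and the allocator must hand the foreign block back without spinning on the departed owner's thread-local state. A minimal C++11 reduction (illustrative only, not the original test):

    #include <cstdlib>
    #include <thread>

    int main()
    {
        void *p = nullptr;
        std::thread producer([&p]{ p = std::malloc(8); });
        producer.join();   // the owning thread is gone before the free
        std::thread consumer([&p]{ std::free(p); });  // frees a foreign block
        consumer.join();
        return 0;
    }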
- -void* gPtr = NULL; - -class TestFunc2a { - Harness::SpinBarrier* my_barr; -public: - TestFunc2a (Harness::SpinBarrier& barr) : my_barr(&barr) {} - void operator() (int) const { - gPtr = scalable_malloc(8); - my_barr->wait(); - ++FinishedTasks; - } -}; - -typedef NativeParallelForTask<int,TestFunc2a> TestTask2a; - -class TestFunc2b: NoAssign { - Harness::SpinBarrier* my_barr; - TestTask2a& my_ward; -public: - TestFunc2b (Harness::SpinBarrier& barr, TestTask2a& t) : my_barr(&barr), my_ward(t) {} - void operator() (int) const { - tbb::internal::spin_wait_while_eq(gPtr, (void*)NULL); - scalable_free(gPtr); - my_barr->wait(); - my_ward.wait_to_finish(); - ++FinishedTasks; - } -}; -void Test2() { - Harness::SpinBarrier barr(2); - TestFunc2a func2a(barr); - TestTask2a t2a(0, func2a); - TestFunc2b func2b(barr, t2a); - NativeParallelForTask<int,TestFunc2b> t2b(1, func2b); - FinishedTasks = 0; - t2a.start(); t2b.start(); - Harness::Sleep(1000); // wait a second :) - ASSERT( FinishedTasks==2, "Threads appear to deadlock" ); - t2b.wait_to_finish(); // t2a is monitored by t2b -} - -#if _WIN32||_WIN64 - -void TestKeyDtor() {} - -#else - -void *currSmall, *prevSmall, *currLarge, *prevLarge; - -extern "C" void threadDtor(void*) { - // First, release memory that was allocated before; - // it will not re-initialize the thread-local data if already deleted - prevSmall = currSmall; - scalable_free(currSmall); - prevLarge = currLarge; - scalable_free(currLarge); - // Then, allocate more memory. - // It will re-initialize the allocator data in the thread. - scalable_free(scalable_malloc(8)); -} - -inline bool intersectingObjects(const void *p1, const void *p2, size_t n) -{ - return (size_t)labs((uintptr_t)p1 - (uintptr_t)p2) < n; -} - -struct TestThread: NoAssign { - TestThread(int ) {} - - void operator()( int /*id*/ ) const { - pthread_key_t key; - - currSmall = scalable_malloc(8); - ASSERT(!prevSmall || currSmall==prevSmall, "Possible memory leak"); - currLarge = scalable_malloc(32*1024); - // intersectingObjects takes into account object shuffle - ASSERT(!prevLarge || intersectingObjects(currLarge, prevLarge, 32*1024), "Possible memory leak"); - pthread_key_create( &key, &threadDtor ); - pthread_setspecific(key, (const void*)42); - } -}; - -// test releasing memory from pthread key destructor -void TestKeyDtor() { - for (int i=0; i<4; i++) - NativeParallelFor( 1, TestThread(1) ); -} - -#endif // _WIN32||_WIN64 - -int TestMain () { - Test1(); // requires malloc initialization so should be first - Test2(); - TestKeyDtor(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_malloc_lib_unload.cpp b/src/tbb/src/test/test_malloc_lib_unload.cpp deleted file mode 100644 index a775eba7a..000000000 --- a/src/tbb/src/test/test_malloc_lib_unload.cpp +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#if _USRDLL
-
-#include <stdlib.h> // for NULL
-#include "harness_assert.h"
-#define HARNESS_CUSTOM_MAIN 1
-#define HARNESS_NO_PARSE_COMMAND_LINE 1
-#include "harness.h"
-
-const char *globalCallMsg = "A TBB allocator function call is resolved into wrong implementation.";
-
-#if _WIN32||_WIN64
-// must be defined in DLL for linker to not drop the dependence on the DLL.
-extern "C" {
-    extern __declspec(dllexport) void *scalable_malloc(size_t);
-    extern __declspec(dllexport) void scalable_free (void *);
-    extern __declspec(dllexport) void safer_scalable_free (void *, void (*)(void*));
-    extern __declspec(dllexport) void *scalable_realloc(void *, size_t);
-    extern __declspec(dllexport) void *safer_scalable_realloc(void *, size_t, void *);
-    extern __declspec(dllexport) void *scalable_calloc(size_t, size_t);
-    extern __declspec(dllexport) int scalable_posix_memalign(void **, size_t, size_t);
-    extern __declspec(dllexport) void *scalable_aligned_malloc(size_t, size_t);
-    extern __declspec(dllexport) void *scalable_aligned_realloc(void *, size_t, size_t);
-    extern __declspec(dllexport) void *safer_scalable_aligned_realloc(void *, size_t, size_t, void *);
-    extern __declspec(dllexport) void scalable_aligned_free(void *);
-    extern __declspec(dllexport) size_t scalable_msize(void *);
-    extern __declspec(dllexport) size_t safer_scalable_msize (void *, size_t (*)(void*));
-}
-#endif
-
-// These functions must never be called; each call must resolve to the implementation in the dynamic library instead.
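The trapping stubs that follow assert this property from inside the process; the same resolution can be probed diagnostically with the dynamic loader. A rough sketch assuming glibc or macOS (dladdr() is non-standard; link with -ldl on Linux):

    #include <dlfcn.h>
    #include <cstdio>

    int main()
    {
        // Ask the loader which module currently satisfies the symbol.
        void *sym = dlsym(RTLD_DEFAULT, "scalable_malloc");
        Dl_info info;
        if (sym && dladdr(sym, &info))
            std::printf("scalable_malloc resolves to %s\n", info.dli_fname);
        else
            std::printf("scalable_malloc not loaded\n");
        return 0;
    }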
-extern "C" void *scalable_malloc(size_t) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" void scalable_free (void *) -{ - ASSERT(0, globalCallMsg); -} -extern "C" void safer_scalable_free (void *, void (*)(void*)) -{ - ASSERT(0, globalCallMsg); -} -extern "C" void *scalable_realloc(void *, size_t) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" void *safer_scalable_realloc(void *, size_t, void *) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" void *scalable_calloc(size_t, size_t) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" int scalable_posix_memalign(void **, size_t, size_t) -{ - ASSERT(0, globalCallMsg); - return 0; -} -extern "C" void *scalable_aligned_malloc(size_t, size_t) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" void *scalable_aligned_realloc(void *, size_t, size_t) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" void *safer_scalable_aligned_realloc(void *, size_t, size_t, void *) -{ - ASSERT(0, globalCallMsg); - return NULL; -} -extern "C" void scalable_aligned_free(void *) -{ - ASSERT(0, globalCallMsg); -} -extern "C" size_t scalable_msize(void *) -{ - ASSERT(0, globalCallMsg); - return 0; -} -extern "C" size_t safer_scalable_msize (void *, size_t (*)(void*)) -{ - ASSERT(0, globalCallMsg); - return 0; -} - -#else // _USRDLL - -// harness_defs.h must be included before tbb_stddef.h to overcome exception-dependent -// system headers that come from tbb_stddef.h -#include "harness_defs.h" -#include "tbb/tbb_stddef.h" -#if __TBB_WIN8UI_SUPPORT || __TBB_SOURCE_DIRECTLY_INCLUDED -#define HARNESS_SKIP_TEST 1 -#endif -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1 -#include "harness.h" - -#if !HARNESS_SKIP_TEST - -#include "harness_dynamic_libs.h" -#include "harness_memory.h" - -extern "C" { -#if _WIN32||_WIN64 -extern __declspec(dllimport) -#endif -void *scalable_malloc(size_t); -} - -struct Run { - void operator()( int /*id*/ ) const { - using namespace Harness; - - void* (*malloc_ptr)(std::size_t); - void (*free_ptr)(void*); - - void* (*aligned_malloc_ptr)(size_t size, size_t alignment); - void (*aligned_free_ptr)(void*); - - const char* actual_name; - LIBRARY_HANDLE lib = OpenLibrary(actual_name = MALLOCLIB_NAME1); - if (!lib) lib = OpenLibrary(actual_name = MALLOCLIB_NAME2); - if (!lib) { - REPORT("Can't load " MALLOCLIB_NAME1 " or " MALLOCLIB_NAME2 "\n"); - exit(1); - } - (FunctionAddress&)malloc_ptr = GetAddress(lib, "scalable_malloc"); - (FunctionAddress&)free_ptr = GetAddress(lib, "scalable_free"); - (FunctionAddress&)aligned_malloc_ptr = GetAddress(lib, "scalable_aligned_malloc"); - (FunctionAddress&)aligned_free_ptr = GetAddress(lib, "scalable_aligned_free"); - - for (size_t sz = 1024; sz <= 10*1024 ; sz*=10) { - void *p1 = aligned_malloc_ptr(sz, 16); - memset(p1, 0, sz); - aligned_free_ptr(p1); - } - - void *p = malloc_ptr(100); - memset(p, 1, 100); - free_ptr(p); - - CloseLibrary(lib); -#if _WIN32 || _WIN64 - ASSERT(GetModuleHandle(actual_name), - "allocator library must not be unloaded"); -#else - ASSERT(dlsym(RTLD_DEFAULT, "scalable_malloc"), - "allocator library must not be unloaded"); -#endif - } -}; - -int TestMain () { - int i; - std::ptrdiff_t memory_leak; - - // warm-up run - NativeParallelFor( 1, Run() ); - /* 1st call to GetMemoryUsage() allocate some memory, - but it seems memory consumption stabilized after this. 
- */
-    GetMemoryUsage();
-    std::size_t memory_in_use = GetMemoryUsage();
-    ASSERT(memory_in_use == GetMemoryUsage(),
-           "Memory consumption should not increase after 1st GetMemoryUsage() call");
-
-    // expect that memory consumption stabilizes after several runs
-    for (i=0; i<3; i++) {
-        std::size_t memory_in_use = GetMemoryUsage();
-        for (int j=0; j<10; j++)
-            NativeParallelFor( 1, Run() );
-        memory_leak = GetMemoryUsage() - memory_in_use;
-        if (memory_leak == 0)  // possibly too strong?
-            break;
-    }
-    if(3==i) {
-        // not stabilized, could be a leak
-        REPORT( "Error: memory leak of up to %ld bytes\n", static_cast<long>(memory_leak));
-        exit(1);
-    }
-
-    return Harness::Done;
-}
-
-#endif /* HARNESS_SKIP_TEST */
-
-#endif // _USRDLL
diff --git a/src/tbb/src/test/test_malloc_overload.cpp b/src/tbb/src/test/test_malloc_overload.cpp
deleted file mode 100644
index 7cf502294..000000000
--- a/src/tbb/src/test/test_malloc_overload.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-
-#if (_WIN32 || _WIN64)
-// As the test is intentionally built with /EHs-, suppress multiple VS2005
-// warnings like C4530: C++ exception handler used, but unwind semantics are not enabled
-#if defined(_MSC_VER) && !__INTEL_COMPILER
-/* ICC 10.1 and 11.0 generate code that uses std::_Raise_handler,
-   but it's only defined in libcpmt(d), which the test isn't linked with.
- */ -#undef _HAS_EXCEPTIONS -#define _HAS_EXCEPTIONS _CPPUNWIND -#endif -// to use strdup and putenv w/o warnings -#define _CRT_NONSTDC_NO_DEPRECATE 1 -#endif // _WIN32 || _WIN64 - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -// LD_PRELOAD mechanism is broken in offload -#if __TBB_MIC_OFFLOAD -#define HARNESS_SKIP_TEST 1 -#endif -#include "harness.h" - -#if __linux__ || __APPLE__ -#define MALLOC_REPLACEMENT_AVAILABLE 1 -#elif _WIN32 && !__MINGW32__ && !__MINGW64__ && !__TBB_WIN8UI_SUPPORT -#define MALLOC_REPLACEMENT_AVAILABLE 2 -#include "tbb/tbbmalloc_proxy.h" -#endif - -#if MALLOC_REPLACEMENT_AVAILABLE - -#include "harness_report.h" -#include "harness_assert.h" -#include "harness_defs.h" -#include <stdlib.h> -#include <string.h> -#if !__APPLE__ -#include <malloc.h> -#endif -#include <stdio.h> -#include <new> -#if MALLOC_REPLACEMENT_AVAILABLE == 1 -#include <unistd.h> // for sysconf -#include <dlfcn.h> -#endif - -#if __linux__ -#include <stdint.h> // for uintptr_t - -extern "C" { -void *__libc_malloc(size_t size); -void *__libc_realloc(void *ptr, size_t size); -void *__libc_calloc(size_t num, size_t size); -void __libc_free(void *ptr); -void *__libc_memalign(size_t alignment, size_t size); -void *__libc_pvalloc(size_t size); -void *__libc_valloc(size_t size); -#if __ANDROID__ -#define malloc_usable_size(p) dlmalloc_usable_size(p) -size_t dlmalloc_usable_size(const void *ptr); -#endif -} - -#elif __APPLE__ - -#include <malloc/malloc.h> -#define malloc_usable_size(p) malloc_size(p) - -#elif _WIN32 -#include <stddef.h> -#if __MINGW32__ -#include <unistd.h> -#else -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -#endif - -#endif /* OS selection */ - -#if _WIN32 -// On Windows, the trick with string "dependence on msvcpXX.dll" is necessary to create -// dependence on msvcpXX.dll, for sake of a regression test. -// On Linux, C++ RTL headers are undesirable because of breaking strict ANSI mode. -#if defined(_MSC_VER) && _MSC_VER >= 1300 && _MSC_VER <= 1310 && !defined(__INTEL_COMPILER) -/* Fixing compilation error reported by VS2003 for exception class - when _HAS_EXCEPTIONS is 0: - bad_cast that inherited from exception is not in std namespace. -*/ -using namespace std; -#endif -#include <string> -#endif - -template<typename T> -static inline T alignDown(T arg, uintptr_t alignment) { - return T( (uintptr_t)arg & ~(alignment-1)); -} -template<typename T> -static inline T alignUp(T arg, uintptr_t alignment) { - return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1)); -} -template<typename T> -static inline bool isAligned(T arg, uintptr_t alignment) { - return 0==((uintptr_t)arg & (alignment-1)); -} - -/* start of code replicated from src/tbbmalloc */ - -class BackRefIdx { // composite index to backreference array -private: - uint16_t master; // index in BackRefMaster - uint16_t largeObj:1; // is this object "large"? 
- uint16_t offset :15; // offset from beginning of BackRefBlock -public: - BackRefIdx() : master((uint16_t)-1) {} - bool isInvalid() { return master == (uint16_t)-1; } - bool isLargeObject() const { return largeObj; } - uint16_t getMaster() const { return master; } - uint16_t getOffset() const { return offset; } - - // only newBackRef can modify BackRefIdx - static BackRefIdx newBackRef(bool largeObj); -}; - -class ExtMemoryPool; - -class BlockI { - intptr_t blockState[2]; -}; - -struct LargeMemoryBlock : public BlockI { - LargeMemoryBlock *next, // ptrs in list of cached blocks - *prev, - *gPrev, // in pool's global list - *gNext; - uintptr_t age; // age of block while in cache - size_t objectSize; // the size requested by a client - size_t unalignedSize; // the size requested from getMemory - bool fromMapMemory; - BackRefIdx backRefIdx; // cached here, used copy is in LargeObjectHdr - void registerInPool(ExtMemoryPool *extMemPool); - void unregisterFromPool(ExtMemoryPool *extMemPool); -}; - -struct LargeObjectHdr { - LargeMemoryBlock *memoryBlock; - /* Have to duplicate it here from CachedObjectHdr, - as backreference must be checked without further pointer dereference. - Points to LargeObjectHdr. */ - BackRefIdx backRefIdx; -}; - -/* - * Objects of size minLargeObjectSize and larger are considered large objects. - */ -const uintptr_t blockSize = 16*1024; -#if __powerpc64__ || __ppc64__ || __bgp__ -const int estimatedCacheLineSize = 128; -#else -const int estimatedCacheLineSize = 64; -#endif -const uint32_t fittingAlignment = estimatedCacheLineSize; -#define SET_FITTING_SIZE(N) ( (blockSize-2*estimatedCacheLineSize)/N ) & ~(fittingAlignment-1) -const uint32_t fittingSize5 = SET_FITTING_SIZE(2); // 8128/8064 -#undef SET_FITTING_SIZE -const uint32_t minLargeObjectSize = fittingSize5 + 1; - -/* end of code replicated from src/tbbmalloc */ - -static void scalableMallocCheckSize(void *object, size_t size) -{ - ASSERT(object, NULL); - if (size >= minLargeObjectSize) { - LargeMemoryBlock *lmb = ((LargeObjectHdr*)object-1)->memoryBlock; - ASSERT(uintptr_t(lmb)<uintptr_t(((LargeObjectHdr*)object-1)) - && lmb->objectSize >= size, NULL); - } -#if MALLOC_REPLACEMENT_AVAILABLE == 1 - ASSERT(malloc_usable_size(object) >= size, NULL); -#elif MALLOC_REPLACEMENT_AVAILABLE == 2 - // Check that _msize works correctly - ASSERT(_msize(object) >= size, NULL); - ASSERT(size<8 || _aligned_msize(object,8,0) >= size, NULL); -#endif -} - -struct BigStruct { - char f[minLargeObjectSize]; -}; - -void CheckStdFuncOverload(void *(*malloc_p)(size_t), void *(*calloc_p)(size_t, size_t), - void *(*realloc_p)(void *, size_t), void (*free_p)(void *)) -{ - void *ptr = malloc_p(minLargeObjectSize); - scalableMallocCheckSize(ptr, minLargeObjectSize); - free(ptr); - - ptr = calloc_p(minLargeObjectSize, 2); - scalableMallocCheckSize(ptr, 2*minLargeObjectSize); - void *ptr1 = realloc_p(ptr, 10*minLargeObjectSize); - scalableMallocCheckSize(ptr1, 10*minLargeObjectSize); - free_p(ptr1); -} - -#if MALLOC_REPLACEMENT_AVAILABLE == 1 - -void CheckUnixAlignFuncOverload(void *(*memalign_p)(size_t, size_t), - void *(*valloc_p)(size_t), void (*free_p)(void*)) -{ - if (memalign_p) { - void *ptr = memalign_p(128, 4*minLargeObjectSize); - scalableMallocCheckSize(ptr, 4*minLargeObjectSize); - ASSERT(isAligned(ptr, 128), NULL); - free_p(ptr); - } - void *ptr = valloc_p(minLargeObjectSize); - scalableMallocCheckSize(ptr, minLargeObjectSize); - ASSERT(isAligned(ptr, sysconf(_SC_PAGESIZE)), NULL); - free_p(ptr); -} - -#if __TBB_PVALLOC_PRESENT 
-void CheckPvalloc(void *(*pvalloc_p)(size_t), void (*free_p)(void*)) -{ - const long memoryPageSize = sysconf(_SC_PAGESIZE); - // request large object with not power-of-2 size - const size_t largeSz = alignUp(minLargeObjectSize, 16*1024) + 1; - - for (size_t sz = 0; sz<=largeSz; sz+=largeSz) { - void *ptr = pvalloc_p(sz); - scalableMallocCheckSize(ptr, sz? alignUp(sz, memoryPageSize) : memoryPageSize); - ASSERT(isAligned(ptr, memoryPageSize), NULL); - free_p(ptr); - } -} -#else -#define CheckPvalloc(alloc_p, free_p) ((void)0) -#endif - -#endif // MALLOC_REPLACEMENT_AVAILABLE - -#if __ANDROID__ -// Workaround for an issue with strdup somehow bypassing our malloc replacement on Android. -char *strdup(const char *str) { - REPORT( "Known issue: malloc replacement does not work for strdup on Android.\n" ); - size_t len = strlen(str)+1; - void *new_str = malloc(len); - return new_str ? reinterpret_cast<char *>(memcpy(new_str, str, len)) : 0; -} -#endif - -int TestMain() { - void *ptr, *ptr1; - -#if MALLOC_REPLACEMENT_AVAILABLE == 1 - ASSERT(dlsym(RTLD_DEFAULT, "scalable_malloc"), - "Lost dependence on malloc_proxy or LD_PRELOAD was not set?"); -#endif - -/* On Windows, memory block size returned by _msize() is sometimes used - to calculate the size for an extended block. Substituting _msize, - scalable_msize initially returned 0 for regions not allocated by the scalable - allocator, which led to incorrect memory reallocation and subsequent crashes. - It was found that adding a new environment variable triggers the error. -*/ - ASSERT(getenv("PATH"), "We assume that PATH is set everywhere."); - char *pathCopy = strdup(getenv("PATH")); -#if __ANDROID__ - ASSERT(strcmp(pathCopy,getenv("PATH")) == 0, "strdup workaround does not work as expected."); -#endif - const char *newEnvName = "__TBBMALLOC_OVERLOAD_REGRESSION_TEST_FOR_REALLOC_AND_MSIZE"; - char *newEnv = (char*)malloc(3 + strlen(newEnvName)); - - ASSERT(!getenv(newEnvName), "Environment variable should not be used before."); - strcpy(newEnv, newEnvName); - strcat(newEnv, "=1"); - int r = putenv(newEnv); - ASSERT(!r, NULL); - char *path = getenv("PATH"); - ASSERT(path && 0==strcmp(path, pathCopy), "Environment was changed erroneously."); - free(pathCopy); - free(newEnv); - - CheckStdFuncOverload(malloc, calloc, realloc, free); -#if MALLOC_REPLACEMENT_AVAILABLE == 1 - -#if __TBB_POSIX_MEMALIGN_PRESENT - int ret = posix_memalign(&ptr, 1024, 3*minLargeObjectSize); - scalableMallocCheckSize(ptr, 3*minLargeObjectSize); - ASSERT(0==ret && isAligned(ptr, 1024), NULL); - free(ptr); -#endif - -#if __linux__ - CheckUnixAlignFuncOverload(memalign, valloc, free); - CheckPvalloc(pvalloc, free); - - struct mallinfo info = mallinfo(); - // right now mallinfo initialized by zero - ASSERT(!info.arena && !info.ordblks && !info.smblks && !info.hblks - && !info.hblkhd && !info.usmblks && !info.fsmblks - && !info.uordblks && !info.fordblks && !info.keepcost, NULL); - - #if !__ANDROID__ - // These non-standard functions are exported by GLIBC, and might be used - // in conjunction with standard malloc/free. Test that we overload them as well. - // Bionic doesn't have them. 
- CheckStdFuncOverload(__libc_malloc, __libc_calloc, __libc_realloc, __libc_free); - CheckUnixAlignFuncOverload(__libc_memalign, __libc_valloc, __libc_free); - CheckPvalloc(__libc_pvalloc, __libc_free); - #endif -#elif __APPLE__ - CheckUnixAlignFuncOverload(NULL, valloc, free); -#endif // __linux__ - -#elif MALLOC_REPLACEMENT_AVAILABLE == 2 - - ptr = _aligned_malloc(minLargeObjectSize, 16); - scalableMallocCheckSize(ptr, minLargeObjectSize); - ASSERT(isAligned(ptr, 16), NULL); - - // Testing of workaround for vs "is power of 2 pow N" bug that accepts zeros - ptr1 = _aligned_malloc(minLargeObjectSize, 0); - scalableMallocCheckSize(ptr, minLargeObjectSize); - ASSERT(isAligned(ptr, sizeof(void*)), NULL); - _aligned_free(ptr1); - - ptr1 = _aligned_realloc(ptr, minLargeObjectSize*10, 16); - scalableMallocCheckSize(ptr1, minLargeObjectSize*10); - ASSERT(isAligned(ptr, 16), NULL); - _aligned_free(ptr1); - -#endif - - BigStruct *f = new BigStruct; - scalableMallocCheckSize(f, sizeof(BigStruct)); - delete f; - - f = new BigStruct[10]; - scalableMallocCheckSize(f, 10*sizeof(BigStruct)); - delete []f; - - f = new(std::nothrow) BigStruct; - scalableMallocCheckSize(f, sizeof(BigStruct)); - delete f; - - f = new(std::nothrow) BigStruct[2]; - scalableMallocCheckSize(f, 2*sizeof(BigStruct)); - delete []f; - -#if _WIN32 - std::string stdstring = "dependence on msvcpXX.dll"; - ASSERT(strcmp(stdstring.c_str(), "dependence on msvcpXX.dll") == 0, NULL); -#endif - - return Harness::Done; -} - -#else /* !MALLOC_REPLACEMENT_AVAILABLE */ -#include <stdio.h> - -int TestMain() { - return Harness::Skipped; -} -#endif /* !MALLOC_REPLACEMENT_AVAILABLE */ diff --git a/src/tbb/src/test/test_malloc_pools.cpp b/src/tbb/src/test/test_malloc_pools.cpp deleted file mode 100644 index 49b84ac64..000000000 --- a/src/tbb/src/test/test_malloc_pools.cpp +++ /dev/null @@ -1,510 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/scalable_allocator.h" -#include "tbb/atomic.h" -#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1 -#include "harness.h" -#include "harness_barrier.h" -#if !__TBB_SOURCE_DIRECTLY_INCLUDED -#include "harness_tbb_independence.h" -#endif - -template<typename T> -static inline T alignUp (T arg, uintptr_t alignment) { - return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1)); -} - -struct PoolSpace: NoCopy { - size_t pos; - int regions; - size_t bufSize; - char *space; - - static const size_t BUF_SIZE = 8*1024*1024; - - PoolSpace(size_t bufSz = BUF_SIZE) : - pos(0), regions(0), - bufSize(bufSz), space(new char[bufSize]) { - memset(space, 0, bufSize); - } - ~PoolSpace() { - delete []space; - } -}; - -static PoolSpace *poolSpace; - -struct MallocPoolHeader { - void *rawPtr; - size_t userSize; -}; - -static tbb::atomic<int> liveRegions; - -static void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes) -{ - void *rawPtr = malloc(bytes+sizeof(MallocPoolHeader)); - if (!rawPtr) - return NULL; - void *ret = (void *)((uintptr_t)rawPtr+sizeof(MallocPoolHeader)); - - MallocPoolHeader *hdr = (MallocPoolHeader*)ret-1; - hdr->rawPtr = rawPtr; - hdr->userSize = bytes; - - liveRegions++; - - return ret; -} - -static int putMallocMem(intptr_t /*pool_id*/, void *ptr, size_t bytes) -{ - MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1; - ASSERT(bytes == hdr->userSize, "Invalid size in pool callback."); - free(hdr->rawPtr); - - liveRegions--; - - return 0; -} - -void TestPoolReset() -{ - rml::MemPoolPolicy pol(getMallocMem, putMallocMem); - rml::MemoryPool *pool; - - pool_create_v1(0, &pol, &pool); - for (int i=0; i<100; i++) { - ASSERT(pool_malloc(pool, 8), NULL); - ASSERT(pool_malloc(pool, 50*1024), NULL); - } - int regionsBeforeReset = liveRegions; - pool_reset(pool); - for (int i=0; i<100; i++) { - ASSERT(pool_malloc(pool, 8), NULL); - ASSERT(pool_malloc(pool, 50*1024), NULL); - } - ASSERT(regionsBeforeReset == liveRegions, - "Expected no new regions allocation."); - pool_destroy(pool); - ASSERT(!liveRegions, "Expected all regions were released."); -} - -class SharedPoolRun: NoAssign { - static long threadNum; - static Harness::SpinBarrier startB, - mallocDone; - static rml::MemoryPool *pool; - static void **crossThread, - **afterTerm; -public: - static const int OBJ_CNT = 100; - - static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) { - threadNum = num; - pool = pl; - crossThread = crThread; - afterTerm = aTerm; - startB.initialize(threadNum); - mallocDone.initialize(threadNum); - } - - void operator()( int id ) const { - const int ITERS = 1000; - void *local[ITERS]; - - startB.wait(); - for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) { - afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024); - memset(afterTerm[i], i, i%2? 8*1024 : 9*1024); - crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024); - memset(crossThread[i], i, i%2? 
9*1024 : 8*1024); - } - - for (int i=1; i<ITERS; i+=2) { - local[i-1] = pool_malloc(pool, 6*1024); - memset(local[i-1], i, 6*1024); - local[i] = pool_malloc(pool, 16*1024); - memset(local[i], i, 16*1024); - } - mallocDone.wait(); - int myVictim = threadNum-id-1; - for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++) - pool_free(pool, crossThread[i]); - for (int i=0; i<ITERS; i++) - pool_free(pool, local[i]); - } -}; - -long SharedPoolRun::threadNum; -Harness::SpinBarrier SharedPoolRun::startB, - SharedPoolRun::mallocDone; -rml::MemoryPool *SharedPoolRun::pool; -void **SharedPoolRun::crossThread, - **SharedPoolRun::afterTerm; - -// single pool shared by different threads -void TestSharedPool() -{ - rml::MemPoolPolicy pol(getMallocMem, putMallocMem); - rml::MemoryPool *pool; - - pool_create_v1(0, &pol, &pool); - void **crossThread = new void*[MaxThread * SharedPoolRun::OBJ_CNT]; - void **afterTerm = new void*[MaxThread * SharedPoolRun::OBJ_CNT]; - - for (int p=MinThread; p<=MaxThread; p++) { - SharedPoolRun::init(p, pool, crossThread, afterTerm); - SharedPoolRun thr; - - void *hugeObj = pool_malloc(pool, 10*1024*1024); - ASSERT(hugeObj, NULL); - - NativeParallelFor( p, thr ); - - pool_free(pool, hugeObj); - for (int i=0; i<p*SharedPoolRun::OBJ_CNT; i++) - pool_free(pool, afterTerm[i]); - } - delete []afterTerm; - delete []crossThread; - - pool_destroy(pool); - ASSERT(!liveRegions, "Expected all regions were released."); -} - -void *CrossThreadGetMem(intptr_t pool_id, size_t &bytes) -{ - if (poolSpace[pool_id].pos + bytes > poolSpace[pool_id].bufSize) - return NULL; - - void *ret = poolSpace[pool_id].space + poolSpace[pool_id].pos; - poolSpace[pool_id].pos += bytes; - poolSpace[pool_id].regions++; - - return ret; -} - -int CrossThreadPutMem(intptr_t pool_id, void* /*raw_ptr*/, size_t /*raw_bytes*/) -{ - poolSpace[pool_id].regions--; - return 0; -} - -class CrossThreadRun: NoAssign { - static long number_of_threads; - static Harness::SpinBarrier barrier; - static rml::MemoryPool **pool; - static char **obj; -public: - static void initBarrier(unsigned thrds) { barrier.initialize(thrds); } - static void init(long num) { - number_of_threads = num; - pool = new rml::MemoryPool*[number_of_threads]; - poolSpace = new PoolSpace[number_of_threads]; - obj = new char*[number_of_threads]; - } - static void destroy() { - for (long i=0; i<number_of_threads; i++) - ASSERT(!poolSpace[i].regions, "Memory leak detected"); - delete []pool; - delete []poolSpace; - delete []obj; - } - CrossThreadRun() {} - void operator()( int id ) const { - rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem); - const int objLen = 10*id; - - pool_create_v1(id, &pol, &pool[id]); - obj[id] = (char*)pool_malloc(pool[id], objLen); - ASSERT(obj[id], NULL); - memset(obj[id], id, objLen); - - { - const size_t lrgSz = 2*16*1024; - void *ptrLarge = pool_malloc(pool[id], lrgSz); - ASSERT(ptrLarge, NULL); - memset(ptrLarge, 1, lrgSz); - - // consume all small objects - while (pool_malloc(pool[id], 5*1024)) - ; - // releasing of large object can give a chance to allocate more - pool_free(pool[id], ptrLarge); - - ASSERT(pool_malloc(pool[id], 5*1024), NULL); - } - - barrier.wait(); - int myPool = number_of_threads-id-1; - for (int i=0; i<10*myPool; i++) - ASSERT(myPool==obj[myPool][i], NULL); - pool_free(pool[myPool], obj[myPool]); - pool_destroy(pool[myPool]); - } -}; - -long CrossThreadRun::number_of_threads; -Harness::SpinBarrier CrossThreadRun::barrier; -rml::MemoryPool **CrossThreadRun::pool; -char **CrossThreadRun::obj; - 
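/* [Editor's note] The pool tests in this deleted file all drive the same raw
   interface: pool_create_v1() with an rml::MemPoolPolicy holding a get/put
   callback pair, then pool_malloc()/pool_free(), then pool_destroy(). A
   minimal sketch of that life cycle, using only calls shown in this file
   (the demo* names are illustrative, not part of the original sources): */
#if 0   // illustrative only -- not part of the deleted test
static void *demoGetMem(intptr_t /*pool_id*/, size_t &bytes) {
    return malloc(bytes);                     // hand a fresh region to the pool
}
static int demoPutMem(intptr_t /*pool_id*/, void *ptr, size_t /*bytes*/) {
    free(ptr);                                // accept the region back
    return 0;
}
static void demoPoolLifeCycle() {
    rml::MemPoolPolicy pol(demoGetMem, demoPutMem);
    rml::MemoryPool *pool;
    pool_create_v1(/*pool_id=*/0, &pol, &pool);
    void *p = pool_malloc(pool, 1024);        // served from a region demoGetMem provided
    pool_free(pool, p);                       // cached or returned, per pool policy
    pool_destroy(pool);                       // remaining regions go back via demoPutMem
}
#endif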
-// pools created, used and destroyed by different threads
-void TestCrossThreadPools()
-{
-    for (int p=MinThread; p<=MaxThread; p++) {
-        CrossThreadRun::initBarrier(p);
-        CrossThreadRun::init(p);
-        NativeParallelFor( p, CrossThreadRun() );
-        for (int i=0; i<p; i++)
-            ASSERT(!poolSpace[i].regions, "Region leak detected");
-        CrossThreadRun::destroy();
-    }
-}
-
-// buffer is too small for a pool to be created, but must not leak resources
-void TestTooSmallBuffer()
-{
-    poolSpace = new PoolSpace(8*1024);
-
-    rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
-    rml::MemoryPool *pool;
-    pool_create_v1(0, &pol, &pool);
-    pool_destroy(pool);
-    ASSERT(!poolSpace[0].regions, "No leaks.");
-
-    delete poolSpace;
-}
-
-static void *fixedBufGetMem(intptr_t /*pool_id*/, size_t &bytes)
-{
-    static const size_t BUF_SZ = 8*1024*1024;
-    static char buf[BUF_SZ];
-    static bool used;
-
-    if (used)
-        return NULL;
-    used = true;
-    bytes = BUF_SZ;
-    return buf;
-}
-
-void TestFixedBufferPool()
-{
-    void *ptrs[7];
-    rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true,
-                           /*keepMemTillDestroy=*/false);
-    rml::MemoryPool *pool;
-
-    pool_create_v1(0, &pol, &pool);
-    void *largeObj = pool_malloc(pool, 7*1024*1024);
-    ASSERT(largeObj, NULL);
-    pool_free(pool, largeObj);
-
-    for (int i=0; i<7; i++) {
-        ptrs[i] = pool_malloc(pool, 1024*1024);
-        ASSERT(ptrs[i], NULL);
-    }
-    for (int i=0; i<7; i++)
-        pool_free(pool, ptrs[i]);
-
-    largeObj = pool_malloc(pool, 7*1024*1024);
-    ASSERT(largeObj, NULL);
-    pool_free(pool, largeObj);
-
-    pool_destroy(pool);
-}
-
-static size_t currGranularity;
-
-static void *getGranMem(intptr_t /*pool_id*/, size_t &bytes)
-{
-    ASSERT(!(bytes%currGranularity), "Region size mismatches granularity.");
-    return malloc(bytes);
-}
-
-static int putGranMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
-{
-    ASSERT(!(bytes%currGranularity), "Region size mismatches granularity.");
-    free(ptr);
-    return 0;
-}
-
-static void TestPoolGranularity()
-{
-    rml::MemPoolPolicy pol(getGranMem, putGranMem);
-    const size_t grans[] = {4*1024, 2*1024*1024, 6*1024*1024, 10*1024*1024};
-
-    for (unsigned i=0; i<sizeof(grans)/sizeof(grans[0]); i++) {
-        pol.granularity = currGranularity = grans[i];
-        rml::MemoryPool *pool;
-
-        pool_create_v1(0, &pol, &pool);
-        for (int sz=500*1024; sz<16*1024*1024; sz+=101*1024) {
-            void *p = pool_malloc(pool, sz);
-            ASSERT(p, "Can't allocate memory in pool.");
-            pool_free(pool, p);
-        }
-        pool_destroy(pool);
-    }
-}
-
-static size_t putMemCalls, getMemCalls;
-
-static void *getMemPolicy(intptr_t /*pool_id*/, size_t &bytes)
-{
-    getMemCalls++;
-    return malloc(bytes);
-}
-
-static int putMemPolicy(intptr_t /*pool_id*/, void *ptr, size_t /*bytes*/)
-{
-    putMemCalls++;
-    free(ptr);
-    return 0;
-}
-
-static void TestPoolKeepTillDestroy()
-{
-    const int ITERS = 50*1024;
-    void *ptrs[2*ITERS+1];
-    rml::MemPoolPolicy pol(getMemPolicy, putMemPolicy);
-    rml::MemoryPool *pool;
-
-    // first create a default pool that returns memory back to the callback,
-    // then use the keepMemTillDestroy policy
-    for (int keep=0; keep<2; keep++) {
-        getMemCalls = putMemCalls = 0;
-        if (keep)
-            pol.keepAllMemory = 1;
-        pool_create_v1(0, &pol, &pool);
-        for (int i=0; i<2*ITERS; i+=2) {
-            ptrs[i] = pool_malloc(pool, 7*1024);
-            ptrs[i+1] = pool_malloc(pool, 10*1024);
-        }
-        ptrs[2*ITERS] = pool_malloc(pool, 8*1024*1024);
-        ASSERT(!putMemCalls, NULL);
-        for (int i=0; i<2*ITERS; i++)
-            pool_free(pool, ptrs[i]);
-        pool_free(pool, ptrs[2*ITERS]);
-        size_t totalPutMemCalls = putMemCalls;
-        if (keep)
-            ASSERT(!putMemCalls,
NULL); - else { - ASSERT(putMemCalls, NULL); - putMemCalls = 0; - } - size_t currGetCalls = getMemCalls; - pool_malloc(pool, 8*1024*1024); - if (keep) - ASSERT(currGetCalls == getMemCalls, "Must not lead to new getMem call"); - size_t currPuts = putMemCalls; - pool_reset(pool); - ASSERT(currPuts == putMemCalls, "Pool is not releasing memory during reset."); - pool_destroy(pool); - ASSERT(putMemCalls, NULL); - totalPutMemCalls += putMemCalls; - ASSERT(getMemCalls == totalPutMemCalls, "Memory leak detected."); - } - -} - -static bool memEqual(char *buf, size_t size, int val) -{ - bool memEq = true; - for (size_t k=0; k<size; k++) - if (buf[k] != val) - memEq = false; - return memEq; -} - -static void TestEntries() -{ - const int SZ = 4; - const int ALGN = 4; - size_t size[SZ] = {8, 8000, 9000, 100*1024}; - size_t algn[ALGN] = {8, 64, 4*1024, 8*1024*1024}; - - rml::MemPoolPolicy pol(getGranMem, putGranMem); - currGranularity = 1; // not check granularity in the test - rml::MemoryPool *pool; - - pool_create_v1(0, &pol, &pool); - for (int i=0; i<SZ; i++) - for (int j=0; j<ALGN; j++) { - char *p = (char*)pool_aligned_malloc(pool, size[i], algn[j]); - ASSERT(p && 0==((uintptr_t)p & (algn[j]-1)), NULL); - memset(p, j, size[i]); - - size_t curr_algn = algn[rand() % ALGN]; - size_t curr_sz = size[rand() % SZ]; - char *p1 = (char*)pool_aligned_realloc(pool, p, curr_sz, curr_algn); - ASSERT(p1 && 0==((uintptr_t)p1 & (curr_algn-1)), NULL); - ASSERT(memEqual(p1, min(size[i], curr_sz), j), NULL); - - memset(p1, j+1, curr_sz); - size_t curr_sz1 = size[rand() % SZ]; - char *p2 = (char*)pool_realloc(pool, p1, curr_sz1); - ASSERT(p2, NULL); - ASSERT(memEqual(p2, min(curr_sz1, curr_sz), j+1), NULL); - - pool_free(pool, p2); - } - - pool_destroy(pool); -} - -static void TestPoolCreation() -{ - using namespace rml; - - putMemCalls = getMemCalls = 0; - - MemPoolPolicy nullPolicy(NULL, putMemPolicy), - emptyFreePolicy(getMemPolicy, NULL), - okPolicy(getMemPolicy, putMemPolicy); - MemoryPool *pool; - - MemPoolError res = pool_create_v1(0, &nullPolicy, &pool); - ASSERT(res==INVALID_POLICY, "pool with empty pAlloc can't be created"); - res = pool_create_v1(0, &emptyFreePolicy, &pool); - ASSERT(res==INVALID_POLICY, "pool with empty pFree can't be created"); - ASSERT(!putMemCalls && !getMemCalls, "no callback calls are expected"); - res = pool_create_v1(0, &okPolicy, &pool); - ASSERT(res==POOL_OK, NULL); - pool_destroy(pool); - ASSERT(putMemCalls == getMemCalls, "no leaks after pool_destroy"); -} - -int TestMain () { - TestTooSmallBuffer(); - TestPoolReset(); - TestSharedPool(); - TestCrossThreadPools(); - TestFixedBufferPool(); - TestPoolGranularity(); - TestPoolKeepTillDestroy(); - TestEntries(); - TestPoolCreation(); - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_malloc_pure_c.c b/src/tbb/src/test/test_malloc_pure_c.c deleted file mode 100644 index 1caa56e74..000000000 --- a/src/tbb/src/test/test_malloc_pure_c.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#ifdef __cplusplus -#error For testing purpose, this file should be compiled with a C compiler, not C++ -#endif /*__cplusplus */ - -#include "tbb/scalable_allocator.h" -#include <stdio.h> -#include <assert.h> -#include <stdlib.h> /* for atexit */ - -/* - * The test is to check if the scalable_allocator.h and its functions - * can be used from pure C programs; also some regression checks are done - */ - -#if __linux__ -/* huge pages supported only under Linux so far */ -const int ExpectedResultHugePages = TBBMALLOC_OK; -#else -const int ExpectedResultHugePages = TBBMALLOC_NO_EFFECT; -#endif - -#if __TBB_SOURCE_DIRECTLY_INCLUDED -#include "../tbbmalloc/tbbmalloc_internal_api.h" -#else -#define __TBB_mallocProcessShutdownNotification() -#endif - -/* test that it's possible to call allocation function from atexit - after mallocProcessShutdownNotification() called */ -static void MyExit(void) { - void *p = scalable_malloc(32); - assert(p); - scalable_free(p); - __TBB_mallocProcessShutdownNotification(); -} - -int main(void) { - size_t i, j; - int curr_mode, res; - void *p1, *p2; - - atexit( MyExit ); - for ( curr_mode = 0; curr_mode<=1; curr_mode++) { - assert(ExpectedResultHugePages == - scalable_allocation_mode(TBBMALLOC_USE_HUGE_PAGES, !curr_mode)); - p1 = scalable_malloc(10*1024*1024); - assert(p1); - assert(ExpectedResultHugePages == - scalable_allocation_mode(TBBMALLOC_USE_HUGE_PAGES, curr_mode)); - scalable_free(p1); - } - /* note that huge pages (if supported) are still enabled at this point */ -#if __TBB_SOURCE_DIRECTLY_INCLUDED - assert(TBBMALLOC_OK == - scalable_allocation_mode(TBBMALLOC_INTERNAL_SOURCE_INCLUDED, 0)); -#endif - - for( i=0; i<=1<<16; ++i) { - p1 = scalable_malloc(i); - if( !p1 ) - printf("Warning: there should be memory but scalable_malloc returned NULL\n"); - scalable_free(p1); - } - p1 = p2 = NULL; - for( i=1024*1024; ; i/=2 ) - { - scalable_free(p1); - p1 = scalable_realloc(p2, i); - p2 = scalable_calloc(i, 32); - if (p2) { - if (i<sizeof(size_t)) { - for (j=0; j<i; j++) - assert(0==*((char*)p2+j)); - } else { - for (j=0; j<i; j+=sizeof(size_t)) - assert(0==*((size_t*)p2+j)); - } - } - scalable_free(p2); - p2 = scalable_malloc(i); - if (i==0) break; - } - for( i=1; i<1024*1024; i*=2 ) - { - scalable_free(p1); - p1 = scalable_realloc(p2, i); - p2 = scalable_malloc(i); - } - scalable_free(p1); - scalable_free(p2); - res = scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL); - assert(res == TBBMALLOC_OK); - res = scalable_allocation_command(TBBMALLOC_CLEAN_THREAD_BUFFERS, NULL); - /* expect all caches cleaned before, so got nothing from CLEAN_THREAD_BUFFERS */ - assert(res == TBBMALLOC_NO_EFFECT); - /* check that invalid param argument give expected result*/ - res = 
scalable_allocation_command(TBBMALLOC_CLEAN_THREAD_BUFFERS, - (void*)(intptr_t)1); - assert(res == TBBMALLOC_INVALID_PARAM); - __TBB_mallocProcessShutdownNotification(); - printf("done\n"); - return 0; -} diff --git a/src/tbb/src/test/test_malloc_regression.cpp b/src/tbb/src/test/test_malloc_regression.cpp deleted file mode 100644 index 07544ff52..000000000 --- a/src/tbb/src/test/test_malloc_regression.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 - -#include <stdio.h> -#include "tbb/scalable_allocator.h" - -class minimalAllocFree { -public: - void operator()(int size) const { - tbb::scalable_allocator<char> a; - char* str = a.allocate( size ); - a.deallocate( str, size ); - } -}; - -#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1 -#include "harness.h" - -template<typename Body, typename Arg> -void RunThread(const Body& body, const Arg& arg) { - NativeParallelForTask<Arg,Body> job(arg, body); - job.start(); - job.wait_to_finish(); -} - -/*--------------------------------------------------------------------*/ -// The regression test against bug #1518 where thread bootstrap allocations "leaked" - -#include "harness_memory.h" - -bool TestBootstrapLeak() { - /* In the bug 1518, each thread leaked ~384 bytes. - Initially, scalable allocator maps 1MB. Thus it is necessary to take out most of this space. - 1MB is chunked into 16K blocks; of those, one block is for thread bootstrap, and one more - should be reserved for the test body. 62 blocks left, each can serve 15 objects of 1024 bytes. - */ - const int alloc_size = 1024; - const int take_out_count = 15*62; - - tbb::scalable_allocator<char> a; - char* array[take_out_count]; - for( int i=0; i<take_out_count; ++i ) - array[i] = a.allocate( alloc_size ); - - RunThread( minimalAllocFree(), alloc_size ); // for threading library to take some memory - size_t memory_in_use = GetMemoryUsage(); - // Wait for memory usage data to "stabilize". The test number (1000) has nothing underneath. - for( int i=0; i<1000; i++) { - if( GetMemoryUsage()!=memory_in_use ) { - memory_in_use = GetMemoryUsage(); - i = -1; - } - } - - ptrdiff_t memory_leak = 0; - // Note that 16K bootstrap memory block is enough to serve 42 threads. 
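/* [Editor's note] Worked arithmetic behind the constants above, per the
   comments (this note is not part of the deleted file): the allocator's
   initial 1MB mapping splits into 64 blocks of 16K; one goes to thread
   bootstrap and one is reserved for the test body, leaving 62; at 15 objects
   of 1024 bytes per block, take_out_count = 15*62 = 930. And since each
   thread's bootstrap data is ~384 bytes, one 16K block serves about
   16384/384 ~= 42 threads -- hence the 200 thread runs below must recycle
   bootstrap space, so any per-thread leak shows up as monotonic growth. */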
-    const int num_thread_runs = 200;
-    for (int run=0; run<3; run++) {
-        memory_in_use = GetMemoryUsage();
-        for( int i=0; i<num_thread_runs; ++i )
-            RunThread( minimalAllocFree(), alloc_size );
-
-        memory_leak = GetMemoryUsage() - memory_in_use;
-        if (!memory_leak)
-            break;
-    }
-    if( memory_leak>0 ) { // possibly too strong?
-        REPORT( "Error: memory leak of up to %ld bytes\n", static_cast<long>(memory_leak));
-    }
-
-    for( int i=0; i<take_out_count; ++i )
-        a.deallocate( array[i], alloc_size );
-
-    return memory_leak<=0;
-}
-
-/*--------------------------------------------------------------------*/
-// The regression test against a bug with incompatible semantics of msize and realloc
-
-bool TestReallocMsize(size_t startSz) {
-    bool passed = true;
-
-    char *buf = (char*)scalable_malloc(startSz);
-    ASSERT(buf, "");
-    size_t realSz = scalable_msize(buf);
-    ASSERT(realSz>=startSz, "scalable_msize must not be less than the allocated size");
-    memset(buf, 'a', realSz-1);
-    buf[realSz-1] = 0;
-    char *buf1 = (char*)scalable_realloc(buf, 2*realSz);
-    ASSERT(buf1, "");
-    ASSERT(scalable_msize(buf1)>=2*realSz,
-           "scalable_msize must not be less than the allocated size");
-    buf1[2*realSz-1] = 0;
-    if ( strspn(buf1, "a") < realSz-1 ) {
-        REPORT( "Error: data broken for %ld Bytes object.\n", static_cast<long>(startSz));
-        passed = false;
-    }
-    scalable_free(buf1);
-
-    return passed;
-}
-
-// regression test against incorrect behavior of msize/realloc
-// for aligned objects
-void TestAlignedMsize()
-{
-    const int NUM = 4;
-    char *p[NUM];
-    size_t objSizes[NUM];
-    size_t allocSz[] = {8, 512, 2*1024, 4*2024, 8*1024, 0};
-
-    for (int s=0; allocSz[s]; s++) {
-        for (int i=0; i<NUM; i++)
-            p[i] = (char*)scalable_aligned_malloc(16, allocSz[s]);
-
-        for (int i=0; i<NUM; i++) {
-            objSizes[i] = scalable_msize(p[i]);
-            memset(p[i], i, objSizes[i]);
-        }
-        for (int i=0; i<NUM; i++) {
-            for (unsigned j=0; j<objSizes[i]; j++)
-                ASSERT(((char*)p[i])[j] == i, "Error: data broken\n");
-        }
-
-        for (int i=0; i<NUM; i++) {
-            p[i] = (char*)scalable_aligned_realloc(p[i], allocSz[s], 16);
-            memset(p[i], i, allocSz[s]);
-        }
-        for (int i=0; i<NUM; i++) {
-            for (unsigned j=0; j<objSizes[i]; j++)
-                ASSERT(((char*)p[i])[j] == i, "Error: data broken\n");
-        }
-        for (int i=0; i<NUM; i++)
-            scalable_free(p[i]);
-    }
-}
-
-/*--------------------------------------------------------------------*/
-// The main test function
-
-int TestMain () {
-    bool passed = true;
-    // Check whether memory usage data can be obtained; if not, skip test_bootstrap_leak.
-    if( GetMemoryUsage() )
-        passed &= TestBootstrapLeak();
-
-    // TestReallocMsize runs for each power of 2 and each Fibonacci number below 64K
-    for (size_t a=1, b=1, sum=1; sum<=64*1024; ) {
-        passed &= TestReallocMsize(sum);
-        a = b;
-        b = sum;
-        sum = a+b;
-    }
-    for (size_t a=2; a<=64*1024; a*=2)
-        passed &= TestReallocMsize(a);
-
-    ASSERT( passed, "Test failed" );
-
-    TestAlignedMsize();
-
-    return Harness::Done;
-}
diff --git a/src/tbb/src/test/test_malloc_used_by_lib.cpp b/src/tbb/src/test/test_malloc_used_by_lib.cpp
deleted file mode 100644
index f8c64c12a..000000000
--- a/src/tbb/src/test/test_malloc_used_by_lib.cpp
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.
Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if _USRDLL - -#include <stdlib.h> -#include "harness_defs.h" -#include "tbb/scalable_allocator.h" -#if __TBB_SOURCE_DIRECTLY_INCLUDED -#include "../tbbmalloc/tbbmalloc_internal_api.h" -#endif - -#define HARNESS_CUSTOM_MAIN 1 -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -#include "harness_assert.h" - -#if _WIN32||_WIN64 -extern "C" { - extern __declspec(dllexport) void callDll(); -} -#endif - -extern "C" void callDll() -{ - static const int NUM = 20; - void *ptrs[NUM]; - - for (int i=0; i<NUM; i++) { - ptrs[i] = scalable_malloc(i*1024); - ASSERT(ptrs[i], NULL); - } - for (int i=0; i<NUM; i++) - scalable_free(ptrs[i]); -#if __TBB_SOURCE_DIRECTLY_INCLUDED && (_WIN32||_WIN64) - __TBB_mallocThreadShutdownNotification(); -#endif -} - -#if __TBB_SOURCE_DIRECTLY_INCLUDED - -struct RegisterProcessShutdownNotification { - ~RegisterProcessShutdownNotification() { - __TBB_mallocProcessShutdownNotification(); - } -}; - -static RegisterProcessShutdownNotification reg; - -#endif - -#else // _USRDLL - -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#include "harness_dynamic_libs.h" -#if __TBB_WIN8UI_SUPPORT -// FIXME: fix the test to support Windows* 8 Store Apps mode. 
-#define HARNESS_SKIP_TEST 1 -#endif -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" - -#if !HARNESS_SKIP_TEST - -#include "harness_memory.h" -#include "harness_tbb_independence.h" -#include "harness_barrier.h" - -class UseDll { - Harness::FunctionAddress run; -public: - UseDll(Harness::FunctionAddress runPtr) : run(runPtr) { } - void operator()( int /*id*/ ) const { - (*run)(); - } -}; - -void LoadThreadsUnload() -{ - Harness::LIBRARY_HANDLE lib = - Harness::OpenLibrary(TEST_LIBRARY_NAME("test_malloc_used_by_lib_dll")); - ASSERT(lib, "Can't load " TEST_LIBRARY_NAME("test_malloc_used_by_lib_dll")); - NativeParallelFor( 4, UseDll( Harness::GetAddress(lib, "callDll") ) ); - Harness::CloseLibrary(lib); -} - -struct UnloadCallback { - Harness::LIBRARY_HANDLE lib; - - void operator() () const { - Harness::CloseLibrary(lib); - } -}; - -struct RunWithLoad : NoAssign { - static Harness::SpinBarrier startBarr, endBarr; - static UnloadCallback unloadCallback; - static Harness::FunctionAddress runPtr; - - void operator()(int id) const { - if (!id) { - Harness::LIBRARY_HANDLE lib = - Harness::OpenLibrary(TEST_LIBRARY_NAME("test_malloc_used_by_lib_dll")); - ASSERT(lib, "Can't load " TEST_LIBRARY_NAME("test_malloc_used_by_lib_dll")); - runPtr = Harness::GetAddress(lib, "callDll"); - unloadCallback.lib = lib; - } - startBarr.wait(); - (*runPtr)(); - endBarr.wait(unloadCallback); - } -}; - -Harness::SpinBarrier RunWithLoad::startBarr, RunWithLoad::endBarr; -UnloadCallback RunWithLoad::unloadCallback; -Harness::FunctionAddress RunWithLoad::runPtr; - -void ThreadsLoadUnload() -{ - const int threads = 4; - - RunWithLoad::startBarr.initialize(threads); - RunWithLoad::endBarr.initialize(threads); - NativeParallelFor(threads, RunWithLoad()); -} - -int TestMain () { - const int ITERS = 20; - int i; - std::ptrdiff_t memory_leak = 0; - - GetMemoryUsage(); - - for (int run = 0; run<2; run++) { - // expect that memory consumption stabilized after several runs - for (i=0; i<ITERS; i++) { - std::size_t memory_in_use = GetMemoryUsage(); - if (run) - LoadThreadsUnload(); - else - ThreadsLoadUnload(); - memory_leak = GetMemoryUsage() - memory_in_use; - if (memory_leak == 0) // possibly too strong? - break; - } - if(i==ITERS) { - // not stabilized, could be leak - REPORT( "Error: memory leak of up to %ld bytes\n", static_cast<long>(memory_leak)); - exit(1); - } - } - - return Harness::Done; -} - -#endif /* HARNESS_SKIP_TEST */ -#endif // _USRDLL diff --git a/src/tbb/src/test/test_malloc_whitebox.cpp b/src/tbb/src/test/test_malloc_whitebox.cpp deleted file mode 100644 index f4175583e..000000000 --- a/src/tbb/src/test/test_malloc_whitebox.cpp +++ /dev/null @@ -1,1144 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-/* prevent loading the dynamic TBBmalloc at startup; it is not needed
-   for the whitebox test */
-#define __TBB_SOURCE_DIRECTLY_INCLUDED 1
-
-// According to the C99 standard, INTPTR_MIN is defined for C++
-// iff __STDC_LIMIT_MACROS is pre-defined
-#define __STDC_LIMIT_MACROS 1
-
-// To disable exceptions in <vector> and <list> on Windows*
-#undef _HAS_EXCEPTIONS
-#define _HAS_EXCEPTIONS _CPPUNWIND
-
-#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1
-
-#include "harness.h"
-#include "harness_barrier.h"
-
-// To avoid depending on the ITT support machinery
-#ifdef DO_ITT_NOTIFY
-#undef DO_ITT_NOTIFY
-#endif
-
-#define __TBB_MALLOC_WHITEBOX_TEST 1 // to get access to allocator internals
-// helps trigger a rare race condition
-#define WhiteboxTestingYield() (__TBB_Yield(), __TBB_Yield(), __TBB_Yield(), __TBB_Yield())
-
-#define protected public
-#define private public
-#include "../tbbmalloc/frontend.cpp"
-#undef protected
-#undef private
-#include "../tbbmalloc/backend.cpp"
-#include "../tbbmalloc/backref.cpp"
-
-namespace tbbmalloc_whitebox {
-    size_t locGetProcessed = 0;
-    size_t locPutProcessed = 0;
-}
-#include "../tbbmalloc/large_objects.cpp"
-#include "../tbbmalloc/tbbmalloc.cpp"
-
-const int LARGE_MEM_SIZES_NUM = 10;
-const size_t MByte = 1024*1024;
-
-class AllocInfo {
-    int *p;
-    int val;
-    int size;
-public:
-    AllocInfo() : p(NULL), val(0), size(0) {}
-    explicit AllocInfo(int sz) : p((int*)scalable_malloc(sz*sizeof(int))),
-                                   val(rand()), size(sz) {
-        ASSERT(p, NULL);
-        for (int k=0; k<size; k++)
-            p[k] = val;
-    }
-    void check() const {
-        for (int k=0; k<size; k++)
-            ASSERT(p[k] == val, NULL);
-    }
-    void clear() {
-        scalable_free(p);
-    }
-};
-
-class SimpleBarrier: NoAssign {
-protected:
-    static Harness::SpinBarrier barrier;
-public:
-    static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
-};
-
-Harness::SpinBarrier SimpleBarrier::barrier;
-
-class TestLargeObjCache: public SimpleBarrier {
-public:
-    static int largeMemSizes[LARGE_MEM_SIZES_NUM];
-
-    TestLargeObjCache( ) {}
-
-    void operator()( int /*mynum*/ ) const {
-        AllocInfo allocs[LARGE_MEM_SIZES_NUM];
-
-        // push to maximal cache limit
-        for (int i=0; i<2; i++) {
-            const int sizes[] = { MByte/sizeof(int),
-                                  (MByte-2*LargeObjectCache::largeBlockCacheStep)/sizeof(int) };
-            for (int q=0; q<2; q++) {
-                size_t curr = 0;
-                for (int j=0; j<LARGE_MEM_SIZES_NUM; j++, curr++)
-                    new (allocs+curr) AllocInfo(sizes[q]);
-
-                for (size_t j=0; j<curr; j++) {
-                    allocs[j].check();
-                    allocs[j].clear();
-                }
-            }
-        }
-
-        barrier.wait();
-
-        // check caching correctness
-        for (int i=0; i<1000; i++) {
-            size_t curr = 0;
-            for (int j=0; j<LARGE_MEM_SIZES_NUM-1; j++, curr++)
-                new (allocs+curr) AllocInfo(largeMemSizes[j]);
-
-            new (allocs+curr)
-                AllocInfo((int)(4*minLargeObjectSize +
2*minLargeObjectSize*(1.*rand()/RAND_MAX))); - curr++; - - for (size_t j=0; j<curr; j++) { - allocs[j].check(); - allocs[j].clear(); - } - } - } -}; - -int TestLargeObjCache::largeMemSizes[LARGE_MEM_SIZES_NUM]; - -void TestLargeObjectCache() -{ - for (int i=0; i<LARGE_MEM_SIZES_NUM; i++) - TestLargeObjCache::largeMemSizes[i] = - (int)(minLargeObjectSize + 2*minLargeObjectSize*(1.*rand()/RAND_MAX)); - - for( int p=MaxThread; p>=MinThread; --p ) { - TestLargeObjCache::initBarrier( p ); - NativeParallelFor( p, TestLargeObjCache() ); - } -} - -#if MALLOC_CHECK_RECURSION - -class TestStartupAlloc: public SimpleBarrier { - struct TestBlock { - void *ptr; - size_t sz; - }; - static const int ITERS = 100; -public: - TestStartupAlloc() {} - void operator()(int) const { - TestBlock blocks1[ITERS], blocks2[ITERS]; - - barrier.wait(); - - for (int i=0; i<ITERS; i++) { - blocks1[i].sz = rand() % minLargeObjectSize; - blocks1[i].ptr = StartupBlock::allocate(blocks1[i].sz); - ASSERT(blocks1[i].ptr && StartupBlock::msize(blocks1[i].ptr)>=blocks1[i].sz - && 0==(uintptr_t)blocks1[i].ptr % sizeof(void*), NULL); - memset(blocks1[i].ptr, i, blocks1[i].sz); - } - for (int i=0; i<ITERS; i++) { - blocks2[i].sz = rand() % minLargeObjectSize; - blocks2[i].ptr = StartupBlock::allocate(blocks2[i].sz); - ASSERT(blocks2[i].ptr && StartupBlock::msize(blocks2[i].ptr)>=blocks2[i].sz - && 0==(uintptr_t)blocks2[i].ptr % sizeof(void*), NULL); - memset(blocks2[i].ptr, i, blocks2[i].sz); - - for (size_t j=0; j<blocks1[i].sz; j++) - ASSERT(*((char*)blocks1[i].ptr+j) == i, NULL); - Block *block = (Block *)alignDown(blocks1[i].ptr, slabSize); - ((StartupBlock *)block)->free(blocks1[i].ptr); - } - for (int i=ITERS-1; i>=0; i--) { - for (size_t j=0; j<blocks2[i].sz; j++) - ASSERT(*((char*)blocks2[i].ptr+j) == i, NULL); - Block *block = (Block *)alignDown(blocks2[i].ptr, slabSize); - ((StartupBlock *)block)->free(blocks2[i].ptr); - } - } -}; - -#endif /* MALLOC_CHECK_RECURSION */ - -class BackRefWork: NoAssign { - struct TestBlock { - intptr_t data; - BackRefIdx idx; - }; - static const int ITERS = 2*BR_MAX_CNT+2; -public: - BackRefWork() {} - void operator()(int) const { - TestBlock blocks[ITERS]; - - for (int i=0; i<ITERS; i++) { - blocks[i].idx = BackRefIdx::newBackRef(/*largeObj=*/false); - setBackRef(blocks[i].idx, &blocks[i].data); - } - for (int i=0; i<ITERS; i++) - ASSERT((Block*)&blocks[i].data == getBackRef(blocks[i].idx), NULL); - for (int i=ITERS-1; i>=0; i--) - removeBackRef(blocks[i].idx); - } -}; - -class LocalCachesHit: NoAssign { - // set ITERS to trigger possible leak of backreferences - // during cleanup on cache overflow and on thread termination - static const int ITERS = 2*(FreeBlockPool::POOL_HIGH_MARK + - LocalLOC::LOC_HIGH_MARK); -public: - LocalCachesHit() {} - void operator()(int) const { - void *objsSmall[ITERS], *objsLarge[ITERS]; - - for (int i=0; i<ITERS; i++) { - objsSmall[i] = scalable_malloc(minLargeObjectSize-1); - objsLarge[i] = scalable_malloc(minLargeObjectSize); - } - for (int i=0; i<ITERS; i++) { - scalable_free(objsSmall[i]); - scalable_free(objsLarge[i]); - } -#ifdef USE_WINTHREAD - // Under Windows DllMain is used for mallocThreadShutdownNotification - // calling. As DllMain is not used during whitebox testing, - // we have to call the callback manually. 
-        __TBB_mallocThreadShutdownNotification();
-#endif
-    }
-};
-
-static size_t allocatedBackRefCount()
-{
-    size_t cnt = 0;
-    for (int i=0; i<=backRefMaster->lastUsed; i++)
-        cnt += backRefMaster->backRefBl[i]->allocatedCount;
-    return cnt;
-}
-
-class TestInvalidBackrefs: public SimpleBarrier {
-#if __ANDROID__
-    // Android requires lower iters due to lack of virtual memory.
-    static const int BACKREF_GROWTH_ITERS = 50*1024;
-#else
-    static const int BACKREF_GROWTH_ITERS = 200*1024;
-#endif
-
-    static tbb::atomic<bool> backrefGrowthDone;
-    static void *ptrs[BACKREF_GROWTH_ITERS];
-public:
-    TestInvalidBackrefs() {}
-    void operator()(int id) const {
-
-        if (!id) {
-            backrefGrowthDone = false;
-            barrier.wait();
-
-            for (int i=0; i<BACKREF_GROWTH_ITERS; i++)
-                ptrs[i] = scalable_malloc(minLargeObjectSize);
-            backrefGrowthDone = true;
-            for (int i=0; i<BACKREF_GROWTH_ITERS; i++)
-                scalable_free(ptrs[i]);
-        } else {
-            void *p2 = scalable_malloc(minLargeObjectSize-1);
-            char *p1 = (char*)scalable_malloc(minLargeObjectSize-1);
-            LargeObjectHdr *hdr =
-                (LargeObjectHdr*)(p1+minLargeObjectSize-1 - sizeof(LargeObjectHdr));
-            hdr->backRefIdx.master = 7;
-            hdr->backRefIdx.largeObj = 1;
-            hdr->backRefIdx.offset = 2000;
-
-            barrier.wait();
-
-            while (!backrefGrowthDone) {
-                scalable_free(p2);
-                p2 = scalable_malloc(minLargeObjectSize-1);
-            }
-            scalable_free(p1);
-            scalable_free(p2);
-        }
-    }
-};
-
-tbb::atomic<bool> TestInvalidBackrefs::backrefGrowthDone;
-void *TestInvalidBackrefs::ptrs[BACKREF_GROWTH_ITERS];
-
-void TestBackRef() {
-    size_t beforeNumBackRef, afterNumBackRef;
-
-    beforeNumBackRef = allocatedBackRefCount();
-    for( int p=MaxThread; p>=MinThread; --p )
-        NativeParallelFor( p, BackRefWork() );
-    afterNumBackRef = allocatedBackRefCount();
-    ASSERT(beforeNumBackRef==afterNumBackRef, "backreference leak detected");
-
-    // lastUsed marks peak resource consumption. As we allocate below the mark,
-    // it must not move up, otherwise there is a resource leak.
-    int sustLastUsed = backRefMaster->lastUsed;
-    NativeParallelFor( 1, BackRefWork() );
-    ASSERT(sustLastUsed == backRefMaster->lastUsed, "backreference leak detected");
-
-    // check for leaks of back references while per-thread caches are in use;
-    // a warm-up run is needed to cover the bootStrapMalloc call
-    NativeParallelFor( 1, LocalCachesHit() );
-    beforeNumBackRef = allocatedBackRefCount();
-    NativeParallelFor( 2, LocalCachesHit() );
-    int res = scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL);
-    ASSERT(res == TBBMALLOC_OK, NULL);
-    afterNumBackRef = allocatedBackRefCount();
-    ASSERT(beforeNumBackRef>=afterNumBackRef, "backreference leak detected");
-
-    // This is a regression test against a race condition between backreference
-    // extension and the check for an invalid BackRefIdx.
-    // When detecting whether an object is large or small, scalable_free first
-    // checks for large objects, so there is a chance that a small object is
-    // prepended with a seemingly valid BackRefIdx for a large object, which
-    // triggers the bug.
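/* [Editor's note] For orientation (this note is not part of the deleted file):
   per the BackRefIdx replica in test_malloc_overload.cpp above and the
   sizeof(BackRefIdx)==4 assertion in TestObjectRecognition below, the 4 bytes
   pack a 16-bit master index, a 1-bit largeObj flag, and a 15-bit offset --
   which is why the forged header above only needs master = 7, largeObj = 1,
   offset = 2000 to look like a plausible large-object back reference. */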
-    TestInvalidBackrefs::initBarrier(MaxThread);
-    NativeParallelFor( MaxThread, TestInvalidBackrefs() );
-}
-
-void *getMem(intptr_t /*pool_id*/, size_t &bytes)
-{
-    const size_t BUF_SIZE = 8*1024*1024;
-    static char space[BUF_SIZE];
-    static size_t pos;
-
-    if (pos + bytes > BUF_SIZE)
-        return NULL;
-
-    void *ret = space + pos;
-    pos += bytes;
-
-    return ret;
-}
-
-int putMem(intptr_t /*pool_id*/, void* /*raw_ptr*/, size_t /*raw_bytes*/)
-{
-    return 0;
-}
-
-struct MallocPoolHeader {
-    void *rawPtr;
-    size_t userSize;
-};
-
-void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes)
-{
-    void *rawPtr = malloc(bytes+sizeof(MallocPoolHeader));
-    void *ret = (void *)((uintptr_t)rawPtr+sizeof(MallocPoolHeader));
-
-    MallocPoolHeader *hdr = (MallocPoolHeader*)ret-1;
-    hdr->rawPtr = rawPtr;
-    hdr->userSize = bytes;
-
-    return ret;
-}
-
-int putMallocMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
-{
-    MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
-    ASSERT(bytes == hdr->userSize, "Invalid size in pool callback.");
-    free(hdr->rawPtr);
-
-    return 0;
-}
-
-class StressLOCacheWork: NoAssign {
-    rml::MemoryPool *mallocPool;
-public:
-    StressLOCacheWork(rml::MemoryPool *mallocPool) : mallocPool(mallocPool) {}
-    void operator()(int) const {
-        for (size_t sz=minLargeObjectSize; sz<1*1024*1024;
-             sz+=LargeObjectCache::largeBlockCacheStep) {
-            void *ptr = pool_malloc(mallocPool, sz);
-            ASSERT(ptr, "Memory was not allocated");
-            memset(ptr, sz, sz);
-            pool_free(mallocPool, ptr);
-        }
-    }
-};
-
-void TestPools() {
-    rml::MemPoolPolicy pol(getMem, putMem);
-    size_t beforeNumBackRef, afterNumBackRef;
-
-    rml::MemoryPool *pool1;
-    rml::MemoryPool *pool2;
-    pool_create_v1(0, &pol, &pool1);
-    pool_create_v1(0, &pol, &pool2);
-    pool_destroy(pool1);
-    pool_destroy(pool2);
-
-    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL);
-    beforeNumBackRef = allocatedBackRefCount();
-    rml::MemoryPool *fixedPool;
-
-    pool_create_v1(0, &pol, &fixedPool);
-    pol.pAlloc = getMallocMem;
-    pol.pFree = putMallocMem;
-    pol.granularity = 8;
-    rml::MemoryPool *mallocPool;
-
-    pool_create_v1(0, &pol, &mallocPool);
-/* Check that the large object cache (LOC) returns the correct size for cached
-   objects. passBackendSz-Byte objects are cached in the LOC but bypass the
-   backend, so their memory is requested directly from the allocation callback.
-   anotherLOCBinSz-Byte objects must fit in a different LOC bin, so that their
-   allocation/releasing leads to cache cleanup. All this is expected to lead to
-   the release of the passBackendSz-Byte objects from the LOC during LOC
-   cleanup, and putMallocMem checks that the returned size
-   is correct.
-*/
-    const size_t passBackendSz = Backend::maxBinned_HugePage+1,
-                 anotherLOCBinSz = minLargeObjectSize+1;
-    for (int i=0; i<10; i++) { // run long enough to be cached
-        void *p = pool_malloc(mallocPool, passBackendSz);
-        ASSERT(p, "Memory was not allocated");
-        pool_free(mallocPool, p);
-    }
-    // run long enough that the passBackendSz allocation is cleaned from the cache
-    // and returned back to putMallocMem for size checking
-    for (int i=0; i<1000; i++) {
-        void *p = pool_malloc(mallocPool, anotherLOCBinSz);
-        ASSERT(p, "Memory was not allocated");
-        pool_free(mallocPool, p);
-    }
-
-    void *smallObj = pool_malloc(fixedPool, 10);
-    ASSERT(smallObj, "Memory was not allocated");
-    memset(smallObj, 1, 10);
-    void *ptr = pool_malloc(fixedPool, 1024);
-    ASSERT(ptr, "Memory was not allocated");
-    memset(ptr, 1, 1024);
-    void *largeObj = pool_malloc(fixedPool, minLargeObjectSize);
-    ASSERT(largeObj, "Memory was not allocated");
-    memset(largeObj, 1, minLargeObjectSize);
-    ptr = pool_malloc(fixedPool, minLargeObjectSize);
-    ASSERT(ptr, "Memory was not allocated");
-    memset(ptr, minLargeObjectSize, minLargeObjectSize);
-    pool_malloc(fixedPool, 10*minLargeObjectSize); // no leak for unsuccessful allocations
-    pool_free(fixedPool, smallObj);
-    pool_free(fixedPool, largeObj);
-
-    // provoke large object cache cleanup and hope no leaks occur
-    for( int p=MaxThread; p>=MinThread; --p )
-        NativeParallelFor( p, StressLOCacheWork(mallocPool) );
-    pool_destroy(mallocPool);
-    pool_destroy(fixedPool);
-
-    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL);
-    afterNumBackRef = allocatedBackRefCount();
-    ASSERT(beforeNumBackRef==afterNumBackRef, "backreference leak detected");
-
-    {
-        // test usedSize/cachedSize and LOC bitmask correctness
-        void *p[5];
-        pool_create_v1(0, &pol, &mallocPool);
-        const LargeObjectCache *loc = &((rml::internal::MemoryPool*)mallocPool)->extMemPool.loc;
-        p[3] = pool_malloc(mallocPool, minLargeObjectSize+2*LargeObjectCache::largeBlockCacheStep);
-        for (int i=0; i<10; i++) {
-            p[0] = pool_malloc(mallocPool, minLargeObjectSize);
-            p[1] = pool_malloc(mallocPool, minLargeObjectSize+LargeObjectCache::largeBlockCacheStep);
-            pool_free(mallocPool, p[0]);
-            pool_free(mallocPool, p[1]);
-        }
-        ASSERT(loc->getUsedSize(), NULL);
-        pool_free(mallocPool, p[3]);
-        ASSERT(loc->getLOCSize() < 3*(minLargeObjectSize+LargeObjectCache::largeBlockCacheStep), NULL);
-        const size_t maxLocalLOCSize = LocalLOCImpl<3,30>::getMaxSize();
-        ASSERT(loc->getUsedSize() <= maxLocalLOCSize, NULL);
-        for (int i=0; i<3; i++)
-            p[i] = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
-        size_t currUser = loc->getUsedSize();
-        ASSERT(!loc->getLOCSize() && currUser >= 3*(minLargeObjectSize+LargeObjectCache::largeBlockCacheStep), NULL);
-        p[4] = pool_malloc(mallocPool, minLargeObjectSize+3*LargeObjectCache::largeBlockCacheStep);
-        ASSERT(loc->getUsedSize() - currUser >= minLargeObjectSize+3*LargeObjectCache::largeBlockCacheStep, NULL);
-        pool_free(mallocPool, p[4]);
-        ASSERT(loc->getUsedSize() <= currUser+maxLocalLOCSize, NULL);
-        pool_reset(mallocPool);
-        ASSERT(!loc->getLOCSize() && !loc->getUsedSize(), NULL);
-        pool_destroy(mallocPool);
-    }
-    // To test the LOC we need bigger lists than the production LocalLOC
-    // releases. Create a special LocalLOC.
- { - LocalLOCImpl<2, 20> lLOC; - pool_create_v1(0, &pol, &mallocPool); - rml::internal::ExtMemoryPool *mPool = &((rml::internal::MemoryPool*)mallocPool)->extMemPool; - const LargeObjectCache *loc = &((rml::internal::MemoryPool*)mallocPool)->extMemPool.loc; - for (int i=0; i<22; i++) { - void *o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep); - bool ret = lLOC.put(((LargeObjectHdr*)o - 1)->memoryBlock, mPool); - ASSERT(ret, NULL); - - o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep); - ret = lLOC.put(((LargeObjectHdr*)o - 1)->memoryBlock, mPool); - ASSERT(ret, NULL); - } - lLOC.externalCleanup(mPool); - ASSERT(!loc->getUsedSize(), NULL); - - pool_destroy(mallocPool); - } -} - -void TestObjectRecognition() { - size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr); - unsigned falseObjectSize = 113; // unsigned is the type expected by getObjectSize - size_t obtainedSize; - - ASSERT(sizeof(BackRefIdx)==4, "Unexpected size of BackRefIdx"); - ASSERT(getObjectSize(falseObjectSize)!=falseObjectSize, "Error in test: bad choice for false object size"); - - void* mem = scalable_malloc(2*slabSize); - ASSERT(mem, "Memory was not allocated"); - Block* falseBlock = (Block*)alignUp((uintptr_t)mem, slabSize); - falseBlock->objectSize = falseObjectSize; - char* falseSO = (char*)falseBlock + falseObjectSize*7; - ASSERT(alignDown(falseSO, slabSize)==(void*)falseBlock, "Error in test: false object offset is too big"); - - void* bufferLOH = scalable_malloc(2*slabSize + headersSize); - ASSERT(bufferLOH, "Memory was not allocated"); - LargeObjectHdr* falseLO = - (LargeObjectHdr*)alignUp((uintptr_t)bufferLOH + headersSize, slabSize); - LargeObjectHdr* headerLO = (LargeObjectHdr*)falseLO-1; - headerLO->memoryBlock = (LargeMemoryBlock*)bufferLOH; - headerLO->memoryBlock->unalignedSize = 2*slabSize + headersSize; - headerLO->memoryBlock->objectSize = slabSize + headersSize; - headerLO->backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true); - setBackRef(headerLO->backRefIdx, headerLO); - ASSERT(scalable_msize(falseLO) == slabSize + headersSize, - "Error in test: LOH falsification failed"); - removeBackRef(headerLO->backRefIdx); - - const int NUM_OF_IDX = BR_MAX_CNT+2; - BackRefIdx idxs[NUM_OF_IDX]; - for (int cnt=0; cnt<2; cnt++) { - for (int master = -10; master<10; master++) { - falseBlock->backRefIdx.master = (uint16_t)master; - headerLO->backRefIdx.master = (uint16_t)master; - - for (int bl = -10; bl<BR_MAX_CNT+10; bl++) { - falseBlock->backRefIdx.offset = (uint16_t)bl; - headerLO->backRefIdx.offset = (uint16_t)bl; - - for (int largeObj = 0; largeObj<2; largeObj++) { - falseBlock->backRefIdx.largeObj = largeObj; - headerLO->backRefIdx.largeObj = largeObj; - - obtainedSize = __TBB_malloc_safer_msize(falseSO, NULL); - ASSERT(obtainedSize==0, "Incorrect pointer accepted"); - obtainedSize = __TBB_malloc_safer_msize(falseLO, NULL); - ASSERT(obtainedSize==0, "Incorrect pointer accepted"); - } - } - } - if (cnt == 1) { - for (int i=0; i<NUM_OF_IDX; i++) - removeBackRef(idxs[i]); - break; - } - for (int i=0; i<NUM_OF_IDX; i++) { - idxs[i] = BackRefIdx::newBackRef(/*largeObj=*/false); - setBackRef(idxs[i], NULL); - } - } - char *smallPtr = (char*)scalable_malloc(falseObjectSize); - obtainedSize = __TBB_malloc_safer_msize(smallPtr, NULL); - ASSERT(obtainedSize==getObjectSize(falseObjectSize), "Correct pointer not accepted?"); - scalable_free(smallPtr); - - obtainedSize = __TBB_malloc_safer_msize(mem, NULL); - 
ASSERT(obtainedSize>=2*slabSize, "Correct pointer not accepted?");
-    scalable_free(mem);
-    scalable_free(bufferLOH);
-}
-
-class TestBackendWork: public SimpleBarrier {
-    struct TestBlock {
-        intptr_t data;
-        BackRefIdx idx;
-    };
-    static const int ITERS = 20;
-
-    rml::internal::Backend *backend;
-public:
-    TestBackendWork(rml::internal::Backend *bknd) : backend(bknd) {}
-    void operator()(int) const {
-        barrier.wait();
-
-        for (int i=0; i<ITERS; i++) {
-            BlockI *slabBlock = backend->getSlabBlock(1);
-            ASSERT(slabBlock, "Memory was not allocated");
-            LargeMemoryBlock *lmb = backend->getLargeBlock(8*1024);
-            backend->putSlabBlock(slabBlock);
-            backend->putLargeBlock(lmb);
-        }
-    }
-};
-
-void TestBackend()
-{
-    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
-    rml::MemoryPool *mPool;
-    pool_create_v1(0, &pol, &mPool);
-    rml::internal::ExtMemoryPool *ePool =
-        &((rml::internal::MemoryPool*)mPool)->extMemPool;
-    rml::internal::Backend *backend = &ePool->backend;
-
-    for( int p=MaxThread; p>=MinThread; --p ) {
-        // regression test against a race condition in backend synchronization,
-        // triggered only when the WhiteboxTestingYield() call yields
-        for (int i=0; i<100; i++) {
-            TestBackendWork::initBarrier(p);
-            NativeParallelFor( p, TestBackendWork(backend) );
-        }
-    }
-
-    BlockI *block = backend->getSlabBlock(1);
-    ASSERT(block, "Memory was not allocated");
-    backend->putSlabBlock(block);
-
-    // Check that the backend increases and decreases its total allocated size
-    // as memory is allocated and released.
-    const size_t memSize0 = backend->getTotalMemSize();
-    LargeMemoryBlock *lmb = backend->getLargeBlock(4*MByte);
-    ASSERT( lmb, ASSERT_TEXT );
-
-    const size_t memSize1 = backend->getTotalMemSize();
-    ASSERT( (intptr_t)(memSize1-memSize0) >= 4*MByte, "The backend has not increased the amount of memory in use." );
-
-    backend->putLargeBlock(lmb);
-    const size_t memSize2 = backend->getTotalMemSize();
-    ASSERT( memSize2 == memSize0, "The backend has not decreased the amount of memory in use." );
-
-    pool_destroy(mPool);
-}
-
-void TestBitMask()
-{
-    BitMaskMin<256> mask;
-
-    mask.reset();
-    mask.set(10, 1);
-    mask.set(5, 1);
-    mask.set(1, 1);
-    ASSERT(mask.getMinTrue(2) == 5, NULL);
-
-    mask.reset();
-    mask.set(0, 1);
-    mask.set(64, 1);
-    mask.set(63, 1);
-    mask.set(200, 1);
-    mask.set(255, 1);
-    ASSERT(mask.getMinTrue(0) == 0, NULL);
-    ASSERT(mask.getMinTrue(1) == 63, NULL);
-    ASSERT(mask.getMinTrue(63) == 63, NULL);
-    ASSERT(mask.getMinTrue(64) == 64, NULL);
-    ASSERT(mask.getMinTrue(101) == 200, NULL);
-    ASSERT(mask.getMinTrue(201) == 255, NULL);
-    mask.set(255, 0);
-    ASSERT(mask.getMinTrue(201) == -1, NULL);
-}
-
-size_t getMemSize()
-{
-    return defaultMemPool->extMemPool.backend.getTotalMemSize();
-}
-
-class CheckNotCached {
-    static size_t memSize;
-public:
-    void operator() () const {
-        int res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
-        ASSERT(res == TBBMALLOC_OK, NULL);
-        if (memSize==(size_t)-1) {
-            memSize = getMemSize();
-        } else {
-            ASSERT(getMemSize() == memSize, NULL);
-            memSize=(size_t)-1;
-        }
-    }
-};
-
-size_t CheckNotCached::memSize = (size_t)-1;
-
-class RunTestHeapLimit: public SimpleBarrier {
-public:
-    void operator()( int /*mynum*/ ) const {
-        // Provoke bootstrap heap initialization before recording memory size.
-        // NOTE: The initialization should be done only with a "large" object,
-        // since a "small" object allocation leads to a slab being blocked as
-        // an active block, and it is impossible to release it from a foreign
-        // thread.
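/* [Editor's note] The pattern this class and TestHeapLimit below rely on,
   using only calls shown here (this note is not part of the deleted file):
   scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1) pins the soft
   heap limit at one byte, which effectively disables caching, so getMemSize()
   stays stable across the barrier phases that CheckNotCached compares;
   calling it with 0 restores the default behavior. */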
-        scalable_free(scalable_malloc(minLargeObjectSize));
-        barrier.wait(CheckNotCached());
-        for (size_t n = minLargeObjectSize; n < 5*1024*1024; n += 128*1024)
-            scalable_free(scalable_malloc(n));
-        barrier.wait(CheckNotCached());
-    }
-};
-
-void TestHeapLimit()
-{
-    if(!isMallocInitialized()) doInitialization();
-    // tiny limit to stop caching
-    int res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
-    ASSERT(res == TBBMALLOC_OK, NULL);
-    // Provoke bootstrap heap initialization before recording memory size.
-    scalable_free(scalable_malloc(8));
-    size_t n, sizeBefore = getMemSize();
-
-    // Try to provoke a call to the OS for memory, to check that
-    // requests are not fulfilled from caches.
-    // A single call is not enough here because of backend fragmentation.
-    for (n = minLargeObjectSize; n < 10*1024*1024; n += 16*1024) {
-        void *p = scalable_malloc(n);
-        bool leave = (sizeBefore != getMemSize());
-        scalable_free(p);
-        if (leave)
-            break;
-        ASSERT(sizeBefore == getMemSize(), "No caching expected");
-    }
-    ASSERT(n < 10*1024*1024, "scalable_malloc doesn't provoke an OS request for memory, "
-           "is some internal cache still used?");
-
-    for( int p=MaxThread; p>=MinThread; --p ) {
-        RunTestHeapLimit::initBarrier( p );
-        NativeParallelFor( p, RunTestHeapLimit() );
-    }
-    // this call tries to match the limit as well as set it, so call it here
-    res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
-    ASSERT(res == TBBMALLOC_OK, NULL);
-    size_t m = getMemSize();
-    ASSERT(sizeBefore == m, NULL);
-    // restore default
-    res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 0);
-    ASSERT(res == TBBMALLOC_OK, NULL);
-}
-
-void checkNoHugePages()
-{
-    ASSERT(!hugePages.enabled, "scalable_allocation_mode "
-           "must have priority over environment variable");
-}
-
-/*---------------------------------------------------------------------------*/
-// The regression test against a bug in the TBBMALLOC_CLEAN_ALL_BUFFERS
-// allocation command. When cleanup is requested, the backend should process
-// the queue of postponed coalescing requests; otherwise not all unused memory
-// might be deallocated.
-
-const size_t alloc_size = 16*1024;
-const int total_alloc_size = 100 * 1024 * 1024;
-const int num_allocs = total_alloc_size / alloc_size;
-void *ptrs[num_allocs];
-
-tbb::atomic<int> deallocs_counter;
-
-struct TestCleanAllBuffersDeallocate : public SimpleBarrier {
-    void operator() ( int ) const {
-        barrier.wait();
-        for( int i = deallocs_counter++; i < num_allocs; i = deallocs_counter++ )
-            scalable_free( ptrs[i] );
-    }
-};
-
-// The idea is to allocate a set of objects and then deallocate them in random
-// order in parallel, to force conflicts to occur in the backend during
-// coalescing. Thus if the backend does not check the queue of postponed
-// coalescing requests it will not be able to unmap all memory and a memory
-// leak will be observed.
-void TestCleanAllBuffers() {
-    const int num_threads = 8;
-    // Clean up if something was allocated before the test
-    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS,0);
-
-    size_t memory_in_use_before = getMemSize();
-    for ( int i=0; i<num_allocs; ++i ) {
-        ptrs[i] = scalable_malloc( alloc_size );
-        ASSERT( ptrs[i] != NULL, "scalable_malloc has returned zero."
); - } - deallocs_counter = 0; - TestCleanAllBuffersDeallocate::initBarrier(num_threads); - NativeParallelFor(num_threads, TestCleanAllBuffersDeallocate()); - // TODO: recreate the conditions of the bug more reliably - if ( defaultMemPool->extMemPool.backend.coalescQ.blocksToFree == NULL ) - REMARK( "Warning: The queue of postponed coalescing requests is empty. Unable to create the condition for bug reproduction.\n" ); - ASSERT( scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS,0) == TBBMALLOC_OK, "The cleanup request has not cleaned anything." ); - size_t memory_in_use_after = getMemSize(); - - REMARK( "memory_in_use_before = %ld\nmemory_in_use_after = %ld\n", memory_in_use_before, memory_in_use_after ); - - size_t memory_leak = memory_in_use_after - memory_in_use_before; - ASSERT( memory_leak == 0, "The backend has not processed the queue of postponed coalescing requests during cleanup." ); -} -/*---------------------------------------------------------------------------*/ -/*------------------------- Large Object Cache tests ------------------------*/ -#if _MSC_VER==1600 || _MSC_VER==1500 - // ignore C4275: non dll-interface class 'stdext::exception' used as - // base for dll-interface class 'std::bad_cast' - #pragma warning (disable: 4275) -#endif -#include <vector> -#include <list> - -// default constructor of CacheBin -template<typename Props> -rml::internal::LargeObjectCacheImpl<Props>::CacheBin::CacheBin() {} - -template<typename Props> -class CacheBinModel { - - typedef typename rml::internal::LargeObjectCacheImpl<Props>::CacheBin CacheBinType; - - // The emulated cache bin. - CacheBinType cacheBinModel; - // The reference to the real cache bin inside the large object cache. - CacheBinType &cacheBin; - - const size_t size; - - // store only the caching timestamps - std::list<uintptr_t> objects; - - void doCleanup() { - if ( cacheBinModel.cachedSize > Props::TooLargeFactor*cacheBinModel.usedSize ) tooLargeLOC++; - else tooLargeLOC = 0; - - if (tooLargeLOC>3 && cacheBinModel.ageThreshold) - cacheBinModel.ageThreshold = (cacheBinModel.ageThreshold + cacheBinModel.meanHitRange)/2; - - uintptr_t currTime = cacheCurrTime; - while (!objects.empty() && (intptr_t)(currTime - objects.front()) > cacheBinModel.ageThreshold) { - cacheBinModel.cachedSize -= size; - cacheBinModel.lastCleanedAge = objects.front(); - objects.pop_front(); - } - - cacheBinModel.oldest = objects.empty() ?
0 : objects.front(); - } - -public: - CacheBinModel(CacheBinType &_cacheBin, size_t allocSize) : cacheBin(_cacheBin), size(allocSize) { - cacheBinModel.oldest = cacheBin.oldest; - cacheBinModel.lastCleanedAge = cacheBin.lastCleanedAge; - cacheBinModel.ageThreshold = cacheBin.ageThreshold; - cacheBinModel.usedSize = cacheBin.usedSize; - cacheBinModel.cachedSize = cacheBin.cachedSize; - cacheBinModel.meanHitRange = cacheBin.meanHitRange; - cacheBinModel.lastGet = cacheBin.lastGet; - } - void get() { - uintptr_t currTime = ++cacheCurrTime; - - if ( objects.empty() ) { - const uintptr_t sinceLastGet = currTime - cacheBinModel.lastGet; - if ( ( cacheBinModel.ageThreshold && sinceLastGet > Props::LongWaitFactor*cacheBinModel.ageThreshold ) || - ( cacheBinModel.lastCleanedAge && sinceLastGet > Props::LongWaitFactor*(cacheBinModel.lastCleanedAge - cacheBinModel.lastGet) ) ) - cacheBinModel.lastCleanedAge = cacheBinModel.ageThreshold = 0; - - if (cacheBinModel.lastCleanedAge) - cacheBinModel.ageThreshold = Props::OnMissFactor*(currTime - cacheBinModel.lastCleanedAge); - } else { - uintptr_t obj_age = objects.back(); - objects.pop_back(); - if ( objects.empty() ) cacheBinModel.oldest = 0; - - intptr_t hitRange = currTime - obj_age; - cacheBinModel.meanHitRange = cacheBinModel.meanHitRange? (cacheBinModel.meanHitRange + hitRange)/2 : hitRange; - - cacheBinModel.cachedSize -= size; - } - - cacheBinModel.usedSize += size; - cacheBinModel.lastGet = currTime; - - if ( currTime % rml::internal::cacheCleanupFreq == 0 ) doCleanup(); - } - - void putList( int num ) { - uintptr_t currTime = cacheCurrTime; - cacheCurrTime += num; - - cacheBinModel.usedSize -= num*size; - - bool cleanUpNeeded = false; - if ( !cacheBinModel.lastCleanedAge ) { - cacheBinModel.lastCleanedAge = ++currTime; - cleanUpNeeded |= currTime % rml::internal::cacheCleanupFreq == 0; - num--; - } - - for ( int i=1; i<=num; ++i ) { - currTime+=1; - cleanUpNeeded |= currTime % rml::internal::cacheCleanupFreq == 0; - if ( objects.empty() ) - cacheBinModel.oldest = currTime; - objects.push_back(currTime); - } - - cacheBinModel.cachedSize += num*size; - - if ( cleanUpNeeded ) doCleanup(); - } - - void check() { - ASSERT(cacheBinModel.oldest == cacheBin.oldest, ASSERT_TEXT); - ASSERT(cacheBinModel.lastCleanedAge == cacheBin.lastCleanedAge, ASSERT_TEXT); - ASSERT(cacheBinModel.ageThreshold == cacheBin.ageThreshold, ASSERT_TEXT); - ASSERT(cacheBinModel.usedSize == cacheBin.usedSize, ASSERT_TEXT); - ASSERT(cacheBinModel.cachedSize == cacheBin.cachedSize, ASSERT_TEXT); - ASSERT(cacheBinModel.meanHitRange == cacheBin.meanHitRange, ASSERT_TEXT); - ASSERT(cacheBinModel.lastGet == cacheBin.lastGet, ASSERT_TEXT); - } - - static uintptr_t cacheCurrTime; - static intptr_t tooLargeLOC; -}; - -template<typename Props> uintptr_t CacheBinModel<Props>::cacheCurrTime; -template<typename Props> intptr_t CacheBinModel<Props>::tooLargeLOC; - -template <typename Scenario> -void LOCModelTester() { - defaultMemPool->extMemPool.loc.cleanAll(); - defaultMemPool->extMemPool.loc.reset(); - - const size_t size = 16 * 1024; - const size_t headersSize = sizeof(rml::internal::LargeMemoryBlock)+sizeof(rml::internal::LargeObjectHdr); - const size_t allocationSize = LargeObjectCache::alignToBin(size+headersSize+rml::internal::largeObjectAlignment); - const int binIdx = defaultMemPool->extMemPool.loc.largeCache.sizeToIdx( allocationSize ); - - CacheBinModel<rml::internal::LargeObjectCache::LargeCacheTypeProps>::cacheCurrTime = defaultMemPool->extMemPool.loc.cacheCurrTime; - 
CacheBinModel<rml::internal::LargeObjectCache::LargeCacheTypeProps>::tooLargeLOC = defaultMemPool->extMemPool.loc.largeCache.tooLargeLOC; - CacheBinModel<rml::internal::LargeObjectCache::LargeCacheTypeProps> cacheBinModel(defaultMemPool->extMemPool.loc.largeCache.bin[binIdx], allocationSize); - - Scenario scen; - for (rml::internal::LargeMemoryBlock *lmb = scen.next(); (intptr_t)lmb != (intptr_t)-1; lmb = scen.next()) { - if ( lmb ) { - int num=1; - for (rml::internal::LargeMemoryBlock *curr = lmb; curr->next; curr=curr->next) num+=1; - defaultMemPool->extMemPool.freeLargeObject(lmb); - cacheBinModel.putList(num); - } else { - scen.saveLmb(defaultMemPool->extMemPool.mallocLargeObject(allocationSize)); - cacheBinModel.get(); - } - - cacheBinModel.check(); - } -} - -class TestBootstrap { - bool allocating; - std::vector<rml::internal::LargeMemoryBlock*> lmbArray; -public: - TestBootstrap() : allocating(true) {} - - rml::internal::LargeMemoryBlock* next() { - if ( allocating ) - return NULL; - if ( !lmbArray.empty() ) { - rml::internal::LargeMemoryBlock *ret = lmbArray.back(); - lmbArray.pop_back(); - return ret; - } - return (rml::internal::LargeMemoryBlock*)-1; - } - - void saveLmb( rml::internal::LargeMemoryBlock *lmb ) { - lmb->next = NULL; - lmbArray.push_back(lmb); - if ( lmbArray.size() == 1000 ) allocating = false; - } -}; - -class TestRandom { - std::vector<rml::internal::LargeMemoryBlock*> lmbArray; - int numOps; -public: - TestRandom() : numOps(100000) { - srand(1234); - } - - rml::internal::LargeMemoryBlock* next() { - if ( numOps-- ) { - if ( lmbArray.empty() || rand() / (RAND_MAX>>1) == 0 ) - return NULL; - size_t ind = rand()%lmbArray.size(); - if ( ind != lmbArray.size()-1 ) std::swap(lmbArray[ind],lmbArray[lmbArray.size()-1]); - rml::internal::LargeMemoryBlock *lmb = lmbArray.back(); - lmbArray.pop_back(); - return lmb; - } - return (rml::internal::LargeMemoryBlock*)-1; - } - - void saveLmb( rml::internal::LargeMemoryBlock *lmb ) { - lmb->next = NULL; - lmbArray.push_back(lmb); - } -}; - -class TestCollapsingMallocFree : public SimpleBarrier { -public: - static const int NUM_ALLOCS = 100000; - const int num_threads; - - TestCollapsingMallocFree( int _num_threads ) : num_threads(_num_threads) { - initBarrier( num_threads ); - } - - void operator() ( int ) const { - const size_t size = 16 * 1024; - const size_t headersSize = sizeof(rml::internal::LargeMemoryBlock)+sizeof(rml::internal::LargeObjectHdr); - const size_t allocationSize = LargeObjectCache::alignToBin(size+headersSize+rml::internal::largeObjectAlignment); - - barrier.wait(); - for ( int i=0; i<NUM_ALLOCS; ++i ) { - defaultMemPool->extMemPool.freeLargeObject( - defaultMemPool->extMemPool.mallocLargeObject(allocationSize) ); - } - } - - void check() { - ASSERT( tbbmalloc_whitebox::locGetProcessed == tbbmalloc_whitebox::locPutProcessed, ASSERT_TEXT ); - ASSERT( tbbmalloc_whitebox::locGetProcessed < num_threads*NUM_ALLOCS, "Not a single Malloc/Free pair was collapsed."
); - } -}; - -class TestCollapsingBootstrap : public SimpleBarrier { - class CheckNumAllocs { - const int num_threads; - public: - CheckNumAllocs( int _num_threads ) : num_threads(_num_threads) {} - void operator()() const { - ASSERT( tbbmalloc_whitebox::locGetProcessed == num_threads*NUM_ALLOCS, ASSERT_TEXT ); - ASSERT( tbbmalloc_whitebox::locPutProcessed == 0, ASSERT_TEXT ); - } - }; -public: - static const int NUM_ALLOCS = 1000; - const int num_threads; - - TestCollapsingBootstrap( int _num_threads ) : num_threads(_num_threads) { - initBarrier( num_threads ); - } - - void operator() ( int ) const { - const size_t size = 16 * 1024; - size_t headersSize = sizeof(rml::internal::LargeMemoryBlock)+sizeof(rml::internal::LargeObjectHdr); - size_t allocationSize = LargeObjectCache::alignToBin(size+headersSize+rml::internal::largeObjectAlignment); - - barrier.wait(); - rml::internal::LargeMemoryBlock *lmbArray[NUM_ALLOCS]; - for ( int i=0; i<NUM_ALLOCS; ++i ) - lmbArray[i] = defaultMemPool->extMemPool.mallocLargeObject(allocationSize); - - barrier.wait(CheckNumAllocs(num_threads)); - for ( int i=0; i<NUM_ALLOCS; ++i ) - defaultMemPool->extMemPool.freeLargeObject( lmbArray[i] ); - } - - void check() { - ASSERT( tbbmalloc_whitebox::locGetProcessed == tbbmalloc_whitebox::locPutProcessed, ASSERT_TEXT ); - ASSERT( tbbmalloc_whitebox::locGetProcessed == num_threads*NUM_ALLOCS, ASSERT_TEXT ); - } -}; - -template <typename Scenario> -void LOCCollapsingTester( int num_threads ) { - tbbmalloc_whitebox::locGetProcessed = 0; - tbbmalloc_whitebox::locPutProcessed = 0; - defaultMemPool->extMemPool.loc.cleanAll(); - defaultMemPool->extMemPool.loc.reset(); - - Scenario scen(num_threads); - NativeParallelFor(num_threads, scen); - - scen.check(); -} - -void TestLOC() { - LOCModelTester<TestBootstrap>(); - LOCModelTester<TestRandom>(); - - const int num_threads = 16; - LOCCollapsingTester<TestCollapsingBootstrap>( num_threads ); - if ( num_threads > 1 ) { - REMARK( "num_threads = %d\n", num_threads ); - LOCCollapsingTester<TestCollapsingMallocFree>( num_threads ); - } else { - REPORT( "Warning: concurrency is too low for TestMallocFreeCollapsing ( num_threads = %d )\n", num_threads ); - } -} -/*---------------------------------------------------------------------------*/ - -int TestMain () { - scalable_allocation_mode(USE_HUGE_PAGES, 0); -#if !_XBOX && !__TBB_WIN8UI_SUPPORT - putenv((char*)"TBB_MALLOC_USE_HUGE_PAGES=yes"); -#endif - checkNoHugePages(); - // backreference requires that initialization was done - if(!isMallocInitialized()) doInitialization(); - checkNoHugePages(); - // to succeed, leak detection must be the 1st memory-intensive test - TestBackRef(); - TestPools(); - TestBackend(); - -#if MALLOC_CHECK_RECURSION - for( int p=MaxThread; p>=MinThread; --p ) { - TestStartupAlloc::initBarrier( p ); - NativeParallelFor( p, TestStartupAlloc() ); - ASSERT(!firstStartupBlock, "Startup heap memory leak detected"); - } -#endif - - TestLargeObjectCache(); - TestObjectRecognition(); - TestBitMask(); - TestHeapLimit(); - TestCleanAllBuffers(); - TestLOC(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_model_plugin.cpp b/src/tbb/src/test/test_model_plugin.cpp deleted file mode 100644 index fb09f8e35..000000000 --- a/src/tbb/src/test/test_model_plugin.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -int TestMain() { - return Harness::Skipped; -} diff --git a/src/tbb/src/test/test_multifunction_node.cpp b/src/tbb/src/test/test_multifunction_node.cpp deleted file mode 100644 index 04e72871f..000000000 --- a/src/tbb/src/test/test_multifunction_node.cpp +++ /dev/null @@ -1,668 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_graph.h" - -#include "tbb/task_scheduler_init.h" -#include "tbb/spin_rw_mutex.h" - -#if TBB_USE_DEBUG -#define N 16 -#else -#define N 100 -#endif -#define MAX_NODES 4 - -//! 
Performs test on function nodes with limited concurrency and buffering -/** These tests check: - 1) that the number of executing copies never exceeds the concurrency limit - 2) that the node never rejects - 3) that no items are lost - and 4) all of this happens even if there are multiple predecessors and successors -*/ - -template< typename InputType > -struct parallel_put_until_limit : private NoAssign { - - harness_counting_sender<InputType> *my_senders; - - parallel_put_until_limit( harness_counting_sender<InputType> *senders ) : my_senders(senders) {} - - void operator()( int i ) const { - if ( my_senders ) { - my_senders[i].try_put_until_limit(); - } - } - -}; - -//! exercise buffered multifunction_node. -template< typename InputType, typename OutputTuple, typename Body > -void buffered_levels( size_t concurrency, Body body ) { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type OutputType; - // Do for lc = 1 to concurrency level - for ( size_t lc = 1; lc <= concurrency; ++lc ) { - tbb::flow::graph g; - - // Set the execute_counter back to zero in the harness - harness_graph_multifunction_executor<InputType, OutputTuple>::execute_count = 0; - // Set the number of current executors to zero. - harness_graph_multifunction_executor<InputType, OutputTuple>::current_executors = 0; - // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded. - harness_graph_multifunction_executor<InputType, OutputTuple>::max_executors = lc; - - // Create the function_node with the appropriate concurrency level, and use default buffering - tbb::flow::multifunction_node< InputType, OutputTuple > exe_node( g, lc, body ); - - // Create a vector of identical exe_nodes - std::vector< tbb::flow::multifunction_node< InputType, OutputTuple > > exe_vec(2, exe_node); - - // exercise each of the copied nodes - for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) { - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - // Create num_receivers counting receivers and connect the exe_vec[node_idx] to them.
- harness_mapped_receiver<OutputType> *receivers = new harness_mapped_receiver<OutputType>[num_receivers]; - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( tbb::flow::output_port<0>(exe_vec[node_idx]), receivers[r] ); - } - - // Do the test with varying numbers of senders - harness_counting_sender<InputType> *senders = NULL; - for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) { - // Create num_senders senders, set their message limit each to N, and connect them to the exe_vec[node_idx] - senders = new harness_counting_sender<InputType>[num_senders]; - for (size_t s = 0; s < num_senders; ++s ) { - senders[s].my_limit = N; - tbb::flow::make_edge( senders[s], exe_vec[node_idx] ); - } - - // Initialize the receivers so they know how many senders and messages to check for - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].initialize_map( N, num_senders ); - } - - // Do the test - NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) ); - g.wait_for_all(); - - // confirm that N items were requested from each sender - for (size_t s = 0; s < num_senders; ++s ) { - size_t n = senders[s].my_received; - ASSERT( n == N, NULL ); - ASSERT( senders[s].my_receiver == &exe_vec[node_idx], NULL ); - } - // validate the receivers - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].validate(); - } - delete [] senders; - } - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::remove_edge( tbb::flow::output_port<0>(exe_vec[node_idx]), receivers[r] ); - } - ASSERT( exe_vec[node_idx].try_put( InputType() ) == true, NULL ); - g.wait_for_all(); - for (size_t r = 0; r < num_receivers; ++r ) { - // since it's detached, nothing should have changed - receivers[r].validate(); - } - delete [] receivers; - } - } - } -} - -const size_t Offset = 123; -tbb::atomic<size_t> global_execute_count; - -struct inc_functor { - - tbb::atomic<size_t> local_execute_count; - inc_functor( ) { local_execute_count = 0; } - inc_functor( const inc_functor &f ) { local_execute_count = f.local_execute_count; } - - template<typename output_ports_type> - void operator()( int i, output_ports_type &p ) { - ++global_execute_count; - ++local_execute_count; - (void)tbb::flow::get<0>(p).try_put(i); - } - -}; - -template< typename InputType, typename OutputTuple > -void buffered_levels_with_copy( size_t concurrency ) { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type OutputType; - // Do for lc = 1 to concurrency level - for ( size_t lc = 1; lc <= concurrency; ++lc ) { - tbb::flow::graph g; - - inc_functor cf; - cf.local_execute_count = Offset; - global_execute_count = Offset; - - tbb::flow::multifunction_node< InputType, OutputTuple > exe_node( g, lc, cf ); - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - harness_mapped_receiver<OutputType> *receivers = new harness_mapped_receiver<OutputType>[num_receivers]; - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( tbb::flow::output_port<0>(exe_node), receivers[r] ); - } - - harness_counting_sender<InputType> *senders = NULL; - for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) { - senders = new harness_counting_sender<InputType>[num_senders]; - for (size_t s = 0; s < num_senders; ++s ) { - senders[s].my_limit = N; - tbb::flow::make_edge( senders[s], exe_node ); - } - - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].initialize_map( N, num_senders ); - } - - NativeParallelFor( (int)num_senders, 
parallel_put_until_limit<InputType>(senders) ); - g.wait_for_all(); - - for (size_t s = 0; s < num_senders; ++s ) { - size_t n = senders[s].my_received; - ASSERT( n == N, NULL ); - ASSERT( senders[s].my_receiver == &exe_node, NULL ); - } - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].validate(); - } - delete [] senders; - } - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::remove_edge( tbb::flow::output_port<0>(exe_node), receivers[r] ); - } - ASSERT( exe_node.try_put( InputType() ) == true, NULL ); - g.wait_for_all(); - for (size_t r = 0; r < num_receivers; ++r ) { - receivers[r].validate(); - } - delete [] receivers; - } - - // validate that the local body matches the global execute_count and both are correct - inc_functor body_copy = tbb::flow::copy_body<inc_functor>( exe_node ); - const size_t expected_count = N/2 * MAX_NODES * MAX_NODES * ( MAX_NODES + 1 ) + MAX_NODES + Offset; - size_t global_count = global_execute_count; - size_t inc_count = body_copy.local_execute_count; - ASSERT( global_count == expected_count && global_count == inc_count, NULL ); - } -} - -template< typename InputType, typename OutputTuple > -void run_buffered_levels( int c ) { - #if __TBB_LAMBDAS_PRESENT - typedef typename tbb::flow::multifunction_node<InputType,OutputTuple>::output_ports_type output_ports_type; - buffered_levels<InputType,OutputTuple>( c, []( InputType i, output_ports_type &p ) { harness_graph_multifunction_executor<InputType, OutputTuple>::func(i,p); } ); - #endif - buffered_levels<InputType,OutputTuple>( c, &harness_graph_multifunction_executor<InputType, OutputTuple>::func ); - buffered_levels<InputType,OutputTuple>( c, typename harness_graph_multifunction_executor<InputType, OutputTuple>::functor() ); - buffered_levels_with_copy<InputType,OutputTuple>( c ); -} - - -//! Performs test on executable nodes with limited concurrency -/** These tests check: - 1) that the nodes will accept puts up to the concurrency limit, - 2) the nodes do not exceed the concurrency limit even when run with more threads (this is checked in the harness_graph_executor), - 3) the nodes will receive puts from multiple predecessors simultaneously, - and 4) the nodes will send to multiple successors. - There is no checking of the contents of the messages for corruption. -*/ - -template< typename InputType, typename OutputTuple, typename Body > -void concurrency_levels( size_t concurrency, Body body ) { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type OutputType; - for ( size_t lc = 1; lc <= concurrency; ++lc ) { - tbb::flow::graph g; - - // Set the execute_counter back to zero in the harness - harness_graph_multifunction_executor<InputType, OutputTuple>::execute_count = 0; - // Set the number of current executors to zero. - harness_graph_multifunction_executor<InputType, OutputTuple>::current_executors = 0; - // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded.
- harness_graph_multifunction_executor<InputType, OutputTuple>::max_executors = lc; - - - tbb::flow::multifunction_node< InputType, OutputTuple, tbb::flow::rejecting > exe_node( g, lc, body ); - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - - harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers]; - - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( tbb::flow::output_port<0>(exe_node), receivers[r] ); - } - - harness_counting_sender<InputType> *senders = NULL; - - for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) { - { - // Exclusively lock m to prevent exe_node from finishing - tbb::spin_rw_mutex::scoped_lock l( harness_graph_multifunction_executor< InputType, OutputTuple>::template mutex_holder<tbb::spin_rw_mutex>::mutex ); - - // put up to the lc level; the node will accept and then block at m - for ( size_t c = 0 ; c < lc ; ++c ) { - ASSERT( exe_node.try_put( InputType() ) == true, NULL ); - } - // it accepts only up to the lc level - ASSERT( exe_node.try_put( InputType() ) == false, NULL ); - - senders = new harness_counting_sender<InputType>[num_senders]; - for (size_t s = 0; s < num_senders; ++s ) { - // register a sender - senders[s].my_limit = N; - exe_node.register_predecessor( senders[s] ); - } - - } // release lock at end of scope, setting the exe node free to continue - // wait for graph to settle down - g.wait_for_all(); - - // confirm that N items were requested from each sender - for (size_t s = 0; s < num_senders; ++s ) { - size_t n = senders[s].my_received; - ASSERT( n == N, NULL ); - ASSERT( senders[s].my_receiver == &exe_node, NULL ); - } - // confirm that each receiver got N * num_senders + the initial lc puts - for (size_t r = 0; r < num_receivers; ++r ) { - size_t n = receivers[r].my_count; - ASSERT( n == num_senders*N+lc, NULL ); - receivers[r].my_count = 0; - } - delete [] senders; - } - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::remove_edge( tbb::flow::output_port<0>(exe_node), receivers[r] ); - } - ASSERT( exe_node.try_put( InputType() ) == true, NULL ); - g.wait_for_all(); - for (size_t r = 0; r < num_receivers; ++r ) { - ASSERT( int(receivers[r].my_count) == 0, NULL ); - } - delete [] receivers; - } - } -} - -template< typename InputType, typename OutputTuple > -void run_concurrency_levels( int c ) { - #if __TBB_LAMBDAS_PRESENT - typedef typename tbb::flow::multifunction_node<InputType,OutputTuple>::output_ports_type output_ports_type; - concurrency_levels<InputType,OutputTuple>( c, []( InputType i, output_ports_type &p ) { harness_graph_multifunction_executor<InputType, OutputTuple>::template tfunc<tbb::spin_rw_mutex>(i,p); } ); - #endif - concurrency_levels<InputType,OutputTuple>( c, &harness_graph_multifunction_executor<InputType, OutputTuple>::template tfunc<tbb::spin_rw_mutex> ); - concurrency_levels<InputType,OutputTuple>( c, typename harness_graph_multifunction_executor<InputType, OutputTuple>::template tfunctor<tbb::spin_rw_mutex>() ); -} - - -struct empty_no_assign { - empty_no_assign() {} - empty_no_assign( int ) {} - operator int() { return 0; } - operator int() const { return 0; } -}; - -template< typename InputType > -struct parallel_puts : private NoAssign { - - tbb::flow::receiver< InputType > * const my_exe_node; - - parallel_puts( tbb::flow::receiver< InputType > &exe_node ) : my_exe_node(&exe_node) {} - - void operator()( int ) const { - for ( int i = 0; i < N; ++i ) { - // the nodes will accept all puts - ASSERT( 
my_exe_node->try_put( InputType() ) == true, NULL ); - } - } - -}; - -//! Performs test on executable nodes with unlimited concurrency -/** These tests check: - 1) that the nodes will accept all puts - 2) the nodes will receive puts from multiple predecessors simultaneously, - and 3) the nodes will send to multiple successors. - There is no checking of the contents of the messages for corruption. -*/ - -template< typename InputType, typename OutputTuple, typename Body > -void unlimited_concurrency( Body body ) { - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type OutputType; - - for (int p = 1; p < 2*MaxThread; ++p) { - tbb::flow::graph g; - tbb::flow::multifunction_node< InputType, OutputTuple, tbb::flow::rejecting > exe_node( g, tbb::flow::unlimited, body ); - - for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { - harness_counting_receiver<OutputType> *receivers = new harness_counting_receiver<OutputType>[num_receivers]; - harness_graph_multifunction_executor<InputType, OutputTuple>::execute_count = 0; - - for (size_t r = 0; r < num_receivers; ++r ) { - tbb::flow::make_edge( tbb::flow::output_port<0>(exe_node), receivers[r] ); - } - - NativeParallelFor( p, parallel_puts<InputType>(exe_node) ); - g.wait_for_all(); - - // 2) the nodes will receive puts from multiple predecessors simultaneously, - size_t ec = harness_graph_multifunction_executor<InputType, OutputTuple>::execute_count; - ASSERT( (int)ec == p*N, NULL ); - for (size_t r = 0; r < num_receivers; ++r ) { - size_t c = receivers[r].my_count; - // 3) the nodes will send to multiple successors. - ASSERT( (int)c == p*N, NULL ); - } - } - } -} - -template< typename InputType, typename OutputTuple > -void run_unlimited_concurrency() { - harness_graph_multifunction_executor<InputType, OutputTuple>::max_executors = 0; - #if __TBB_LAMBDAS_PRESENT - typedef typename tbb::flow::multifunction_node<InputType,OutputTuple>::output_ports_type output_ports_type; - unlimited_concurrency<InputType,OutputTuple>( []( InputType i, output_ports_type &p ) { harness_graph_multifunction_executor<InputType, OutputTuple>::func(i,p); } ); - #endif - unlimited_concurrency<InputType,OutputTuple>( &harness_graph_multifunction_executor<InputType, OutputTuple>::func ); - unlimited_concurrency<InputType,OutputTuple>( typename harness_graph_multifunction_executor<InputType, OutputTuple>::functor() ); -} - -template<typename InputType, typename OutputTuple> -struct oddEvenBody { - typedef typename tbb::flow::multifunction_node<InputType,OutputTuple>::output_ports_type output_ports_type; - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type EvenType; - typedef typename tbb::flow::tuple_element<1,OutputTuple>::type OddType; - void operator() (const InputType &i, output_ports_type &p) { - if((int)i % 2) { - (void)tbb::flow::get<1>(p).try_put(OddType(i)); - } - else { - (void)tbb::flow::get<0>(p).try_put(EvenType(i)); - } - } -}; - -template<typename InputType, typename OutputTuple > -void run_multiport_test(int num_threads) { - typedef typename tbb::flow::multifunction_node<InputType, OutputTuple> mo_node_type; - typedef typename tbb::flow::tuple_element<0,OutputTuple>::type EvenType; - typedef typename tbb::flow::tuple_element<1,OutputTuple>::type OddType; - tbb::task_scheduler_init init(num_threads); - tbb::flow::graph g; - mo_node_type mo_node(g, tbb::flow::unlimited, oddEvenBody<InputType, OutputTuple>() ); - - tbb::flow::queue_node<EvenType> q0(g); - tbb::flow::queue_node<OddType> q1(g); - - 
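- // Wire each output port of the multifunction node to its own queue: port 0 carries the even values into q0, port 1 carries the odd values into q1 (see oddEvenBody above).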
tbb::flow::make_edge(tbb::flow::output_port<0>(mo_node), q0); - tbb::flow::make_edge(tbb::flow::output_port<1>(mo_node), q1); - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(mo_node.predecessor_count() == 0, NULL); - ASSERT(tbb::flow::output_port<0>(mo_node).successor_count() == 1, NULL); - std::vector< tbb::flow::receiver<EvenType> *> my_0succs; - tbb::flow::output_port<0>(mo_node).copy_successors(my_0succs); - ASSERT(my_0succs.size() == 1, NULL); - typename mo_node_type::predecessor_vector_type my_preds; - mo_node.copy_predecessors(my_preds); - ASSERT(my_preds.size() == 0, NULL); -#endif - - for(InputType i = 0; i < N; ++i) { - mo_node.try_put(i); - } - - g.wait_for_all(); - for(int i = 0; i < N/2; ++i) { - EvenType e; - OddType o; - ASSERT(q0.try_get(e) && (int)e % 2 == 0, NULL); - ASSERT(q1.try_get(o) && (int)o % 2 == 1, NULL); - } -} - -//! Tests limited concurrency cases for nodes that accept data messages -void test_concurrency(int num_threads) { - tbb::task_scheduler_init init(num_threads); - run_concurrency_levels<int,tbb::flow::tuple<int> >(num_threads); - run_concurrency_levels<int,tbb::flow::tuple<tbb::flow::continue_msg> >(num_threads); - run_buffered_levels<int, tbb::flow::tuple<int> >(num_threads); - run_unlimited_concurrency<int, tbb::flow::tuple<int> >(); - run_unlimited_concurrency<int,tbb::flow::tuple<empty_no_assign> >(); - run_unlimited_concurrency<empty_no_assign,tbb::flow::tuple<int> >(); - run_unlimited_concurrency<empty_no_assign,tbb::flow::tuple<empty_no_assign> >(); - run_unlimited_concurrency<int,tbb::flow::tuple<tbb::flow::continue_msg> >(); - run_unlimited_concurrency<empty_no_assign,tbb::flow::tuple<tbb::flow::continue_msg> >(); - run_multiport_test<int, tbb::flow::tuple<int, int> >(num_threads); - run_multiport_test<float, tbb::flow::tuple<int, double> >(num_threads); -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -// the integer received indicates which output ports should succeed and which should fail -// on try_put(). 
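- // A sketch of how this encoding is exercised (illustrative only; 'g', 'counter' and 'q' are placeholders, the real wiring lives in test_extract() below). With only port 0 connected:
- //   mf_node mf(g, tbb::flow::unlimited, add_to_counter(counter));
- //   tbb::flow::make_edge(tbb::flow::output_port<0>(mf), q); // q: a tbb::flow::queue_node<int>
- //   mf.try_put(1); // bit 0 set -> port 0 must accept; bit 1 clear -> port 1 must reject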
-typedef tbb::flow::multifunction_node<int, tbb::flow::tuple<int, int> > mf_node; - -struct add_to_counter { - int my_invocations; - int *counter; - add_to_counter(int& var):counter(&var){ my_invocations = 0;} - void operator()(const int &i, mf_node::output_ports_type &outports) { - *counter+=1; - ++my_invocations; - if(i & 0x1) { - ASSERT(tbb::flow::get<0>(outports).try_put(i), "port 0 expected to succeed"); - } - else { - ASSERT(!tbb::flow::get<0>(outports).try_put(i), "port 0 expected to fail"); - } - if(i & 0x2) { - ASSERT(tbb::flow::get<1>(outports).try_put(i), "port 1 expected to succeed"); - } - else { - ASSERT(!tbb::flow::get<1>(outports).try_put(i), "port 1 expected to fail"); - } - } - int my_inner() { return my_invocations; } -}; - -template<tbb::flow::graph_buffer_policy FTYPE> -void test_extract() { - int my_count = 0; - int cm; - tbb::flow::graph g; - tbb::flow::broadcast_node<int> b0(g); - tbb::flow::broadcast_node<int> b1(g); - tbb::flow::multifunction_node<int, tbb::flow::tuple<int,int>, FTYPE> mf0(g, tbb::flow::unlimited, add_to_counter(my_count)); - tbb::flow::queue_node<int> q0(g); - tbb::flow::queue_node<int> q1(g); - - tbb::flow::make_edge(b0, mf0); - tbb::flow::make_edge(b1, mf0); - tbb::flow::make_edge(tbb::flow::output_port<0>(mf0), q0); - tbb::flow::make_edge(tbb::flow::output_port<1>(mf0), q1); - for( int i = 0; i < 2; ++i ) { - - /* b0 */ - /* \ |--q0 */ - /* mf0+ */ - /* / |--q1 */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 1, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(mf0.predecessor_count() == 2 - && tbb::flow::output_port<0>(mf0).successor_count() == 1 - && tbb::flow::output_port<1>(mf0).successor_count() == 1 - , "mf0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 1 && q0.successor_count() == 0, "q0 has incorrect counts"); - ASSERT(q1.predecessor_count() == 1 && q1.successor_count() == 0, "q1 has incorrect counts"); - b0.try_put(3); - g.wait_for_all(); - ASSERT(my_count == 1, "multifunction_node didn't fire"); - ASSERT(q0.try_get(cm), "multifunction_node didn't forward to 0"); - ASSERT(q1.try_get(cm), "multifunction_node didn't forward to 1"); - b1.try_put(3); - g.wait_for_all(); - ASSERT(my_count == 2, "multifunction_node didn't fire"); - ASSERT(q0.try_get(cm), "multifunction_node didn't forward to 0"); - ASSERT(q1.try_get(cm), "multifunction_node didn't forward to 1"); - - b0.extract(); - - - /* b0 */ - /* |--q0 */ - /* mf0+ */ - /* / |--q1 */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(mf0.predecessor_count() == 1 - && tbb::flow::output_port<0>(mf0).successor_count() == 1 - && tbb::flow::output_port<1>(mf0).successor_count() == 1 - , "mf0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 1 && q0.successor_count() == 0, "q0 has incorrect counts"); - ASSERT(q1.predecessor_count() == 1 && q1.successor_count() == 0, "q1 has incorrect counts"); - b0.try_put(1); - b0.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 2, "b0 messages being forwarded to multifunction_node even though it is disconnected"); - b1.try_put(3); - g.wait_for_all(); - ASSERT(my_count == 3, "multifunction_node didn't fire though it has only one predecessor"); - ASSERT(q0.try_get(cm), "multifunction_node didn't forward second time"); - ASSERT(q1.try_get(cm), "multifunction_node 
didn't forward second time"); - - q0.extract(); - - /* b0 */ - /* | q0 */ - /* mf0+ */ - /* / |--q1 */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 1, "b1 has incorrect counts"); - ASSERT(mf0.predecessor_count() == 1 - && tbb::flow::output_port<0>(mf0).successor_count() == 0 - && tbb::flow::output_port<1>(mf0).successor_count() == 1 - , "mf0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - ASSERT(q1.predecessor_count() == 1 && q1.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(1); - b0.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 3, "b0 messages being forwarded to multifunction_node even though it is disconnected"); - b1.try_put(2); - g.wait_for_all(); - ASSERT(my_count == 4, "multifunction_node didn't fire though it has one predecessor"); - ASSERT(!q0.try_get(cm), "multifunction_node forwarded"); - ASSERT(q1.try_get(cm), "multifunction_node forwarded"); - - if(i == 0) { - mf0.extract(); - } - else { - mf0.extract(tbb::flow::rf_reset_bodies); - } - - - /* b0 */ - /* | q0 */ - /* mf0+ */ - /* | q1 */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 0, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 0, "b1 has incorrect counts"); - ASSERT(mf0.predecessor_count() == 0 - && tbb::flow::output_port<0>(mf0).successor_count() == 0 - && tbb::flow::output_port<1>(mf0).successor_count() == 0 - , "mf0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - ASSERT(q1.predecessor_count() == 0 && q1.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(1); - b0.try_put(1); - g.wait_for_all(); - ASSERT(my_count == 4, "b0 messages being forwarded to multifunction_node even though it is disconnected"); - b1.try_put(2); - g.wait_for_all(); - ASSERT(my_count == 4, "b1 messages being forwarded to multifunction_node even though it is disconnected"); - ASSERT(!q0.try_get(cm), "multifunction_node forwarded"); - ASSERT(!q1.try_get(cm), "multifunction_node forwarded"); - make_edge(b0, mf0); - - /* b0 */ - /* \ | q0 */ - /* mf0+ */ - /* | q1 */ - /* b1 */ - - ASSERT(b0.predecessor_count() == 0 && b0.successor_count() == 1, "b0 has incorrect counts"); - ASSERT(b1.predecessor_count() == 0 && b1.successor_count() == 0, "b1 has incorrect counts"); - ASSERT(mf0.predecessor_count() == 1 - && tbb::flow::output_port<0>(mf0).successor_count() == 0 - && tbb::flow::output_port<1>(mf0).successor_count() == 0 - , "mf0 has incorrect counts"); - ASSERT(q0.predecessor_count() == 0 && q0.successor_count() == 0, "q0 has incorrect counts"); - ASSERT(q1.predecessor_count() == 0 && q1.successor_count() == 0, "q0 has incorrect counts"); - b0.try_put(0); - g.wait_for_all(); - ASSERT(my_count == 5, "multifunction_node didn't fire though it has one predecessor"); - b1.try_put(2); - g.wait_for_all(); - ASSERT(my_count == 5, "multifunction_node fired though it has only one predecessor"); - ASSERT(!q0.try_get(cm), "multifunction_node forwarded"); - ASSERT(!q1.try_get(cm), "multifunction_node forwarded"); - - tbb::flow::make_edge(b1, mf0); - tbb::flow::make_edge(tbb::flow::output_port<0>(mf0), q0); - tbb::flow::make_edge(tbb::flow::output_port<1>(mf0), q1); - ASSERT( ( i == 0 && tbb::flow::copy_body<add_to_counter>(mf0).my_inner() == 5 ) || - ( i == 1 && 
tbb::flow::copy_body<add_to_counter>(mf0).my_inner() == 1 ) , "reset_bodies failed"); - my_count = 0; - } -} -#endif - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - test_concurrency(p); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_extract<tbb::flow::rejecting>(); - test_extract<tbb::flow::queueing>(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_mutex.cpp b/src/tbb/src/test/test_mutex.cpp deleted file mode 100644 index ac6f822c5..000000000 --- a/src/tbb/src/test/test_mutex.cpp +++ /dev/null @@ -1,684 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -//------------------------------------------------------------------------ -// Test TBB mutexes when used with parallel_for.h -// -// Usage: test_Mutex.exe [-v] nthread -// -// The -v option causes timing information to be printed. -// -// Compile with _OPENMP and -openmp -//------------------------------------------------------------------------ -#include "harness_defs.h" -#include "tbb/spin_mutex.h" -#include "tbb/critical_section.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/queuing_rw_mutex.h" -#include "tbb/queuing_mutex.h" -#include "tbb/mutex.h" -#include "tbb/recursive_mutex.h" -#include "tbb/null_mutex.h" -#include "tbb/null_rw_mutex.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "tbb/tick_count.h" -#include "tbb/atomic.h" -#include "harness.h" -#include <cstdlib> -#include <cstdio> -#if _OPENMP -#include "test/OpenMP_Mutex.h" -#endif /* _OPENMP */ -#include "tbb/tbb_profiling.h" - -#ifndef TBB_TEST_LOW_WORKLOAD - #define TBB_TEST_LOW_WORKLOAD TBB_USE_THREADING_TOOLS -#endif - -// This test deliberately avoids a "using tbb" statement, -// so that the error of putting types in the wrong namespace will be caught. - -template<typename M> -struct Counter { - typedef M mutex_type; - M mutex; - volatile long value; -}; - -//! Function object for use with parallel_for.h. -template<typename C> -struct AddOne: NoAssign { - C& counter; - /** Increments counter once for each iteration in the iteration space. 
*/ - void operator()( tbb::blocked_range<size_t>& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - if( i&1 ) { - // Try implicit acquire and explicit release - typename C::mutex_type::scoped_lock lock(counter.mutex); - counter.value = counter.value+1; - lock.release(); - } else { - // Try explicit acquire and implicit release - typename C::mutex_type::scoped_lock lock; - lock.acquire(counter.mutex); - counter.value = counter.value+1; - } - } - } - AddOne( C& counter_ ) : counter(counter_) {} -}; - -//! Adaptor for using ISO C++0x style mutex as a TBB-style mutex. -template<typename M> -class TBB_MutexFromISO_Mutex { - M my_iso_mutex; -public: - typedef TBB_MutexFromISO_Mutex mutex_type; - - class scoped_lock; - friend class scoped_lock; - - class scoped_lock { - mutex_type* my_mutex; - public: - scoped_lock() : my_mutex(NULL) {} - scoped_lock( mutex_type& m ) : my_mutex(NULL) { - acquire(m); - } - scoped_lock( mutex_type& m, bool is_writer ) : my_mutex(NULL) { - acquire(m,is_writer); - } - void acquire( mutex_type& m ) { - m.my_iso_mutex.lock(); - my_mutex = &m; - } - bool try_acquire( mutex_type& m ) { - if( m.my_iso_mutex.try_lock() ) { - my_mutex = &m; - return true; - } else { - return false; - } - } - void release() { - my_mutex->my_iso_mutex.unlock(); - my_mutex = NULL; - } - - // Methods for reader-writer mutex - // These methods can be instantiated only if M supports lock_read() and try_lock_read(). - - void acquire( mutex_type& m, bool is_writer ) { - if( is_writer ) m.my_iso_mutex.lock(); - else m.my_iso_mutex.lock_read(); - my_mutex = &m; - } - bool try_acquire( mutex_type& m, bool is_writer ) { - if( is_writer ? m.my_iso_mutex.try_lock() : m.my_iso_mutex.try_lock_read() ) { - my_mutex = &m; - return true; - } else { - return false; - } - } - bool upgrade_to_writer() { - my_mutex->my_iso_mutex.unlock(); - my_mutex->my_iso_mutex.lock(); - return false; - } - bool downgrade_to_reader() { - my_mutex->my_iso_mutex.unlock(); - my_mutex->my_iso_mutex.lock_read(); - return false; - } - ~scoped_lock() { - if( my_mutex ) - release(); - } - }; - - static const bool is_recursive_mutex = M::is_recursive_mutex; - static const bool is_rw_mutex = M::is_rw_mutex; -}; - -namespace tbb { - namespace profiling { - template<typename M> - void set_name( const TBB_MutexFromISO_Mutex<M>&, const char* ) {} - } -} - -//! Generic test of a TBB mutex type M. -/** Does not test features specific to reader-writer locks. 
*/ -template<typename M> -void Test( const char * name ) { - REMARK("%s size == %d, time = ",name, sizeof(M)); - Counter<M> counter; - counter.value = 0; - tbb::profiling::set_name(counter.mutex, name); -#if TBB_TEST_LOW_WORKLOAD - const int n = 10000; -#else - const int n = 100000; -#endif /* TBB_TEST_LOW_WORKLOAD */ - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/10),AddOne<Counter<M> >(counter)); - tbb::tick_count t1 = tbb::tick_count::now(); - REMARK("%g usec\n",(t1-t0).seconds()); - if( counter.value!=n ) - REPORT("ERROR for %s: counter.value=%ld\n",name,counter.value); -} - -template<typename M, size_t N> -struct Invariant { - typedef M mutex_type; - M mutex; - const char* mutex_name; - volatile long value[N]; - Invariant( const char* mutex_name_ ) : - mutex_name(mutex_name_) - { - for( size_t k=0; k<N; ++k ) - value[k] = 0; - tbb::profiling::set_name(mutex, mutex_name_); - } - ~Invariant() { - } - void update() { - for( size_t k=0; k<N; ++k ) - ++value[k]; - } - bool value_is( long expected_value ) const { - long tmp; - for( size_t k=0; k<N; ++k ) - if( (tmp=value[k])!=expected_value ) { - REPORT("ERROR: %ld!=%ld\n", tmp, expected_value); - return false; - } - return true; - } - bool is_okay() { - return value_is( value[0] ); - } -}; - -//! Function object for use with parallel_for.h. -template<typename I> -struct TwiddleInvariant: NoAssign { - I& invariant; - TwiddleInvariant( I& invariant_ ) : invariant(invariant_) {} - - /** Increments counter once for each iteration in the iteration space. */ - void operator()( tbb::blocked_range<size_t>& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - //! Every 8th access is a write access - const bool write = (i%8)==7; - bool okay = true; - bool lock_kept = true; - if( (i/8)&1 ) { - // Try implicit acquire and explicit release - typename I::mutex_type::scoped_lock lock(invariant.mutex,write); - execute_aux(lock, i, write, /*ref*/okay, /*ref*/lock_kept); - lock.release(); - } else { - // Try explicit acquire and implicit release - typename I::mutex_type::scoped_lock lock; - lock.acquire(invariant.mutex,write); - execute_aux(lock, i, write, /*ref*/okay, /*ref*/lock_kept); - } - if( !okay ) { - REPORT( "ERROR for %s at %ld: %s %s %s %s\n",invariant.mutex_name, long(i), - write ? "write," : "read,", - write ? (i%16==7?"downgrade,":"") : (i%8==3?"upgrade,":""), - lock_kept ? "lock kept," : "lock not kept,", // TODO: only if downgrade/upgrade - (i/8)&1 ? "impl/expl" : "expl/impl" ); - } - } - } -private: - void execute_aux(typename I::mutex_type::scoped_lock & lock, const size_t i, const bool write, bool & okay, bool & lock_kept) const { - if( write ) { - long my_value = invariant.value[0]; - invariant.update(); - if( i%16==7 ) { - lock_kept = lock.downgrade_to_reader(); - if( !lock_kept ) - my_value = invariant.value[0] - 1; - okay = invariant.value_is(my_value+1); - } - } else { - okay = invariant.is_okay(); - if( i%8==3 ) { - long my_value = invariant.value[0]; - lock_kept = lock.upgrade_to_writer(); - if( !lock_kept ) - my_value = invariant.value[0]; - invariant.update(); - okay = invariant.value_is(my_value+1); - } - } - } -}; - -/** This test is generic so that we can test any other kinds of ReaderWriter locks we write later. 
*/ -template<typename M> -void TestReaderWriterLock( const char * mutex_name ) { - REMARK( "%s readers & writers time = ", mutex_name ); - Invariant<M,8> invariant(mutex_name); -#if TBB_TEST_LOW_WORKLOAD - const size_t n = 10000; -#else - const size_t n = 500000; -#endif /* TBB_TEST_LOW_WORKLOAD */ - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/100),TwiddleInvariant<Invariant<M,8> >(invariant)); - tbb::tick_count t1 = tbb::tick_count::now(); - // There is either a writer or a reader upgraded to a writer for each 4th iteration - long expected_value = n/4; - if( !invariant.value_is(expected_value) ) - REPORT("ERROR for %s: final invariant value is wrong\n",mutex_name); - REMARK( "%g usec\n", (t1-t0).seconds() ); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress "conditional expression is constant" warning. - #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - -/** Test try_acquire_reader functionality of a non-reenterable reader-writer mutex */ -template<typename M> -void TestTryAcquireReader_OneThread( const char * mutex_name ) { - M tested_mutex; - typename M::scoped_lock lock1; - if( M::is_rw_mutex ) { - if( lock1.try_acquire(tested_mutex, false) ) - lock1.release(); - else - REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name); - { - typename M::scoped_lock lock2(tested_mutex, false); // read lock - if( lock1.try_acquire(tested_mutex) ) // attempt to acquire read - REPORT("ERROR for %s: try_acquire succeeded though it should not (1)\n", mutex_name); - lock2.release(); // unlock - lock2.acquire(tested_mutex, true); // write lock - if( lock1.try_acquire(tested_mutex, false) ) // attempt to acquire read - REPORT("ERROR for %s: try_acquire succeeded though it should not (2)\n", mutex_name); - } - if( lock1.try_acquire(tested_mutex, false) ) - lock1.release(); - else - REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name); - } -} - -/** Test try_acquire functionality of a non-reenterable mutex */ -template<typename M> -void TestTryAcquire_OneThread( const char * mutex_name ) { - M tested_mutex; - typename M::scoped_lock lock1; - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name); - { - if( M::is_recursive_mutex ) { - typename M::scoped_lock lock2(tested_mutex); - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - REPORT("ERROR for %s: try_acquire on recursive lock failed though it should not\n", mutex_name); - //windows.. -- both are recursive - } else { - typename M::scoped_lock lock2(tested_mutex); - if( lock1.try_acquire(tested_mutex) ) - REPORT("ERROR for %s: try_acquire succeeded though it should not (3)\n", mutex_name); - } - } - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif - -const int RecurN = 4; -int RecurArray[ RecurN ]; -tbb::recursive_mutex RecurMutex[ RecurN ]; - -struct RecursiveAcquisition { - /** x = number being decoded in base N - max_lock = index of highest lock acquired so far - mask = bit mask; ith bit set if lock i has been acquired. 
*/ - void Body( size_t x, int max_lock=-1, unsigned int mask=0 ) const - { - int i = (int) (x % RecurN); - bool first = (mask&1U<<i)==0; - if( first ) { - // first time to acquire lock - if( i<max_lock ) - // out of order acquisition might lead to deadlock, so stop - return; - max_lock = i; - } - - if( (i&1)!=0 ) { - // acquire lock on location RecurArray[i] using explicit acquire - tbb::recursive_mutex::scoped_lock r_lock; - r_lock.acquire( RecurMutex[i] ); - int a = RecurArray[i]; - ASSERT( (a==0)==first, "should be either a==0 if it is the first time to acquire the lock or a!=0 otherwise" ); - ++RecurArray[i]; - if( x ) - Body( x/RecurN, max_lock, mask|1U<<i ); - --RecurArray[i]; - ASSERT( a==RecurArray[i], "a is not equal to RecurArray[i]" ); - - // release lock on location RecurArray[i] using explicit release; otherwise, use implicit one - if( (i&2)!=0 ) r_lock.release(); - } else { - // acquire lock on location RecurArray[i] using implicit acquire - tbb::recursive_mutex::scoped_lock r_lock( RecurMutex[i] ); - int a = RecurArray[i]; - - ASSERT( (a==0)==first, "should be either a==0 if it is the first time to acquire the lock or a!=0 otherwise" ); - - ++RecurArray[i]; - if( x ) - Body( x/RecurN, max_lock, mask|1U<<i ); - --RecurArray[i]; - - ASSERT( a==RecurArray[i], "a is not equal to RecurArray[i]" ); - - // release lock on location RecurArray[i] using explicit release; otherwise, use implicit one - if( (i&2)!=0 ) r_lock.release(); - } - } - - void operator()( const tbb::blocked_range<size_t> &r ) const - { - for( size_t x=r.begin(); x<r.end(); x++ ) { - Body( x ); - } - } -}; - -/** This test is generic so that we may test other kinds of recursive mutexes.*/ -template<typename M> -void TestRecursiveMutex( const char * mutex_name ) -{ - for ( int i = 0; i < RecurN; ++i ) { - tbb::profiling::set_name(RecurMutex[i], mutex_name); - } - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range<size_t>(0,10000,500), RecursiveAcquisition()); - tbb::tick_count t1 = tbb::tick_count::now(); - REMARK( "%s recursive mutex time = %g usec\n", mutex_name, (t1-t0).seconds() ); -} - -template<typename C> -struct NullRecursive: NoAssign { - void recurse_till( size_t i, size_t till ) const { - if( i==till ) { - counter.value = counter.value+1; - return; - } - if( i&1 ) { - typename C::mutex_type::scoped_lock lock2(counter.mutex); - recurse_till( i+1, till ); - lock2.release(); - } else { - typename C::mutex_type::scoped_lock lock2; - lock2.acquire(counter.mutex); - recurse_till( i+1, till ); - } - } - - void operator()( tbb::blocked_range<size_t>& range ) const { - typename C::mutex_type::scoped_lock lock(counter.mutex); - recurse_till( range.begin(), range.end() ); - } - NullRecursive( C& counter_ ) : counter(counter_) { - ASSERT( C::mutex_type::is_recursive_mutex, "Null mutex should be a recursive mutex." 
); - } - C& counter; -}; - -template<typename M> -struct NullUpgradeDowngrade: NoAssign { - void operator()( tbb::blocked_range<size_t>& range ) const { - typename M::scoped_lock lock2; - for( size_t i=range.begin(); i!=range.end(); ++i ) { - if( i&1 ) { - typename M::scoped_lock lock1(my_mutex, true) ; - if( lock1.downgrade_to_reader()==false ) - REPORT("ERROR for %s: downgrade should always succeed\n", name); - } else { - lock2.acquire( my_mutex, false ); - if( lock2.upgrade_to_writer()==false ) - REPORT("ERROR for %s: upgrade should always succeed\n", name); - lock2.release(); - } - } - } - - NullUpgradeDowngrade( M& m_, const char* n_ ) : my_mutex(m_), name(n_) {} - M& my_mutex; - const char* name; -} ; - -template<typename M> -void TestNullMutex( const char * name ) { - Counter<M> counter; - counter.value = 0; - const int n = 100; - REMARK("TestNullMutex<%s>",name); - { - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),AddOne<Counter<M> >(counter)); - } - counter.value = 0; - { - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullRecursive<Counter<M> >(counter)); - } - REMARK("\n"); -} - -template<typename M> -void TestNullRWMutex( const char * name ) { - REMARK("TestNullRWMutex<%s>",name); - const int n = 100; - M m; - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullUpgradeDowngrade<M>(m, name)); - REMARK("\n"); -} - -//! Test ISO C++0x compatibility portion of TBB mutex -template<typename M> -void TestISO( const char * name ) { - typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso; - Test<tbb_from_iso>( name ); -} - -//! Test ISO C++0x try_lock functionality of a non-reenterable mutex */ -template<typename M> -void TestTryAcquire_OneThreadISO( const char * name ) { - typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso; - TestTryAcquire_OneThread<tbb_from_iso>( name ); -} - -//! Test ISO-like C++0x compatibility portion of TBB reader-writer mutex -template<typename M> -void TestReaderWriterLockISO( const char * name ) { - typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso; - TestReaderWriterLock<tbb_from_iso>( name ); - TestTryAcquireReader_OneThread<tbb_from_iso>( name ); -} - -//! Test ISO C++0x compatibility portion of TBB recursive mutex -template<typename M> -void TestRecursiveMutexISO( const char * name ) { - typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso; - TestRecursiveMutex<tbb_from_iso>(name); -} - -#include "harness_tsx.h" -#include "tbb/task_scheduler_init.h" - -#if __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER - -//! Function object for use with parallel_for.h to see if a transaction is actually attempted. -tbb::atomic<size_t> n_transactions_attempted; -template<typename C> -struct AddOne_CheckTransaction: NoAssign { - C& counter; - /** Increments counter once for each iteration in the iteration space. */ - void operator()( tbb::blocked_range<size_t>& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - bool transaction_attempted = false; - { - typename C::mutex_type::scoped_lock lock(counter.mutex); - if( IsInsideTx() ) transaction_attempted = true; - counter.value = counter.value+1; - } - if( transaction_attempted ) ++n_transactions_attempted; - __TBB_Pause(i); - } - } - AddOne_CheckTransaction( C& counter_ ) : counter(counter_) {} -}; - -/* TestTransaction() checks if a speculative mutex actually uses transactions. 
*/ -template<typename M> -void TestTransaction( const char * name ) -{ - Counter<M> counter; -#if TBB_TEST_LOW_WORKLOAD - const int n = 100; -#else - const int n = 1000; -#endif - REMARK("TestTransaction with %s: ",name); - - n_transactions_attempted = 0; - tbb::tick_count start, stop; - for( int i=0; i<5 && n_transactions_attempted==0; ++i ) { - counter.value = 0; - start = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range<size_t>(0,n,2),AddOne_CheckTransaction<Counter<M> >(counter)); - stop = tbb::tick_count::now(); - if( counter.value!=n ) { - REPORT("ERROR for %s: counter.value=%ld\n",name,counter.value); - break; - } - } - - if( n_transactions_attempted==0 ) - REPORT( "ERROR: transactions were never attempted\n" ); - else - REMARK("%d successful transactions in %6.6f seconds\n", (int)n_transactions_attempted, (stop - start).seconds()); -} -#endif /* __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER */ - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - REMARK( "testing with %d workers\n", static_cast<int>(p) ); -#if TBB_TEST_LOW_WORKLOAD - // The amount of work is decreased in this mode to bring the length - // of the runs under tools into the tolerable limits. - const int n = 1; -#else - const int n = 3; -#endif - // Run each test several times. - for( int i=0; i<n; ++i ) { - TestNullMutex<tbb::null_mutex>( "Null Mutex" ); - TestNullMutex<tbb::null_rw_mutex>( "Null RW Mutex" ); - TestNullRWMutex<tbb::null_rw_mutex>( "Null RW Mutex" ); - Test<tbb::spin_mutex>( "Spin Mutex" ); - Test<tbb::speculative_spin_mutex>( "Spin Mutex/speculative" ); -#if _OPENMP - Test<OpenMP_Mutex>( "OpenMP_Mutex" ); -#endif /* _OPENMP */ - Test<tbb::queuing_mutex>( "Queuing Mutex" ); - Test<tbb::mutex>( "Wrapper Mutex" ); - Test<tbb::recursive_mutex>( "Recursive Mutex" ); - Test<tbb::queuing_rw_mutex>( "Queuing RW Mutex" ); - Test<tbb::spin_rw_mutex>( "Spin RW Mutex" ); - Test<tbb::speculative_spin_rw_mutex>( "Spin RW Mutex/speculative" ); - - TestTryAcquire_OneThread<tbb::spin_mutex>("Spin Mutex"); - TestTryAcquire_OneThread<tbb::speculative_spin_mutex>("Spin Mutex/speculative"); - TestTryAcquire_OneThread<tbb::queuing_mutex>("Queuing Mutex"); -#if USE_PTHREAD - // under ifdef because on Windows tbb::mutex is reenterable and the test will fail - TestTryAcquire_OneThread<tbb::mutex>("Wrapper Mutex"); -#endif /* USE_PTHREAD */ - TestTryAcquire_OneThread<tbb::recursive_mutex>( "Recursive Mutex" ); - TestTryAcquire_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); // only tests try_acquire for writers - TestTryAcquire_OneThread<tbb::speculative_spin_rw_mutex>("Spin RW Mutex/speculative"); // only tests try_acquire for writers - TestTryAcquire_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex"); // only tests try_acquire for writers - - TestTryAcquireReader_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); - TestTryAcquireReader_OneThread<tbb::speculative_spin_rw_mutex>("Spin RW Mutex/speculative"); - TestTryAcquireReader_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex"); - - TestReaderWriterLock<tbb::queuing_rw_mutex>( "Queuing RW Mutex" ); - TestReaderWriterLock<tbb::spin_rw_mutex>( "Spin RW Mutex" ); - TestReaderWriterLock<tbb::speculative_spin_rw_mutex>( "Spin RW Mutex/speculative" ); - - TestRecursiveMutex<tbb::recursive_mutex>( "Recursive Mutex" ); - - // Test ISO C++11 interface - TestISO<tbb::spin_mutex>( "ISO Spin Mutex" ); - TestISO<tbb::mutex>( "ISO Mutex" ); - TestISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" ); - 
TestISO<tbb::recursive_mutex>( "ISO Recursive Mutex" ); - TestISO<tbb::critical_section>( "ISO Critical Section" ); - TestTryAcquire_OneThreadISO<tbb::spin_mutex>( "ISO Spin Mutex" ); -#if USE_PTHREAD - // under ifdef because on Windows tbb::mutex is reenterable and the test will fail - TestTryAcquire_OneThreadISO<tbb::mutex>( "ISO Mutex" ); -#endif /* USE_PTHREAD */ - TestTryAcquire_OneThreadISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" ); - TestTryAcquire_OneThreadISO<tbb::recursive_mutex>( "ISO Recursive Mutex" ); - TestTryAcquire_OneThreadISO<tbb::critical_section>( "ISO Critical Section" ); - TestReaderWriterLockISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" ); - TestRecursiveMutexISO<tbb::recursive_mutex>( "ISO Recursive Mutex" ); - } - } - -#if __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER - // additional test for speculative mutexes to see if we actually attempt lock elisions - if( have_TSX() ) { - tbb::task_scheduler_init init( MaxThread ); - TestTransaction<tbb::speculative_spin_mutex>( "Spin Mutex/speculative" ); - TestTransaction<tbb::speculative_spin_rw_mutex>( "Spin RW Mutex/speculative" ); - } - else { - REMARK("Hardware transactions not supported\n"); - } -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_mutex_native_threads.cpp b/src/tbb/src/test/test_mutex_native_threads.cpp deleted file mode 100644 index 925878fc6..000000000 --- a/src/tbb/src/test/test_mutex_native_threads.cpp +++ /dev/null @@ -1,218 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/spin_mutex.h" -#include "tbb/queuing_mutex.h" -#include "tbb/queuing_rw_mutex.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/tick_count.h" -#include "tbb/atomic.h" - -#include "harness.h" - -// This test deliberately avoids a "using tbb" statement, -// so that the error of putting types in the wrong namespace will be caught. - -template<typename M> -struct Counter { - typedef M mutex_type; - M mutex; - volatile long value; - void flog_once( size_t mode ); -}; - -template<typename M> -void Counter<M>::flog_once(size_t mode) -/** Increments counter once for each iteration in the iteration space. 
*/ -{ - if( mode&1 ) { - // Try implicit acquire and explicit release - typename mutex_type::scoped_lock lock(mutex); - value = value+1; - lock.release(); - } else { - // Try explicit acquire and implicit release - typename mutex_type::scoped_lock lock; - lock.acquire(mutex); - value = value+1; - } -} - -template<typename M, long N> -struct Invariant { - typedef M mutex_type; - M mutex; - const char* mutex_name; - volatile long value[N]; - Invariant( const char* mutex_name_ ) : - mutex_name(mutex_name_) - { - for( long k=0; k<N; ++k ) - value[k] = 0; - } - void update() { - for( long k=0; k<N; ++k ) - ++value[k]; - } - bool value_is( long expected_value ) const { - long tmp; - for( long k=0; k<N; ++k ) - if( (tmp=value[k])!=expected_value ) { - REPORT("ERROR: %ld!=%ld\n", tmp, expected_value); - return false; - } - return true; - } - bool is_okay() { - return value_is( value[0] ); - } - void flog_once( size_t mode ); -}; - -template<typename M, long N> -void Invariant<M,N>::flog_once( size_t mode ) -{ - //! Every 8th access is a write access - bool write = (mode%8)==7; - bool okay = true; - bool lock_kept = true; - if( (mode/8)&1 ) { - // Try implicit acquire and explicit release - typename mutex_type::scoped_lock lock(mutex,write); - if( write ) { - long my_value = value[0]; - update(); - if( mode%16==7 ) { - lock_kept = lock.downgrade_to_reader(); - if( !lock_kept ) - my_value = value[0] - 1; - okay = value_is(my_value+1); - } - } else { - okay = is_okay(); - if( mode%8==3 ) { - long my_value = value[0]; - lock_kept = lock.upgrade_to_writer(); - if( !lock_kept ) - my_value = value[0]; - update(); - okay = value_is(my_value+1); - } - } - lock.release(); - } else { - // Try explicit acquire and implicit release - typename mutex_type::scoped_lock lock; - lock.acquire(mutex,write); - if( write ) { - long my_value = value[0]; - update(); - if( mode%16==7 ) { - lock_kept = lock.downgrade_to_reader(); - if( !lock_kept ) - my_value = value[0] - 1; - okay = value_is(my_value+1); - } - } else { - okay = is_okay(); - if( mode%8==3 ) { - long my_value = value[0]; - lock_kept = lock.upgrade_to_writer(); - if( !lock_kept ) - my_value = value[0]; - update(); - okay = value_is(my_value+1); - } - } - } - if( !okay ) { - REPORT( "ERROR for %s at %ld: %s %s %s %s\n",mutex_name, long(mode), - write?"write,":"read,", write?(mode%16==7?"downgrade,":""):(mode%8==3?"upgrade,":""), - lock_kept?"lock kept,":"lock not kept,", (mode/8)&1?"imp/exp":"exp/imp" ); - } -} - -static tbb::atomic<size_t> Order; - -template<typename State, long TestSize> -struct Work: NoAssign { - static const size_t chunk = 100; - State& state; - Work( State& state_ ) : state(state_) {} - void operator()( int ) const { - size_t step; - while( (step=Order.fetch_and_add<tbb::acquire>(chunk))<TestSize ) - for( size_t i=0; i<chunk && step<TestSize; ++i, ++step ) - state.flog_once(step); - } -}; - -//! Generic test of a TBB Mutex type M. -/** Does not test features specific to reader-writer locks. 
*/ -template<typename M> -void Test( const char * name, int nthread ) { - REMARK("testing %s\n",name); - Counter<M> counter; - counter.value = 0; - Order = 0; - // use the macro because of a gcc 4.6 bug -#define TEST_SIZE 100000 - tbb::tick_count t0 = tbb::tick_count::now(); - NativeParallelFor( nthread, Work<Counter<M>, TEST_SIZE>(counter) ); - tbb::tick_count t1 = tbb::tick_count::now(); - - REMARK("%s time = %g usec\n",name, (t1-t0).seconds() ); - if( counter.value!=TEST_SIZE ) - REPORT("ERROR for %s: counter.value=%ld != %ld=test_size\n",name,counter.value,TEST_SIZE); -#undef TEST_SIZE -} - - -//! Generic test of TBB ReaderWriterMutex type M -template<typename M> -void TestReaderWriter( const char * mutex_name, int nthread ) { - REMARK("testing %s\n",mutex_name); - Invariant<M,8> invariant(mutex_name); - Order = 0; - // use the macro because of a gcc 4.6 bug -#define TEST_SIZE 1000000 - tbb::tick_count t0 = tbb::tick_count::now(); - NativeParallelFor( nthread, Work<Invariant<M,8>, TEST_SIZE>(invariant) ); - tbb::tick_count t1 = tbb::tick_count::now(); - // There is either a writer or a reader upgraded to a writer for each 4th iteration - long expected_value = TEST_SIZE/4; - if( !invariant.value_is(expected_value) ) - REPORT("ERROR for %s: final invariant value is wrong\n",mutex_name); - REMARK("%s readers & writers time = %g usec\n",mutex_name,(t1-t0).seconds()); -#undef TEST_SIZE -} - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK( "testing with %d threads\n", p ); - Test<tbb::spin_mutex>( "spin_mutex", p ); - Test<tbb::queuing_mutex>( "queuing_mutex", p ); - Test<tbb::queuing_rw_mutex>( "queuing_rw_mutex", p ); - Test<tbb::spin_rw_mutex>( "spin_rw_mutex", p ); - TestReaderWriter<tbb::queuing_rw_mutex>( "queuing_rw_mutex", p ); - TestReaderWriter<tbb::spin_rw_mutex>( "spin_rw_mutex", p ); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_openmp.cpp b/src/tbb/src/test/test_openmp.cpp deleted file mode 100644 index 512cdc8cd..000000000 --- a/src/tbb/src/test/test_openmp.cpp +++ /dev/null @@ -1,222 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// Test mixing OpenMP and TBB - -/* SCR #471 - Below is a workaround for compiling this test in an Intel Compiler environment - but with the Microsoft Compiler. In that case the wrong "omp.h" file is included and - the manifest section is missing from the .exe file; it is restored here. - - As of Visual Studio 2010, crtassem.h is no longer shipped. - */ -#if !defined(__INTEL_COMPILER) && _MSC_VER >= 1400 && _MSC_VER < 1600 - #include <crtassem.h> - #if !defined(_OPENMP) - #define _OPENMP - #if defined(_DEBUG) - #pragma comment(lib, "vcompd") - #else // _DEBUG - #pragma comment(lib, "vcomp") - #endif // _DEBUG - #endif // _OPENMP - - #if defined(_DEBUG) - #if defined(_M_IX86) - #pragma comment(linker,"/manifestdependency:\"type='win32' " \ - "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".DebugOpenMP' " \ - "version='" _CRT_ASSEMBLY_VERSION "' " \ - "processorArchitecture='x86' " \ - "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"") - #elif defined(_M_X64) - #pragma comment(linker,"/manifestdependency:\"type='win32' " \ - "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".DebugOpenMP' " \ - "version='" _CRT_ASSEMBLY_VERSION "' " \ - "processorArchitecture='amd64' " \ - "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"") - #elif defined(_M_IA64) - #pragma comment(linker,"/manifestdependency:\"type='win32' " \ - "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".DebugOpenMP' " \ - "version='" _CRT_ASSEMBLY_VERSION "' " \ - "processorArchitecture='ia64' " \ - "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"") - #endif - #else // _DEBUG - #if defined(_M_IX86) - #pragma comment(linker,"/manifestdependency:\"type='win32' " \ - "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".OpenMP' " \ - "version='" _CRT_ASSEMBLY_VERSION "' " \ - "processorArchitecture='x86' " \ - "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"") - #elif defined(_M_X64) - #pragma comment(linker,"/manifestdependency:\"type='win32' " \ - "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".OpenMP' " \ - "version='" _CRT_ASSEMBLY_VERSION "' " \ - "processorArchitecture='amd64' " \ - "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"") - #elif defined(_M_IA64) - #pragma comment(linker,"/manifestdependency:\"type='win32' " \ - "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".OpenMP' " \ - "version='" _CRT_ASSEMBLY_VERSION "' " \ - "processorArchitecture='ia64' " \ - "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"") - #endif - #endif // _DEBUG - #define _OPENMP_NOFORCE_MANIFEST -#endif - -#include <omp.h> - - -typedef short T; - -void SerialConvolve( T c[], const T a[], int m, const T b[], int n ) { - for( int i=0; i<m+n-1; ++i ) { - int start = i<n ? 0 : i-n+1; - int finish = i<m ?
i+1 : m; - T sum = 0; - for( int j=start; j<finish; ++j ) - sum += a[j]*b[i-j]; - c[i] = sum; - } -} - -#define OPENMP_ASYNC_SHUTDOWN_BROKEN (__INTEL_COMPILER<=1400 && __linux__) -#define TBB_PREVIEW_WAITING_FOR_WORKERS 1 - -#include "tbb/blocked_range.h" -#include "tbb/parallel_for.h" -#include "tbb/parallel_reduce.h" -#include "tbb/task_scheduler_init.h" -#include "harness.h" - -using namespace tbb; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous warning about short+=short - #pragma warning( push ) - #pragma warning( disable: 4244 ) -#endif - -class InnerBody: NoAssign { - const T* my_a; - const T* my_b; - const int i; -public: - T sum; - InnerBody( T /*c*/[], const T a[], const T b[], int i ) : - my_a(a), my_b(b), i(i), sum(0) - {} - InnerBody( InnerBody& x, split ) : - my_a(x.my_a), my_b(x.my_b), i(x.i), sum(0) - { - } - void join( InnerBody& x ) {sum += x.sum;} - void operator()( const blocked_range<int>& range ) { - for( int j=range.begin(); j!=range.end(); ++j ) - sum += my_a[j]*my_b[i-j]; - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif - -//! Test OpenMP loop around TBB loop -void OpenMP_TBB_Convolve( T c[], const T a[], int m, const T b[], int n ) { - REMARK("testing OpenMP loop around TBB loop\n"); -#pragma omp parallel - { - task_scheduler_init init; -#pragma omp for - for( int i=0; i<m+n-1; ++i ) { - int start = i<n ? 0 : i-n+1; - int finish = i<m ? i+1 : m; - InnerBody body(c,a,b,i); - parallel_reduce( blocked_range<int>(start,finish,10), body ); - c[i] = body.sum; - } - } -} - -class OuterBody: NoAssign { - const T* my_a; - const T* my_b; - T* my_c; - const int m; - const int n; -public: - T sum; - OuterBody( T c[], const T a[], int m_, const T b[], int n_ ) : - my_a(a), my_b(b), my_c(c), m(m_), n(n_) - {} - void operator()( const blocked_range<int>& range ) const { - for( int i=range.begin(); i!=range.end(); ++i ) { - int start = i<n ? 0 : i-n+1; - int finish = i<m ? i+1 : m; - T sum = 0; -#pragma omp parallel for reduction(+:sum) - for( int j=start; j<finish; ++j ) - sum += my_a[j]*my_b[i-j]; - my_c[i] = sum; - } - } -}; - -//! Test TBB loop around OpenMP loop -void TBB_OpenMP_Convolve( T c[], const T a[], int m, const T b[], int n ) { - REMARK("testing TBB loop around OpenMP loop\n"); - parallel_for( blocked_range<int>(0,m+n-1,10), OuterBody( c, a, m, b, n ) ); -} - -#include <stdio.h> - -const int M = 17*17; -const int N = 13*13; -T a[M], b[N]; -T expected[M+N], actual[M+N]; - -template <class Func> -void RunTest( Func F, int m, int n, int p, bool wait_workers = false ) { - task_scheduler_init init( p, 0, wait_workers ); - memset( actual, -1, (m+n)*sizeof(T) ); - F( actual, a, m, b, n ); - ASSERT( memcmp(actual, expected, (m+n-1)*sizeof(T))==0, NULL ); -} - -int TestMain () { - MinThread = 1; - for( int p=MinThread; p<=MaxThread; ++p ) { - for( int m=1; m<=M; m*=17 ) { - for( int n=1; n<=N; n*=13 ) { - for( int i=0; i<m; ++i ) a[i] = T(1+i/5); - for( int i=0; i<n; ++i ) b[i] = T(1+i/7); - SerialConvolve( expected, a, m, b, n ); - RunTest( OpenMP_TBB_Convolve, m, n, p ); - RunTest( TBB_OpenMP_Convolve, m, n, p -#if OPENMP_ASYNC_SHUTDOWN_BROKEN - ,true -#endif - ); - } - } - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_overwrite_node.cpp b/src/tbb/src/test/test_overwrite_node.cpp deleted file mode 100644 index 9bc1cc19b..000000000 --- a/src/tbb/src/test/test_overwrite_node.cpp +++ /dev/null @@ -1,159 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved.
- - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_graph.h" - -#include "tbb/task_scheduler_init.h" - -#define N 300 -#define T 4 -#define M 5 - -template< typename R > -void simple_read_write_tests() { - tbb::flow::graph g; - tbb::flow::overwrite_node<R> n(g); - - for ( int t = 0; t < T; ++t ) { - R v0(N+1); - harness_counting_receiver<R> r[M]; - - ASSERT( n.is_valid() == false, NULL ); - ASSERT( n.try_get( v0 ) == false, NULL ); - if ( t % 2 ) { - ASSERT( n.try_put( static_cast<R>(N) ), NULL ); - ASSERT( n.is_valid() == true, NULL ); - ASSERT( n.try_get( v0 ) == true, NULL ); - ASSERT( v0 == R(N), NULL ); - } - - for (int i = 0; i < M; ++i) { - tbb::flow::make_edge( n, r[i] ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(n.successor_count() == M, NULL); - typename tbb::flow::overwrite_node<R>::successor_vector_type my_succs; - n.copy_successors(my_succs); - ASSERT(my_succs.size() == M, NULL); - ASSERT(n.predecessor_count() == 0, NULL); -#endif - - for (int i = 0; i < N; ++i ) { - R v1(static_cast<R>(i)); - ASSERT( n.try_put( v1 ), NULL ); - ASSERT( n.is_valid() == true, NULL ); - for (int j = 0; j < N; ++j ) { - R v2(0); - ASSERT( n.try_get( v2 ), NULL ); - ASSERT( v1 == v2, NULL ); - } - } - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == N+t%2, NULL ); - } - for (int i = 0; i < M; ++i) { - tbb::flow::remove_edge( n, r[i] ); - } - ASSERT( n.try_put( R(0) ), NULL ); - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == N+t%2, NULL ); - } - n.clear(); - ASSERT( n.is_valid() == false, NULL ); - ASSERT( n.try_get( v0 ) == false, NULL ); - } -} - -template< typename R > -class native_body : NoAssign { - tbb::flow::overwrite_node<R> &my_node; - -public: - - native_body( tbb::flow::overwrite_node<R> &n ) : my_node(n) {} - - void operator()( int i ) const { - R v1(static_cast<R>(i)); - ASSERT( my_node.try_put( v1 ), NULL ); - ASSERT( my_node.is_valid() == true, NULL ); - } -}; - -template< typename R > -void parallel_read_write_tests() { - tbb::flow::graph g; - tbb::flow::overwrite_node<R> n(g); - //Create a vector of identical nodes - std::vector< tbb::flow::overwrite_node<R> > ow_vec(2, n); - - for (size_t node_idx=0; node_idx<ow_vec.size(); ++node_idx) { - for ( int t = 0; t < T; ++t ) { - harness_counting_receiver<R> 
r[M]; - - for (int i = 0; i < M; ++i) { - tbb::flow::make_edge( ow_vec[node_idx], r[i] ); - } - R v0; - ASSERT( ow_vec[node_idx].is_valid() == false, NULL ); - ASSERT( ow_vec[node_idx].try_get( v0 ) == false, NULL ); - - NativeParallelFor( N, native_body<R>( ow_vec[node_idx] ) ); - - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == N, NULL ); - } - for (int i = 0; i < M; ++i) { - tbb::flow::remove_edge( ow_vec[node_idx], r[i] ); - } - ASSERT( ow_vec[node_idx].try_put( R(0) ), NULL ); - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == N, NULL ); - } - ow_vec[node_idx].clear(); - ASSERT( ow_vec[node_idx].is_valid() == false, NULL ); - ASSERT( ow_vec[node_idx].try_get( v0 ) == false, NULL ); - } - } -} - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - simple_read_write_tests<int>(); - simple_read_write_tests<float>(); - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - parallel_read_write_tests<int>(); - parallel_read_write_tests<float>(); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_extract_on_node<tbb::flow::overwrite_node, int>(); - test_extract_on_node<tbb::flow::overwrite_node, float>(); -#endif - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_parallel_do.cpp b/src/tbb/src/test/test_parallel_do.cpp deleted file mode 100644 index 9c3235f79..000000000 --- a/src/tbb/src/test/test_parallel_do.cpp +++ /dev/null @@ -1,284 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/parallel_do.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/atomic.h" -#include "harness.h" -#include "harness_cpu.h" -#include <deque> - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif /* _MSC_VER && _Wp64 */ - -#define N_DEPTHS 20 - -static tbb::atomic<int> g_values_counter; - -class value_t { - size_t x; - value_t& operator= ( const value_t& ); -public: - value_t ( size_t xx ) : x(xx) { ++g_values_counter; } - value_t ( const value_t& v ) : x(v.value()) { ++g_values_counter; } - ~value_t () { --g_values_counter; } - size_t value() const volatile { return x; } -}; - -#include "harness_iterator.h" - -static size_t g_tasks_expected = 0; -static tbb::atomic<size_t> g_tasks_observed; - -size_t FindNumOfTasks ( size_t max_depth ) { - if( max_depth == 0 ) - return 1; - return max_depth * FindNumOfTasks( max_depth - 1 ) + 1; -} - -//! Simplest form of the parallel_do functor object. -class FakeTaskGeneratorBody { -public: - //! The simplest form of the function call operator - /** It does not allow adding new tasks during its execution. **/ - void operator() ( value_t depth ) const { - g_tasks_observed += FindNumOfTasks(depth.value()); - } -}; - -/** Work item is passed by reference here. **/ -class FakeTaskGeneratorBody_RefVersion { -public: - void operator() ( value_t& depth ) const { - g_tasks_observed += FindNumOfTasks(depth.value()); - } -}; - -/** Work item is passed by reference to const here. **/ -class FakeTaskGeneratorBody_ConstRefVersion { -public: - void operator() ( const value_t& depth ) const { - g_tasks_observed += FindNumOfTasks(depth.value()); - } -}; - -/** Work item is passed by reference to volatile here. **/ -class FakeTaskGeneratorBody_VolatileRefVersion { -public: - void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& ) const { - g_tasks_observed += FindNumOfTasks(depth.value()); - } -}; - -void do_work ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) { - ++g_tasks_observed; - size_t d=depth.value(); - --d; - for( size_t i = 0; i < depth.value(); ++i) - feeder.add(value_t(d)); -} - -//! Standard form of the parallel_do functor object. -/** Allows adding new work items on the fly. **/ -class TaskGeneratorBody -{ -public: - //! This form of the function call operator can be used when the body needs to add more work during the processing - void operator() ( value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const { - do_work(depth, feeder); - } -private: - // Assert that parallel_do does not ever access body constructors - TaskGeneratorBody () {} - TaskGeneratorBody ( const TaskGeneratorBody& ); - // TestBody() needs access to the default constructor - template<class Body, class Iterator> friend void TestBody( size_t ); -}; - -/** Work item is passed by reference here. **/ -class TaskGeneratorBody_RefVersion -{ -public: - void operator() ( value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const { - do_work(depth, feeder); - } -}; - -/** Work item is passed as const here. Compilers must ignore the const qualifier. **/ -class TaskGeneratorBody_ConstVersion -{ -public: - void operator() ( const value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const { - do_work(depth, feeder); - } -}; - -/** Work item is passed by reference to const here. 
**/ -class TaskGeneratorBody_ConstRefVersion -{ -public: - void operator() ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const { - do_work(depth, feeder); - } -}; - -/** Work item is passed by reference to volatile here. **/ -class TaskGeneratorBody_VolatileRefVersion -{ -public: - void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const { - do_work(const_cast<value_t&>(depth), feeder); - } -}; - -/** Work item is passed by reference to const volatile here. **/ -class TaskGeneratorBody_ConstVolatileRefVersion -{ -public: - void operator() ( const volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const { - do_work(const_cast<value_t&>(depth), feeder); - } -}; - - -static value_t g_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2}; - -template<class Body, class Iterator> -void TestBody ( size_t depth ) { - typedef typename std::iterator_traits<Iterator>::value_type value_type; - value_type a_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2}; - Body body; - Iterator begin(a_depths); - Iterator end(a_depths + depth); - g_tasks_observed = 0; - tbb::parallel_do(begin, end, body); - ASSERT (g_tasks_observed == g_tasks_expected, NULL); -} - -template<class Iterator> -void TestIterator_RvalueOnly ( int /*nthread*/, size_t depth ) { - TestBody<FakeTaskGeneratorBody, Iterator> (depth); - TestBody<FakeTaskGeneratorBody_ConstRefVersion, Iterator> (depth); - TestBody<TaskGeneratorBody, Iterator> (depth); - TestBody<TaskGeneratorBody_ConstVersion, Iterator> (depth); - TestBody<TaskGeneratorBody_ConstRefVersion, Iterator> (depth); -} - -template<class Iterator> -void TestIterator ( int nthread, size_t depth ) { - TestIterator_RvalueOnly<Iterator>(nthread, depth); - TestBody<FakeTaskGeneratorBody_RefVersion, Iterator> (depth); - TestBody<FakeTaskGeneratorBody_VolatileRefVersion, Iterator> (depth); - TestBody<TaskGeneratorBody_RefVersion, Iterator> (depth); - TestBody<TaskGeneratorBody_VolatileRefVersion, Iterator> (depth); - TestBody<TaskGeneratorBody_ConstVolatileRefVersion, Iterator> (depth); -} - -void Run( int nthread ) { - for( size_t depth = 0; depth <= N_DEPTHS; ++depth ) { - g_tasks_expected = 0; - for ( size_t i=0; i < depth; ++i ) - g_tasks_expected += FindNumOfTasks( g_depths[i].value() ); - // Test for iterators over values convertible to work item type - TestIterator_RvalueOnly<size_t*>(nthread, depth); - // Test for random access iterators - TestIterator<value_t*>(nthread, depth); - // Test for input iterators - TestIterator<Harness::InputIterator<value_t> >(nthread, depth); - // Test for forward iterators - TestIterator<Harness::ForwardIterator<value_t> >(nthread, depth); - // Test for const random access iterators - TestIterator_RvalueOnly<Harness::ConstRandomIterator<value_t> >(nthread, depth); - } -} - -const size_t elements = 10000; -const size_t init_sum = 0; -tbb::atomic<size_t> element_counter; - -template<size_t K> -struct set_to { - void operator()(size_t& x) const { - x = K; - ++element_counter; - } -}; - -#include "test_range_based_for.h" -#include <functional> - -void range_do_test() { - using namespace range_based_for_support_tests; - std::deque<size_t> v(elements, 0); - - // iterator, const and non-const range check - element_counter = 0; - tbb::parallel_do(v.begin(), v.end(), set_to<1>()); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), 
init_sum) == v.size(), "elements of v not all ones"); - - element_counter = 0; - tbb::parallel_do(v, set_to<0>()); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros"); - - element_counter = 0; - tbb::parallel_do(tbb::blocked_range<std::deque<size_t>::iterator>(v.begin(), v.end()), set_to<1>()); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones"); - - // same as above with context group - element_counter = 0; - tbb::task_group_context context; - tbb::parallel_do(v.begin(), v.end(), set_to<0>(), context); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros"); - - element_counter = 0; - tbb::parallel_do(v, set_to<1>(), context); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones"); - - element_counter = 0; - tbb::parallel_do(tbb::blocked_range<std::deque<size_t>::iterator>(v.begin(), v.end()), set_to<0>(), context); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros"); -} - -int TestMain () { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - g_values_counter = 0; - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - Run(p); - range_do_test(); - // Test that all workers sleep when no work - TestCPUUserTime(p); - } - // This check must be performed after the scheduler terminated because only in this - // case there is a guarantee that the workers already destroyed their last tasks. - ASSERT( g_values_counter == 0, "Value objects were leaked" ); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_parallel_for.cpp b/src/tbb/src/test/test_parallel_for.cpp deleted file mode 100644 index 3ea13a6b8..000000000 --- a/src/tbb/src/test/test_parallel_for.cpp +++ /dev/null @@ -1,602 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction.
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test for function template parallel_for.h - -// Enable testing of serial subset. -#define TBB_PREVIEW_SERIAL_SUBSET 1 -#include "harness_defs.h" - -#if _MSC_VER -#pragma warning (push) -#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - // Suppress pointless "unreachable code" warning. - #pragma warning (disable: 4702) -#endif -#if defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -#define _SCL_SECURE_NO_WARNINGS -#endif //#if _MSC_VER - -#include "harness_defs.h" -#include "tbb/parallel_for.h" -#include "tbb/atomic.h" -#include "harness_assert.h" -#include "harness.h" - -static tbb::atomic<int> FooBodyCount; - -//! A range object whose only public members are those required by the Range concept. -template<size_t Pad> -class FooRange { - //! Start of range - int start; - - //! Size of range - int size; - FooRange( int start_, int size_ ) : start(start_), size(size_) { - zero_fill<char>(pad, Pad); - pad[Pad-1] = 'x'; - } - template<typename Flavor_, size_t Pad_> friend void Flog( int nthread ); - template<size_t Pad_> friend class FooBody; - void operator&(); - - char pad[Pad]; -public: - bool empty() const {return size==0;} - bool is_divisible() const {return size>1;} - FooRange( FooRange& original, tbb::split ) : size(original.size/2) { - original.size -= size; - start = original.start+original.size; - ASSERT( original.pad[Pad-1]=='x', NULL ); - pad[Pad-1] = 'x'; - } -}; - -//! A range object whose only public members are those required by the parallel_for.h body concept. -template<size_t Pad> -class FooBody { - static const int LIVE = 0x1234; - tbb::atomic<int>* array; - int state; - friend class FooRange<Pad>; - template<typename Flavor_, size_t Pad_> friend void Flog( int nthread ); - FooBody( tbb::atomic<int>* array_ ) : array(array_), state(LIVE) {} -public: - ~FooBody() { - --FooBodyCount; - for( size_t i=0; i<sizeof(*this); ++i ) - reinterpret_cast<char*>(this)[i] = -1; - } - //! 
Copy constructor - FooBody( const FooBody& other ) : array(other.array), state(other.state) { - ++FooBodyCount; - ASSERT( state==LIVE, NULL ); - } - void operator()( FooRange<Pad>& r ) const { - for( int k=0; k<r.size; ++k ) { - const int i = array[r.start+k]++; - ASSERT( i==0, NULL ); - } - } -}; - -#include "tbb/tick_count.h" - -static const int N = 500; -static tbb::atomic<int> Array[N]; - -struct serial_tag {}; -struct parallel_tag {}; -struct empty_partitioner_tag {}; - -template <typename Flavor, typename Partitioner, typename Range, typename Body> -struct Invoker; - -template <typename Range, typename Body> -struct Invoker<serial_tag, empty_partitioner_tag, Range, Body> { - void operator()( const Range& r, const Body& body, empty_partitioner_tag& ) { - tbb::serial:: parallel_for( r, body ); - } -}; - -template <typename Partitioner, typename Range, typename Body> -struct Invoker<serial_tag, Partitioner, Range, Body> { - void operator()( const Range& r, const Body& body, Partitioner& p ) { - tbb::serial:: parallel_for( r, body, p ); - } -}; - -template <typename Range, typename Body> -struct Invoker<parallel_tag, empty_partitioner_tag, Range, Body> { - void operator()( const Range& r, const Body& body, empty_partitioner_tag& ) { - tbb:: parallel_for( r, body ); - } -}; - -template <typename Partitioner, typename Range, typename Body> -struct Invoker<parallel_tag, Partitioner, Range, Body> { - void operator()( const Range& r, const Body& body, Partitioner& p ) { - tbb:: parallel_for( r, body, p ); - } -}; - -template <typename Flavor, typename Partitioner, typename T, typename Body> -struct InvokerStep; - -template <typename T, typename Body> -struct InvokerStep<serial_tag, empty_partitioner_tag, T, Body> { - void operator()( const T& first, const T& last, const Body& f, empty_partitioner_tag& ) { - tbb::serial:: parallel_for( first, last, f ); - } - void operator()( const T& first, const T& last, const T& step, const Body& f, empty_partitioner_tag& ) { - tbb::serial:: parallel_for( first, last, step, f ); - } -}; - -template <typename Partitioner, typename T, typename Body> -struct InvokerStep<serial_tag, Partitioner, T, Body> { - void operator()( const T& first, const T& last, const Body& f, Partitioner& p ) { - tbb::serial:: parallel_for( first, last, f, p); - } - void operator()( const T& first, const T& last, const T& step, const Body& f, Partitioner& p ) { - tbb::serial:: parallel_for( first, last, step, f, p ); - } -}; - -template <typename T, typename Body> -struct InvokerStep<parallel_tag, empty_partitioner_tag, T, Body> { - void operator()( const T& first, const T& last, const Body& f, empty_partitioner_tag& ) { - tbb:: parallel_for( first, last, f ); - } - void operator()( const T& first, const T& last, const T& step, const Body& f, empty_partitioner_tag& ) { - tbb:: parallel_for( first, last, step, f ); - } -}; - -template <typename Partitioner, typename T, typename Body> -struct InvokerStep<parallel_tag, Partitioner, T, Body> { - void operator()( const T& first, const T& last, const Body& f, Partitioner& p ) { - tbb:: parallel_for( first, last, f, p ); - } - void operator()( const T& first, const T& last, const T& step, const Body& f, Partitioner& p ) { - tbb:: parallel_for( first, last, step, f, p ); - } -}; - -template<typename Flavor, size_t Pad> -void Flog( int nthread ) { - tbb::tick_count T0 = tbb::tick_count::now(); - for( int i=0; i<N; ++i ) { - for ( int mode = 0; mode < 4; ++mode) { - FooRange<Pad> r( 0, i ); - const FooRange<Pad> rc = r; - FooBody<Pad> f( Array 
); - const FooBody<Pad> fc = f; - memset( Array, 0, sizeof(Array) ); - FooBodyCount = 1; - switch (mode) { - case 0: { - empty_partitioner_tag p; - Invoker< Flavor, empty_partitioner_tag, FooRange<Pad>, FooBody<Pad> > invoke_for; - invoke_for( rc, fc, p ); - } - break; - case 1: { - Invoker< Flavor, const tbb::simple_partitioner, FooRange<Pad>, FooBody<Pad> > invoke_for; - invoke_for( rc, fc, tbb::simple_partitioner() ); - } - break; - case 2: { - Invoker< Flavor, const tbb::auto_partitioner, FooRange<Pad>, FooBody<Pad> > invoke_for; - invoke_for( rc, fc, tbb::auto_partitioner() ); - } - break; - case 3: { - static tbb::affinity_partitioner affinity; - Invoker< Flavor, tbb::affinity_partitioner, FooRange<Pad>, FooBody<Pad> > invoke_for; - invoke_for( rc, fc, affinity ); - } - break; - } - for( int j=0; j<i; ++j ) - ASSERT( Array[j]==1, NULL ); - for( int j=i; j<N; ++j ) - ASSERT( Array[j]==0, NULL ); - ASSERT( FooBodyCount==1, NULL ); - } - } - tbb::tick_count T1 = tbb::tick_count::now(); - REMARK("time=%g\tnthread=%d\tpad=%d\n",(T1-T0).seconds(),nthread,int(Pad)); -} - -// Testing parallel_for with step support -const size_t PFOR_BUFFER_TEST_SIZE = 1024; -// test_buffer has some extra items beyond its right bound -const size_t PFOR_BUFFER_ACTUAL_SIZE = PFOR_BUFFER_TEST_SIZE + 1024; -size_t pfor_buffer[PFOR_BUFFER_ACTUAL_SIZE]; - -template<typename T> -class TestFunctor{ -public: - void operator ()(T index) const { - pfor_buffer[index]++; - } -}; - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <stdexcept> // std::invalid_argument - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -template <typename Flavor, typename T, typename Partitioner> -void TestParallelForWithStepSupportHelper(Partitioner& p) -{ - const T pfor_buffer_test_size = static_cast<T>(PFOR_BUFFER_TEST_SIZE); - const T pfor_buffer_actual_size = static_cast<T>(PFOR_BUFFER_ACTUAL_SIZE); - // Testing parallel_for with different step values - InvokerStep< Flavor, Partitioner, T, TestFunctor<T> > invoke_for; - for (T begin = 0; begin < pfor_buffer_test_size - 1; begin += pfor_buffer_test_size / 10 + 1) { - T step; - for (step = 1; step < pfor_buffer_test_size; step++) { - memset(pfor_buffer, 0, pfor_buffer_actual_size * sizeof(size_t)); - if (step == 1){ - invoke_for(begin, pfor_buffer_test_size, TestFunctor<T>(), p); - } else { - invoke_for(begin, pfor_buffer_test_size, step, TestFunctor<T>(), p); - } - // Verifying that parallel_for processed all items it should - for (T i = begin; i < pfor_buffer_test_size; i = i + step) { - ASSERT(pfor_buffer[i] == 1, "parallel_for didn't process all required elements"); - pfor_buffer[i] = 0; - } - // Verifying that no extra items were processed and right bound of array wasn't crossed - for (T i = 0; i < pfor_buffer_actual_size; i++) { - ASSERT(pfor_buffer[i] == 0, "parallel_for processed an extra element"); - } - } - } -} - -template <typename Flavor, typename T> -void TestParallelForWithStepSupport() -{ - static tbb::affinity_partitioner affinity_p; - tbb::auto_partitioner auto_p; - tbb::simple_partitioner simple_p; - empty_partitioner_tag p; - - // Try out all partitioner combinations - TestParallelForWithStepSupportHelper< Flavor,T,empty_partitioner_tag >(p); - TestParallelForWithStepSupportHelper< Flavor,T,const tbb::auto_partitioner >(auto_p); - TestParallelForWithStepSupportHelper< 
Flavor,T,const tbb::simple_partitioner >(simple_p); - TestParallelForWithStepSupportHelper< Flavor,T,tbb::affinity_partitioner >(affinity_p); - - // Testing some corner cases - tbb::parallel_for(static_cast<T>(2), static_cast<T>(1), static_cast<T>(1), TestFunctor<T>()); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - try{ - tbb::parallel_for(static_cast<T>(1), static_cast<T>(100), static_cast<T>(0), TestFunctor<T>()); // should cause std::invalid_argument - }catch(std::invalid_argument){ - return; - } - catch ( ... ) { - ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception. std::invalid_argument is expected" ); - } -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ -} - -#if __TBB_TASK_GROUP_CONTEXT -// Exception support test -#define HARNESS_EH_SIMPLE_MODE 1 -#include "tbb/tbb_exception.h" -#include "harness_eh.h" - -#if TBB_USE_EXCEPTIONS -class test_functor_with_exception { -public: - void operator ()(size_t) const { ThrowTestException(); } -}; - -void TestExceptionsSupport() { - REMARK (__FUNCTION__); - { // Tests version with a step provided - ResetEhGlobals(); - TRY(); - tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, (size_t)1, test_functor_with_exception()); - CATCH_AND_ASSERT(); - } - { // Tests version without a step - ResetEhGlobals(); - TRY(); - tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, test_functor_with_exception()); - CATCH_AND_ASSERT(); - } -} -#endif /* TBB_USE_EXCEPTIONS */ - -// Cancellation support test -class functor_to_cancel { -public: - void operator()(size_t) const { - ++g_CurExecuted; - CancellatorTask::WaitUntilReady(); - } -}; - -size_t g_worker_task_step = 0; - -class my_worker_pfor_step_task : public tbb::task -{ - tbb::task_group_context &my_ctx; - - tbb::task* execute () { - if (g_worker_task_step == 0){ - tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, functor_to_cancel(), my_ctx); - }else{ - tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, g_worker_task_step, functor_to_cancel(), my_ctx); - } - return NULL; - } -public: - my_worker_pfor_step_task ( tbb::task_group_context &context_) : my_ctx(context_) { } -}; - -void TestCancellation() -{ - // tests version without a step - g_worker_task_step = 0; - ResetEhGlobals(); - RunCancellationTest<my_worker_pfor_step_task, CancellatorTask>(); - - // tests version with step - g_worker_task_step = 1; - ResetEhGlobals(); - RunCancellationTest<my_worker_pfor_step_task, CancellatorTask>(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#include "harness_m128.h" - -#if (HAVE_m128 || HAVE_m256) && !__TBB_SSE_STACK_ALIGNMENT_BROKEN -template<typename ClassWithVectorType> -struct SSE_Functor { - ClassWithVectorType* Src, * Dst; - SSE_Functor( ClassWithVectorType* src, ClassWithVectorType* dst ) : Src(src), Dst(dst) {} - - void operator()( tbb::blocked_range<int>& r ) const { - for( int i=r.begin(); i!=r.end(); ++i ) - Dst[i] = Src[i]; - } -}; - -//! 
Test that parallel_for works with stack-allocated __m128 -template<typename ClassWithVectorType> -void TestVectorTypes() { - ClassWithVectorType Array1[N], Array2[N]; - for( int i=0; i<N; ++i ) { - // VC8 does not properly align a temporary value; to work around, use explicit variable - ClassWithVectorType foo(i); - Array1[i] = foo; - } - tbb::parallel_for( tbb::blocked_range<int>(0,N), SSE_Functor<ClassWithVectorType>(Array1, Array2) ); - for( int i=0; i<N; ++i ) { - ClassWithVectorType foo(i); - ASSERT( Array2[i]==foo, NULL ) ; - } -} -#endif /* HAVE_m128 || HAVE_m256 */ - -#include <vector> -#include <tbb/blocked_range.h> -#include <sstream> -struct TestSimplePartitionerStabilityFunctor:NoAssign{ - std::vector<int> & ranges; - TestSimplePartitionerStabilityFunctor(std::vector<int> & theRanges):ranges(theRanges){} - void operator()(tbb::blocked_range<size_t>& r)const{ - ranges.at(r.begin())=true; - } -}; -void TestSimplePartitionerStability(){ - const std::size_t repeat_count= 10; - const std::size_t rangeToSplitSize=1000000; - const std::size_t grainsizeStep=rangeToSplitSize/repeat_count; - typedef TestSimplePartitionerStabilityFunctor FunctorType; - - for (std::size_t i=0 , grainsize=grainsizeStep; i<repeat_count;i++, grainsize+=grainsizeStep){ - std::vector<int> firstSeries(rangeToSplitSize,0); - std::vector<int> secondSeries(rangeToSplitSize,0); - - tbb::parallel_for(tbb::blocked_range<size_t>(0,rangeToSplitSize,grainsize),FunctorType(firstSeries),tbb::simple_partitioner()); - tbb::parallel_for(tbb::blocked_range<size_t>(0,rangeToSplitSize,grainsize),FunctorType(secondSeries),tbb::simple_partitioner()); - std::stringstream str; str<<i; - ASSERT(firstSeries==secondSeries,("splitting range with tbb::simple_partitioner must be reproducible; i=" +str.str()).c_str() ); - } -} -#include <cstdio> -#include "tbb/task_scheduler_init.h" -#include "harness_cpu.h" -#include "harness_barrier.h" -#include "test_partitioner.h" - -namespace interaction_with_range_and_partitioner { - -// Test checks compatibility of parallel_for algorithm with various range implementations - -void test() { - using namespace test_partitioner_utils::interaction_with_range_and_partitioner; - - test_partitioner_utils::SimpleBody b; - tbb::affinity_partitioner ap; - - parallel_for(Range1(true, false), b, ap); - parallel_for(Range2(true, false), b, ap); - parallel_for(Range3(true, false), b, ap); - parallel_for(Range4(false, true), b, ap); - parallel_for(Range5(false, true), b, ap); - parallel_for(Range6(false, true), b, ap); - - parallel_for(Range1(false, true), b, tbb::simple_partitioner()); - parallel_for(Range2(false, true), b, tbb::simple_partitioner()); - parallel_for(Range3(false, true), b, tbb::simple_partitioner()); - parallel_for(Range4(false, true), b, tbb::simple_partitioner()); - parallel_for(Range5(false, true), b, tbb::simple_partitioner()); - parallel_for(Range6(false, true), b, tbb::simple_partitioner()); - - parallel_for(Range1(false, true), b, tbb::auto_partitioner()); - parallel_for(Range2(false, true), b, tbb::auto_partitioner()); - parallel_for(Range3(false, true), b, tbb::auto_partitioner()); - parallel_for(Range4(false, true), b, tbb::auto_partitioner()); - parallel_for(Range5(false, true), b, tbb::auto_partitioner()); - parallel_for(Range6(false, true), b, tbb::auto_partitioner()); -} - -} // namespace interaction_with_range_and_partitioner - -namespace uniform_work_distribution { - -/* - * Test checks that initial work distribution is done uniformly - * through affinity mechanism and not through 
work stealing - */ - -class Body { - Harness::SpinBarrier &m_sb; -public: - Body(Harness::SpinBarrier& sb) : m_sb(sb) { } - Body(Body& b, tbb::split) : m_sb(b.m_sb) { } - Body& operator =(const Body&) { return *this; } - template <typename Range> - void operator()(Range& r) const { - REMARK("Executing range [%lu, %lu)\n", r.begin(), r.end()); - m_sb.timed_wait(10); // waiting for all threads - } -}; - - - -template <typename RangeType> -void test_uniform_work_distribution() { - int thread_num = tbb::task_scheduler_init::default_num_threads(); - Harness::SpinBarrier sb(thread_num); - tbb::affinity_partitioner ap; - tbb::parallel_for(RangeType(0, thread_num), Body(sb), ap); -} - -void test() { - using namespace test_partitioner_utils::TestRanges; - - test_uniform_work_distribution<RoundedDownRange>(); - test_uniform_work_distribution<RoundedUpRange>(); - test_uniform_work_distribution< tbb::blocked_range<size_t> >(); - test_uniform_work_distribution<Range1_2>(); - test_uniform_work_distribution<Range1_999>(); - test_uniform_work_distribution<Range999_1>(); -} - -} // namespace uniform_work_distribution - -int TestMain () { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - if( p>0 ) { - tbb::task_scheduler_init init( p ); - Flog<parallel_tag,1>(p); - Flog<parallel_tag,10>(p); - Flog<parallel_tag,100>(p); - Flog<parallel_tag,1000>(p); - Flog<parallel_tag,10000>(p); - - // Testing with different integer types - TestParallelForWithStepSupport<parallel_tag,short>(); - TestParallelForWithStepSupport<parallel_tag,unsigned short>(); - TestParallelForWithStepSupport<parallel_tag,int>(); - TestParallelForWithStepSupport<parallel_tag,unsigned int>(); - TestParallelForWithStepSupport<parallel_tag,long>(); - TestParallelForWithStepSupport<parallel_tag,unsigned long>(); - TestParallelForWithStepSupport<parallel_tag,long long>(); - TestParallelForWithStepSupport<parallel_tag,unsigned long long>(); - TestParallelForWithStepSupport<parallel_tag,size_t>(); - - // This is for testing serial implementation. 
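// [Editor's note, not part of the deleted file] The serial-implementation runs
// below use TBB's preview "serial subset": tbb::serial::parallel_for runs the
// same Range/Body concepts without spawning tasks, and is enabled by defining
// TBB_PREVIEW_SERIAL_SUBSET before including tbb/parallel_for.h, as this file
// does near its top. A minimal standalone sketch of the pattern, assuming the
// classic TBB 4.x headers this test targets:
//
//     #define TBB_PREVIEW_SERIAL_SUBSET 1
//     #include "tbb/blocked_range.h"
//     #include "tbb/parallel_for.h"
//
//     static int buffer[100];
//
//     struct Increment {
//         void operator()( const tbb::blocked_range<int>& r ) const {
//             for( int i = r.begin(); i != r.end(); ++i )
//                 ++buffer[i];   // identical body for serial and parallel runs
//         }
//     };
//
//     int main() {
//         tbb::blocked_range<int> whole(0, 100);
//         tbb::serial::parallel_for( whole, Increment() ); // serial reference run
//         tbb::parallel_for( whole, Increment() );         // parallel run
//         return 0;
//     }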
- if( p == MaxThread ) { - Flog<serial_tag,1>(p); - Flog<serial_tag,10>(p); - Flog<serial_tag,100>(p); - TestParallelForWithStepSupport<serial_tag,short>(); - TestParallelForWithStepSupport<serial_tag,unsigned short>(); - TestParallelForWithStepSupport<serial_tag,int>(); - TestParallelForWithStepSupport<serial_tag,unsigned int>(); - TestParallelForWithStepSupport<serial_tag,long>(); - TestParallelForWithStepSupport<serial_tag,unsigned long>(); - TestParallelForWithStepSupport<serial_tag,long long>(); - TestParallelForWithStepSupport<serial_tag,unsigned long long>(); - TestParallelForWithStepSupport<serial_tag,size_t>(); - } - -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - TestExceptionsSupport(); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ -#if __TBB_TASK_GROUP_CONTEXT - if ( p > 1 ) - TestCancellation(); -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if !__TBB_SSE_STACK_ALIGNMENT_BROKEN - #if HAVE_m128 - TestVectorTypes<ClassWithSSE>(); - #endif - #if HAVE_m256 - if (have_AVX()) TestVectorTypes<ClassWithAVX>(); - #endif -#endif /*!__TBB_SSE_STACK_ALIGNMENT_BROKEN*/ - // Test that all workers sleep when no work - TestCPUUserTime(p); - TestSimplePartitionerStability(); - } - } -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - REPORT("Known issue: exception handling tests are skipped.\n"); -#endif -#if (HAVE_m128 || HAVE_m256) && __TBB_SSE_STACK_ALIGNMENT_BROKEN - REPORT("Known issue: stack alignment for SIMD instructions not tested.\n"); -#endif - - uniform_work_distribution::test(); - interaction_with_range_and_partitioner::test(); - return Harness::Done; -} - -#if _MSC_VER -#pragma warning (pop) -#endif diff --git a/src/tbb/src/test/test_parallel_for_each.cpp b/src/tbb/src/test/test_parallel_for_each.cpp deleted file mode 100644 index 4399e3d71..000000000 --- a/src/tbb/src/test/test_parallel_for_each.cpp +++ /dev/null @@ -1,248 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning(disable: 4180) // "qualifier applied to function type has no meaning; ignored" -#endif - -#include "tbb/parallel_for_each.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/atomic.h" -#include "harness.h" -#include "harness_iterator.h" -#include <list> - -// Some old compilers can't deduce template paremeter type for parallel_for_each -// if the function name is passed without explicit cast to function pointer. -typedef void (*TestFunctionType)(size_t); - -tbb::atomic<size_t> sum; - -// This function is called via parallel_for_each -void TestFunction (size_t value) { - sum += (unsigned int)value; -} - -const size_t NUMBER_OF_ELEMENTS = 1000; - -// Tests tbb::parallel_for_each functionality -template <typename Iterator> -void RunPForEachTests() -{ - size_t test_vector[NUMBER_OF_ELEMENTS + 1]; - - sum = 0; - size_t test_sum = 0; - - for (size_t i =0; i < NUMBER_OF_ELEMENTS; i++) { - test_vector[i] = i; - test_sum += i; - } - test_vector[NUMBER_OF_ELEMENTS] = 1000000; // parallel_for_each shouldn't touch this element - - Iterator begin(&test_vector[0]); - Iterator end(&test_vector[NUMBER_OF_ELEMENTS]); - - tbb::parallel_for_each(begin, end, (TestFunctionType)TestFunction); - ASSERT(sum == test_sum, "Not all items of test vector were processed by parallel_for_each"); - ASSERT(test_vector[NUMBER_OF_ELEMENTS] == 1000000, "parallel_for_each processed an extra element"); -} - -typedef void (*TestMutatorType)(size_t&); - -void TestMutator(size_t& value) { - ASSERT(value==0,NULL); - ++sum; - ++value; -} - -//! Test that tbb::parallel_for_each works for mutable iterators. -template <typename Iterator> -void RunMutablePForEachTests() { - size_t test_vector[NUMBER_OF_ELEMENTS]; - for( size_t i=0; i<NUMBER_OF_ELEMENTS; ++i ) - test_vector[i] = 0; - sum = 0; - tbb::parallel_for_each( Iterator(test_vector), Iterator(test_vector+NUMBER_OF_ELEMENTS), (TestMutatorType)TestMutator ); - ASSERT( sum==NUMBER_OF_ELEMENTS, "parallel_for_each called function wrong number of times" ); - for( size_t i=0; i<NUMBER_OF_ELEMENTS; ++i ) - ASSERT( test_vector[i]==1, "parallel_for_each did not process each element exactly once" ); -} - -#if __TBB_TASK_GROUP_CONTEXT -#define HARNESS_EH_SIMPLE_MODE 1 -#include "tbb/tbb_exception.h" -#include "harness_eh.h" - -#if TBB_USE_EXCEPTIONS -void test_function_with_exception(size_t) { - ThrowTestException(); -} - -template <typename Iterator> -void TestExceptionsSupport() -{ - REMARK (__FUNCTION__); - size_t test_vector[NUMBER_OF_ELEMENTS + 1]; - - for (size_t i = 0; i < NUMBER_OF_ELEMENTS; i++) { - test_vector[i] = i; - } - - Iterator begin(&test_vector[0]); - Iterator end(&test_vector[NUMBER_OF_ELEMENTS]); - - TRY(); - tbb::parallel_for_each(begin, end, (TestFunctionType)test_function_with_exception); - CATCH_AND_ASSERT(); -} -#endif /* TBB_USE_EXCEPTIONS */ - -// Cancelation support test -void function_to_cancel(size_t ) { - ++g_CurExecuted; - CancellatorTask::WaitUntilReady(); -} - -template <typename Iterator> -class my_worker_pforeach_task : public tbb::task -{ - tbb::task_group_context &my_ctx; - - tbb::task* execute () { - size_t test_vector[NUMBER_OF_ELEMENTS + 1]; - for (size_t i = 0; i < NUMBER_OF_ELEMENTS; i++) { - test_vector[i] = i; - } - Iterator begin(&test_vector[0]); - Iterator end(&test_vector[NUMBER_OF_ELEMENTS]); - - tbb::parallel_for_each(begin, end, (TestFunctionType)function_to_cancel); - - return NULL; - } -public: - my_worker_pforeach_task ( tbb::task_group_context &ctx) : my_ctx(ctx) { } 
-}; - -template <typename Iterator> -void TestCancellation() -{ - REMARK (__FUNCTION__); - ResetEhGlobals(); - RunCancellationTest<my_worker_pforeach_task<Iterator>, CancellatorTask>(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#include "harness_cpu.h" - -const size_t elements = 10000; -const size_t init_sum = 0; -tbb::atomic<size_t> element_counter; - -template<size_t K> -struct set_to { - void operator()(size_t& x) const { - x = K; - ++element_counter; - } -}; - -#include "test_range_based_for.h" -#include <functional> - -void range_for_each_test() { - using namespace range_based_for_support_tests; - std::list<size_t> v(elements, 0); - - // iterator, const and non-const range check - element_counter = 0; - tbb::parallel_for_each(v.begin(), v.end(), set_to<1>()); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones"); - - element_counter = 0; - tbb::parallel_for_each(v, set_to<0>()); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum , "elements of v not all zeros"); - - element_counter = 0; - tbb::parallel_for_each(tbb::blocked_range<std::list<size_t>::iterator>(v.begin(), v.end()), set_to<1>()); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones"); - - // iterator, const and non-const range check with context - element_counter = 0; - tbb::task_group_context context; - tbb::parallel_for_each(v.begin(), v.end(), set_to<0>(), context); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum , "elements of v not all zeros"); - - element_counter = 0; - tbb::parallel_for_each(v, set_to<1>(), context); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones"); - - element_counter = 0; - tbb::parallel_for_each(tbb::blocked_range<std::list<size_t>::iterator>(v.begin(), v.end()), set_to<0>(), context); - ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set"); - ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum , "elements of v not all zeros"); -} - -int TestMain () { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - - RunPForEachTests<Harness::RandomIterator<size_t> >(); - RunPForEachTests<Harness::ConstRandomIterator<size_t> >(); - RunPForEachTests<Harness::InputIterator<size_t> >(); - RunPForEachTests<Harness::ForwardIterator<size_t> >(); - - RunMutablePForEachTests<Harness::RandomIterator<size_t> >(); - RunMutablePForEachTests<Harness::ForwardIterator<size_t> >(); - -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - TestExceptionsSupport<Harness::RandomIterator<size_t> >(); - TestExceptionsSupport<Harness::InputIterator<size_t> >(); - TestExceptionsSupport<Harness::ForwardIterator<size_t> >(); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 
 */
-
-#if __TBB_TASK_GROUP_CONTEXT
-        if (p > 1) {
-            TestCancellation<Harness::RandomIterator<size_t> >();
-            TestCancellation<Harness::InputIterator<size_t> >();
-            TestCancellation<Harness::ForwardIterator<size_t> >();
-        }
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-
-        range_for_each_test();
-
-        // Test that all workers sleep when no work
-        TestCPUUserTime(p);
-    }
-#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
-    REPORT("Known issue: exception handling tests are skipped.\n");
-#endif
-    return Harness::Done;
-}
diff --git a/src/tbb/src/test/test_parallel_for_vectorization.cpp b/src/tbb/src/test/test_parallel_for_vectorization.cpp
deleted file mode 100644
index ac0994b40..000000000
--- a/src/tbb/src/test/test_parallel_for_vectorization.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details. You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-// The test checks whether vectorization happens when the PPL-style parallel_for
-// is used. The test implements two ideas:
-// 1. "pragma always assert" issues a compile-time error if the vectorization
-//    cannot be produced;
-// 2. "#pragma ivdep" has a peculiarity which also can be used for detection of
-//    successful vectorization. See the comment below.
-
-// For now, only Intel(R) C++ Compiler 12.0 and later is supported. Also, there
-// is no sense in running the test in debug mode.
-#define HARNESS_SKIP_TEST ( __INTEL_COMPILER < 1200 || TBB_USE_DEBUG )
-
-// __TBB_ASSERT_ON_VECTORIZATION_FAILURE enables "pragma always assert" for
-// Intel(R) C++ Compiler.
-#define __TBB_ASSERT_ON_VECTORIZATION_FAILURE ( !HARNESS_SKIP_TEST )
-#include "tbb/parallel_for.h"
-#include "tbb/task_scheduler_init.h"
-
-#include "harness.h"
-#include "harness_assert.h"
-
-class Body : NoAssign {
-    int &sum;
-public:
-    Body( int& s ) : sum(s) {}
-    void operator() ( int i ) const {
-        sum += i / i;
-    }
-};
-
-int TestMain () {
-    // Should be big enough that the partitioner generates at least one range
-    // with a size greater than 1. See the comment below.
-    const int N = 10000;
-    int sum = 1;
-    tbb::task_scheduler_init init(1);
-    tbb::parallel_for( 1, N, Body(sum) );
-
-    // The PPL-style parallel_for implementation has "#pragma ivdep" before the
-    // range loop. This pragma suppresses the dependency on "sum" in "Body".
- // Thus the vectorizer should generate code which just add to "sum" only - // one iteration of the range (despite the real number of iterations in the - // range). So "sum" is just number of calls of "Body". And it should be - // less than N if at least one range was greater than 1. - ASSERT( sum < N, "The loop was not vectorized." ); - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_parallel_invoke.cpp b/src/tbb/src/test/test_parallel_invoke.cpp deleted file mode 100644 index 9fc97f68b..000000000 --- a/src/tbb/src/test/test_parallel_invoke.cpp +++ /dev/null @@ -1,322 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning(disable: 4180) // "qualifier applied to function type has no meaning; ignored" -#endif - -#ifndef TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE - #define TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE __TBB_CPF_BUILD -#endif - -#include "tbb/parallel_invoke.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/atomic.h" -#include "tbb/tbb_exception.h" -#include "harness.h" - -#if !__INTEL_COMPILER && (_MSC_VER && _MSC_VER <= 1400 || __GNUC__==3 && __GNUC_MINOR__<=3 || __SUNPRO_CC) - #define __TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN 1 -#endif - -static const size_t MAX_NUMBER_OF_PINVOKE_ARGS = 10; -tbb::atomic<size_t> function_counter; - -// Some macros to make the test easier to read - -// 10 functions test0 ... 
test9 are defined -// pointer to each function is also defined - -#define TEST_FUNCTION(value) void test##value () \ -{ \ - ASSERT(!(function_counter & (1 << value)), "Test function has already been called"); \ - function_counter += 1 << value; \ -} \ -void (*test_pointer##value)(void) = test##value; - -TEST_FUNCTION(0) -TEST_FUNCTION(1) -TEST_FUNCTION(2) -TEST_FUNCTION(3) -TEST_FUNCTION(4) -TEST_FUNCTION(5) -TEST_FUNCTION(6) -TEST_FUNCTION(7) -TEST_FUNCTION(8) -TEST_FUNCTION(9) - -// The same with functors -#define TEST_FUNCTOR(value) class test_functor##value \ -{ \ -public: \ - void operator() () const { \ - function_counter += 1 << value; \ - } \ -} functor##value; - -TEST_FUNCTOR(0) -TEST_FUNCTOR(1) -TEST_FUNCTOR(2) -TEST_FUNCTOR(3) -TEST_FUNCTOR(4) -TEST_FUNCTOR(5) -TEST_FUNCTOR(6) -TEST_FUNCTOR(7) -TEST_FUNCTOR(8) -TEST_FUNCTOR(9) - -#define INIT_TEST function_counter = 0; - -#define VALIDATE_INVOKE_RUN(number_of_args, test_type) \ - ASSERT( (size_t)function_counter == (size_t)(1 << number_of_args) - 1, "parallel_invoke called with " #number_of_args " arguments didn't process all " #test_type); - -// Calls parallel_invoke for different number of arguments -// It can be called with and without user context -template <typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, - typename F6, typename F7, typename F8, typename F9> -void call_parallel_invoke( size_t n, F0& f0, F1& f1, F2& f2, F3& f3, F4 &f4, F5 &f5, - F6& f6, F7 &f7, F8 &f8, F9 &f9, tbb::task_group_context* context) { - switch(n) { - default: - ASSERT(false, "number of arguments must be between 2 and 10"); - case 2: - if (context) - tbb::parallel_invoke (f0, f1, *context); - else - tbb::parallel_invoke (f0, f1); - break; - case 3: - if (context) - tbb::parallel_invoke (f0, f1, f2, *context); - else - tbb::parallel_invoke (f0, f1, f2); - break; - case 4: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3); - break; - case 5: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, f4, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3, f4); - break; - case 6: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5); - break; - case 7: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6); - break; - case 8: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7); - break; - case 9: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8); - break; - case 10: - if(context) - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, *context); - else - tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8, f9); - break; - } -} - -#if !__TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN -template<typename function> void aux_invoke(const function& f) { - f(); -} - -bool function_by_constref_in_template_codegen_broken() { - function_counter = 0; - aux_invoke(test1); - return function_counter==0; -} -#endif /* !__TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN */ - -void test_parallel_invoke() -{ - REMARK (__FUNCTION__); - // Testing with pointers to functions - for (int n = 2; n <=10; n++) - { - INIT_TEST; - call_parallel_invoke(n, test_pointer0, test_pointer1, test_pointer2, test_pointer3, test_pointer4, - 
test_pointer5, test_pointer6, test_pointer7, test_pointer8, test_pointer9, NULL); - VALIDATE_INVOKE_RUN(n, "pointers to function"); - } - - // Testing parallel_invoke with functors - for (int n = 2; n <=10; n++) - { - INIT_TEST; - call_parallel_invoke(n, functor0, functor1, functor2, functor3, functor4, - functor5, functor6, functor7, functor8, functor9, NULL); - VALIDATE_INVOKE_RUN(n, "functors"); - } - -#if __TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN - // some old compilers can't cope with passing function name into parallel_invoke -#else - // and some compile but generate broken code that does not call the function - if (function_by_constref_in_template_codegen_broken()) - return; - - // Testing parallel_invoke with functions - for (int n = 2; n <=10; n++) - { - INIT_TEST; - call_parallel_invoke(n, test0, test1, test2, test3, test4, test5, test6, test7, test8, test9, NULL); - VALIDATE_INVOKE_RUN(n, "functions"); - } -#endif -} - -// Exception handling support test - -#if __TBB_TASK_GROUP_CONTEXT -#define HARNESS_EH_SIMPLE_MODE 1 -#include "harness_eh.h" - -#if TBB_USE_EXCEPTIONS -volatile size_t exception_mask; // each bit represents whether the function should throw exception or not - -// throws exception if corresponding exception_mask bit is set -#define TEST_FUNCTOR_WITH_THROW(value) \ -struct throwing_functor##value { \ - void operator() () const { \ - if (exception_mask & (1 << value)) \ - ThrowTestException(); \ - } \ -} test_with_throw##value; - -TEST_FUNCTOR_WITH_THROW(0) -TEST_FUNCTOR_WITH_THROW(1) -TEST_FUNCTOR_WITH_THROW(2) -TEST_FUNCTOR_WITH_THROW(3) -TEST_FUNCTOR_WITH_THROW(4) -TEST_FUNCTOR_WITH_THROW(5) -TEST_FUNCTOR_WITH_THROW(6) -TEST_FUNCTOR_WITH_THROW(7) -TEST_FUNCTOR_WITH_THROW(8) -TEST_FUNCTOR_WITH_THROW(9) - -void TestExceptionHandling() -{ - REMARK (__FUNCTION__); - for( size_t n = 2; n <= 10; ++n ) { - for( exception_mask = 1; exception_mask < (size_t) (1 << n); ++exception_mask ) { - ResetEhGlobals(); - TRY(); - REMARK("Calling parallel_invoke, number of functions = %d, exception_mask = %d\n", n, exception_mask); - call_parallel_invoke(n, test_with_throw0, test_with_throw1, test_with_throw2, test_with_throw3, - test_with_throw4, test_with_throw5, test_with_throw6, test_with_throw7, test_with_throw8, test_with_throw9, NULL); - CATCH_AND_ASSERT(); - } - } -} -#endif /* TBB_USE_EXCEPTIONS */ - -// Cancelation support test -void function_to_cancel() { - ++g_CurExecuted; - CancellatorTask::WaitUntilReady(); -} - -// The function is used to test cancellation -void simple_test_nothrow (){ - ++g_CurExecuted; -} - -size_t g_numFunctions, - g_functionToCancel; - -class ParInvokeLauncherTask : public tbb::task -{ - tbb::task_group_context &my_ctx; - void(*func_array[10])(void); - - tbb::task* execute () { - func_array[g_functionToCancel] = &function_to_cancel; - call_parallel_invoke(g_numFunctions, func_array[0], func_array[1], func_array[2], func_array[3], - func_array[4], func_array[5], func_array[6], func_array[7], func_array[8], func_array[9], &my_ctx); - return NULL; - } -public: - ParInvokeLauncherTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) { - for (int i = 0; i <=9; ++i) - func_array[i] = &simple_test_nothrow; - } -}; - -void TestCancellation () -{ - REMARK (__FUNCTION__); - for ( int n = 2; n <= 10; ++n ) { - for ( int m = 0; m <= n - 1; ++m ) { - g_numFunctions = n; - g_functionToCancel = m; - ResetEhGlobals(); - RunCancellationTest<ParInvokeLauncherTask, CancellatorTask>(); - } - } -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - 
-//------------------------------------------------------------------------ -// Entry point -//------------------------------------------------------------------------ - -#include "harness_cpu.h" - -int TestMain () { - MinThread = min(MinThread, MaxThread); - ASSERT (MinThread>=1, "Minimal number of threads must be 1 or more"); - for ( int p = MinThread; p <= MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - test_parallel_invoke(); - if (p > 1) { -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - REPORT("Known issue: exception handling tests are skipped.\n"); -#elif TBB_USE_EXCEPTIONS - TestExceptionHandling(); -#endif /* TBB_USE_EXCEPTIONS */ -#if __TBB_TASK_GROUP_CONTEXT - TestCancellation(); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - TestCPUUserTime(p); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_parallel_pipeline.cpp b/src/tbb/src/test/test_parallel_pipeline.cpp deleted file mode 100644 index 511686128..000000000 --- a/src/tbb/src/test/test_parallel_pipeline.cpp +++ /dev/null @@ -1,708 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Before including pipeline.h, set up the variable to count heap allocated -// filter_node objects, and make it known for the header. -int filter_node_count = 0; -#define __TBB_TEST_FILTER_NODE_COUNT filter_node_count -#include "tbb/pipeline.h" - -#include "tbb/atomic.h" -#include "harness.h" -#include <string.h> - -#include "tbb/tbb_allocator.h" -#include "tbb/spin_mutex.h" - -const unsigned n_tokens = 8; -// we can conceivably have two buffers used in the middle filter for every token in flight, so -// we must allocate two buffers for every token. Unlikely, but possible. 
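-// With n_tokens == 8 above, this yields n_buffers == 16: the worst case of two
-// live buffers per in-flight token.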
-const unsigned n_buffers = 2*n_tokens; -const unsigned max_counter = 16; -static tbb::atomic<int> output_counter; -static tbb::atomic<int> input_counter; -static tbb::atomic<int> check_type_counter; -static tbb::atomic<int> non_pointer_specialized_calls; -static tbb::atomic<int> pointer_specialized_calls; -static tbb::atomic<int> first_pointer_specialized_calls; -static tbb::atomic<int> second_pointer_specialized_calls; -static tbb::spin_mutex buffer_mutex; - -static int intbuffer[max_counter]; // store results for <int,int> parallel pipeline test -static bool check_intbuffer; - -static void* buffers[n_buffers]; -static bool buf_available[n_buffers]; - -void *fetchNextBuffer() { - tbb::spin_mutex::scoped_lock sl1(buffer_mutex); - for(size_t icnt = 0; icnt < n_buffers; ++icnt) { - if(buf_available[icnt]) { - buf_available[icnt] = false; - return buffers[icnt]; - } - } - ASSERT(0, "Ran out of buffers"); - return 0; -} -void freeBuffer(void *buf) { - for(size_t i=0; i < n_buffers;++i) { - if(buffers[i] == buf) { - buf_available[i] = true; - return; - } - } - ASSERT(0, "Tried to free a buffer not in our list"); -} - -template<typename T> -class free_on_scope_exit { -public: - free_on_scope_exit(T *p) : my_p(p) {} - ~free_on_scope_exit() { if(!my_p) return; my_p->~T(); freeBuffer(my_p); } -private: - T *my_p; -}; - -template<class Counter> -class check_type : Harness::NoAfterlife { - Counter id; - bool am_ready; -public: - check_type( ) : id(0), am_ready(false) { - ++check_type_counter; - } - - check_type(const check_type& other) : Harness::NoAfterlife(other) { - other.AssertLive(); - AssertLive(); - id = other.id; - am_ready = other.am_ready; - ++check_type_counter; - } - - ~check_type() { - AssertLive(); - --check_type_counter; - ASSERT(check_type_counter >= 0, "too many destructions"); - } - - unsigned int my_id() { AssertLive(); return id; } - bool is_ready() { AssertLive(); return am_ready; } - void function() { - AssertLive(); - if( id == 0 ) { - id = 1; - am_ready = true; - } - } -}; - -// methods for testing check_type< >, that return okay values for other types. -template<typename T> -bool middle_is_ready(T &/*p*/) { return false; } - -template<typename U> -bool middle_is_ready(check_type<U> &p) { return p.is_ready(); } - -template<typename T> -bool output_is_ready(T &/*p*/) { return true; } - -template<typename U> -bool output_is_ready(check_type<U> &p) { return p.is_ready(); } - -template<typename T> -int middle_my_id( T &/*p*/) { return 0; } - -template<typename U> -int middle_my_id(check_type<U> &p) { return p.my_id(); } - -template<typename T> -int output_my_id( T &/*p*/) { return 1; } - -template<typename U> -int output_my_id(check_type<U> &p) { return p.my_id(); } - -template<typename T> -void my_function(T &p) { p = 0; } - -template<typename U> -void my_function(check_type<U> &p) { p.function(); } - -// Filters must be copy-constructible, and be const-qualifiable. 
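-// The primary template handles by-value types; the specializations below cover
-// pointer, void, and int inputs. Each variant increments its own call counter
-// so checkCounters() can verify that the expected specialization was chosen.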
-template<typename U> -class input_filter : Harness::NoAfterlife { -public: - U operator()( tbb::flow_control& control ) const { - AssertLive(); - if( --input_counter < 0 ) { - control.stop(); - } - else // only count successful reads - ++non_pointer_specialized_calls; - return U(); // default constructed - } - -}; - -// specialization for pointer -template<typename U> -class input_filter<U*> : Harness::NoAfterlife { -public: - U* operator()(tbb::flow_control& control) const { - AssertLive(); - int ival = --input_counter; - if(ival < 0) { - control.stop(); - return NULL; - } - ++pointer_specialized_calls; - if(ival == max_counter / 2) { - return NULL; // non-stop NULL - } - U* myReturn = new(fetchNextBuffer()) U(); - return myReturn; - } -}; - -template<> -class input_filter<void> : Harness::NoAfterlife { -public: - void operator()( tbb::flow_control& control ) const { - AssertLive(); - if( --input_counter < 0 ) { - control.stop(); - } - else - ++non_pointer_specialized_calls; - } - -}; - -// specialization for int that passes back a sequence of integers -template<> -class input_filter<int> : Harness::NoAfterlife { -public: - int - operator()(tbb::flow_control& control ) const { - AssertLive(); - int oldval = --input_counter; - if( oldval < 0 ) { - control.stop(); - } - else - ++non_pointer_specialized_calls; - return oldval+1; - } -}; - -template<typename T, typename U> -class middle_filter : Harness::NoAfterlife { -public: - U operator()(T t) const { - AssertLive(); - ASSERT(!middle_my_id(t), "bad id value"); - ASSERT(!middle_is_ready(t), "Already ready" ); - U out; - my_function(out); - ++non_pointer_specialized_calls; - return out; - } -}; - -template<typename T, typename U> -class middle_filter<T*,U> : Harness::NoAfterlife { -public: - U operator()(T* my_storage) const { - free_on_scope_exit<T> my_ptr(my_storage); // free_on_scope_exit marks the buffer available - AssertLive(); - if(my_storage) { // may have been passed in a NULL - ASSERT(!middle_my_id(*my_storage), "bad id value"); - ASSERT(!middle_is_ready(*my_storage), "Already ready" ); - } - ++first_pointer_specialized_calls; - U out; - my_function(out); - return out; - } -}; - -template<typename T, typename U> -class middle_filter<T,U*> : Harness::NoAfterlife { -public: - U* operator()(T my_storage) const { - AssertLive(); - ASSERT(!middle_my_id(my_storage), "bad id value"); - ASSERT(!middle_is_ready(my_storage), "Already ready" ); - // allocate new space from buffers - U* my_return = new(fetchNextBuffer()) U(); - my_function(*my_return); - ++second_pointer_specialized_calls; - return my_return; - } -}; - -template<typename T, typename U> -class middle_filter<T*,U*> : Harness::NoAfterlife { -public: - U* operator()(T* my_storage) const { - free_on_scope_exit<T> my_ptr(my_storage); // free_on_scope_exit marks the buffer available - AssertLive(); - if(my_storage) { - ASSERT(!middle_my_id(*my_storage), "bad id value"); - ASSERT(!middle_is_ready(*my_storage), "Already ready" ); - } - // may have been passed a NULL - ++pointer_specialized_calls; - if(!my_storage) return NULL; - ASSERT(!middle_my_id(*my_storage), "bad id value"); - ASSERT(!middle_is_ready(*my_storage), "Already ready" ); - U* my_return = new(fetchNextBuffer()) U(); - my_function(*my_return); - return my_return; - } -}; - -// specialization for int that squares the input and returns that. 
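-// Together with output_filter<int> below, this drives the <int,int> pipeline:
-// the squares land in intbuffer, and checkCounters() verifies that each value
-// i*i is present when check_intbuffer is set.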
-template<>
-class middle_filter<int,int> : Harness::NoAfterlife {
-public:
-    int operator()(int my_input) const {
-        AssertLive();
-        ++non_pointer_specialized_calls;
-        return my_input*my_input;
-    }
-};
-
-// ---------------------------------
-template<typename T>
-class output_filter : Harness::NoAfterlife {
-public:
-    void operator()(T c) const {
-        AssertLive();
-        ASSERT(output_my_id(c), "unset id value");
-        ASSERT(output_is_ready(c), "not yet ready");
-        ++non_pointer_specialized_calls;
-        output_counter++;
-    }
-};
-
-// specialization for int that puts the received value in an array
-template<>
-class output_filter<int> : Harness::NoAfterlife {
-public:
-    void operator()(int my_input) const {
-        AssertLive();
-        ++non_pointer_specialized_calls;
-        int myindx = output_counter++;
-        intbuffer[myindx] = my_input;
-    }
-};
-
-
-template<typename T>
-class output_filter<T*> : Harness::NoAfterlife {
-public:
-    void operator()(T* c) const {
-        free_on_scope_exit<T> my_ptr(c);
-        AssertLive();
-        if(c) {
-            ASSERT(output_my_id(*c), "unset id value");
-            ASSERT(output_is_ready(*c), "not yet ready");
-        }
-        output_counter++;
-        ++pointer_specialized_calls;
-    }
-};
-
-typedef enum {
-    no_pointer_counts,
-    assert_nonpointer,
-    assert_firstpointer,
-    assert_secondpointer,
-    assert_allpointer
-} final_assert_type;
-
-void resetCounters() {
-    output_counter = 0;
-    input_counter = max_counter;
-    non_pointer_specialized_calls = 0;
-    pointer_specialized_calls = 0;
-    first_pointer_specialized_calls = 0;
-    second_pointer_specialized_calls = 0;
-    // We have to reset the buffer flags because our input filters return allocated space on end-of-input
-    // (on eof a default-constructed object is returned), and they do not pass through the filter further.
-    for(size_t i = 0; i < n_buffers; ++i)
-        buf_available[i] = true;
-}
-
-void checkCounters(final_assert_type my_t) {
-    ASSERT(output_counter == max_counter, "not all tokens were passed through pipeline");
-    switch(my_t) {
-        case assert_nonpointer:
-            ASSERT(pointer_specialized_calls+first_pointer_specialized_calls+second_pointer_specialized_calls == 0, "non-pointer filters specialized to pointer");
-            ASSERT(non_pointer_specialized_calls == 3*max_counter, "bad count for non-pointer filters");
-            if(check_intbuffer) {
-                for(int i = 1; i <= (int)max_counter; ++i) {
-                    int j = i*i;
-                    bool found_val = false;
-                    for(int k = 0; k < (int)max_counter; ++k) {
-                        if(intbuffer[k] == j) {
-                            found_val = true;
-                            break;
-                        }
-                    }
-                    ASSERT(found_val, "Missing value in output array" );
-                }
-            }
-            break;
-        case assert_firstpointer:
-            ASSERT(pointer_specialized_calls == max_counter &&   // input filter extra invocation
-                   first_pointer_specialized_calls == max_counter &&
-                   non_pointer_specialized_calls == max_counter &&
-                   second_pointer_specialized_calls == 0, "incorrect specialization for firstpointer");
-            break;
-        case assert_secondpointer:
-            ASSERT(pointer_specialized_calls == max_counter &&
-                   first_pointer_specialized_calls == 0 &&
-                   non_pointer_specialized_calls == max_counter &&   // input filter
-                   second_pointer_specialized_calls == max_counter, "incorrect specialization for secondpointer");
-            break;
-        case assert_allpointer:
-            ASSERT(non_pointer_specialized_calls+first_pointer_specialized_calls+second_pointer_specialized_calls == 0, "pointer filters specialized to non-pointer");
-            ASSERT(pointer_specialized_calls == 3*max_counter, "bad count for pointer filters");
-            break;
-        case no_pointer_counts:
-            break;
-    }
-    if(check_type_counter > 0) {
-        REMARK("check_type_counter == %lu\n", (unsigned
long)check_type_counter); - } - ASSERT(!check_type_counter, "Error in check_type creation/destruction"); -} - -static const tbb::filter::mode filter_table[] = { tbb::filter::parallel, tbb::filter::serial_in_order, tbb::filter::serial_out_of_order}; -const unsigned number_of_filter_types = sizeof(filter_table)/sizeof(filter_table[0]); - -typedef tbb::filter_t<void, void> filter_chain; -typedef tbb::filter::mode mode_array; - -// The filters are passed by value, which forces a temporary copy to be created. This is -// to reproduce the bug where a filter_chain uses refs to filters, which after a call -// would be references to destructed temporaries. -template<typename type1, typename type2> -void fill_chain( filter_chain &my_chain, mode_array *filter_type, input_filter<type1> i_filter, - middle_filter<type1, type2> m_filter, output_filter<type2> o_filter ) { - my_chain = tbb::make_filter<void, type1>(filter_type[0], i_filter) & - tbb::make_filter<type1, type2>(filter_type[1], m_filter) & - tbb::make_filter<type2, void>(filter_type[2], o_filter); -} - -void run_function_spec() { - ASSERT(!filter_node_count, NULL); - REMARK("Testing < void, void > (single filter in pipeline)"); -#if __TBB_LAMBDAS_PRESENT - REMARK( " ( + lambdas)"); -#endif - REMARK("\n"); - input_filter<void> i_filter; - // Test pipeline that contains only one filter - for( unsigned i = 0; i<number_of_filter_types; i++) { - tbb::filter_t<void, void> one_filter( filter_table[i], i_filter ); - ASSERT(filter_node_count==1, "some filter nodes left after previous iteration?"); - resetCounters(); - tbb::parallel_pipeline( n_tokens, one_filter ); - // no need to check counters -#if __TBB_LAMBDAS_PRESENT - tbb::atomic<int> counter; - counter = max_counter; - // Construct filter using lambda-syntax when parallel_pipeline() is being run; - tbb::parallel_pipeline( n_tokens, - tbb::make_filter<void, void>(filter_table[i], [&counter]( tbb::flow_control& control ) { - if( counter-- == 0 ) - control.stop(); - } - ) - ); -#endif - } - ASSERT(!filter_node_count, "filter_node objects leaked"); -} - -template<typename t1, typename t2> -void run_filter_set( - input_filter<t1>& i_filter, - middle_filter<t1,t2>& m_filter, - output_filter<t2>& o_filter, - mode_array *filter_type, - final_assert_type my_t) { - tbb::filter_t<void, t1> filter1( filter_type[0], i_filter ); - tbb::filter_t<t1, t2> filter2( filter_type[1], m_filter ); - tbb::filter_t<t2, void> filter3( filter_type[2], o_filter ); - ASSERT(filter_node_count==3, "some filter nodes left after previous iteration?"); - resetCounters(); - // Create filters sequence when parallel_pipeline() is being run - tbb::parallel_pipeline( n_tokens, filter1 & filter2 & filter3 ); - checkCounters(my_t); - - // Create filters sequence partially outside parallel_pipeline() and also when parallel_pipeline() is being run - tbb::filter_t<void, t2> filter12; - filter12 = filter1 & filter2; - resetCounters(); - tbb::parallel_pipeline( n_tokens, filter12 & filter3 ); - checkCounters(my_t); - - tbb::filter_t<void, void> filter123 = filter12 & filter3; - // Run pipeline twice with the same filter sequence - for( unsigned i = 0; i<2; i++ ) { - resetCounters(); - tbb::parallel_pipeline( n_tokens, filter123 ); - checkCounters(my_t); - } - - // Now copy-construct another filter_t instance, and use it to run pipeline - { - tbb::filter_t<void, void> copy123( filter123 ); - resetCounters(); - tbb::parallel_pipeline( n_tokens, copy123 ); - checkCounters(my_t); - } - - // Construct filters and create the sequence when 
parallel_pipeline() is being run - resetCounters(); - tbb::parallel_pipeline( n_tokens, - tbb::make_filter<void, t1>(filter_type[0], i_filter) & - tbb::make_filter<t1, t2>(filter_type[1], m_filter) & - tbb::make_filter<t2, void>(filter_type[2], o_filter) ); - checkCounters(my_t); - - // Construct filters, make a copy, destroy the original filters, and run with the copy - int cnt = filter_node_count; - { - tbb::filter_t<void, void>* p123 = new tbb::filter_t<void,void> ( - tbb::make_filter<void, t1>(filter_type[0], i_filter) & - tbb::make_filter<t1, t2>(filter_type[1], m_filter) & - tbb::make_filter<t2, void>(filter_type[2], o_filter) ); - ASSERT(filter_node_count==cnt+5, "filter node accounting error?"); - tbb::filter_t<void, void> copy123( *p123 ); - delete p123; - ASSERT(filter_node_count==cnt+5, "filter nodes deleted prematurely?"); - resetCounters(); - tbb::parallel_pipeline( n_tokens, copy123 ); - checkCounters(my_t); - } - - // construct a filter with temporaries - { - tbb::filter_t<void, void> my_filter; - fill_chain<t1,t2>( my_filter, filter_type, i_filter, m_filter, o_filter ); - resetCounters(); - tbb::parallel_pipeline( n_tokens, my_filter ); - checkCounters(my_t); - } - ASSERT(filter_node_count==cnt, "scope ended but filter nodes not deleted?"); -} - -#if __TBB_LAMBDAS_PRESENT -template <typename t1, typename t2> -void run_lambdas_test( mode_array *filter_type ) { - tbb::atomic<int> counter; - counter = max_counter; - // Construct filters using lambda-syntax and create the sequence when parallel_pipeline() is being run; - resetCounters(); // only need the output_counter reset. - tbb::parallel_pipeline( n_tokens, - tbb::make_filter<void, t1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1 { - if( --counter < 0 ) - control.stop(); - return t1(); } - ) & - tbb::make_filter<t1, t2>(filter_type[1], []( t1 /*my_storage*/ ) -> t2 { - return t2(); } - ) & - tbb::make_filter<t2, void>(filter_type[2], [] ( t2 ) -> void { - output_counter++; } - ) - ); - checkCounters(no_pointer_counts); // don't have to worry about specializations - counter = max_counter; - // pointer filters - resetCounters(); - tbb::parallel_pipeline( n_tokens, - tbb::make_filter<void, t1*>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1* { - if( --counter < 0 ) { - control.stop(); - return NULL; - } - return new(fetchNextBuffer()) t1(); } - ) & - tbb::make_filter<t1*, t2*>(filter_type[1], []( t1* my_storage ) -> t2* { - tbb::tbb_allocator<t1>().destroy(my_storage); // my_storage->~t1(); - return new(my_storage) t2(); } - ) & - tbb::make_filter<t2*, void>(filter_type[2], [] ( t2* my_storage ) -> void { - tbb::tbb_allocator<t2>().destroy(my_storage); // my_storage->~t2(); - freeBuffer(my_storage); - output_counter++; } - ) - ); - checkCounters(no_pointer_counts); - // first filter outputs pointer - counter = max_counter; - resetCounters(); - tbb::parallel_pipeline( n_tokens, - tbb::make_filter<void, t1*>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1* { - if( --counter < 0 ) { - control.stop(); - return NULL; - } - return new(fetchNextBuffer()) t1(); } - ) & - tbb::make_filter<t1*, t2>(filter_type[1], []( t1* my_storage ) -> t2 { - tbb::tbb_allocator<t1>().destroy(my_storage); // my_storage->~t1(); - freeBuffer(my_storage); - return t2(); } - ) & - tbb::make_filter<t2, void>(filter_type[2], [] ( t2 /*my_storage*/) -> void { - output_counter++; } - ) - ); - checkCounters(no_pointer_counts); - // second filter outputs pointer - counter = max_counter; - resetCounters(); - 
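-    // Here only the middle stage allocates: the first stage returns t1 by value,
-    // and the output stage destroys and frees the t2 buffer it receives.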
tbb::parallel_pipeline( n_tokens, - tbb::make_filter<void, t1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1 { - if( --counter < 0 ) { - control.stop(); - } - return t1(); } - ) & - tbb::make_filter<t1, t2*>(filter_type[1], []( t1 /*my_storage*/ ) -> t2* { - return new(fetchNextBuffer()) t2(); } - ) & - tbb::make_filter<t2*, void>(filter_type[2], [] ( t2* my_storage) -> void { - tbb::tbb_allocator<t2>().destroy(my_storage); // my_storage->~t2(); - freeBuffer(my_storage); - output_counter++; } - ) - ); - checkCounters(no_pointer_counts); -} -#endif - -template<typename type1, typename type2> -void run_function(const char *l1, const char *l2) { - ASSERT(!filter_node_count, NULL); - REMARK("Testing < %s, %s >", l1, l2 ); -#if __TBB_LAMBDAS_PRESENT - REMARK( " ( + lambdas)"); -#endif - check_intbuffer = (!strcmp(l1,"int") && !strcmp(l2,"int")); - if(check_intbuffer) REMARK(", check output of filters"); - REMARK("\n"); - - const size_t number_of_filters = 3; - - input_filter<type1> i_filter; - input_filter<type1*> p_i_filter; - - middle_filter<type1, type2> m_filter; - middle_filter<type1*, type2> pr_m_filter; - middle_filter<type1, type2*> rp_m_filter; - middle_filter<type1*, type2*> pp_m_filter; - - output_filter<type2> o_filter; - output_filter<type2*> p_o_filter; - - // allocate the buffers for the filters - unsigned max_size = (sizeof(type1) > sizeof(type2) ) ? sizeof(type1) : sizeof(type2); - for(unsigned i = 0; i < (unsigned)n_buffers; ++i) { - buffers[i] = malloc(max_size); - buf_available[i] = true; - } - - unsigned limit = 1; - // Test pipeline that contains number_of_filters filters - for( unsigned i=0; i<number_of_filters; ++i) - limit *= number_of_filter_types; - // Iterate over possible filter sequences - for( unsigned numeral=0; numeral<limit; ++numeral ) { - unsigned temp = numeral; - tbb::filter::mode filter_type[number_of_filter_types]; - for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types ) - filter_type[i] = filter_table[temp%number_of_filter_types]; - - run_filter_set<type1,type2>(i_filter, m_filter, o_filter, filter_type, assert_nonpointer ); - run_filter_set<type1*,type2>(p_i_filter, pr_m_filter, o_filter, filter_type, assert_firstpointer); - run_filter_set<type1,type2*>(i_filter, rp_m_filter, p_o_filter, filter_type, assert_secondpointer); - run_filter_set<type1*,type2*>(p_i_filter, pp_m_filter, p_o_filter, filter_type, assert_allpointer); - -#if __TBB_LAMBDAS_PRESENT - run_lambdas_test<type1,type2>(filter_type); -#endif - } - ASSERT(!filter_node_count, "filter_node objects leaked"); - - for(unsigned i = 0; i < (unsigned)n_buffers; ++i) { - free(buffers[i]); - } -} - -#include "tbb/task_scheduler_init.h" - -int TestMain() { -#if TBB_USE_DEBUG - // size and copyability. - REMARK("is_large_object<int>::value=%d\n", tbb::interface6::internal::is_large_object<int>::value); - REMARK("is_large_object<double>::value=%d\n", tbb::interface6::internal::is_large_object<double>::value); - REMARK("is_large_object<int *>::value=%d\n", tbb::interface6::internal::is_large_object<int *>::value); - REMARK("is_large_object<check_type<int> >::value=%d\n", tbb::interface6::internal::is_large_object<check_type<int> >::value); - REMARK("is_large_object<check_type<int>* >::value=%d\n", tbb::interface6::internal::is_large_object<check_type<int>* >::value); - REMARK("is_large_object<check_type<short> >::value=%d\n\n", tbb::interface6::internal::is_large_object<check_type<short> >::value); -#endif - // Test with varying number of threads. 
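-    // Each pass re-creates the scheduler with nthread workers and re-runs the
-    // full set of filter-mode combinations for several type pairs.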
- for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - // Initialize TBB task scheduler - REMARK("\nTesting with nthread=%d\n", nthread); - tbb::task_scheduler_init init(nthread); - - // Run test several times with different types - run_function_spec(); - run_function<size_t,int>("size_t", "int"); - run_function<int,double>("int", "double"); - run_function<size_t,double>("size_t", "double"); - run_function<size_t,bool>("size_t", "bool"); - run_function<int,int>("int","int"); - check_type_counter = 0; - run_function<check_type<unsigned int>,size_t>("check_type<unsigned int>", "size_t"); - ASSERT(!check_type_counter, "Error in check_type<unsigned int> creation/destruction"); - run_function<check_type<unsigned short>,size_t>("check_type<unsigned short>", "size_t"); - ASSERT(!check_type_counter, "Error in check_type<unsigned short> creation/destruction"); - run_function<check_type<unsigned int>, check_type<unsigned int> >("check_type<unsigned int>", "check_type<unsigned int>"); - run_function<check_type<unsigned int>, check_type<unsigned short> >("check_type<unsigned int>", "check_type<unsigned short>"); - ASSERT(!check_type_counter, "Error in check_type<unsigned int> creation/destruction"); - run_function<check_type<unsigned short>, check_type<unsigned short> >("check_type<unsigned short>", "check_type<unsigned short>"); - ASSERT(!check_type_counter, "Error in check_type<unsigned short> creation/destruction"); - run_function<double, check_type<unsigned short> >("double", "check_type<unsigned short>"); - ASSERT(!check_type_counter, "Error in check_type<unsigned short> creation/destruction"); - } - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_parallel_reduce.cpp b/src/tbb/src/test/test_parallel_reduce.cpp deleted file mode 100644 index 67bd8e762..000000000 --- a/src/tbb/src/test/test_parallel_reduce.cpp +++ /dev/null @@ -1,317 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/parallel_reduce.h" -#include "tbb/atomic.h" -#include "harness_assert.h" - -using namespace std; - -static tbb::atomic<long> ForkCount; -static tbb::atomic<long> FooBodyCount; - -//! 
Class with public interface that is exactly minimal requirements for Range concept -class MinimalRange { - size_t begin, end; - friend class FooBody; - explicit MinimalRange( size_t i ) : begin(0), end(i) {} - friend void Flog( int nthread, bool inteference ); -public: - MinimalRange( MinimalRange& r, tbb::split ) : end(r.end) { - begin = r.end = (r.begin+r.end)/2; - } - bool is_divisible() const {return end-begin>=2;} - bool empty() const {return begin==end;} -}; - -//! Class with public interface that is exactly minimal requirements for Body of a parallel_reduce -class FooBody { -private: - FooBody( const FooBody& ); // Deny access - void operator=( const FooBody& ); // Deny access - friend void Flog( int nthread, bool interference ); - //! Parent that created this body via split operation. NULL if original body. - FooBody* parent; - //! Total number of index values processed by body and its children. - size_t sum; - //! Number of join operations done so far on this body and its children. - long join_count; - //! Range that has been processed so far by this body and its children. - size_t begin, end; - //! True if body has not yet been processed at least once by operator(). - bool is_new; - //! 1 if body was created by split; 0 if original body. - int forked; - FooBody() {++FooBodyCount;} -public: - ~FooBody() { - forked = 0xDEADBEEF; - sum=0xDEADBEEF; - join_count=0xDEADBEEF; - --FooBodyCount; - } - FooBody( FooBody& other, tbb::split ) { - ++FooBodyCount; - ++ForkCount; - sum = 0; - parent = &other; - join_count = 0; - is_new = true; - forked = 1; - } - void join( FooBody& s ) { - ASSERT( s.forked==1, NULL ); - ASSERT( this!=&s, NULL ); - ASSERT( this==s.parent, NULL ); - ASSERT( end==s.begin, NULL ); - end = s.end; - sum += s.sum; - join_count += s.join_count + 1; - s.forked = 2; - } - void operator()( const MinimalRange& r ) { - for( size_t k=r.begin; k<r.end; ++k ) - ++sum; - if( is_new ) { - is_new = false; - begin = r.begin; - } else - ASSERT( end==r.begin, NULL ); - end = r.end; - } -}; - -#include <cstdio> -#include "harness.h" -#include "tbb/tick_count.h" - -void Flog( int nthread, bool interference=false ) { - for (int mode = 0; mode < 4; mode++) { - tbb::tick_count T0 = tbb::tick_count::now(); - long join_count = 0; - tbb::affinity_partitioner ap; - for( size_t i=0; i<=1000; ++i ) { - FooBody f; - f.sum = 0; - f.parent = NULL; - f.join_count = 0; - f.is_new = true; - f.forked = 0; - f.begin = ~size_t(0); - f.end = ~size_t(0); - ASSERT( FooBodyCount==1, NULL ); - switch (mode) { - case 0: - tbb::parallel_reduce( MinimalRange(i), f ); - break; - case 1: - tbb::parallel_reduce( MinimalRange(i), f, tbb::simple_partitioner() ); - break; - case 2: - tbb::parallel_reduce( MinimalRange(i), f, tbb::auto_partitioner() ); - break; - case 3: - tbb::parallel_reduce( MinimalRange(i), f, ap ); - break; - } - join_count += f.join_count; - ASSERT( FooBodyCount==1, NULL ); - ASSERT( f.sum==i, NULL ); - ASSERT( f.begin==(i==0 ? ~size_t(0) : 0), NULL ); - ASSERT( f.end==(i==0 ? ~size_t(0) : i), NULL ); - } - tbb::tick_count T1 = tbb::tick_count::now(); - REMARK("time=%g join_count=%ld ForkCount=%ld nthread=%d%s\n", - (T1-T0).seconds(),join_count,long(ForkCount), nthread, interference ? 
" with interference":""); - } -} - -#include "tbb/blocked_range.h" - -#if _MSC_VER - typedef tbb::internal::uint64_t ValueType; -#else - typedef uint64_t ValueType; -#endif - -struct Sum { - template<typename T> - T operator() ( const T& v1, const T& v2 ) const { - return v1 + v2; - } -}; - -struct Accumulator { - ValueType operator() ( const tbb::blocked_range<ValueType*>& r, ValueType value ) const { - for ( ValueType* pv = r.begin(); pv != r.end(); ++pv ) - value += *pv; - return value; - } -}; - -void ParallelSum () { - const ValueType I = 0, - N = 1000000, - R = N * (N + 1) / 2; - ValueType *array = new ValueType[N + 1]; - for ( ValueType i = 0; i < N; ++i ) - array[i] = i + 1; - tbb::blocked_range<ValueType*> range(array, array + N); - ValueType r1 = tbb::parallel_reduce( range, I, Accumulator(), Sum() ); - ASSERT( r1 == R, NULL ); -#if __TBB_LAMBDAS_PRESENT - ValueType r2 = tbb::parallel_reduce( range, I, - [](const tbb::blocked_range<ValueType*>& r, ValueType value) -> ValueType { - for ( ValueType* pv = r.begin(); pv != r.end(); ++pv ) - value += *pv; - return value; - }, - Sum() - ); - ASSERT( r2 == R, NULL ); -#endif /* LAMBDAS */ - delete[] array; -} - -const int N = 1000; - -#include "harness_concurrency_tracker.h" - -template <class Op> -struct ReduceBody { - typename Op::Type my_value; - - ReduceBody() : my_value() {} - ReduceBody( ReduceBody &, tbb::split ) : my_value() {} - - void operator() ( const tbb::blocked_range<int>& r ) { - Harness::ConcurrencyTracker ct; - for ( int i = r.begin(); i != r.end(); ++i ) { - Op op; - my_value = op(my_value, i); - } - } - - void join( const ReduceBody& y ) { - Op op; - my_value = op.join(my_value, y.my_value); - } -}; - -template <class Op> -void TestDeterministicReduction () { - typedef typename Op::Type Type; - const tbb::blocked_range<int> range(0, N); - ReduceBody<Op> body; - tbb::parallel_deterministic_reduce( range,body ); - Type R = body.my_value; - for ( int i=0; i<100; ++i ) { - ReduceBody<Op> body2; - tbb::parallel_deterministic_reduce( range,body2 ); - ASSERT( body2.my_value == R, NULL ); -#if __TBB_LAMBDAS_PRESENT - Type r = tbb::parallel_deterministic_reduce( range, Type(), - [](const tbb::blocked_range<int>& br, Type value) -> Type { - Harness::ConcurrencyTracker ct; - for ( int ii = br.begin(); ii != br.end(); ++ii ) { - Op op; - value = op(value, ii); - } - return value; - }, - [](const Type& v1, const Type& v2) -> Type { - Op op; - return op.join(v1,v2); - } - ); - ASSERT( r == R, NULL ); -#endif /* LAMBDAS */ - } - ASSERT_WARNING((Harness::ConcurrencyTracker::PeakParallelism() > 1), "no parallel execution\n"); -} - -class RotOp { -public: - typedef int Type; - int operator() ( int x, int i ) const { - return ( x<<1 ) ^ i; - } - int join( int x, int y ) const { - return operator()( x, y ); - } -}; - -#include "tbb/task_scheduler_init.h" -#include "harness_cpu.h" -#include "test_partitioner.h" - -namespace interaction_with_range_and_partitioner { - -// Test checks compatibility of parallel_reduce algorithm with various range implementations - -void test() { - using namespace test_partitioner_utils::interaction_with_range_and_partitioner; - - test_partitioner_utils::SimpleReduceBody body; - tbb::affinity_partitioner ap; - - parallel_reduce(Range1(true, false), body, ap); - parallel_reduce(Range2(true, false), body, ap); - parallel_reduce(Range3(true, false), body, ap); - parallel_reduce(Range4(false, true), body, ap); - parallel_reduce(Range5(false, true), body, ap); - parallel_reduce(Range6(false, true), body, ap); - - 
parallel_reduce(Range1(false, true), body, tbb::simple_partitioner()); - parallel_reduce(Range2(false, true), body, tbb::simple_partitioner()); - parallel_reduce(Range3(false, true), body, tbb::simple_partitioner()); - parallel_reduce(Range4(false, true), body, tbb::simple_partitioner()); - parallel_reduce(Range5(false, true), body, tbb::simple_partitioner()); - parallel_reduce(Range6(false, true), body, tbb::simple_partitioner()); - - parallel_reduce(Range1(false, true), body, tbb::auto_partitioner()); - parallel_reduce(Range2(false, true), body, tbb::auto_partitioner()); - parallel_reduce(Range3(false, true), body, tbb::auto_partitioner()); - parallel_reduce(Range4(false, true), body, tbb::auto_partitioner()); - parallel_reduce(Range5(false, true), body, tbb::auto_partitioner()); - parallel_reduce(Range6(false, true), body, tbb::auto_partitioner()); -} - -} // interaction_with_range_and_partitioner - - -int TestMain () { - if( MinThread<0 ) { - REPORT("Usage: nthread must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - Flog(p); - ParallelSum(); - if ( p>=2 ) - TestDeterministicReduction<RotOp>(); - // Test that all workers sleep when no work - TestCPUUserTime(p); - } - - interaction_with_range_and_partitioner::test(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_parallel_scan.cpp b/src/tbb/src/test/test_parallel_scan.cpp deleted file mode 100644 index e56bbc94f..000000000 --- a/src/tbb/src/test/test_parallel_scan.cpp +++ /dev/null @@ -1,262 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/parallel_scan.h" -#include "tbb/blocked_range.h" -#include "harness_assert.h" - -typedef tbb::blocked_range<long> Range; - -static volatile bool ScanIsRunning = false; - -//! Sum of 0..i with wrap around on overflow. -inline int TriangularSum( int i ) { - return i&1 ? ((i>>1)+1)*i : (i>>1)*(i+1); -} - -//! Verify that sum is sum of integers in closed interval [start_index..finish_index]. -/** line should be the source line of the caller */ -static void VerifySum( long start_index, long finish_index, int sum, int line ); - -const int MAXN = 2000; - -enum AddendFlag { - UNUSED=0, - USED_NONFINAL=1, - USED_FINAL=2 -}; - -//! 
Array recording how each addend was used. -/** 'unsigned char' instead of AddendFlag for sake of compactness. */ -static unsigned char AddendHistory[MAXN]; - -//! Set to 1 for debugging output -#define PRINT_DEBUG 0 - -#include "tbb/atomic.h" -#if PRINT_DEBUG -#include <stdio.h> -tbb::atomic<long> NextBodyId; -#endif /* PRINT_DEBUG */ - -struct BodyId { -#if PRINT_DEBUG - const int id; - BodyId() : id(NextBodyId++) {} -#endif /* PRINT_DEBUG */ -}; - -tbb::atomic<long> NumberOfLiveAccumulator; - -static void Snooze( bool scan_should_be_running ) { - ASSERT( ScanIsRunning==scan_should_be_running, NULL ); -} - -template<typename T> -class Accumulator: BodyId { - T my_total; - const T* my_array; - T* my_sum; - Range my_range; - //! Equals this while object is fully constructed, NULL otherwise. - /** Used to detect premature destruction and accidental bitwise copy. */ - Accumulator* self; - Accumulator( const T array[], T sum[] ) : - my_total(), my_array(array), my_sum(sum), my_range(-1,-1,1) - { - ++NumberOfLiveAccumulator; - // Set self as last action of constructor, to indicate that object is fully constructed. - self = this; - } - friend void TestAccumulator( int mode, int nthread ); -public: -#if PRINT_DEBUG - void print() const { - REPORT("%d [%ld..%ld)\n", id,my_range.begin(),my_range.end() ); - } -#endif /* PRINT_DEBUG */ - ~Accumulator() { -#if PRINT_DEBUG - REPORT("%d [%ld..%ld) destroyed\n",id,my_range.begin(),my_range.end() ); -#endif /* PRINT_DEBUG */ - // Clear self as first action of destructor, to indicate that object is not fully constructed. - self = 0; - --NumberOfLiveAccumulator; - } - Accumulator( Accumulator& a, tbb::split ) : - my_total(0), my_array(a.my_array), my_sum(a.my_sum), my_range(-1,-1,1) - { - ++NumberOfLiveAccumulator; -#if PRINT_DEBUG - REPORT("%d forked from %d\n",id,a.id); -#endif /* PRINT_DEBUG */ - Snooze(true); - // Set self as last action of constructor, to indicate that object is fully constructed. - self = this; - } - template<typename Tag> - void operator()( const Range& r, Tag /*tag*/ ) { - Snooze(true); -#if PRINT_DEBUG - if( my_range.empty() ) - REPORT("%d computing %s [%ld..%ld)\n",id,Tag::is_final_scan()?"final":"lookahead",r.begin(),r.end() ); - else - REPORT("%d computing %s [%ld..%ld) [%ld..%ld)\n",id,Tag::is_final_scan()?"final":"lookahead",my_range.begin(),my_range.end(),r.begin(),r.end()); -#endif /* PRINT_DEBUG */ - ASSERT( !Tag::is_final_scan() || (my_range.begin()==0 && my_range.end()==r.begin()) || (my_range.empty() && r.begin()==0), NULL ); - for( long i=r.begin(); i<r.end(); ++i ) { - my_total += my_array[i]; - if( Tag::is_final_scan() ) { - ASSERT( AddendHistory[i]<USED_FINAL, "addend used 'finally' twice?" 
); - AddendHistory[i] |= USED_FINAL; - my_sum[i] = my_total; - VerifySum( 0L, i, int(my_sum[i]), __LINE__ ); - } else { - ASSERT( AddendHistory[i]==UNUSED, "addend used too many times" ); - AddendHistory[i] |= USED_NONFINAL; - } - } - if( my_range.empty() ) - my_range = r; - else - my_range = Range(my_range.begin(), r.end(), 1 ); - Snooze(true); - ASSERT( self==this, "this Accumulator corrupted or prematurely destroyed" ); - } - void reverse_join( const Accumulator& left ) { -#if PRINT_DEBUG - REPORT("reverse join %d [%ld..%ld) %d [%ld..%ld)\n", - left.id,left.my_range.begin(),left.my_range.end(), - id,my_range.begin(),my_range.end()); -#endif /* PRINT_DEBUG */ - Snooze(true); - ASSERT( ScanIsRunning, NULL ); - ASSERT( left.my_range.end()==my_range.begin(), NULL ); - my_total += left.my_total; - my_range = Range( left.my_range.begin(), my_range.end(), 1 ); - ASSERT( ScanIsRunning, NULL ); - Snooze(true); - ASSERT( ScanIsRunning, NULL ); - ASSERT( self==this, NULL ); - ASSERT( left.self==&left, NULL ); - } - void assign( const Accumulator& other ) { - my_total = other.my_total; - my_range = other.my_range; - ASSERT( self==this, NULL ); - ASSERT( other.self==&other, "other Accumulator corrupted or prematurely destroyed" ); - } -}; - -#include "tbb/tick_count.h" -#include "harness.h" - -static void VerifySum( long start_index, long finish_index, int sum, int line ) { - int expected = TriangularSum( finish_index ) - TriangularSum( start_index ); - if( expected!=sum ) { - REPORT( "line %d: sum[%ld..%ld] should be = %d, but was computed as %d\n", - line, start_index, finish_index, expected, sum ); - abort(); - } -} - -void TestAccumulator( int mode, int nthread ) { - typedef int T; - T* addend = new T[MAXN]; - T* sum = new T[MAXN]; - for( long n=0; n<=MAXN; ++n ) { - for( long i=0; i<MAXN; ++i ) { - addend[i] = -1; - sum[i] = -2; - AddendHistory[i] = UNUSED; - } - for( long i=0; i<n; ++i ) - addend[i] = i; - Accumulator<T> acc( addend, sum ); - tbb::tick_count t0 = tbb::tick_count::now(); -#if PRINT_DEBUG - REPORT("--------- mode=%d range=[0..%ld)\n",mode,n); -#endif /* PRINT_DEBUG */ - ScanIsRunning = true; - - switch (mode) { - case 0: - tbb::parallel_scan( Range( 0, n, 1 ), acc ); - break; - case 1: - tbb::parallel_scan( Range( 0, n, 1 ), acc, tbb::simple_partitioner() ); - break; - case 2: - tbb::parallel_scan( Range( 0, n, 1 ), acc, tbb::auto_partitioner() ); - break; - } - - ScanIsRunning = false; -#if PRINT_DEBUG - REPORT("=========\n"); -#endif /* PRINT_DEBUG */ - Snooze(false); - tbb::tick_count t1 = tbb::tick_count::now(); - long used_once_count = 0; - for( long i=0; i<n; ++i ) - if( !(AddendHistory[i]&USED_FINAL) ) { - REPORT("failed to use addend[%ld] %s\n",i,AddendHistory[i]&USED_NONFINAL?"(but used nonfinal)":""); - } - for( long i=0; i<n; ++i ) { - VerifySum( 0, i, sum[i], __LINE__ ); - used_once_count += AddendHistory[i]==USED_FINAL; - } - if( n ) - ASSERT( acc.my_total==sum[n-1], NULL ); - else - ASSERT( acc.my_total==0, NULL ); - REMARK("time [n=%ld] = %g\tused_once%% = %g\tnthread=%d\n",n,(t1-t0).seconds(), n==0 ? 
0 : 100.0*used_once_count/n,nthread); - } - delete[] addend; - delete[] sum; -} - -static void TestScanTags() { - ASSERT( tbb::pre_scan_tag::is_final_scan()==false, NULL ); - ASSERT( tbb::final_scan_tag::is_final_scan()==true, NULL ); -} - -#include "tbb/task_scheduler_init.h" -#include "harness_cpu.h" - -int TestMain () { - TestScanTags(); - for( int p=MinThread; p<=MaxThread; ++p ) { - for (int mode = 0; mode < 3; mode++) { - tbb::task_scheduler_init init(p); - NumberOfLiveAccumulator = 0; - TestAccumulator(mode, p); - - // Test that all workers sleep when no work - TestCPUUserTime(p); - - // Checking has to be done late, because when parallel_scan makes copies of - // the user's "Body", the copies might be destroyed slightly after parallel_scan - // returns. - ASSERT( NumberOfLiveAccumulator==0, NULL ); - } - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_parallel_sort.cpp b/src/tbb/src/test/test_parallel_sort.cpp deleted file mode 100644 index 730c2445a..000000000 --- a/src/tbb/src/test/test_parallel_sort.cpp +++ /dev/null @@ -1,578 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" -#include "tbb/parallel_sort.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/concurrent_vector.h" -#include "harness.h" -#include <math.h> -#include <vector> -#include <exception> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <algorithm> -#include <iterator> -#include <functional> -#include <string> -#include <cstring> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -/** Has tightly controlled interface so that we can verify - that parallel_sort uses only the required interface. */ -class Minimal { - int val; -public: - Minimal() {} - void set_val(int i) { val = i; } - static bool CompareWith (const Minimal &a, const Minimal &b) { - return (a.val < b.val); - } - static bool AreEqual( Minimal &a, Minimal &b) { - return a.val == b.val; - } -}; - -//! 
Defines a comparison function object for Minimal -class MinimalCompare { -public: - bool operator() (const Minimal &a, const Minimal &b) const { - return Minimal::CompareWith(a,b); - } -}; - -//! The default validate; but it uses operator== which is not required -template<typename RandomAccessIterator> -bool Validate(RandomAccessIterator a, RandomAccessIterator b, size_t n) { - for (size_t i = 0; i < n; i++) { - ASSERT( a[i] == b[i], NULL ); - } - return true; -} - -//! A Validate specialized to string for debugging-only -template<> -bool Validate<std::string *>(std::string * a, std::string * b, size_t n) { - for (size_t i = 0; i < n; i++) { - if ( Verbose && a[i] != b[i]) { - for (size_t j = 0; j < n; j++) { - REPORT("a[%llu] == %s and b[%llu] == %s\n", static_cast<unsigned long long>(j), a[j].c_str(), static_cast<unsigned long long>(j), b[j].c_str()); - } - } - ASSERT( a[i] == b[i], NULL ); - } - return true; -} - -//! A Validate specialized to Minimal since it does not define an operator== -template<> -bool Validate<Minimal *>(Minimal *a, Minimal *b, size_t n) { - for (size_t i = 0; i < n; i++) { - ASSERT( Minimal::AreEqual(a[i],b[i]), NULL ); - } - return true; -} - -//! A Validate specialized to concurrent_vector<Minimal> since it does not define an operator== -template<> -bool Validate<tbb::concurrent_vector<Minimal>::iterator>(tbb::concurrent_vector<Minimal>::iterator a, - tbb::concurrent_vector<Minimal>::iterator b, size_t n) { - for (size_t i = 0; i < n; i++) { - ASSERT( Minimal::AreEqual(a[i],b[i]), NULL ); - } - return true; -} - -//! used in Verbose mode for identifying which data set is being used -static std::string test_type; - -//! The default initialization routine. -/*! This routine assumes that you can assign to the elements from a float. - It assumes that iter and sorted_list have already been allocated. It fills - them according to the current data set (tracked by a local static variable). - Returns true if a valid test has been setup, or false if there is no test to - perform. 
-*/ - -template < typename RandomAccessIterator, typename Compare > -bool init_iter(RandomAccessIterator iter, RandomAccessIterator sorted_list, size_t n, const Compare &compare, bool reset) { - static char test_case = 0; - const char num_cases = 3; - - if (reset) test_case = 0; - - if (test_case < num_cases) { - // switch on the current test case, filling the iter and sorted_list appropriately - switch(test_case) { - case 0: - /* use sin to generate the values */ - test_type = "sin"; - for (size_t i = 0; i < n; i++) - iter[i] = sorted_list[i] = static_cast<typename std::iterator_traits< RandomAccessIterator >::value_type>(sin(float(i))); - break; - case 1: - /* presorted list */ - test_type = "pre-sorted"; - for (size_t i = 0; i < n; i++) - iter[i] = sorted_list[i] = static_cast<typename std::iterator_traits< RandomAccessIterator >::value_type>(i); - break; - case 2: - /* reverse-sorted list */ - test_type = "reverse-sorted"; - for (size_t i = 0; i < n; i++) - iter[i] = sorted_list[i] = static_cast<typename std::iterator_traits< RandomAccessIterator >::value_type>(n - i); - break; - } - - // pre-sort sorted_list for later validity testing - std::sort(sorted_list, sorted_list + n, compare); - test_case++; - return true; - } - return false; -} - -template < typename T, typename Compare > -bool init_iter(T * iter, T * sorted_list, size_t n, const Compare &compare, bool reset) { - static char test_case = 0; - const char num_cases = 3; - - if (reset) test_case = 0; - - if (test_case < num_cases) { - // switch on the current test case, filling the iter and sorted_list appropriately - switch(test_case) { - case 0: - /* use sin to generate the values */ - test_type = "sin"; - for (size_t i = 0; i < n; i++) - iter[i] = sorted_list[i] = T(sin(float(i))); - break; - case 1: - /* presorted list */ - test_type = "pre-sorted"; - for (size_t i = 0; i < n; i++) - iter[i] = sorted_list[i] = T(i); - break; - case 2: - /* reverse-sorted list */ - test_type = "reverse-sorted"; - for (size_t i = 0; i < n; i++) - iter[i] = sorted_list[i] = T(n - i); - break; - } - - // pre-sort sorted_list for later validity testing - std::sort(sorted_list, sorted_list + n, compare); - test_case++; - return true; - } - return false; -} - - -//! The initialization routine specialized to the class Minimal -/*! Minimal cannot have floats assigned to it. This function uses the set_val method -*/ - -template < > -bool init_iter(Minimal* iter, Minimal * sorted_list, size_t n, const MinimalCompare &compare, bool reset) { - static char test_case = 0; - const char num_cases = 3; - - if (reset) test_case = 0; - - if (test_case < num_cases) { - switch(test_case) { - case 0: - /* use sin to generate the values */ - test_type = "sin"; - for (size_t i = 0; i < n; i++) { - iter[i].set_val( int( sin( float(i) ) * 1000.f) ); - sorted_list[i].set_val( int ( sin( float(i) ) * 1000.f) ); - } - break; - case 1: - /* presorted list */ - test_type = "pre-sorted"; - for (size_t i = 0; i < n; i++) { - iter[i].set_val( int(i) ); - sorted_list[i].set_val( int(i) ); - } - break; - case 2: - /* reverse-sorted list */ - test_type = "reverse-sorted"; - for (size_t i = 0; i < n; i++) { - iter[i].set_val( int(n-i) ); - sorted_list[i].set_val( int(n-i) ); - } - break; - } - std::sort(sorted_list, sorted_list + n, compare); - test_case++; - return true; - } - return false; -} - -//! The initialization routine specialized to the class concurrent_vector<Minimal> -/*! Minimal cannot have floats assigned to it. 
This function uses the set_val method -*/ - -template < > -bool init_iter(tbb::concurrent_vector<Minimal>::iterator iter, tbb::concurrent_vector<Minimal>::iterator sorted_list, - size_t n, const MinimalCompare &compare, bool reset) { - static char test_case = 0; - const char num_cases = 3; - - if (reset) test_case = 0; - - if (test_case < num_cases) { - switch(test_case) { - case 0: - /* use sin to generate the values */ - test_type = "sin"; - for (size_t i = 0; i < n; i++) { - iter[i].set_val( int( sin( float(i) ) * 1000.f) ); - sorted_list[i].set_val( int ( sin( float(i) ) * 1000.f) ); - } - break; - case 1: - /* presorted list */ - test_type = "pre-sorted"; - for (size_t i = 0; i < n; i++) { - iter[i].set_val( int(i) ); - sorted_list[i].set_val( int(i) ); - } - break; - case 2: - /* reverse-sorted list */ - test_type = "reverse-sorted"; - for (size_t i = 0; i < n; i++) { - iter[i].set_val( int(n-i) ); - sorted_list[i].set_val( int(n-i) ); - } - break; - } - std::sort(sorted_list, sorted_list + n, compare); - test_case++; - return true; - } - return false; -} - -//! The initialization routine specialized to the class string -/*! strings are created from floats. -*/ - -template<> -bool init_iter(std::string *iter, std::string *sorted_list, size_t n, const std::less<std::string> &compare, bool reset) { - static char test_case = 0; - const char num_cases = 1; - - if (reset) test_case = 0; - - if (test_case < num_cases) { - switch(test_case) { - case 0: - /* use sin to generate the values */ - test_type = "sin"; - for (size_t i = 0; i < n; i++) { - char buffer[20]; -// Getting rid of secure warning issued by C++ 14.00 and newer -// sprintf_s is not defined in msvcrt.dll in windows XP and windows 2003 (used by MinGW gcc 4.5.2 with default spec) -#if __STDC_SECURE_LIB__>=200411 && !__MINGW64__ - sprintf_s(buffer, sizeof(buffer), "%f", float(sin(float(i)))); -#else - sprintf(buffer, "%f", float(sin(float(i)))); -#endif /* __STDC_SECURE_LIB__>=200411 || && !__MINGW64__ */ - sorted_list[i] = iter[i] = std::string(buffer); - } - break; - } - std::sort(sorted_list, sorted_list + n, compare); - test_case++; - return true; - } - return false; -} - -//! The current number of threads in use (for Verbose only) -static size_t current_p; - -//! The current data type being sorted (for Verbose only) -static std::string current_type; - -//! The default test routine. -/*! Tests all data set sizes from 0 to N, all grainsizes from 0 to G=10, and selects from - all possible interfaces to parallel_sort depending on whether a scratch space and - compare have been provided. -*/ -template<typename RandomAccessIterator, typename Compare> -bool parallel_sortTest(size_t n, RandomAccessIterator iter, RandomAccessIterator sorted_list, const Compare *comp) { - bool passed = true; - - Compare local_comp; - - init_iter(iter, sorted_list, n, local_comp, true); - do { - REMARK("%s %s p=%llu n=%llu :",current_type.c_str(), test_type.c_str(), - static_cast<unsigned long long>(current_p), static_cast<unsigned long long>(n)); - if (comp != NULL) { - tbb::parallel_sort(iter, iter + n, local_comp ); - } else { - tbb::parallel_sort(iter, iter + n ); - } - if (!Validate(iter, sorted_list, n)) - passed = false; - REMARK("passed\n"); - } while (init_iter(iter, sorted_list, n, local_comp, false)); - return passed; -} - -//! 
The test routine specialize to Minimal, since it does not have a less defined for it -template<> -bool parallel_sortTest(size_t n, Minimal * iter, Minimal * sorted_list, const MinimalCompare *compare) { - bool passed = true; - - if (compare == NULL) return passed; - - init_iter(iter, sorted_list, n, *compare, true); - do { - REMARK("%s %s p=%llu n=%llu :",current_type.c_str(), test_type.c_str(), - static_cast<unsigned long long>(current_p), static_cast<unsigned long long>(n)); - - tbb::parallel_sort(iter, iter + n, *compare ); - - if (!Validate(iter, sorted_list, n)) - passed = false; - REMARK("passed\n"); - } while (init_iter(iter, sorted_list, n, *compare, false)); - return passed; -} - -//! The test routine specialize to concurrent_vector of Minimal, since it does not have a less defined for it -template<> -bool parallel_sortTest(size_t n, tbb::concurrent_vector<Minimal>::iterator iter, - tbb::concurrent_vector<Minimal>::iterator sorted_list, const MinimalCompare *compare) { - bool passed = true; - - if (compare == NULL) return passed; - - init_iter(iter, sorted_list, n, *compare, true); - do { - REMARK("%s %s p=%llu n=%llu :",current_type.c_str(), test_type.c_str(), - static_cast<unsigned long long>(current_p), static_cast<unsigned long long>(n)); - - tbb::parallel_sort(iter, iter + n, *compare ); - - if (!Validate(iter, sorted_list, n)) - passed = false; - REMARK("passed\n"); - } while (init_iter(iter, sorted_list, n, *compare, false)); - return passed; -} - -//! The main driver for the tests. -/*! Minimal, float and string types are used. All interfaces to parallel_sort that are usable - by each type are tested. -*/ -void Flog() { - // For each type create: - // the list to be sorted by parallel_sort (array) - // the list to be sort by STL sort (array_2) - // and a less function object - - const size_t N = 50000; - - Minimal *minimal_array = new Minimal[N]; - Minimal *minimal_array_2 = new Minimal[N]; - MinimalCompare minimal_less; - - float *float_array = new float[N]; - float *float_array_2 = new float[N]; - std::less<float> float_less; - - tbb::concurrent_vector<float> float_cv1; - tbb::concurrent_vector<float> float_cv2; - float_cv1.grow_to_at_least(N); - float_cv2.grow_to_at_least(N); - - std::string *string_array = new std::string[N]; - std::string *string_array_2 = new std::string[N]; - std::less<std::string> string_less; - - tbb::concurrent_vector<Minimal> minimal_cv1; - tbb::concurrent_vector<Minimal> minimal_cv2; - minimal_cv1.grow_to_at_least(N); - minimal_cv2.grow_to_at_least(N); - - - // run the appropriate tests for each type - - current_type = "Minimal(less)"; - parallel_sortTest(0, minimal_array, minimal_array_2, &minimal_less); - parallel_sortTest(1, minimal_array, minimal_array_2, &minimal_less); - parallel_sortTest(10, minimal_array, minimal_array_2, &minimal_less); - parallel_sortTest(9999, minimal_array, minimal_array_2, &minimal_less); - parallel_sortTest(50000, minimal_array, minimal_array_2, &minimal_less); - - current_type = "float (no less)"; - parallel_sortTest(0, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); - parallel_sortTest(1, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); - parallel_sortTest(10, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); - parallel_sortTest(9999, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); - parallel_sortTest(50000, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); - - current_type = "float (less)"; - parallel_sortTest(0, 
float_array, float_array_2, &float_less); - parallel_sortTest(1, float_array, float_array_2, &float_less); - parallel_sortTest(10, float_array, float_array_2, &float_less); - parallel_sortTest(9999, float_array, float_array_2, &float_less); - parallel_sortTest(50000, float_array, float_array_2, &float_less); - - current_type = "concurrent_vector<float> (no less)"; - parallel_sortTest(0, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL)); - parallel_sortTest(1, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL)); - parallel_sortTest(10, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL)); - parallel_sortTest(9999, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL)); - parallel_sortTest(50000, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL)); - - current_type = "concurrent_vector<float> (less)"; - parallel_sortTest(0, float_cv1.begin(), float_cv2.begin(), &float_less); - parallel_sortTest(1, float_cv1.begin(), float_cv2.begin(), &float_less); - parallel_sortTest(10, float_cv1.begin(), float_cv2.begin(), &float_less); - parallel_sortTest(9999, float_cv1.begin(), float_cv2.begin(), &float_less); - parallel_sortTest(50000, float_cv1.begin(), float_cv2.begin(), &float_less); - - current_type = "string (no less)"; - parallel_sortTest(0, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL)); - parallel_sortTest(1, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL)); - parallel_sortTest(10, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL)); - parallel_sortTest(9999, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL)); - parallel_sortTest(50000, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL)); - - current_type = "string (less)"; - parallel_sortTest(0, string_array, string_array_2, &string_less); - parallel_sortTest(1, string_array, string_array_2, &string_less); - parallel_sortTest(10, string_array, string_array_2, &string_less); - parallel_sortTest(9999, string_array, string_array_2, &string_less); - parallel_sortTest(50000, string_array, string_array_2, &string_less); - - current_type = "concurrent_vector<Minimal> (less)"; - parallel_sortTest(0, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less); - parallel_sortTest(1, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less); - parallel_sortTest(10, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less); - parallel_sortTest(9999, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less); - parallel_sortTest(50000, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less); - - delete [] minimal_array; - delete [] minimal_array_2; - - delete [] float_array; - delete [] float_array_2; - - delete [] string_array; - delete [] string_array_2; -} - -const int elements = 10000; - -void rand_vec(std::vector<int> &v) { - for (int i=0; i<elements; ++i) { - (v.push_back(rand()%elements*10)); - } -} - -void range_sort_test() { - std::vector<int> v; - - typedef std::vector<int>::iterator itor; - // iterator checks - rand_vec(v); - tbb::parallel_sort(v.begin(), v.end()); - for(itor a=v.begin(); a<v.end()-1; ++a) ASSERT(*a <= *(a+1), "v not sorted"); - v.clear(); - - rand_vec(v); - tbb::parallel_sort(v.begin(), v.end(), std::greater<int>()); - for(itor a=v.begin(); a<v.end()-1; ++a) ASSERT(*a >= *(a+1), "v not sorted"); - v.clear(); - - // range checks - rand_vec(v); - tbb::parallel_sort(v); - for(itor 
a=v.begin(); a<v.end()-1; ++a) ASSERT(*a <= *(a+1), "v not sorted"); - v.clear(); - - rand_vec(v); - tbb::parallel_sort(v, std::greater<int>()); - for(itor a=v.begin(); a<v.end()-1; ++a) ASSERT(*a >= *(a+1), "v not sorted"); - v.clear(); - - // const range checks - rand_vec(v); - tbb::parallel_sort(tbb::blocked_range<std::vector<int>::iterator>(v.begin(), v.end())); - for(itor a=v.begin(); a<v.end()-1; ++a) ASSERT(*a <= *(a+1), "v not sorted"); - v.clear(); - - rand_vec(v); - tbb::parallel_sort(tbb::blocked_range<std::vector<int>::iterator>(v.begin(), v.end()), std::greater<int>()); - for(itor a=v.begin(); a<v.end()-1; ++a) ASSERT(*a >= *(a+1), "v not sorted"); - v.clear(); - - // array tests - int arr[elements]; - for(int i=0; i<elements; ++i) arr[i] = rand()%(elements*10); - tbb::parallel_sort(arr); - for(int i=0; i<elements-1; ++i) ASSERT(arr[i] <= arr[i+1], "arr not sorted"); -} - -#include <cstdio> -#include "harness_cpu.h" - -int TestMain () { - if( MinThread<1 ) { - REPORT("Usage: number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - if( p>0 ) { - tbb::task_scheduler_init init( p ); - current_p = p; - Flog(); - range_sort_test(); - - // Test that all workers sleep when no work - TestCPUUserTime(p); - } - } - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_parallel_while.cpp b/src/tbb/src/test/test_parallel_while.cpp deleted file mode 100644 index 006436ee5..000000000 --- a/src/tbb/src/test/test_parallel_while.cpp +++ /dev/null @@ -1,171 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/parallel_while.h" -#include "harness.h" - -const int N = 200; - -typedef int Element; - -//! Representation of an array index with only those signatures required by parallel_while. 
-class MinimalArgumentType { - void operator=( const MinimalArgumentType& ); - long my_value; - enum { - DEAD=0xDEAD, - LIVE=0x2718, - INITIALIZED=0x3141 - } my_state; -public: - ~MinimalArgumentType() { - ASSERT( my_state==LIVE||my_state==INITIALIZED, NULL ); - my_state = DEAD; - } - MinimalArgumentType() { - my_state = LIVE; - } - void set_value( long i ) { - ASSERT( my_state==LIVE||my_state==INITIALIZED, NULL ); - my_value = i; - my_state = INITIALIZED; - } - long get_value() const { - ASSERT( my_state==INITIALIZED, NULL ); - return my_value; - } -}; - -class IntegerStream { - long my_limit; - long my_index; -public: - IntegerStream( long n ) : my_limit(n), my_index(0) {} - bool pop_if_present( MinimalArgumentType& v ) { - if( my_index>=my_limit ) - return false; - v.set_value( my_index ); - my_index+=2; - return true; - } -}; - -class MatrixMultiplyBody: NoAssign { - Element (*a)[N]; - Element (*b)[N]; - Element (*c)[N]; - const int n; - tbb::parallel_while<MatrixMultiplyBody>& my_while; -public: - typedef MinimalArgumentType argument_type; - void operator()( argument_type i_arg ) const { - long i = i_arg.get_value(); - if( (i&1)==0 && i+1<N ) { - MinimalArgumentType value; - value.set_value(i+1); - my_while.add( value ); - } - for( int j=0; j<n; ++j ) - c[i][j] = 0; - for( int k=0; k<n; ++k ) { - Element aik = a[i][k]; - for( int j=0; j<n; ++j ) - c[i][j] += aik*b[k][j]; - } - } - MatrixMultiplyBody( tbb::parallel_while<MatrixMultiplyBody>& w, Element c_[N][N], Element a_[N][N], Element b_[N][N], int n_ ) : - a(a_), b(b_), c(c_), n(n_), my_while(w) - {} -}; - -void WhileMatrixMultiply( Element c[N][N], Element a[N][N], Element b[N][N], int n ) { - IntegerStream stream( N ); - tbb::parallel_while<MatrixMultiplyBody> w; - MatrixMultiplyBody body(w,c,a,b,n); - w.run( stream, body ); -} - -#include "tbb/tick_count.h" -#include <cstdlib> -#include <cstdio> -using namespace std; - -static long Iterations = 5; - -static void SerialMatrixMultiply( Element c[N][N], Element a[N][N], Element b[N][N], int n ) { - for( int i=0; i<n; ++i ) { - for( int j=0; j<n; ++j ) - c[i][j] = 0; - for( int k=0; k<n; ++k ) { - Element aik = a[i][k]; - for( int j=0; j<n; ++j ) - c[i][j] += aik*b[k][j]; - } - } -} - -static void InitializeMatrix( Element x[N][N], int n, int salt ) { - for( int i=0; i<n; ++i ) - for( int j=0; j<n; ++j ) - x[i][j] = (i*n+j)^salt; -} - -static Element A[N][N], B[N][N], C[N][N], D[N][N]; - -static void Run( int nthread, int n ) { - /* Initialize matrices */ - InitializeMatrix(A,n,5); - InitializeMatrix(B,n,10); - InitializeMatrix(C,n,0); - InitializeMatrix(D,n,15); - - tbb::tick_count t0 = tbb::tick_count::now(); - for( long i=0; i<Iterations; ++i ) { - WhileMatrixMultiply( C, A, B, n ); - } - tbb::tick_count t1 = tbb::tick_count::now(); - SerialMatrixMultiply( D, A, B, n ); - - // Check result - for( int i=0; i<n; ++i ) - for( int j=0; j<n; ++j ) - ASSERT( C[i][j]==D[i][j], NULL ); - REMARK("time=%g\tnthread=%d\tn=%d\n",(t1-t0).seconds(),nthread,n); -} - -#include "tbb/task_scheduler_init.h" -#include "harness_cpu.h" - -int TestMain () { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - for( int n=N/4; n<=N; n+=N/4 ) - Run(p,n); - - // Test that all workers sleep when no work - TestCPUUserTime(p); - } - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_partitioner.h b/src/tbb/src/test/test_partitioner.h deleted file mode 100644 index 
72b4354ec..000000000 --- a/src/tbb/src/test/test_partitioner.h +++ /dev/null @@ -1,505 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <cmath> -#include "tbb/tbb_stddef.h" -#include "harness.h" - -namespace test_partitioner_utils { - -using tbb::internal::uint64_t; - -struct RangeStatisticData { - // denotes the number of range objects - size_t m_rangeNum; - - // store minimal and maximal range sizes (in terms of number of iterations) - size_t m_minRangeSize; - size_t m_maxRangeSize; - - bool m_wasMinRangeSizeWritten; // shows whether relevant field was written or not -}; - -using tbb::split; -using tbb::proportional_split; -using tbb::blocked_range; - -// helper for calculating number of range objects created before balancing phase is started -// and for finding maximum and minimum number of iterations among all such ranges -// Note: class does not provide exclusive access to members -class RangeStatisticCollector { -public: - RangeStatisticCollector(RangeStatisticData *statisticData) : - m_statData(statisticData) - { - m_called = false; - if (m_statData) - m_statData->m_rangeNum++; - } - - // constructor is called from non-proportional split constructor of derived Range - RangeStatisticCollector(RangeStatisticCollector& sc, size_t rangeSize) { - if (!sc.m_called) { - // this is the first time non-proportional split constructor is called - // it means that work distribution phase has been completed and - // work balancing phase has been just started - sc.m_called = true; - - if (sc.m_statData) { - size_t *minRangeSize = &sc.m_statData->m_minRangeSize; - if (*minRangeSize > rangeSize || !sc.m_statData->m_wasMinRangeSizeWritten) { // if minimum is not an actual minimum - *minRangeSize = rangeSize; - sc.m_statData->m_wasMinRangeSizeWritten = true; - } - size_t *maxRangeSize = &sc.m_statData->m_maxRangeSize; - if (*maxRangeSize < rangeSize) { // if maximum is not an actual maximum - *maxRangeSize = rangeSize; - } - } - } - *this = sc; - // constructor is used on work balancing phase only, so no need to increment number of range objects created - } - - RangeStatisticCollector(RangeStatisticCollector& sc, proportional_split&) { - if (sc.m_statData) - sc.m_statData->m_rangeNum++; - *this = sc; - } - -private: - 
RangeStatisticData *m_statData; - - // turns to 'true' when non-proportional split constructor is called first time - bool m_called; -}; - -// Base class for fake ranges used in vaious tests for parallel -// algorithms as well as for partitioner -template <typename DerivedRange, typename T> -class RangeBase: public RangeStatisticCollector { -protected: - size_t my_begin, my_end; -public: - RangeBase(size_t _begin, size_t _end, RangeStatisticData *statData) : - RangeStatisticCollector(statData), my_begin(_begin), my_end(_end) { } - RangeBase(RangeBase& r, tbb::split) : RangeStatisticCollector(r, r.size()) { - my_end = r.my_end; - size_t middle = r.my_begin + (r.my_end - r.my_begin) / 2u; - r.my_end = my_begin = middle; - } - - RangeBase(RangeBase& r, tbb::proportional_split& p) : RangeStatisticCollector(r, p) { - size_t original_size = r.size(); - my_end = r.my_end; - T right = self().compute_right_part(r, p); - size_t right_part_size = self().round(right); - right_part_size = (original_size == right_part_size) ? (original_size - 1) : right_part_size; - right_part_size = (right_part_size != 0) ? right_part_size : 1; - r.my_end = my_begin = r.my_end - right_part_size; - p.set_proportion(original_size - right_part_size, right_part_size); - ASSERT(r.my_end != r.my_begin && my_end != my_begin, "Incorrect range split"); - } - - DerivedRange& self() { return static_cast<DerivedRange&>(*this); } - size_t round(T part) { return size_t(part); } - T compute_right_part(RangeBase& r, tbb::proportional_split& p) { - return T(r.size() * T(p.right())) / T(p.left() + p.right()); - } - - size_t begin() const { return my_begin; } - size_t end() const { return my_end; } - bool is_divisible() const { return (my_end - my_begin) > 1; } - bool empty() const { return my_end == my_begin; } - size_t size() const { return my_end - my_begin; } -}; - -namespace TestRanges { -/* - * RoundedUpRange rounds result up - * RoundedDownRange rounds result down - * Range1_2 forces proportion always to be 1:2 and rounds up - * Range1_999 uses weird proportion 1:999 and rounds up - * Range1_999 uses weird proportion 999:1 and rounds up - */ - -class RoundedDownRange: public RangeBase<RoundedDownRange, float> { -public: - RoundedDownRange(size_t _begin, size_t _end, RangeStatisticData *statData = NULL) : - RangeBase<RoundedDownRange, float>(_begin, _end, statData) { } - RoundedDownRange(RoundedDownRange& r, tbb::split) : - RangeBase<RoundedDownRange, float>(r, tbb::split()) { } - RoundedDownRange(RoundedDownRange& r, tbb::proportional_split& p) : - RangeBase<RoundedDownRange, float>(r, p) { } - // uses default implementation of RangeBase::round() which rounds down - static const bool is_divisible_in_proportion = true; -}; - -class RoundedUpRange: public RangeBase<RoundedUpRange, float> { -public: - RoundedUpRange(size_t _begin, size_t _end, RangeStatisticData *statData = NULL) : - RangeBase<RoundedUpRange, float>(_begin, _end, statData) { } - RoundedUpRange(RoundedUpRange& r, tbb::split) : - RangeBase<RoundedUpRange, float>(r, tbb::split()) { } - RoundedUpRange(RoundedUpRange& r, tbb::proportional_split& p) : - RangeBase<RoundedUpRange, float>(r, p) { } - size_t round(float part) { return size_t(std::ceil(part)); } - static const bool is_divisible_in_proportion = true; -}; - -class Range1_2: public RangeBase<Range1_2, float> { -public: - Range1_2(size_t _begin, size_t _end, RangeStatisticData *statData = NULL) : - RangeBase<Range1_2, float>(_begin, _end, statData) { } - Range1_2(Range1_2& r, tbb::split) : RangeBase<Range1_2, 
float>(r, tbb::split()) { } - Range1_2(Range1_2& r, tbb::proportional_split& p) : RangeBase<Range1_2, float>(r, p) { } - static const bool is_divisible_in_proportion = true; - float compute_right_part(RangeBase<Range1_2, float>& r, tbb::proportional_split&) { - return float(r.size() * 2) / 3.0f; - } - // uses default implementation of RangeBase::round() which rounds down -}; - -class Range1_999: public RangeBase<Range1_999, float> { -public: - Range1_999(size_t _begin, size_t _end, RangeStatisticData *statData = NULL) : - RangeBase<Range1_999, float>(_begin, _end, statData) { } - Range1_999(Range1_999& r, tbb::split) : RangeBase<Range1_999, float>(r, tbb::split()) { } - Range1_999(Range1_999& r, tbb::proportional_split& p) : RangeBase<Range1_999, float>(r, p) { } - static const bool is_divisible_in_proportion = true; - float compute_right_part(RangeBase<Range1_999, float>& r, tbb::proportional_split&) { - return float(r.size() * 999) / 1000.0f; - } - // uses default implementation of RangeBase::round() which rounds down -}; - -class Range999_1: public RangeBase<Range999_1, float> { -public: - Range999_1(size_t _begin, size_t _end, RangeStatisticData *statData = NULL) : - RangeBase<Range999_1, float>(_begin, _end, statData) { } - Range999_1(Range999_1& r, tbb::split) : RangeBase<Range999_1, float>(r, tbb::split()) { } - Range999_1(Range999_1& r, tbb::proportional_split& p) : RangeBase<Range999_1, float>(r, p) { } - static const bool is_divisible_in_proportion = true; - float compute_right_part(RangeBase<Range999_1, float>& r, tbb::proportional_split&) { - return float(r.size()) / 1000.0f; - } - // uses default implementation of RangeBase::round() which rounds down -}; -} // namespace TestRanges - -struct TreeNode { - size_t m_affinity; - size_t m_range_begin, m_range_end; - TreeNode *m_left, *m_right; -private: - TreeNode(size_t range_begin, size_t range_end, size_t affinity, - TreeNode* left, TreeNode* right) : - m_affinity(affinity), m_range_begin(range_begin), m_range_end(range_end), - m_left(left), m_right(right) - { } - - friend TreeNode* make_node(size_t range_begin, size_t range_end, size_t affinity, - TreeNode *left, TreeNode *right); -}; - -TreeNode* make_node(size_t range_begin, size_t range_end, size_t affinity, - TreeNode* left = NULL, TreeNode* right = NULL) { - ASSERT(range_begin < range_end, "Incorrect range interval"); - return new TreeNode(range_begin, range_end, affinity, left, right); -} - -// Class stores nodes as a binary tree -// (marshals TreeNode objects in accordance with values of range intervals) -// Note: BinaryTree deletes all TreeNode objects pushed into it in a destruction phase -class BinaryTree { -public: - BinaryTree() : m_root(NULL) { } - ~BinaryTree() { - if (m_root) - remove_node_recursively(m_root); - } - - // pushed node must be within subrange of the parent nodes - void push_node(TreeNode* node) { - if (!node) - return; - - if (m_root) { - ASSERT(node->m_range_begin >= m_root->m_range_begin && - node->m_range_end <= m_root->m_range_end, - "Cannot push node not from subrange"); - } - - push_subnode(m_root, node); - } - - void visualize() { - if (!m_root) { // nothing to visualize - REPORT("Tree is empty\n"); - return; - } - visualize_node(m_root); - } - -private: - TreeNode *m_root; - - void push_subnode(TreeNode *&root_node, TreeNode *node) { - if (!root_node) { - root_node = node; - return; - } else if (are_nodes_equal(root_node, node)) { - // no need to push the same node - return; - } - - if (!has_children(root_node)) { - // if current root_node 
does not have children passed node - // should has one of the interval bounds to be equal to - // the same bound in the root_node - if (is_look_like_left_sibling(root_node, node)) - push_subnode(root_node->m_left, node); - else - push_subnode(root_node->m_right, node); - return; - } - - if (has_left_child(root_node)) { - if (is_subnode(root_node->m_left, node)) { - push_subnode(root_node->m_left, node); - return; - } - push_subnode(root_node->m_right, node); - return; - } - - ASSERT(root_node->m_right != NULL, "Right child is NULL but must be present"); - if (is_subnode(root_node->m_right, node)) { - push_subnode(root_node->m_right, node); - return; - } - push_subnode(root_node->m_left, node); - return; - } - - bool has_children(TreeNode *node) { return node->m_left || node->m_right; } - - bool is_look_like_left_sibling(TreeNode *root_node, TreeNode *node) { - if (root_node->m_range_begin == node->m_range_begin) - return true; - ASSERT(root_node->m_range_end == node->m_range_end, NULL); - return false; - } - - bool has_left_child(TreeNode *node) { return node->m_left != NULL; } - - bool is_subnode(TreeNode *root_node, TreeNode *node) { - return root_node->m_range_begin <= node->m_range_begin && - node->m_range_end <= root_node->m_range_end; - } - - bool are_nodes_equal(TreeNode *node1, TreeNode *node2) const { - return node1->m_range_begin == node2->m_range_begin && - node1->m_range_end == node2->m_range_end; - } - - void remove_node_recursively(TreeNode *node) { - if (node->m_left) - remove_node_recursively(node->m_left); - if (node->m_right) - remove_node_recursively(node->m_right); - delete node; - } - - static void visualize_node(const TreeNode* node, unsigned indent = 0) { - // respecting indent - const char *indentStep = " "; - for (unsigned i = 0; i < indent; ++i) - REPORT("%s", indentStep); - - size_t rangeSize = node->m_range_end - node->m_range_begin; - REPORT("[%llu, %llu)%%%llu@%llu\n", uint64_t(node->m_range_begin), uint64_t(node->m_range_end), - uint64_t(rangeSize), uint64_t(node->m_affinity)); - - if (node->m_left) - visualize_node(node->m_left, indent + 1); - if (node->m_right) - visualize_node(node->m_right, indent + 1); - } -}; - -class SimpleBody { -public: - SimpleBody() { } - template <typename Range> - void operator()(Range&) const { } -}; - -class SimpleReduceBody { -public: - SimpleReduceBody() { } - SimpleReduceBody(SimpleReduceBody&, tbb::split) { } - template <typename Range> - void operator()(Range&) { } - void join(SimpleReduceBody&) { } -}; - -namespace interaction_with_range_and_partitioner { - -class SplitConstructorAssertedRange { - mutable bool is_divisible_called; - mutable bool is_empty_called; - bool my_assert_in_nonproportional, my_assert_in_proportional; -public: - SplitConstructorAssertedRange(bool assert_in_nonproportional, bool assert_in_proportional) - : is_divisible_called(false), - is_empty_called(false), - my_assert_in_nonproportional(assert_in_nonproportional), - my_assert_in_proportional(assert_in_proportional) { } - SplitConstructorAssertedRange(SplitConstructorAssertedRange& r, tbb::split) { - *this = r; - ASSERT( !my_assert_in_nonproportional, "Disproportional splitting constructor was called but should not been" ); - } - SplitConstructorAssertedRange(SplitConstructorAssertedRange& r, tbb::proportional_split&) { - *this = r; - ASSERT( !my_assert_in_proportional, "Proportional splitting constructor was called but should not been" ); - } - bool is_divisible() const { - if (!is_divisible_called) { - is_divisible_called = true; - return true; 
- } - return false; - } - bool empty() const { - if (!is_empty_called) { - is_empty_called = true; - return false; - } - return true; - } -}; - -/* - * Possible use cases are: - * ------------------------------------------------------------------------------------------------------------- - * Range# is_divisible_in_proportion Range proportional ctor Used partitioner Result Effect - * ------------------------------------------------------------------------------------------------------------- - * 1 true available proportional pMN, r(p), part(p) - * ------------------------------------------------------------------------------------------------------------- - * 2 false available proportional p11, r(p), part(p) - * ------------------------------------------------------------------------------------------------------------- - * 3 not defined available proportional p11, r(p), part(p) - * ------------------------------------------------------------------------------------------------------------- - * 4 true not available proportional pMN, r(s), part(p) * - * ------------------------------------------------------------------------------------------------------------- - * 5 false not available proportional p11, r(s), part(p) - * ------------------------------------------------------------------------------------------------------------- - * 6 not defined not available proportional p11, r(s), part(p) - * ------------------------------------------------------------------------------------------------------------- - * 1 true available simple s, r(s), part(s) - * ------------------------------------------------------------------------------------------------------------- - * 2 false available simple s, r(s), part(s) - * ------------------------------------------------------------------------------------------------------------- - * 3 not defined available simple s, r(s), part(s) - * ------------------------------------------------------------------------------------------------------------- - * 4 true not available simple s, r(s), part(s) - * ------------------------------------------------------------------------------------------------------------- - * 5 false not available simple s, r(s), part(s) - * ------------------------------------------------------------------------------------------------------------- - * 6 not defined not available simple s, r(s), part(s) - * ------------------------------------------------------------------------------------------------------------- - * - * Legend: - * proportional - with proportional splits (e.g. affinity_partitioner) - * simple - without proportional splits (e.g. simple_partitioner, auto_partitioner) - * pMN - proportional_split object with proportion M to N is created. (p11 - proportion 1 to 1) - * s - split object is created - * r(p) - range's proportional split constructor is called - * r(s) - range's ordinary split constructor is called - * part(p) - partitioner's proportional split constructor is called - * part(s) - partitioner's ordinary split constructor is called - * * - incorrect split behavior is possible (e.g. 
partitioner divides at an arbitrary ratio while - * range divides into halves) - */ - - -// is_divisible_in_proportion = true, proportional_split ctor -class Range1: public SplitConstructorAssertedRange { -public: - Range1(bool assert_in_nonproportional, bool assert_in_proportional) - : SplitConstructorAssertedRange(assert_in_nonproportional, assert_in_proportional) { } - Range1( Range1& r, tbb::split ) : SplitConstructorAssertedRange(r, tbb::split()) { } - Range1( Range1& r, tbb::proportional_split& proportion ) : SplitConstructorAssertedRange(r, proportion) { } - static const bool is_divisible_in_proportion = true; -}; - -// is_divisible_in_proportion = false, proportional_split ctor -class Range2: public SplitConstructorAssertedRange { -public: - Range2(bool assert_in_nonproportional, bool assert_in_proportional) - : SplitConstructorAssertedRange(assert_in_nonproportional, assert_in_proportional) { } - Range2(Range2& r, tbb::split) : SplitConstructorAssertedRange(r, tbb::split()) { } - Range2(Range2& r, tbb::proportional_split& p) : SplitConstructorAssertedRange(r, p) { - // TODO: add check that 'is_divisible_in_proportion==false' results only in 1:1 proportions - } - static const bool is_divisible_in_proportion = false; -}; - -// is_divisible_in_proportion is not defined, proportional_split ctor -class Range3: public SplitConstructorAssertedRange { -public: - Range3(bool assert_in_nonproportional, bool assert_in_proportional) - : SplitConstructorAssertedRange(assert_in_nonproportional, assert_in_proportional) { } - Range3(Range3& r, tbb::split) : SplitConstructorAssertedRange(r, tbb::split()) { } - Range3(Range3& r, tbb::proportional_split& p) : SplitConstructorAssertedRange(r, p) { - // TODO: add check that absence of 'is_divisible_in_proportion' results only in 1:1 proportions - } -}; - -// is_divisible_in_proportion = true, proportional_split ctor is not defined -class Range4: public SplitConstructorAssertedRange { -public: - Range4(bool assert_in_nonproportional, bool assert_in_proportional) - : SplitConstructorAssertedRange(assert_in_nonproportional, assert_in_proportional) { } - Range4(Range4& r, tbb::split) : SplitConstructorAssertedRange(r, tbb::split()) { } - static const bool is_divisible_in_proportion = true; -}; - -// is_divisible_in_proportion = false, proportional_split ctor is not defined -class Range5: public SplitConstructorAssertedRange { -public: - Range5(bool assert_in_nonproportional, bool assert_in_proportional) - : SplitConstructorAssertedRange(assert_in_nonproportional, assert_in_proportional) { } - Range5(Range5& r, tbb::split) : SplitConstructorAssertedRange(r, tbb::split()) { } - static const bool is_divisible_in_proportion = false; -}; - -// is_divisible_in_proportion is not defined, proportional_split ctor is not defined -class Range6: public SplitConstructorAssertedRange { -public: - Range6(bool assert_in_nonproportional, bool assert_in_proportional) - : SplitConstructorAssertedRange(assert_in_nonproportional, assert_in_proportional) { } - Range6(Range6& r, tbb::split) : SplitConstructorAssertedRange(r, tbb::split()) { } -}; - -} // namespace interaction_with_range_and_partitioner - -} // namespace test_partitioner_utils diff --git a/src/tbb/src/test/test_partitioner_whitebox.cpp b/src/tbb/src/test/test_partitioner_whitebox.cpp deleted file mode 100644 index 0dc43f5fc..000000000 --- a/src/tbb/src/test/test_partitioner_whitebox.cpp +++ /dev/null @@ -1,559 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. 
- - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include <algorithm> -#include <typeinfo> - -#include "tbb/blocked_range.h" -#include "tbb/tbb_thread.h" -#include "tbb/enumerable_thread_specific.h" - -#include "harness.h" -#include "harness_assert.h" -#include "test_partitioner.h" - - -typedef tbb::enumerable_thread_specific<size_t> ThreadNumsType; -size_t g_threadNumInitialValue = 10; -ThreadNumsType g_threadNums(g_threadNumInitialValue); - -// simulate a subset of task.h -namespace tbb { -namespace internal { -typedef unsigned short affinity_id; -} -class fake_task { -public: - typedef internal::affinity_id affinity_id; - void set_affinity(affinity_id a) { my_affinity = a; } - affinity_id affinity() const { return my_affinity; } - void set_parent(fake_task* p) { my_parent = p; } - fake_task *parent() const { return my_parent; } - bool is_stolen_task() const { return false; } - intptr_t ref_count() const { return 1; } - bool is_cancelled() const { return false; } - static void spawn(fake_task &) {} // for legacy in partitioner.h - virtual fake_task* execute() = 0; // enables dynamic_cast - - fake_task() : my_parent(0), my_affinity(0) {} - virtual ~fake_task() {} -private: - fake_task *my_parent; - affinity_id my_affinity; -}; -} - -#define __TBB_task_H -#define get_initial_auto_partitioner_divisor my_get_initial_auto_partitioner_divisor -#define affinity_partitioner_base_v3 my_affinity_partitioner_base_v3 -#define task fake_task -#define __TBB_STATIC_THRESHOLD 0 -#include "tbb/partitioner.h" -#undef __TBB_STATIC_THRESHOLD -#undef task -#undef affinity_partitioner_base_v3 -#undef get_initial_auto_partitioner_divisor - -#include "string.h" -#include "harness.h" - -// replace library functions to simulate concurrency -namespace tbb { -namespace internal { -size_t my_get_initial_auto_partitioner_divisor() { - const size_t X_FACTOR = 4; - return X_FACTOR * g_threadNums.local(); -} - -void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint ); -void __TBB_EXPORTED_FUNC NFS_Free( void* ); - -void my_affinity_partitioner_base_v3::resize( unsigned factor ) { - // Check factor to avoid asking for number of workers while there might be no arena. - size_t new_size = factor ? 
factor * g_threadNums.local() : 0; - if (new_size != my_size) { - if (my_array) { - NFS_Free(my_array); - // Following two assignments must be done here for sake of exception safety. - my_array = NULL; - my_size = 0; - } - if (new_size) { - my_array = static_cast<affinity_id*>(NFS_Allocate(new_size, sizeof(affinity_id), NULL )); - memset(my_array, 0, sizeof(affinity_id) * new_size); - my_size = new_size; - } - } -} - -} //namespace internal -// simulate a subset of parallel_for -namespace interface7 { -namespace internal { - -// paralle_for algorithm that executes sequentially -template<typename Range, typename Body, typename Partitioner> -class start_for : public fake_task { - Range my_range; - Body my_body; - typename Partitioner::task_partition_type my_partition; - size_t m_executedBegin, m_executedEnd; - bool m_firstTimeRun; - size_t m_joinedBegin, m_joinedEnd; - test_partitioner_utils::BinaryTree* m_tree; -public: - start_for( const Range& range, const Body& body, Partitioner& partitioner, - test_partitioner_utils::BinaryTree* tree ) : - my_range(range), my_body(body), my_partition(partitioner), - m_executedBegin(0), m_executedEnd(0), m_firstTimeRun(true), - m_joinedBegin(/* grows left */ range.end()), m_joinedEnd(range.end()), m_tree(tree) - { - if (m_tree) { - m_tree->push_node( test_partitioner_utils::make_node(my_range.begin(), my_range.end(), affinity()) ); - } - } - //! Splitting constructor used to generate children. - /** parent_ becomes left child. Newly constructed object is right child. */ - start_for( start_for& parent_, typename Partitioner::split_type& split_obj) : - my_range(parent_.my_range, split_obj), - my_body(parent_.my_body), - my_partition(parent_.my_partition, split_obj), - m_executedBegin(0), m_executedEnd(0), m_firstTimeRun(true), - m_joinedBegin(/* grows left */ my_range.end()), m_joinedEnd(my_range.end()), - m_tree(parent_.m_tree) - { - set_parent(parent_.parent()); - my_partition.set_affinity(*this); - - if (m_tree) { - // collecting splitting statistics - m_tree->push_node( test_partitioner_utils::make_node(my_range.begin(), my_range.end(), affinity()) ); - m_tree->push_node( test_partitioner_utils::make_node(parent_.my_range.begin(), parent_.my_range.end(), parent_.affinity()) ); - } - } - //! Construct right child from the given range as response to the demand. - /** parent_ remains left child. Newly constructed object is right child. */ - start_for( start_for& parent_, const Range& r, depth_t d ) : - my_range(r), - my_body(parent_.my_body), - my_partition(parent_.my_partition, tbb::split()), - m_executedBegin(0), m_executedEnd(0), m_firstTimeRun(true), - m_joinedBegin(/* grows left */ r.end()), m_joinedEnd(r.end()), - m_tree(parent_.m_tree) - { - set_parent(parent_.parent()); - my_partition.set_affinity(*this); - my_partition.align_depth( d ); - } - fake_task* execute() { - my_partition.check_being_stolen( *this ); - size_t origBegin = my_range.begin(); - size_t origEnd = my_range.end(); - - my_partition.execute(*this, my_range); - - ASSERT(m_executedEnd == m_joinedBegin, "Non-continuous execution"); - m_executedEnd = m_joinedEnd; - - ASSERT(origBegin == m_executedBegin && origEnd == m_executedEnd, - "Not all iterations were processed"); - return NULL; - } - //! 
Run body for range, serves as callback for partitioner - void run_body( Range &r ) { - my_body(r); - - if (m_firstTimeRun) { - m_firstTimeRun = false; - m_executedBegin = m_executedEnd = r.begin(); - } - - ASSERT(m_executedBegin <= r.begin() && m_executedEnd < r.end(), "Non-continuous execution"); - m_executedEnd = r.end(); - } - //! spawn right task, serves as callback for partitioner - void offer_work(typename Partitioner::split_type& split_obj) { - start_for sibling(*this, split_obj); - sibling.execute(); - join(sibling.m_executedBegin, sibling.m_executedEnd); - } - //! spawn right task, serves as callback for partitioner - void offer_work(const Range& r, depth_t d = 0) { - start_for sibling(*this, r, d); - sibling.execute(); - join(sibling.m_executedBegin, sibling.m_executedEnd); - } - void join(size_t siblingExecutedBegin, size_t siblingExecutedEnd) { - ASSERT(siblingExecutedEnd == m_joinedBegin, "?"); - m_joinedBegin = siblingExecutedBegin; - } -}; - -} //namespace internal -} //namespace interface7 -} //namespace tbb - -namespace whitebox_simulation { -using namespace tbb::interface7::internal; -template<typename Range, typename Body, typename Partitioner> -void parallel_for( const Range& range, const Body& body, Partitioner& partitioner, - test_partitioner_utils::BinaryTree* tree = NULL) { - if (!range.empty()) { - flag_task parent; - start_for<Range, Body, Partitioner> start(range, body, partitioner, tree); - start.set_parent(&parent); - start.execute(); - } -} - -}//namespace whitebox_simulation - -namespace uniform_iterations_distribution { - -/* - * Test checks uniform distribution of range's iterations among all tasks just after - * work distribution phase has been completed and just before work balancing phase has been started - */ - - -/* - * BlockedRange uses tbb::blocked_range formula for proportion calculation - * InvertedProportionRange inverts proportion suggested by partitioner (e.g. 
1:3 --> 3:1) - * ExactSplitRange uses integer arithmetic for accurate splitting - */ - -using namespace test_partitioner_utils; -using tbb::blocked_range; -using tbb::split; -using tbb::proportional_split; - -class BlockedRange: public RangeStatisticCollector, public blocked_range<size_t> { -public: - BlockedRange(size_t _begin, size_t _end, RangeStatisticData *statData) : - RangeStatisticCollector(statData), - blocked_range<size_t>(_begin, _end) - { } - - BlockedRange(BlockedRange& r, split) : - RangeStatisticCollector(r, r.size()), - blocked_range<size_t>(r, split()) - { } - - BlockedRange(BlockedRange& r, proportional_split& p) : - RangeStatisticCollector(r, p), - blocked_range<size_t>(r, p) - { } - - static const bool is_divisible_in_proportion = true; -}; - -class InvertedProportionRange: public RangeBase<InvertedProportionRange, float> { -public: - InvertedProportionRange(size_t _begin, size_t _end, RangeStatisticData *statData) : - RangeBase<InvertedProportionRange, float>(_begin, _end, statData) { } - InvertedProportionRange(InvertedProportionRange& r, split) : - RangeBase<InvertedProportionRange, float>(r, split()) { } - InvertedProportionRange(InvertedProportionRange& r, proportional_split& p) : - RangeBase<InvertedProportionRange, float>(r, p) { } - float compute_right_part(RangeBase<InvertedProportionRange, float>& r, tbb::proportional_split& p) { - return float(r.size() * float(p.left())) / float(p.left() + p.right()); - } - static const bool is_divisible_in_proportion = true; -}; - -class ExactSplitRange: public RangeBase<ExactSplitRange, size_t> { -public: - ExactSplitRange(size_t _begin, size_t _end, RangeStatisticData *statData) : - RangeBase<ExactSplitRange, size_t>(_begin, _end, statData) { } - ExactSplitRange(ExactSplitRange& r, split) : - RangeBase<ExactSplitRange, size_t>(r, split()) { } - ExactSplitRange(ExactSplitRange& r, proportional_split& p) : - RangeBase<ExactSplitRange, size_t>(r, p) { } - size_t compute_right_part(RangeBase<ExactSplitRange, size_t>& r, tbb::proportional_split& p) { - size_t parts = size_t(p.left() + p.right()); - size_t currSize = r.size(); - size_t int_part = currSize / parts * p.right(); - size_t remainder = currSize % parts * p.right(); - int_part += remainder / parts; - remainder %= parts; - size_t right_part = int_part + (remainder > parts/2 ? 
1 : 0); - return right_part; - } - static const bool is_divisible_in_proportion = true; -}; - -template <typename Range, typename Body, typename Partitioner> -void test_case(Range& range, Body& body, Partitioner& partitioner, - test_partitioner_utils::BinaryTree* tree = NULL) { - whitebox_simulation::parallel_for(range, body, partitioner, tree); -} - -// Functions generate size for range objects used in tests -template <typename T> -size_t default_range_size_generator(T factor, size_t task_num) { - return size_t(factor * task_num); -} - -size_t shifted_left_range_size_generator(size_t factor, size_t task_num) { - return factor * task_num - 1; -} - -size_t shifted_right_range_size_generator(size_t factor, size_t task_num) { - return factor * task_num + 1; -} - -size_t max_range_size_generator(size_t, size_t) { - return size_t(-1); -} - -template <typename RangeType, typename T> -class ParallelTestBody { -public: - typedef size_t (*RangeSizeGenFunc)(T, size_t); - - ParallelTestBody(T *range_size_factors, unsigned len, - size_t tolerance, RangeSizeGenFunc range_size_generator, size_t range_begin) : - m_factors(range_size_factors), m_len(len), m_tolerance(tolerance), - m_range_size_generator(range_size_generator), m_range_begin(range_begin) - { } - - void operator ()(size_t task_num) const { - task_num++; // since NativeParalleFor starts indexing from zero - - g_threadNums.local() = task_num; - - for (unsigned i = 0; i < m_len; ++i) { - // initializing - size_t range_end = m_range_size_generator(m_factors[i], task_num); - RangeStatisticData stat = { - 0, // range num - 0, // minimal size of range - 0, // maximal size of range - false // minimal size of range was not rewritten yet - }; - - RangeType range = RangeType(m_range_begin, range_end, &stat); - tbb::affinity_partitioner ap; - SimpleBody body; - test_case(range, body, ap, NULL); - - // Checking that all threads were given a task - size_t rangeObjsCreated = stat.m_rangeNum; - size_t range_size = range_end - m_range_begin; - if (range_size >= task_num) { - if (rangeObjsCreated != task_num) { - REPORT("During processing of '%s' range of size (%llu) number of range objects created (%lu)" - " was not equal to number of tasks (%llu)\n", typeid(range).name(), uint64_t(range_size), uint64_t(rangeObjsCreated), uint64_t(task_num)); - ASSERT(rangeObjsCreated == task_num, "Incorrect number of range objects was created before work balancing phase started"); - } - } else if (rangeObjsCreated != range_size && range_size != 0) { - REPORT("['%s'] number of range objects created (%llu) was not equal to range's size (%llu)" - " when number of tasks = %llu\n", - typeid(range).name(), uint64_t(rangeObjsCreated), uint64_t(range_size), uint64_t(task_num)); - ASSERT(rangeObjsCreated == range_size, "Incorrect number of range objects was created before work balancing phase started"); - } - - // Checking difference between min and max number of range iterations - size_t diff = stat.m_maxRangeSize - stat.m_minRangeSize; - if (diff > m_tolerance) { - REPORT("Difference (%llu) between maximum and minimum number of splitted '%s' range iterations (%llu), " - "while executing test for %llu number of tasks, is greater than allowed tolerance (%llu)\n", - uint64_t(diff), typeid(range).name(), uint64_t(range_size), uint64_t(task_num), uint64_t(m_tolerance)); - ASSERT(diff <= m_tolerance, "Uniform iteration distribution error"); - } - } - } -private: - T *m_factors; // array of multipliers for parallel_for's number of iterations - unsigned m_len; // array size - size_t 
m_tolerance; // tolerance for min and max size of ranges - RangeSizeGenFunc m_range_size_generator; // function generates range sizes - size_t m_range_begin; // beginning of range iterations -}; - -template <typename RangeType, typename T> -void test(T range_size_factors[], unsigned len, size_t tolerance = 2, - size_t (*rsgFunc)(T, size_t) = default_range_size_generator<T>, size_t range_begin = 0) { - // Ideally difference should be equal to zero in case if iterations can be divided - // without a remainder and equals to one otherwise but due to floating point rounding - // on some systems difference reaches default value of 'tolerance' arg - - // some reasonable value for possible number of threads - size_t max_simulated_threads = 1024; - size_t hw_threads_num = tbb::tbb_thread::hardware_concurrency(); - size_t threadsToRunOn = std::min<size_t>(max_simulated_threads, hw_threads_num); - - for (size_t task_num = 1; task_num <= max_simulated_threads; task_num += threadsToRunOn) { - ParallelTestBody<RangeType, T> b = ParallelTestBody<RangeType, T> - (range_size_factors, len, tolerance, rsgFunc, range_begin); - NativeParallelFor(threadsToRunOn, b); - } -} - -void test() { - using namespace test_partitioner_utils::TestRanges; - - { - // Multipliers for number of tasks (iterations should be distributed uniformly) - size_t range_size_factors[] = { 1, 2, 3, 4, 5, 7, 9, 13, 27, 29, 30, 31, 32 }; - unsigned len = sizeof(range_size_factors) / sizeof(range_size_factors[0]); - - // tolerance for blocked_range is equal to zero - test<BlockedRange>(range_size_factors, len, 0); - test<InvertedProportionRange>(range_size_factors, len); - test<RoundedDownRange>(range_size_factors, len); - test<RoundedUpRange>(range_size_factors, len); - - // check only quantity of range objects - test<Range1_2>(range_size_factors, len, size_t(-1)); - test<Range1_999>(range_size_factors, len, size_t(-1)); - test<Range999_1>(range_size_factors, len, size_t(-1)); - } - - { - // Multipliers for number of tasks (iterations might not be distributed uniformly) - float range_size_factors[] = { 1.2f, 2.5f, 3.7f, 4.2f, 5.1f, 8.9f, 27.8f }; - unsigned len = sizeof(range_size_factors) / sizeof(range_size_factors[0]); - - // tolerance for blocked_range is equal to one - test<BlockedRange>(range_size_factors, len, 1); - test<InvertedProportionRange>(range_size_factors, len); - test<RoundedDownRange>(range_size_factors, len); - test<RoundedUpRange>(range_size_factors, len); - } - - - { - // Multipliers for number of tasks (iterations might not be distributed uniformly) - size_t range_size_factors[] = { 1, 2, 3, 4, 5, 7, 9, 11, 13, 27, 29, 30, 31, 32 }; - unsigned len = sizeof(range_size_factors) / sizeof(range_size_factors[0]); - - // tolerance for blocked_range is equal to one - test<BlockedRange>(range_size_factors, len, 1, &shifted_left_range_size_generator); - test<BlockedRange>(range_size_factors, len, 1, &shifted_right_range_size_generator); - - test<InvertedProportionRange>(range_size_factors, len, 2, &shifted_left_range_size_generator); - test<InvertedProportionRange>(range_size_factors, len, 2, &shifted_right_range_size_generator); - - test<RoundedDownRange>(range_size_factors, len, 2, &shifted_left_range_size_generator); - test<RoundedDownRange>(range_size_factors, len, 2, &shifted_right_range_size_generator); - - test<RoundedUpRange>(range_size_factors, len, 2, &shifted_left_range_size_generator); - test<RoundedUpRange>(range_size_factors, len, 2, &shifted_right_range_size_generator); - } - - - { - size_t range_size_factors[] = 
{ 0 }; - unsigned len = sizeof(range_size_factors) / sizeof(range_size_factors[0]); - - // tolerance is 1 since range iterations number is not divided without a remainder - test<ExactSplitRange>(range_size_factors, len, 1, &max_range_size_generator); - test<ExactSplitRange>(range_size_factors, len, 1, &max_range_size_generator, size_t(-1) - 10000); - } -} - -} // namespace uniform_iterations_distribution - -namespace overflow_during_split { - -using tbb::blocked_range; -using tbb::proportional_split; -using tbb::interface7::internal::adaptive_partition_type_base; -using tbb::internal::uint64_t; - -class partitioner: public adaptive_partition_type_base<partitioner> { - size_t m_right_part; -public: - partitioner(size_t rp) : adaptive_partition_type_base<partitioner>(), m_right_part(rp) { } - partitioner(partitioner &src, const proportional_split &p) : - adaptive_partition_type_base<partitioner>(src, p) { - // computation without potential overflow - size_t parts = p.left() + p.right(); - size_t int_part = g_threadNums.local() / parts; - size_t fraction = g_threadNums.local() - int_part * parts; // fraction < parts - size_t right_divisor = int_part * src.m_right_part + fraction * src.m_right_part / parts; - - // Division in 'right_divisor' very likely is inexact also. - size_t tolerance = 1; - size_t diff = (right_divisor < my_divisor) ? (my_divisor - right_divisor) : (right_divisor - my_divisor); - if (diff > tolerance) { - REPORT("Difference between %llu and %llu is >= 2, but should be <\n", uint64_t(my_divisor), - uint64_t(right_divisor)); - ASSERT(diff <= tolerance, "Overflow occurred in 'adaptive_partition_type_base'"); - } - } -}; - -void test() { - g_threadNums.local() = size_t(-1) / 4; - size_t right_part = 6; - partitioner fp(right_part); - // trying to generate overflow in adaptive_partition_type_base - partitioner(fp, proportional_split(2, right_part)); -} - -} // namespace overflow_during_split - -// In order to see splitting in action enable 'VISUALIZE_SPLITTING' -// macro alogn with the following options: -// - PARTITIONER='partitioner-type' - partitioner to be used for splitting -// - RANGE_SIZE='<number>' - number of initial iterations in tbb::blocked_range -// - THREADS_NUM='<number>' - simulated number of threads to be used -// If these options are not defined the default value will be used: -// PARTITIONER = tbb::affinity_partitioner, RANGE_SIZE = 123, THREADS_NUM = 10 -void visualize_splitting_topology() { - using test_partitioner_utils::BinaryTree; - using test_partitioner_utils::SimpleBody; - using uniform_iterations_distribution::test_case; - using tbb::blocked_range; - -#ifndef PARTITIONER -#define PARTITIONER tbb::affinity_partitioner -#endif -#define PARTITIONER_NAME2(p) #p -#define PARTITIONER_NAME(p) PARTITIONER_NAME2(p) -#ifndef RANGE_SIZE -#define RANGE_SIZE 123 -#endif -#ifndef THREADS_NUM -#define THREADS_NUM 10 -#endif - - g_threadNums.local() = THREADS_NUM; - PARTITIONER partitioner; - blocked_range<size_t> range(0, RANGE_SIZE); - BinaryTree t; - SimpleBody body; - test_case(range, body, partitioner, &t); - REPORT("partitioner: %s\nrange_size: %u\nthreads num: %u\n", PARTITIONER_NAME(PARTITIONER), unsigned(RANGE_SIZE), unsigned(THREADS_NUM)); - t.visualize(); -} - -int TestMain () { -#ifdef VISUALIZE_SPLITTING - visualize_splitting_topology(); -#else - uniform_iterations_distribution::test(); - overflow_during_split::test(); -#endif - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_pipeline.cpp b/src/tbb/src/test/test_pipeline.cpp deleted file 
mode 100644 index 7f1309eae..000000000 --- a/src/tbb/src/test/test_pipeline.cpp +++ /dev/null @@ -1,313 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_stddef.h" -#include "tbb/pipeline.h" -#include "tbb/spin_mutex.h" -#include "tbb/atomic.h" -#include <cstdlib> -#include <cstdio> -#include "harness.h" - -// In the test, variables related to token counting are declared -// as unsigned long to match definition of tbb::internal::Token. - -struct Buffer { - //! Indicates that the buffer is not used. - static const unsigned long unused = ~0ul; - unsigned long id; - //! True if Buffer is in use. - bool is_busy; - unsigned long sequence_number; - Buffer() : id(unused), is_busy(false), sequence_number(unused) {} -}; - -class waiting_probe { - size_t check_counter; -public: - waiting_probe() : check_counter(0) {} - bool required( ) { - ++check_counter; - return !((check_counter+1)&size_t(0x7FFF)); - } - void probe( ); // defined below -}; - -static const unsigned MaxStreamSize = 8000; -static const unsigned MaxStreamItemsPerThread = 1000; -//! 
Maximum number of filters allowed -static const unsigned MaxFilters = 5; -static unsigned StreamSize; -static const unsigned MaxBuffer = 8; -static bool Done[MaxFilters][MaxStreamSize]; -static waiting_probe WaitTest; -static unsigned out_of_order_count; - -#include "harness_concurrency_tracker.h" - -class BaseFilter: public tbb::filter { - bool* const my_done; - const bool my_is_last; - bool my_is_running; -public: - tbb::atomic<tbb::internal::Token> current_token; - BaseFilter( tbb::filter::mode type, bool done[], bool is_last ) : - filter(type), - my_done(done), - my_is_last(is_last), - my_is_running(false), - current_token() - {} - virtual Buffer* get_buffer( void* item ) { - current_token++; - return static_cast<Buffer*>(item); - } - /*override*/void* operator()( void* item ) { - Harness::ConcurrencyTracker ct; - if( is_serial() ) - ASSERT( !my_is_running, "premature entry to serial stage" ); - my_is_running = true; - Buffer* b = get_buffer(item); - if( b ) { - if( is_ordered() ) { - if( b->sequence_number == Buffer::unused ) - b->sequence_number = current_token-1; - else - ASSERT( b->sequence_number==current_token-1, "item arrived out of order" ); - } else if( is_serial() ) { - if( b->sequence_number != current_token-1 && b->sequence_number != Buffer::unused ) - out_of_order_count++; - } - ASSERT( b->id < StreamSize, NULL ); - ASSERT( !my_done[b->id], "duplicate processing of token?" ); - ASSERT( b->is_busy, NULL ); - my_done[b->id] = true; - if( my_is_last ) { - b->id = Buffer::unused; - b->sequence_number = Buffer::unused; - __TBB_store_with_release(b->is_busy, false); - } - } - my_is_running = false; - return b; - } -}; - -class InputFilter: public BaseFilter { - tbb::spin_mutex input_lock; - Buffer buffer[MaxBuffer]; - const tbb::internal::Token my_number_of_tokens; -public: - InputFilter( tbb::filter::mode type, tbb::internal::Token ntokens, bool done[], bool is_last ) : - BaseFilter(type, done, is_last), - my_number_of_tokens(ntokens) - {} - /*override*/Buffer* get_buffer( void* ) { - unsigned long next_input; - unsigned free_buffer = 0; - { // lock protected scope - tbb::spin_mutex::scoped_lock lock(input_lock); - if( current_token>=StreamSize ) - return NULL; - next_input = current_token++; - // once in a while, emulate waiting for input; this only makes sense for serial input - if( is_serial() && WaitTest.required() ) - WaitTest.probe( ); - while( free_buffer<MaxBuffer ) - if( __TBB_load_with_acquire(buffer[free_buffer].is_busy) ) - ++free_buffer; - else { - buffer[free_buffer].is_busy = true; - break; - } - } - ASSERT( free_buffer<my_number_of_tokens, "premature reuse of buffer" ); - Buffer* b = &buffer[free_buffer]; - ASSERT( &buffer[0] <= b, NULL ); - ASSERT( b <= &buffer[MaxBuffer-1], NULL ); - ASSERT( b->id == Buffer::unused, NULL); - b->id = next_input; - ASSERT( b->sequence_number == Buffer::unused, NULL); - return b; - } -}; - -//! The struct below repeats layout of tbb::pipeline. -struct hacked_pipeline { - tbb::filter* filter_list; - tbb::filter* filter_end; - tbb::empty_task* end_counter; - tbb::atomic<tbb::internal::Token> input_tokens; - tbb::atomic<tbb::internal::Token> token_counter; - bool end_of_input; - bool has_thread_bound_filters; - - virtual ~hacked_pipeline(); -}; - -//! The struct below repeats layout of tbb::internal::input_buffer. 
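// The hacked_* structs used by this test all follow one whitebox idiom: repeat
// the private layout of a real class field-for-field, then type-pun a pointer
// to reach state the public API hides (here, token counters forced to start
// near their wrap-around point). A condensed sketch of the idiom, with
// illustrative names that are not part of TBB:
//
//     class Real {                    // stand-in for tbb::pipeline
//         long counter;
//     public:
//         virtual ~Real() {}
//     };
//     struct RealMirror {             // must repeat Real's layout exactly
//         long counter;
//         virtual ~RealMirror();      // declared only, never instantiated
//     };
//     void poke( Real* r ) {
//         ((RealMirror*)(void*)r)->counter = -16;  // cf. ~tokens_before_wraparound
//     }
//
// The sizeof() ASSERTs in TestTrivialPipeline guard this trick: if the real
// layout ever drifts, the mirror structs (and the test) must be updated.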
-struct hacked_input_buffer { - void* array; // This should be changed to task_info* if ever used - void* my_sem; // This should be changed to semaphore* if ever used - tbb::internal::Token array_size; - tbb::internal::Token low_token; - tbb::spin_mutex array_mutex; - tbb::internal::Token high_token; - bool is_ordered; - bool is_bound; -}; - -//! The struct below repeats layout of tbb::filter. -struct hacked_filter { - tbb::filter* next_filter_in_pipeline; - hacked_input_buffer* my_input_buffer; - unsigned char my_filter_mode; - tbb::filter* prev_filter_in_pipeline; - tbb::pipeline* my_pipeline; - tbb::filter* next_segment; - - virtual ~hacked_filter(); -}; - -bool do_hacking_tests = true; -const tbb::internal::Token tokens_before_wraparound = 0xF; - -void TestTrivialPipeline( unsigned nthread, unsigned number_of_filters ) { - // There are 3 filter types: parallel, serial_in_order and serial_out_of_order - static const tbb::filter::mode filter_table[] = { tbb::filter::parallel, tbb::filter::serial_in_order, tbb::filter::serial_out_of_order}; - const unsigned number_of_filter_types = sizeof(filter_table)/sizeof(filter_table[0]); - REMARK( "testing with %lu threads and %lu filters\n", nthread, number_of_filters ); - ASSERT( number_of_filters<=MaxFilters, "too many filters" ); - ASSERT( sizeof(hacked_pipeline) == sizeof(tbb::pipeline), "layout changed for tbb::pipeline?" ); - ASSERT( sizeof(hacked_filter) == sizeof(tbb::filter), "layout changed for tbb::filter?" ); - tbb::internal::Token ntokens = nthread<MaxBuffer ? nthread : MaxBuffer; - // Count maximum iterations number - unsigned limit = 1; - for( unsigned i=0; i<number_of_filters; ++i) - limit *= number_of_filter_types; - // Iterate over possible filter sequences - for( unsigned numeral=0; numeral<limit; ++numeral ) { - // Build pipeline - tbb::pipeline pipeline; - if( do_hacking_tests ) { - // A private member of pipeline is hacked there for sake of testing wrap-around immunity. - tbb::internal::punned_cast<hacked_pipeline*>(&pipeline)->token_counter = ~tokens_before_wraparound; - } - tbb::filter* filter[MaxFilters]; - unsigned temp = numeral; - // parallelism_limit is the upper bound on the possible parallelism - unsigned parallelism_limit = 0; - for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types ) { - tbb::filter::mode filter_type = filter_table[temp%number_of_filter_types]; - const bool is_last = i==number_of_filters-1; - if( i==0 ) - filter[i] = new InputFilter(filter_type,ntokens,Done[i],is_last); - else - filter[i] = new BaseFilter(filter_type,Done[i],is_last); - pipeline.add_filter(*filter[i]); - // The ordered buffer of serial filters is hacked as well. - if ( filter[i]->is_serial() ) { - if( do_hacking_tests ) { - ((hacked_filter*)(void*)filter[i])->my_input_buffer->low_token = ~tokens_before_wraparound; - ((hacked_filter*)(void*)filter[i])->my_input_buffer->high_token = ~tokens_before_wraparound; - } - parallelism_limit += 1; - } else { - parallelism_limit = nthread; - } - } - // Account for clipping of parallelism. 
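// A worked example with illustrative numbers: with nthread == 8 and ntokens == 4,
// a sequence containing a parallel filter drives parallelism_limit up to
// nthread (or slightly above, since each serial filter adds one), and the two
// clamps below then cut it down to 4: no more than ntokens items are ever in
// flight, so observed concurrency cannot exceed that.
//
//     unsigned clip( unsigned limit, unsigned nthread, unsigned ntokens ) {
//         if( limit > nthread ) limit = nthread;   // same tests as the code below
//         if( limit > ntokens ) limit = ntokens;
//         return limit;                            // e.g. clip(10, 8, 4) == 4
//     }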
- if( parallelism_limit>nthread ) - parallelism_limit = nthread; - if( parallelism_limit>ntokens ) - parallelism_limit = (unsigned)ntokens; - Harness::ConcurrencyTracker::Reset(); - unsigned streamSizeLimit = min( MaxStreamSize, nthread * MaxStreamItemsPerThread ); - for( StreamSize=0; StreamSize<=streamSizeLimit; ) { - memset( Done, 0, sizeof(Done) ); - for( unsigned i=0; i<number_of_filters; ++i ) { - static_cast<BaseFilter*>(filter[i])->current_token=0; - } - pipeline.run( ntokens ); - ASSERT( !Harness::ConcurrencyTracker::InstantParallelism(), "filter still running?" ); - for( unsigned i=0; i<number_of_filters; ++i ) - ASSERT( static_cast<BaseFilter*>(filter[i])->current_token==StreamSize, NULL ); - for( unsigned i=0; i<MaxFilters; ++i ) - for( unsigned j=0; j<StreamSize; ++j ) { - ASSERT( Done[i][j]==(i<number_of_filters), NULL ); - } - if( StreamSize < min(nthread*8, 32u) ) { - ++StreamSize; - } else { - StreamSize = StreamSize*8/3; - } - } - if( Harness::ConcurrencyTracker::PeakParallelism() < parallelism_limit ) - REMARK( "nthread=%lu ntokens=%lu MaxParallelism=%lu parallelism_limit=%lu\n", - nthread, ntokens, Harness::ConcurrencyTracker::PeakParallelism(), parallelism_limit ); - for( unsigned i=0; i < number_of_filters; ++i ) { - delete filter[i]; - filter[i] = NULL; - } - pipeline.clear(); - } -} - -#include "harness_cpu.h" - -static int nthread; // knowing number of threads is necessary to call TestCPUUserTime - -void waiting_probe::probe( ) { - if( nthread==1 ) return; - REMARK("emulating wait for input\n"); - // Test that threads sleep while no work. - // The master doesn't sleep so there could be 2 active threads if a worker is waiting for input - TestCPUUserTime(nthread, 2); -} - -#include "tbb/task_scheduler_init.h" - -int TestMain () { - out_of_order_count = 0; - if( MinThread<1 ) { - REPORT("must have at least one thread"); - exit(1); - } - if( tbb::TBB_runtime_interface_version()>TBB_INTERFACE_VERSION) { - REMARK("Warning: implementation dependent tests disabled\n"); - do_hacking_tests = false; - } - - // Test with varying number of threads. - for( nthread=MinThread; nthread<=MaxThread; ++nthread ) { - // Initialize TBB task scheduler - tbb::task_scheduler_init init(nthread); - - // Test pipelines with n filters - for( unsigned n=0; n<=MaxFilters; ++n ) - TestTrivialPipeline(nthread,n); - - // Test that all workers sleep when no work - TestCPUUserTime(nthread); - } - if( !out_of_order_count ) - REPORT("Warning: out of order serial filter received tokens in order\n"); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_pipeline_with_tbf.cpp b/src/tbb/src/test/test_pipeline_with_tbf.cpp deleted file mode 100644 index 744b8d92d..000000000 --- a/src/tbb/src/test/test_pipeline_with_tbf.cpp +++ /dev/null @@ -1,527 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/pipeline.h" -#include "tbb/spin_mutex.h" -#include "tbb/atomic.h" -#include "tbb/tbb_thread.h" -#include <cstdlib> -#include <cstdio> -#include "harness.h" - -// In the test, variables related to token counting are declared -// as unsigned long to match definition of tbb::internal::Token. - -//! Id of thread that first executes work on non-thread-bound stages -tbb::tbb_thread::id thread_id; -//! Zero thread id -tbb::tbb_thread::id id0; -//! True if non-thread-bound stages must be executed on one thread -bool is_serial_execution; -double sleeptime; // how long is a non-thread-bound stage to sleep? - -struct Buffer { - //! Indicates that the buffer is not used. - static const unsigned long unused = ~0ul; - unsigned long id; - //! True if Buffer is in use. - bool is_busy; - unsigned long sequence_number; - Buffer() : id(unused), is_busy(false), sequence_number(unused) {} -}; - -class waiting_probe { - size_t check_counter; -public: - waiting_probe() : check_counter(0) {} - bool required( ) { - ++check_counter; - return !((check_counter+1)&size_t(0x7FFF)); - } - void probe( ); // defined below -}; - -static const unsigned MaxStreamSize = 8000; -//! Maximum number of filters allowed -static const unsigned MaxFilters = 4; -static unsigned StreamSize; -static const unsigned MaxBuffer = 8; -static bool Done[MaxFilters][MaxStreamSize]; -static waiting_probe WaitTest; -static unsigned out_of_order_count; - -#include "harness_concurrency_tracker.h" - -template<typename T> -class BaseFilter: public T { - bool* const my_done; - const bool my_is_last; - bool my_is_running; -public: - tbb::atomic<tbb::internal::Token> current_token; - BaseFilter( tbb::filter::mode type, bool done[], bool is_last ) : - T(type), - my_done(done), - my_is_last(is_last), - my_is_running(false), - current_token() - {} - virtual Buffer* get_buffer( void* item ) { - current_token++; - return static_cast<Buffer*>(item); - } - /*override*/void* operator()( void* item ) { - // Check if work is done only on one thread when ntokens==1 or - // when pipeline has only one filter that is serial and non-thread-bound - if( is_serial_execution && !this->is_bound() ) { - // Get id of current thread - tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id(); - // At first execution, set thread_id to current thread id. - // Serialized execution is expected, so there should be no race. 
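// This first-writer-wins latch is a standard test idiom: record the id of the
// first thread to run, then require every later run to observe the same id.
// Reduced to its essentials (Checker is an illustrative name, not part of this
// test):
//
//     struct Checker {
//         tbb::tbb_thread::id first;   // default-constructed id: none recorded yet
//         void visit() {
//             tbb::tbb_thread::id me = tbb::this_tbb_thread::get_id();
//             if( first == tbb::tbb_thread::id() ) first = me;
//             ASSERT( first == me, "expected execution on a single thread" );
//         }
//     };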
- if( thread_id == id0 ) - thread_id = id; - // Check if work is done on one thread - ASSERT( thread_id == id, "non-thread-bound stages executed on different threads when must be executed on a single one"); - } - Harness::ConcurrencyTracker ct; - if( this->is_serial() ) - ASSERT( !my_is_running, "premature entry to serial stage" ); - my_is_running = true; - Buffer* b = get_buffer(item); - if( b ) { - if(!this->is_bound() && sleeptime > 0) { - if(this->is_serial()) { - Harness::Sleep((int)sleeptime); - } - else { - // early parallel tokens sleep longer... - int i = (int)((5 - b->sequence_number) * sleeptime); - if(i < (int)sleeptime) i = (int)sleeptime; - Harness::Sleep(i); - } - } - if( this->is_ordered() ) { - if( b->sequence_number == Buffer::unused ) - b->sequence_number = current_token-1; - else - ASSERT( b->sequence_number==current_token-1, "item arrived out of order" ); - } else if( this->is_serial() ) { - if( b->sequence_number != current_token-1 && b->sequence_number != Buffer::unused ) - out_of_order_count++; - } - ASSERT( b->id < StreamSize, NULL ); - ASSERT( !my_done[b->id], "duplicate processing of token?" ); - ASSERT( b->is_busy, NULL ); - my_done[b->id] = true; - if( my_is_last ) { - b->id = Buffer::unused; - b->sequence_number = Buffer::unused; - __TBB_store_with_release(b->is_busy, false); - } - } - my_is_running = false; - return b; - } -}; - -template<typename T> -class InputFilter: public BaseFilter<T> { - tbb::spin_mutex input_lock; - Buffer buffer[MaxBuffer]; - const tbb::internal::Token my_number_of_tokens; -public: - InputFilter( tbb::filter::mode type, tbb::internal::Token ntokens, bool done[], bool is_last ) : - BaseFilter<T>(type, done, is_last), - my_number_of_tokens(ntokens) - {} - /*override*/Buffer* get_buffer( void* ) { - unsigned long next_input; - unsigned free_buffer = 0; - { // lock protected scope - tbb::spin_mutex::scoped_lock lock(input_lock); - if( this->current_token>=StreamSize ) - return NULL; - next_input = this->current_token++; - // once in a while, emulate waiting for input; this only makes sense for serial input - if( this->is_serial() && WaitTest.required() ) - WaitTest.probe( ); - while( free_buffer<MaxBuffer ) - if( __TBB_load_with_acquire(buffer[free_buffer].is_busy) ) - ++free_buffer; - else { - buffer[free_buffer].is_busy = true; - break; - } - } - ASSERT( free_buffer<my_number_of_tokens, "premature reuse of buffer" ); - Buffer* b = &buffer[free_buffer]; - ASSERT( &buffer[0] <= b, NULL ); - ASSERT( b <= &buffer[MaxBuffer-1], NULL ); - ASSERT( b->id == Buffer::unused, NULL); - b->id = next_input; - ASSERT( b->sequence_number == Buffer::unused, NULL); - return b; - } -}; - -class process_loop { -public: - void operator()( tbb::thread_bound_filter* tbf ) { - tbb::thread_bound_filter::result_type flag; - do - flag = tbf->process_item(); - while( flag != tbb::thread_bound_filter::end_of_stream ); - } -}; - -//! The struct below repeats layout of tbb::pipeline. -struct hacked_pipeline { - tbb::filter* filter_list; - tbb::filter* filter_end; - tbb::empty_task* end_counter; - tbb::atomic<tbb::internal::Token> input_tokens; - tbb::atomic<tbb::internal::Token> global_token_counter; - bool end_of_input; - bool has_thread_bound_filters; - - virtual ~hacked_pipeline(); -}; - -//! The struct below repeats layout of tbb::internal::ordered_buffer. 
-struct hacked_ordered_buffer { - void* array; // This should be changed to task_info* if ever used - tbb::internal::Token array_size; - tbb::internal::Token low_token; - tbb::spin_mutex array_mutex; - tbb::internal::Token high_token; - bool is_ordered; - bool is_bound; -}; - -//! The struct below repeats layout of tbb::filter. -struct hacked_filter { - tbb::filter* next_filter_in_pipeline; - hacked_ordered_buffer* input_buffer; - unsigned char my_filter_mode; - tbb::filter* prev_filter_in_pipeline; - tbb::pipeline* my_pipeline; - tbb::filter* next_segment; - - virtual ~hacked_filter(); -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - // Suppress compiler warning about constant conditional expression - #pragma warning (disable: 4127) -#endif - -void clear_global_state() { - Harness::ConcurrencyTracker::Reset(); - memset( Done, 0, sizeof(Done) ); - thread_id = id0; - is_serial_execution = false; -} - - -class PipelineTest { - // There are 3 non-thread-bound filter types: serial_in_order and serial_out_of_order, parallel - static const tbb::filter::mode non_tb_filters_table[3]; // = { tbb::filter::serial_in_order, tbb::filter::serial_out_of_order, tbb::filter::parallel}; - // There are 2 thread-bound filter types: serial_in_order and serial_out_of_order - static const tbb::filter::mode tb_filters_table[2]; // = { tbb::filter::serial_in_order, tbb::filter::serial_out_of_order }; - - static const unsigned number_of_non_tb_filter_types = sizeof(non_tb_filters_table)/sizeof(non_tb_filters_table[0]); - static const unsigned number_of_tb_filter_types = sizeof(tb_filters_table)/sizeof(tb_filters_table[0]); - static const unsigned number_of_filter_types = number_of_non_tb_filter_types + number_of_tb_filter_types; - // static unsigned my_nthread; - public: - static double TestOneConfiguration( unsigned numeral, unsigned nthread, unsigned number_of_filters, tbb::internal::Token ntokens); - static void TestTrivialPipeline( unsigned nthread, unsigned number_of_filters ); - static void TestIdleSpinning(unsigned nthread); - - static void PrintConfiguration(unsigned numeral, unsigned nFilters) { - REMARK( "{ "); - for( unsigned i = 0; i < nFilters; ++i) { - switch( numeral % number_of_filter_types ) { - case 0: REMARK("s "); break; - case 1: REMARK("B "); break; - case 2: REMARK("o "); break; - case 3: REMARK("Bo "); break; - case 4: REMARK("P "); break; - default: REMARK(" ** ERROR** "); break; - } - numeral /= number_of_filter_types; - } - REMARK("}"); - } - static bool ContainsBoundFilter(unsigned numeral) { - for( ;numeral != 0; numeral /= number_of_filter_types) - if(numeral & 0x1) return true; - return false; - } -}; - -const tbb::filter::mode PipelineTest::non_tb_filters_table[3] = { - tbb::filter::serial_in_order, // 0 - tbb::filter::serial_out_of_order, // 2 - tbb::filter::parallel // 4 -}; -const tbb::filter::mode PipelineTest::tb_filters_table[2] = { - tbb::filter::serial_in_order, // 1 - tbb::filter::serial_out_of_order // 3 -}; - -#include "harness_cpu.h" - -double PipelineTest::TestOneConfiguration(unsigned numeral, unsigned nthread, unsigned number_of_filters, tbb::internal::Token ntokens) -{ - // Build pipeline - tbb::pipeline pipeline; - tbb::filter* filter[MaxFilters]; - unsigned temp = numeral; - // parallelism_limit is the upper bound on the possible parallelism - unsigned parallelism_limit = 0; - // number of thread-bound-filters in the current sequence - unsigned number_of_tb_filters = 0; - // ordinal numbers of thread-bound-filters 
in the current sequence - unsigned array_of_tb_filter_numbers[MaxFilters]; - if(!ContainsBoundFilter(numeral)) return 0.0; - for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types ) { - bool is_bound = temp%number_of_filter_types&0x1; - tbb::filter::mode filter_type; - if( is_bound ) { - filter_type = tb_filters_table[temp%number_of_filter_types/number_of_non_tb_filter_types]; - } else - filter_type = non_tb_filters_table[temp%number_of_filter_types/number_of_tb_filter_types]; - const bool is_last = i==number_of_filters-1; - if( is_bound ) { - if( i == 0 ) - filter[i] = new InputFilter<tbb::thread_bound_filter>(filter_type,ntokens,Done[i],is_last); - else - filter[i] = new BaseFilter<tbb::thread_bound_filter>(filter_type,Done[i],is_last); - array_of_tb_filter_numbers[number_of_tb_filters] = i; - number_of_tb_filters++; - } else { - if( i == 0 ) - filter[i] = new InputFilter<tbb::filter>(filter_type,ntokens,Done[i],is_last); - else - filter[i] = new BaseFilter<tbb::filter>(filter_type,Done[i],is_last); - } - pipeline.add_filter(*filter[i]); - if ( filter[i]->is_serial() ) { - parallelism_limit += 1; - } else { - parallelism_limit = nthread; - } - } - ASSERT(number_of_tb_filters,NULL); - clear_global_state(); - // Account for clipping of parallelism. - if( parallelism_limit>nthread ) - parallelism_limit = nthread; - if( parallelism_limit>ntokens ) - parallelism_limit = (unsigned)ntokens; - StreamSize = nthread; // min( MaxStreamSize, nthread * MaxStreamItemsPerThread ); - - for( unsigned i=0; i<number_of_filters; ++i ) { - static_cast<BaseFilter<tbb::filter>*>(filter[i])->current_token=0; - } - tbb::tbb_thread* t[MaxFilters]; - for( unsigned j = 0; j<number_of_tb_filters; j++) - t[j] = new tbb::tbb_thread(process_loop(), static_cast<tbb::thread_bound_filter*>(filter[array_of_tb_filter_numbers[j]])); - if( ntokens == 1 || ( number_of_filters == 1 && number_of_tb_filters == 0 && filter[0]->is_serial() )) - is_serial_execution = true; - double strttime = GetCPUUserTime(); - pipeline.run( ntokens ); - double endtime = GetCPUUserTime(); - for( unsigned j = 0; j<number_of_tb_filters; j++) - t[j]->join(); - ASSERT( !Harness::ConcurrencyTracker::InstantParallelism(), "filter still running?" ); - for( unsigned i=0; i<number_of_filters; ++i ) - ASSERT( static_cast<BaseFilter<tbb::filter>*>(filter[i])->current_token==StreamSize, NULL ); - for( unsigned i=0; i<MaxFilters; ++i ) - for( unsigned j=0; j<StreamSize; ++j ) { - ASSERT( Done[i][j]==(i<number_of_filters), NULL ); - } - if( Harness::ConcurrencyTracker::PeakParallelism() < parallelism_limit ) - REMARK( "nthread=%lu ntokens=%lu MaxParallelism=%lu parallelism_limit=%lu\n", - nthread, ntokens, Harness::ConcurrencyTracker::PeakParallelism(), parallelism_limit ); - for( unsigned i=0; i < number_of_filters; ++i ) { - delete filter[i]; - filter[i] = NULL; - } - for( unsigned j = 0; j<number_of_tb_filters; j++) - delete t[j]; - pipeline.clear(); - return endtime - strttime; -} // TestOneConfiguration - -void PipelineTest::TestTrivialPipeline( unsigned nthread, unsigned number_of_filters ) { - - REMARK( "testing with %lu threads and %lu filters\n", nthread, number_of_filters ); - ASSERT( number_of_filters<=MaxFilters, "too many filters" ); - tbb::internal::Token max_tokens = nthread < MaxBuffer ? 
nthread : MaxBuffer; - // The loop has 1 iteration if max_tokens=1 and 2 iterations if max_tokens>1: - // one iteration for ntokens=1 and second for ntokens=max_tokens - // Iteration for ntokens=1 is required in each test case to check if pipeline run only on one thread - unsigned max_iteration = max_tokens > 1 ? 2 : 1; - tbb::internal::Token ntokens = 1; - for( unsigned iteration = 0; iteration < max_iteration; iteration++) { - if( iteration > 0 ) - ntokens = max_tokens; - // Count maximum iterations number - unsigned limit = 1; - for( unsigned i=0; i<number_of_filters; ++i) - limit *= number_of_filter_types; - // Iterate over possible filter sequences - for( unsigned numeral=0; numeral<limit; ++numeral ) { - REMARK( "testing configuration %lu of %lu\n", numeral, limit ); - (void)TestOneConfiguration(numeral, nthread, number_of_filters, ntokens); - } - } -} - -// varying times for sleep result in different user times for all pipelines. -// So we compare the running time of an all non-TBF pipeline with different (with -// luck representative) TBF configurations. -// -// We run the tests multiple times and compare the average runtimes for those cases -// that don't return 0 user time. configurations that exceed the allowable extra -// time are reported. -void PipelineTest::TestIdleSpinning( unsigned nthread) { - unsigned sample_setups[] = { - // in the comments below, s == serial, o == serial out-of-order, - // B == thread bound, Bo == thread bound out-of-order, p == parallel - 1, // B s s s - 5, // s B s s - 25, // s s B s - 125, // s s s B - 6, // B B s s - 26, // B s B s - 126, // B s s B - 30, // s B B s - 130, // s B s B - 150, // s s B B - 31, // B B B s - 131, // B B s B - 155, // s B B B - 495, // s p p Bo - 71, // B p o s - 355, // s B p o - 95, // s p Bo s - 475, // s s p Bo - }; - const int nsetups = sizeof(sample_setups) / sizeof(unsigned); - const int ntests = 4; - const double bignum = 1000000000.0; - const double allowable_slowdown = 3.5; - unsigned zero_count = 0; - - REMARK( "testing idle spinning with %lu threads\n", nthread ); - tbb::internal::Token max_tokens = nthread < MaxBuffer ? nthread : MaxBuffer; - for( int i=0; i<nsetups; ++i ) { - unsigned numeral = sample_setups[i]; - unsigned temp = numeral; - unsigned nbound = 0; - while(temp) { - if((temp%number_of_filter_types)&0x01) nbound++; - temp /= number_of_filter_types; - } - sleeptime = 20.0; - double s0 = bignum; - double s1 = bignum; - int v0cnt = 0; - int v1cnt = 0; - double s0sum = 0.0; - double s1sum = 0.0; - REMARK(" TestOneConfiguration, pipeline == "); - PrintConfiguration(numeral, MaxFilters); - REMARK(", max_tokens== %d\n", (int)max_tokens); - for(int j = 0; j < ntests; ++j) { - double s1a = TestOneConfiguration(numeral, nthread, MaxFilters, max_tokens); - double s0a = TestOneConfiguration((unsigned)0, nthread, MaxFilters, max_tokens); - s1sum += s1a; - s0sum += s0a; - if(s0a > 0.0) { - ++v0cnt; - s0 = (s0a < s0) ? s0a : s0; - } - else { - ++zero_count; - } - if(s1a > 0.0) { - ++v1cnt; - s1 = (s1a < s1) ? 
s1a : s1; - } - else { - ++zero_count; - } - } - if(s0 == bignum || s1 == bignum) continue; - s0sum /= (double)v0cnt; - s1sum /= (double)v1cnt; - double slowdown = (s1sum-s0sum)/s0sum; - if(slowdown > allowable_slowdown) - REMARK( "with %lu threads configuration %lu has slowdown > %g (%g)\n", nthread, numeral, allowable_slowdown, slowdown ); - } - REMARK("Total of %lu zero times\n", zero_count); -} - -static int nthread; // knowing number of threads is necessary to call TestCPUUserTime - -void waiting_probe::probe( ) { - if( nthread==1 ) return; - REMARK("emulating wait for input\n"); - // Test that threads sleep while no work. - // The master doesn't sleep so there could be 2 active threads if a worker is waiting for input - TestCPUUserTime(nthread, 2); -} - -#include "tbb/task_scheduler_init.h" - -int TestMain () { - out_of_order_count = 0; - if( MinThread<1 ) { - REPORT("must have at least one thread"); - exit(1); - } - - // Test with varying number of threads. - for( nthread=MinThread; nthread<=MaxThread; ++nthread ) { - // Initialize TBB task scheduler - tbb::task_scheduler_init init(nthread); - sleeptime = 0.0; // msec : 0 == no_timing, > 0, each filter stage sleeps for sleeptime - - // Test pipelines with 1 and maximal number of filters - for( unsigned n=1; n<=MaxFilters; n*=MaxFilters ) { - // Thread-bound stages are serviced by user-created threads; those - // don't run the pipeline and don't service non-thread-bound stages - PipelineTest::TestTrivialPipeline(nthread,n); - } - - // Test that all workers sleep when no work - TestCPUUserTime(nthread); - if((unsigned)nthread >= MaxFilters) // test works when number of threads >= number of stages - PipelineTest::TestIdleSpinning(nthread); - } - if( !out_of_order_count ) - REPORT("Warning: out of order serial filter received tokens in order\n"); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_priority_queue_node.cpp b/src/tbb/src/test/test_priority_queue_node.cpp deleted file mode 100644 index e596e5069..000000000 --- a/src/tbb/src/test/test_priority_queue_node.cpp +++ /dev/null @@ -1,348 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// TO DO: Add overlapping put / receive tests - -#include "harness.h" -#include "tbb/flow_graph.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -#include "harness_graph.h" -#endif - -#include <cstdio> - -#define N 10 -#define C 10 - -template< typename T > -void spin_try_get( tbb::flow::priority_queue_node<T> &q, T &value ) { - while ( q.try_get(value) != true ) ; -} - -template< typename T > -void check_item( T* next_value, T &value ) { - int tid = value / N; - int offset = value % N; - ASSERT( next_value[tid] == T(offset), NULL ); - ++next_value[tid]; -} - -template< typename T > -struct parallel_puts : NoAssign { - tbb::flow::priority_queue_node<T> &my_q; - parallel_puts( tbb::flow::priority_queue_node<T> &q ) : my_q(q) {} - void operator()(int i) const { - for (int j = 0; j < N; ++j) { - bool msg = my_q.try_put( T(N*i + j) ); - ASSERT( msg == true, NULL ); - } - } -}; - -template< typename T > -struct parallel_gets : NoAssign { - tbb::flow::priority_queue_node<T> &my_q; - parallel_gets( tbb::flow::priority_queue_node<T> &q) : my_q(q) {} - void operator()(int) const { - T prev; - spin_try_get( my_q, prev ); - for (int j = 0; j < N-1; ++j) { - T v; - spin_try_get( my_q, v ); - ASSERT(v < prev, NULL); - } - } -}; - -template< typename T > -struct parallel_put_get : NoAssign { - tbb::flow::priority_queue_node<T> &my_q; - parallel_put_get( tbb::flow::priority_queue_node<T> &q ) : my_q(q) {} - void operator()(int tid) const { - for ( int i = 0; i < N; i+=C ) { - int j_end = ( N < i + C ) ? N : i + C; - // dump about C values into the Q - for ( int j = i; j < j_end; ++j ) { - ASSERT( my_q.try_put( T (N*tid + j ) ) == true, NULL ); - } - // receive about C values from the Q - for ( int j = i; j < j_end; ++j ) { - T v; - spin_try_get( my_q, v ); - } - } - } -}; - -// -// Tests -// -// Item can be reserved, released, consumed ( single serial receiver ) -// -template< typename T > -int test_reservation(int) { - tbb::flow::graph g; - T bogus_value(-1); - - // Simple tests - tbb::flow::priority_queue_node<T> q(g); - - q.try_put(T(1)); - q.try_put(T(2)); - q.try_put(T(3)); - g.wait_for_all(); - - T v=bogus_value, w=bogus_value; - ASSERT( q.try_reserve(v) == true, NULL ); - ASSERT( v == T(3), NULL ); - ASSERT( q.try_release() == true, NULL ); - v = bogus_value; - g.wait_for_all(); - ASSERT( q.try_reserve(v) == true, NULL ); - ASSERT( v == T(3), NULL ); - ASSERT( q.try_consume() == true, NULL ); - v = bogus_value; - g.wait_for_all(); - - ASSERT( q.try_get(v) == true, NULL ); - ASSERT( v == T(2), NULL ); - v = bogus_value; - g.wait_for_all(); - - ASSERT( q.try_reserve(v) == true, NULL ); - ASSERT( v == T(1), NULL ); - ASSERT( q.try_reserve(w) == false, NULL ); - ASSERT( w == bogus_value, NULL ); - ASSERT( q.try_get(w) == false, NULL ); - ASSERT( w == bogus_value, NULL ); - ASSERT( q.try_release() == true, NULL ); - v = bogus_value; - g.wait_for_all(); - ASSERT( q.try_reserve(v) == true, NULL ); - ASSERT( v == T(1), NULL ); - ASSERT( q.try_consume() == true, NULL ); - v = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get(v) == false, NULL ); - return 0; -} - -// -// Tests -// -// multilpe parallel senders, items in FIFO (relatively to sender) order -// multilpe parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received -// * overlapped puts / gets -// * all puts finished before any getS -// -template< typename T > -int test_parallel(int num_threads) { - tbb::flow::graph g; - 
tbb::flow::priority_queue_node<T> q(g); - tbb::flow::priority_queue_node<T> q2(g); - tbb::flow::priority_queue_node<T> q3(g); - T bogus_value(-1); - T j = bogus_value; - - NativeParallelFor( num_threads, parallel_puts<T>(q) ); - for (int i = num_threads*N -1; i>=0; --i) { - spin_try_get( q, j ); - ASSERT(j == i, NULL); - j = bogus_value; - } - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - NativeParallelFor( num_threads, parallel_puts<T>(q) ); - g.wait_for_all(); - NativeParallelFor( num_threads, parallel_gets<T>(q) ); - g.wait_for_all(); - j = bogus_value; - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - NativeParallelFor( num_threads, parallel_put_get<T>(q) ); - g.wait_for_all(); - j = bogus_value; - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::make_edge( q, q2 ); - tbb::flow::make_edge( q2, q3 ); - NativeParallelFor( num_threads, parallel_puts<T>(q) ); - g.wait_for_all(); - NativeParallelFor( num_threads, parallel_gets<T>(q3) ); - g.wait_for_all(); - j = bogus_value; - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( q3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // test copy constructor - ASSERT( q.remove_successor( q2 ) == true, NULL ); - NativeParallelFor( num_threads, parallel_puts<T>(q) ); - tbb::flow::priority_queue_node<T> q_copy(q); - g.wait_for_all(); - j = bogus_value; - ASSERT( q_copy.try_get( j ) == false, NULL ); - ASSERT( q.register_successor( q_copy ) == true, NULL ); - for (int i = num_threads*N -1; i>=0; --i) { - spin_try_get( q_copy, j ); - ASSERT(j == i, NULL); - j = bogus_value; - } - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( q_copy.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - return 0; -} - -// -// Tests -// -// Predecessors cannot be registered -// Empty Q rejects item requests -// Single serial sender, items in FIFO order -// Chained Qs ( 2 & 3 ), single sender, items at last Q in FIFO order -// - -template< typename T > -int test_serial() { - tbb::flow::graph g; - T bogus_value(-1); - - tbb::flow::priority_queue_node<T> q(g); - tbb::flow::priority_queue_node<T> q2(g); - T j = bogus_value; - - // - // Rejects attempts to add / remove predecessor - // Rejects request from empty Q - // - ASSERT( q.register_predecessor( q2 ) == false, NULL ); - ASSERT( q.remove_predecessor( q2 ) == false, NULL ); - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // - // Simple puts and gets - // - - for (int i = 0; i < N; ++i) - ASSERT( q.try_put( T(i) ), NULL ); - for (int i = N-1; i >=0; --i) { - j = bogus_value; - spin_try_get( q, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::make_edge( q, q2 ); - - for (int i = 0; i < N; ++i) - ASSERT( q.try_put( T(i) ), NULL ); - g.wait_for_all(); - for (int i = N-1; i >= 0; --i) { - j = bogus_value; - spin_try_get( q2, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::remove_edge( q, q2 ); - ASSERT( q.try_put( 1 ) == true, NULL ); - g.wait_for_all(); 
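// Beyond edge removal (remove_edge above must stop q2 from receiving, as the
// next asserts check), these tests pin down the node's ordering contract:
// priority_queue_node releases items highest-first under its default std::less
// comparator. That contract in isolation, as an illustrative standalone
// snippet using the same tbb::flow API:
//
//     tbb::flow::graph g2;
//     tbb::flow::priority_queue_node<int> pq( g2 );
//     for( int i = 0; i < 4; ++i ) pq.try_put( i );
//     g2.wait_for_all();         // let the puts settle before draining
//     int v = -1;
//     pq.try_get( v );           // v == 3: highest value first
//     pq.try_get( v );           // v == 2, and so on downward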
- ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( q.try_get( j ) == true, NULL ); - ASSERT( j == 1, NULL ); - - tbb::flow::priority_queue_node<T> q3(g); - tbb::flow::make_edge( q, q2 ); - tbb::flow::make_edge( q2, q3 ); - - for (int i = 0; i < N; ++i) - ASSERT( q.try_put( T(i) ), NULL ); - g.wait_for_all(); - for (int i = N-1; i >= 0; --i) { - j = bogus_value; - spin_try_get( q3, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( q3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::remove_edge( q, q2 ); - ASSERT( q.try_put( 1 ) == true, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( q3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( q.try_get( j ) == true, NULL ); - ASSERT( j == 1, NULL ); - - return 0; -} - -int TestMain() { - tbb::tick_count start = tbb::tick_count::now(), stop; - for (int p = 2; p <= 4; ++p) { - tbb::task_scheduler_init init(p); - test_serial<int>(); - test_reservation<int>(p); - test_parallel<int>(p); - } - stop = tbb::tick_count::now(); - REMARK("Priority_Queue_Node Time=%6.6f\n", (stop-start).seconds()); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - REMARK("Testing resets\n"); - test_resets<int,tbb::flow::priority_queue_node<int> >(); - test_resets<float,tbb::flow::priority_queue_node<float> >(); - test_buffer_extract<tbb::flow::priority_queue_node<int> >().run_tests(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_queue_node.cpp b/src/tbb/src/test/test_queue_node.cpp deleted file mode 100644 index 00d0741f8..000000000 --- a/src/tbb/src/test/test_queue_node.cpp +++ /dev/null @@ -1,469 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/
-
-// TO DO: Add overlapping put / receive tests
-
-#include "harness.h"
-#include "tbb/flow_graph.h"
-#include "tbb/task_scheduler_init.h"
-#include "tbb/tick_count.h"
-#include "harness_checktype.h"
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-#include "harness_graph.h"
-#endif
-
-#include <cstdio>
-
-#define N 1000
-#define C 10
-
-template< typename T >
-void spin_try_get( tbb::flow::queue_node<T> &q, T &value ) {
-    while ( q.try_get(value) != true ) ;
-}
-
-template< typename T >
-void check_item( T* next_value, T &value ) {
-    int tid = value / N;
-    int offset = value % N;
-    ASSERT( next_value[tid] == T(offset), NULL );
-    ++next_value[tid];
-}
-
-template< typename T >
-struct parallel_puts : NoAssign {
-
-    tbb::flow::queue_node<T> &my_q;
-
-    parallel_puts( tbb::flow::queue_node<T> &q ) : my_q(q) {}
-
-    void operator()(int i) const {
-        for (int j = 0; j < N; ++j) {
-            bool msg = my_q.try_put( T(N*i + j) );
-            ASSERT( msg == true, NULL );
-        }
-    }
-
-};
-
-template< typename T >
-struct touches {
-
-    bool **my_touches;
-    T **my_last_touch;
-    int my_num_threads;
-
-    touches( int num_threads ) : my_num_threads(num_threads) {
-        my_last_touch = new T* [my_num_threads];
-        my_touches = new bool* [my_num_threads];
-        for ( int p = 0; p < my_num_threads; ++p) {
-            my_last_touch[p] = new T[my_num_threads];
-            for ( int p2 = 0; p2 < my_num_threads; ++p2)
-                my_last_touch[p][p2] = -1;
-
-            my_touches[p] = new bool[N*my_num_threads];
-            for ( int n = 0; n < N*my_num_threads; ++n)
-                my_touches[p][n] = false;
-        }
-    }
-
-    ~touches() {
-        for ( int p = 0; p < my_num_threads; ++p) {
-            delete [] my_touches[p];
-            delete [] my_last_touch[p];
-        }
-        delete [] my_touches;
-        delete [] my_last_touch;
-    }
-
-    bool check( int tid, T v ) {
-        int v_tid = v / N;
-        if ( my_touches[tid][v] != false ) {
-            printf("Error: value seen twice by local thread\n");
-            return false;
-        }
-        if ( v <= my_last_touch[tid][v_tid] ) {
-            printf("Error: value seen in wrong order by local thread\n");
-            return false;
-        }
-        my_last_touch[tid][v_tid] = v;
-        my_touches[tid][v] = true;
-        return true;
-    }
-
-    bool validate_touches() {
-        bool *all_touches = new bool[N*my_num_threads];
-        for ( int n = 0; n < N*my_num_threads; ++n)
-            all_touches[n] = false;
-
-        for ( int p = 0; p < my_num_threads; ++p) {
-            for ( int n = 0; n < N*my_num_threads; ++n) {
-                if ( my_touches[p][n] == true ) {
-                    ASSERT( all_touches[n] == false, "value seen by more than one thread\n" );
-                    all_touches[n] = true;
-                }
-            }
-        }
-        for ( int n = 0; n < N*my_num_threads; ++n) {
-            if ( !all_touches[n] )
-                printf("No touch at %d, my_num_threads = %d\n", n, my_num_threads);
-            //ASSERT( all_touches[n] == true, "value not seen by any thread\n" );
-        }
-        delete [] all_touches;
-        return true;
-    }
-
-};
-
-template< typename T >
-struct parallel_gets : NoAssign {
-
-    tbb::flow::queue_node<T> &my_q;
-    touches<T> &my_touches;
-
-    parallel_gets( tbb::flow::queue_node<T> &q, touches<T> &t) : my_q(q), my_touches(t) {}
-
-    void operator()(int tid) const {
-        for (int j = 0; j < N; ++j) {
-            T v;
-            spin_try_get( my_q, v );
-            my_touches.check( tid, v );
-        }
-    }
-
-};
-
-template< typename T >
-struct parallel_put_get : NoAssign {
-
-    tbb::flow::queue_node<T> &my_q;
-    touches<T> &my_touches;
-
-    parallel_put_get( tbb::flow::queue_node<T> &q, touches<T> &t ) : my_q(q), my_touches(t) {}
-
-    void operator()(int tid) const {
-
-        for ( int i = 0; i < N; i+=C ) {
-            int j_end = ( N < i + C ) ? N : i + C;
-            // dump about C values into the Q
-            for ( int j = i; j < j_end; ++j ) {
-                ASSERT( my_q.try_put( T (N*tid + j ) ) == true, NULL );
-            }
-            // receive about C values from the Q
-            for ( int j = i; j < j_end; ++j ) {
-                T v;
-                spin_try_get( my_q, v );
-                my_touches.check( tid, v );
-            }
-        }
-    }
-
-};
-
-//
-// Tests
-//
-// Item can be reserved, released, consumed ( single serial receiver )
-//
-template< typename T >
-int test_reservation() {
-    tbb::flow::graph g;
-    T bogus_value(-1);
-
-    // Simple tests
-    tbb::flow::queue_node<T> q(g);
-
-    q.try_put(T(1));
-    q.try_put(T(2));
-    q.try_put(T(3));
-
-    T v;
-    ASSERT( q.reserve_item(v) == true, NULL );
-    ASSERT( v == T(1), NULL );
-    ASSERT( q.release_reservation() == true, NULL );
-    v = bogus_value;
-    g.wait_for_all();
-    ASSERT( q.reserve_item(v) == true, NULL );
-    ASSERT( v == T(1), NULL );
-    ASSERT( q.consume_reservation() == true, NULL );
-    v = bogus_value;
-    g.wait_for_all();
-
-    ASSERT( q.try_get(v) == true, NULL );
-    ASSERT( v == T(2), NULL );
-    v = bogus_value;
-    g.wait_for_all();
-
-    ASSERT( q.reserve_item(v) == true, NULL );
-    ASSERT( v == T(3), NULL );
-    ASSERT( q.release_reservation() == true, NULL );
-    v = bogus_value;
-    g.wait_for_all();
-    ASSERT( q.reserve_item(v) == true, NULL );
-    ASSERT( v == T(3), NULL );
-    ASSERT( q.consume_reservation() == true, NULL );
-    v = bogus_value;
-    g.wait_for_all();
-
-    return 0;
-}
-
-//
-// Tests
-//
-// multiple parallel senders, items in FIFO (relative to sender) order
-// multiple parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received
-// * overlapped puts / gets
-// * all puts finished before any gets
-//
-template< typename T >
-int test_parallel(int num_threads) {
-    tbb::flow::graph g;
-    tbb::flow::queue_node<T> q(g);
-    tbb::flow::queue_node<T> q2(g);
-    tbb::flow::queue_node<T> q3(g);
-    {
-        Check< T > my_check;
-        T bogus_value(-1);
-        T j = bogus_value;
-        NativeParallelFor( num_threads, parallel_puts<T>(q) );
-
-        T *next_value = new T[num_threads];
-        for (int tid = 0; tid < num_threads; ++tid) next_value[tid] = T(0);
-
-        for (int i = 0; i < num_threads * N; ++i ) {
-            spin_try_get( q, j );
-            check_item( next_value, j );
-            j = bogus_value;
-        }
-        for (int tid = 0; tid < num_threads; ++tid)  {
-            ASSERT( next_value[tid] == T(N), NULL );
-        }
-        delete[] next_value;
-
-        j = bogus_value;
-        g.wait_for_all();
-        ASSERT( q.try_get( j ) == false, NULL );
-        ASSERT( j == bogus_value, NULL );
-
-        NativeParallelFor( num_threads, parallel_puts<T>(q) );
-
-        {
-            touches< T > t( num_threads );
-            NativeParallelFor( num_threads, parallel_gets<T>(q, t) );
-            g.wait_for_all();
-            ASSERT( t.validate_touches(), NULL );
-        }
-        j = bogus_value;
-        ASSERT( q.try_get( j ) == false, NULL );
-        ASSERT( j == bogus_value, NULL );
-
-        g.wait_for_all();
-        {
-            touches< T > t2( num_threads );
-            NativeParallelFor( num_threads, parallel_put_get<T>(q, t2) );
-            g.wait_for_all();
-            ASSERT( t2.validate_touches(), NULL );
-        }
-        j = bogus_value;
-        ASSERT( q.try_get( j ) == false, NULL );
-        ASSERT( j == bogus_value, NULL );
-
-        tbb::flow::make_edge( q, q2 );
-        tbb::flow::make_edge( q2, q3 );
-
-        NativeParallelFor( num_threads, parallel_puts<T>(q) );
-        {
-            touches< T > t3( num_threads );
-            NativeParallelFor( num_threads, parallel_gets<T>(q3, t3) );
-            g.wait_for_all();
-            ASSERT( t3.validate_touches(), NULL );
-        }
-        j = bogus_value;
-        g.wait_for_all();
-        ASSERT( q.try_get( j ) == false, NULL );
-        g.wait_for_all();
-        ASSERT( q2.try_get( j ) == false, NULL );
-        g.wait_for_all();
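// The reserve/release/consume protocol that test_reservation() above walks through,
// reduced to one exchange (a sketch, not part of the deleted file): reserve_item()
// exposes the front item without removing it, release_reservation() puts it back,
// and consume_reservation() commits the removal.
void queue_node_reservation_sketch() {
    tbb::flow::graph g;
    tbb::flow::queue_node<int> q(g);
    q.try_put(1);
    q.try_put(2);
    g.wait_for_all();
    int v = -1;
    ASSERT(q.reserve_item(v) && v == 1, NULL);   // peek at the oldest item
    ASSERT(q.release_reservation(), NULL);       // put it back unconsumed
    g.wait_for_all();
    ASSERT(q.reserve_item(v) && v == 1, NULL);   // still at the front
    ASSERT(q.consume_reservation(), NULL);       // now actually remove it
    g.wait_for_all();
    ASSERT(q.try_get(v) && v == 2, NULL);        // FIFO order is preserved
}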
- ASSERT( q3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // test copy constructor - ASSERT( q.remove_successor( q2 ), NULL ); - NativeParallelFor( num_threads, parallel_puts<T>(q) ); - tbb::flow::queue_node<T> q_copy(q); - j = bogus_value; - g.wait_for_all(); - ASSERT( q_copy.try_get( j ) == false, NULL ); - ASSERT( q.register_successor( q_copy ) == true, NULL ); - { - touches< T > t( num_threads ); - NativeParallelFor( num_threads, parallel_gets<T>(q_copy, t) ); - g.wait_for_all(); - ASSERT( t.validate_touches(), NULL ); - } - j = bogus_value; - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( q_copy.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - } - - return 0; -} - -// -// Tests -// -// Predecessors cannot be registered -// Empty Q rejects item requests -// Single serial sender, items in FIFO order -// Chained Qs ( 2 & 3 ), single sender, items at last Q in FIFO order -// - -template< typename T > -int test_serial() { - tbb::flow::graph g; - tbb::flow::queue_node<T> q(g); - tbb::flow::queue_node<T> q2(g); - { // destroy the graph after manipulating it, and see if all the items in the buffers - // have been destroyed before the graph - Check<T> my_check; // if check_type< U > count constructions and destructions - T bogus_value(-1); - T j = bogus_value; - - // - // Rejects attempts to add / remove predecessor - // Rejects request from empty Q - // - ASSERT( q.register_predecessor( q2 ) == false, NULL ); - ASSERT( q.remove_predecessor( q2 ) == false, NULL ); - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // - // Simple puts and gets - // - - for (int i = 0; i < N; ++i) { - bool msg = q.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( q, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::make_edge( q, q2 ); - - for (int i = 0; i < N; ++i) { - bool msg = q.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( q2, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::remove_edge( q, q2 ); - ASSERT( q.try_put( 1 ) == true, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( q.try_get( j ) == true, NULL ); - ASSERT( j == 1, NULL ); - - tbb::flow::queue_node<T> q3(g); - tbb::flow::make_edge( q, q2 ); - tbb::flow::make_edge( q2, q3 ); - - for (int i = 0; i < N; ++i) { - bool msg = q.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( q3, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( q.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( q3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - tbb::flow::remove_edge( q, q2 ); - ASSERT( q.try_put( 1 ) == true, NULL ); - g.wait_for_all(); - ASSERT( q2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - ASSERT( q3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - 
g.wait_for_all(); - ASSERT( q.try_get( j ) == true, NULL ); - ASSERT( j == 1, NULL ); - } - - return 0; -} - -int TestMain() { - tbb::tick_count start = tbb::tick_count::now(), stop; - for (int p = 2; p <= 4; ++p) { - tbb::task_scheduler_init init(p); - test_serial<int>(); - test_serial<check_type<int> >(); - test_parallel<int>(p); - test_parallel<check_type<int> >(p); - } - stop = tbb::tick_count::now(); - REMARK("Queue_Node Time=%6.6f\n", (stop-start).seconds()); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - REMARK("Testing resets\n"); - test_resets<int, tbb::flow::queue_node<int> >(); - test_resets<float, tbb::flow::queue_node<float> >(); - test_buffer_extract<tbb::flow::queue_node<int> >().run_tests(); -#endif - return Harness::Done; -} diff --git a/src/tbb/src/test/test_range_based_for.h b/src/tbb/src/test/test_range_based_for.h deleted file mode 100644 index a240a5d87..000000000 --- a/src/tbb/src/test/test_range_based_for.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#ifndef __TBB_test_range_based_for_H -#define __TBB_test_range_based_for_H - -#include <utility> //for std::pair -namespace range_based_for_support_tests{ - - template<typename value_type, typename container, typename binary_op_type, typename init_value_type> - inline init_value_type range_based_for_accumulate(container const& c, binary_op_type accumulator, init_value_type init ) - { - init_value_type range_for_accumulated = init; - #if __TBB_RANGE_BASED_FOR_PRESENT - for (value_type x : c) { - range_for_accumulated = accumulator(range_for_accumulated, x); - } - #else - for (typename container::const_iterator x =c.begin(); x != c.end(); ++x) { - range_for_accumulated = accumulator(range_for_accumulated, *x); - } - #endif - return range_for_accumulated; - } - - template<typename container, typename binary_op_type, typename init_value_type> - inline init_value_type range_based_for_accumulate(container const& c, binary_op_type accumulator, init_value_type init ) - { - typedef typename container::value_type value_type; - return range_based_for_accumulate<value_type>(c,accumulator,init); - } - - template <typename integral_type > - integral_type gauss_summ_of_int_sequence(integral_type sequence_length){ - return (sequence_length +1)* sequence_length /2; - } - - struct pair_second_summer{ - template<typename first_type, typename second_type> - second_type operator() (second_type const& lhs, std::pair<first_type, second_type> const& rhs) const - { - return lhs + rhs.second; - } - }; -} - -#endif /* __TBB_test_range_based_for_H */ diff --git a/src/tbb/src/test/test_reader_writer_lock.cpp b/src/tbb/src/test/test_reader_writer_lock.cpp deleted file mode 100644 index 5abf82ebb..000000000 --- a/src/tbb/src/test/test_reader_writer_lock.cpp +++ /dev/null @@ -1,238 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -// test reader_writer_lock -#include "tbb/reader_writer_lock.h" -#include "tbb/atomic.h" -#include "tbb/tbb_exception.h" -#include "harness.h" -#include "harness_barrier.h" - -tbb::reader_writer_lock the_mutex; -const int MAX_WORK = 10000; - -tbb::atomic<size_t> active_readers, active_writers; -tbb::atomic<bool> sim_readers; -size_t n_tested__sim_readers; - - -int BusyWork(int percentOfMaxWork) { - int iters = 0; - for (int i=0; i<MAX_WORK*((double)percentOfMaxWork/100.0); ++i) { - iters++; - } - return iters; -} - -struct StressRWLBody : NoAssign { - const int nThread; - const int percentMax; - - StressRWLBody(int nThread_, int percentMax_) : nThread(nThread_), percentMax(percentMax_) {} - - void operator()(const int /* threadID */ ) const { - int nIters = 100; - int r_result=0, w_result=0; - for(int i=0; i<nIters; ++i) { - // test unscoped blocking write lock - the_mutex.lock(); - w_result += BusyWork(percentMax); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - // test exception for recursive write lock - bool was_caught = false; - try { - the_mutex.lock(); - } - catch(tbb::improper_lock& ex) { - REMARK("improper_lock: %s\n", ex.what()); - was_caught = true; - } - catch(...) { - REPORT("Wrong exception caught during recursive lock attempt."); - } - ASSERT(was_caught, "Recursive lock attempt exception not caught properly."); - // test exception for recursive read lock - was_caught = false; - try { - the_mutex.lock_read(); - } - catch(tbb::improper_lock& ex) { - REMARK("improper_lock: %s\n", ex.what()); - was_caught = true; - } - catch(...) { - REPORT("Wrong exception caught during recursive lock attempt."); - } - ASSERT(was_caught, "Recursive lock attempt exception not caught properly."); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ - the_mutex.unlock(); - // test unscoped non-blocking write lock - if (the_mutex.try_lock()) { - w_result += BusyWork(percentMax); - the_mutex.unlock(); - } - // test unscoped blocking read lock - the_mutex.lock_read(); - r_result += BusyWork(percentMax); - the_mutex.unlock(); - // test unscoped non-blocking read lock - if(the_mutex.try_lock_read()) { - r_result += BusyWork(percentMax); - the_mutex.unlock(); - } - { // test scoped blocking write lock - tbb::reader_writer_lock::scoped_lock my_lock(the_mutex); - w_result += BusyWork(percentMax); - } - { // test scoped blocking read lock - tbb::reader_writer_lock::scoped_lock_read my_lock(the_mutex); - r_result += BusyWork(percentMax); - } - } - REMARK(" R%d/W%d", r_result, w_result); // reader/writer iterations of busy work completed - } -}; - -struct CorrectRWLScopedBody : NoAssign { - const int nThread; - Harness::SpinBarrier& my_barrier; - - CorrectRWLScopedBody(int nThread_, Harness::SpinBarrier& b_) : nThread(nThread_),my_barrier(b_) {} - - void operator()(const int /* threadID */ ) const { - my_barrier.wait(); - for (int i=0; i<50; i++) { - const bool is_reader = i%5==0; // 1 writer for every 4 readers - - if (is_reader) { - tbb::reader_writer_lock::scoped_lock_read my_lock(the_mutex); - active_readers++; - if (active_readers > 1) sim_readers = true; - ASSERT(active_writers==0, "Active writers in read-locked region."); - Harness::Sleep(10); - active_readers--; - } - else { // is writer - tbb::reader_writer_lock::scoped_lock my_lock(the_mutex); - active_writers++; - ASSERT(active_readers==0, "Active readers in write-locked region."); - ASSERT(active_writers<=1, "More than one active writer in write-locked region."); - Harness::Sleep(10); - 
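// The exception contract StressRWLBody verifies above, in isolation (a sketch, not
// part of the deleted file): tbb::reader_writer_lock is non-reentrant, so a thread
// that already holds the lock gets tbb::improper_lock on a second lock attempt.
// Assumes TBB_USE_EXCEPTIONS, like the guarded block above; this preview class was
// dropped from oneTBB.
void recursive_lock_sketch() {
    tbb::reader_writer_lock m;
    m.lock();                      // take the write lock
    bool caught = false;
    try {
        m.lock();                  // recursive attempt must throw
    } catch (tbb::improper_lock&) {
        caught = true;
    }
    ASSERT(caught, "recursive lock attempt did not throw improper_lock");
    m.unlock();
}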
active_writers--; - } - } - } -}; - -struct CorrectRWLBody : NoAssign { - const int nThread; - Harness::SpinBarrier& my_barrier; - - CorrectRWLBody(int nThread_, Harness::SpinBarrier& b_ ) : nThread(nThread_), my_barrier(b_) {} - - void operator()(const int /* threadID */ ) const { - my_barrier.wait(); - for (int i=0; i<50; i++) { - const bool is_reader = i%5==0; // 1 writer for every 4 readers - - if (is_reader) { - the_mutex.lock_read(); - active_readers++; - if (active_readers > 1) sim_readers = true; - ASSERT(active_writers==0, "Active writers in read-locked region."); - } - else { // is writer - the_mutex.lock(); - active_writers++; - ASSERT(active_readers==0, "Active readers in write-locked region."); - ASSERT(active_writers<=1, "More than one active writer in write-locked region."); - } - Harness::Sleep(10); - if (is_reader) { - active_readers--; - } - else { // is writer - active_writers--; - } - the_mutex.unlock(); - } - } -}; - -void TestReaderWriterLockOnNThreads(int nThreads) { - // Stress-test all interfaces - for (int pc=0; pc<=100; pc+=20) { - REMARK("Testing with %d threads, percent of MAX_WORK=%d...", nThreads, pc); - StressRWLBody myStressBody(nThreads, pc); - NativeParallelFor(nThreads, myStressBody); - REMARK(" OK.\n"); - } - - int i; - n_tested__sim_readers = 0; - REMARK("Testing with %d threads, direct/unscoped locking mode...", nThreads); // TODO: choose direct or unscoped? - // TODO: refactor the following two for loops into a shared function - for( i=0; i<100; ++i ) { - Harness::SpinBarrier bar0(nThreads); - - CorrectRWLBody myCorrectBody(nThreads,bar0); - active_writers = active_readers = 0; - sim_readers = false; - NativeParallelFor(nThreads, myCorrectBody); - - if( sim_readers || nThreads==1 ) { - if( ++n_tested__sim_readers>5 ) - break; - } - } - ASSERT(i<100, "There were no simultaneous readers."); - REMARK(" OK.\n"); - - n_tested__sim_readers = 0; - REMARK("Testing with %d threads, scoped locking mode...", nThreads); - for( i=0; i<100; ++i ) { - Harness::SpinBarrier bar0(nThreads); - CorrectRWLScopedBody myCorrectScopedBody(nThreads, bar0); - active_writers = active_readers = 0; - sim_readers = false; - NativeParallelFor(nThreads, myCorrectScopedBody); - if( sim_readers || nThreads==1 ) { - if( ++n_tested__sim_readers>5 ) - break; - } - } - ASSERT(i<100, "There were no simultaneous readers."); - REMARK(" OK.\n"); -} - -void TestReaderWriterLock() { - for(int p = MinThread; p <= MaxThread; p++) { - TestReaderWriterLockOnNThreads(p); - } -} - - -int TestMain() { - if(MinThread <= 0) MinThread = 1; - if(MaxThread > 0) { - TestReaderWriterLock(); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_runtime_loader.cpp b/src/tbb/src/test/test_runtime_loader.cpp deleted file mode 100644 index 85f151a8e..000000000 --- a/src/tbb/src/test/test_runtime_loader.cpp +++ /dev/null @@ -1,293 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of
-    the GNU General Public License along with Threading Building Blocks; if not, write to the
-    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-    As a special exception, you may use this file as part of a free software library without
-    restriction. Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
-    #pragma warning (push)
-    #pragma warning (disable: 4530)
-#endif
-
-#if !(_WIN32||_WIN64) || (__MINGW64__||__MINGW32__)
-
-#include "harness.h"
-
-int TestMain () {
-    return Harness::Skipped;
-}
-
-#else // !(_WIN32||_WIN64)
-
-#define TBB_PREVIEW_RUNTIME_LOADER 1
-#include "tbb/runtime_loader.h"
-#include "tbb/tbb_stddef.h"
-#include "tbb/task_scheduler_init.h"
-#include "tbb/tbb_exception.h"
-
-#include <cstdio>
-#include <cstdlib>
-#include <cerrno>
-#include <vector>
-#include <string>
-#include <utility>
-#include <typeinfo>
-#include <stdexcept>
-
-#ifdef HARNESS_USE_RUNTIME_LOADER
-    #undef HARNESS_USE_RUNTIME_LOADER // We do not want harness to preload tbb.
-#endif
-#include "harness.h"
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
-    #pragma warning (pop)
-#endif
-
-static int errors = 0;
-
-#define CHECK( cond ) { \
-    if ( ! (cond) ) { \
-        ++ errors; \
-        REPORT( "%s:%d: --- TEST FAILED ---\n", __FILE__, __LINE__ ); \
-    }; \
-}
-
-#define SAY( msg ) \
-    REMARK( "%s:%d: %s\n", __FILE__, __LINE__, msg )
-
-typedef int (*int_func_t)();
-
-namespace tbb {
-namespace interface6 {
-namespace internal {
-namespace runtime_loader {
-    extern tbb::runtime_loader::error_mode stub_mode;
-} } } } // namespaces runtime_loader, internal, interface6, tbb
-
-using tbb::interface6::internal::runtime_loader::stub_mode;
-
-#define _CHECK_TBB( code ) { \
-    stub_mode = tbb::runtime_loader::em_status; \
-    int ver = tbb::TBB_runtime_interface_version(); \
-    stub_mode = tbb::runtime_loader::em_abort; \
-    CHECK( ver == code ); \
-}
-
-#define CHECK_TBB_IS_LOADED() \
-    _CHECK_TBB( TBB_INTERFACE_VERSION )
-
-#define CHECK_TBB_IS_NOT_LOADED() \
-    _CHECK_TBB( tbb::runtime_loader::ec_no_lib )
-
-int TestMain() {
-
-    __TBB_TRY {
-
-        {
-            SAY( "Call a function when library is not yet loaded, stub should return an error." );
-            CHECK_TBB_IS_NOT_LOADED();
-        }
-
-        {
-            SAY( "Create a runtime_loader object, do not load library but make some bad calls." );
-            tbb::runtime_loader rtl( tbb::runtime_loader::em_status );
-            SAY( "After creating, status should be ok." );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_ok );
-            SAY( "Call a function, stub should return an error." );
-            CHECK_TBB_IS_NOT_LOADED();
-        }
-
-        {
-            SAY( "Create a runtime_loader object and call load() with bad arguments." );
-            char const * path[] = { ".", NULL };
-            tbb::runtime_loader rtl( tbb::runtime_loader::em_status );
-            SAY( "Min version is bad." );
-            rtl.load( path, -1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_bad_arg );
-            SAY( "Max version is bad." );
-            rtl.load( path, TBB_INTERFACE_VERSION, -1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_bad_arg );
-            SAY( "Both versions are bad." );
-            rtl.load( path, -1, -1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_bad_arg );
-            SAY( "Min is bigger than max." );
-            rtl.load( path, TBB_INTERFACE_VERSION + 1, TBB_INTERFACE_VERSION - 1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_bad_arg );
-        }
-
-        {
-            SAY( "Create a proxy object and call load() with good arguments but an unavailable version." );
-            char const * path[] = { ".", NULL };
-            tbb::runtime_loader rtl( tbb::runtime_loader::em_status );
-            SAY( "Min version too big." );
-            rtl.load( path, TBB_INTERFACE_VERSION + 1, TBB_INTERFACE_VERSION + 1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_no_lib );
-            SAY( "Max version is too small." );
-            rtl.load( path, TBB_INTERFACE_VERSION - 1, TBB_INTERFACE_VERSION - 1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_no_lib );
-        }
-
-        {
-            SAY( "Test em_throw mode." );
-            char const * path[] = { ".", NULL };
-            tbb::runtime_loader rtl( tbb::runtime_loader::em_throw );
-            tbb::runtime_loader::error_code code = tbb::runtime_loader::ec_ok;
-            __TBB_TRY {
-                rtl.load( path, -1 );
-            } __TBB_CATCH ( tbb::runtime_loader::error_code c ) {
-                code = c;
-            }; // __TBB_TRY
-            CHECK( code == tbb::runtime_loader::ec_bad_arg );
-            __TBB_TRY {
-                rtl.load( path, TBB_INTERFACE_VERSION + 1 );
-            } __TBB_CATCH ( tbb::runtime_loader::error_code c ) {
-                code = c;
-            }; // __TBB_TRY
-            CHECK( code == tbb::runtime_loader::ec_no_lib );
-        }
-
-        {
-            SAY( "Load current version, but specify wrong directories." );
-            tbb::runtime_loader rtl( tbb::runtime_loader::em_status );
-            SAY( "Specify no directories." );
-            char const * path0[] = { NULL };
-            rtl.load( path0 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_no_lib );
-            SAY( "Specify directories without library." );
-            char const * path1[] = { "..", "/", NULL };
-            rtl.load( path1 );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_no_lib );
-        }
-
-        {
-            SAY( "Now really load library and do various tests." );
-            char const * path[] = { ".", NULL };
-            tbb::runtime_loader rtl( tbb::runtime_loader::em_status );
-            SAY( "Load current version." );
-            rtl.load( path, TBB_INTERFACE_VERSION, TBB_INTERFACE_VERSION );
-            CHECK( rtl.status() == tbb::runtime_loader::ec_ok );
-            if ( rtl.status() == tbb::runtime_loader::ec_ok ) {
-                {
-                    SAY( "Make sure the library really loaded." );
-                    CHECK_TBB_IS_LOADED();
-                }
-                SAY( "Call load() again, it should return an error." );
-                rtl.load( path, TBB_INTERFACE_VERSION, TBB_INTERFACE_VERSION );
-                CHECK( rtl.status() == tbb::runtime_loader::ec_bad_call );
-                {
-                    SAY( "Initialize task_scheduler." );
-                    tbb::task_scheduler_init init( 1 );
-                    // Check what?
-                }
-
-                // There was a problem on Linux* OS, and still a problem on OS X*.
-                SAY( "Throw an exception." );
-                // Iterate thru all the ids first.
-                for ( int id = 1; id < tbb::internal::eid_max; ++ id ) {
-                    bool ex_caught = false;
-                    __TBB_TRY {
-                        tbb::internal::throw_exception( tbb::internal::exception_id( id ) );
-                    } __TBB_CATCH ( std::exception const & ) {
-                        SAY( "Expected exception caught." );
-                        ex_caught = true;
-                    } __TBB_CATCH ( ... ) {
-                        SAY( "Unexpected exception caught." );
-                    }; // try
-                    CHECK( ex_caught );
-                }; // for
-                // Now try to catch exceptions of specific types.
-                #define CHECK_EXCEPTION( id, type ) \
-                { \
-                    SAY( "Throwing " #id " exception of " #type " type..." ); \
-                    bool ex_caught = false; \
-                    __TBB_TRY { \
-                        tbb::internal::throw_exception( tbb::internal::id ); \
-                    } __TBB_CATCH ( type const & ) { \
-                        SAY( #type " exception caught." ); \
-                        ex_caught = true; \
-                    } __TBB_CATCH ( ... ) { \
-                        SAY( "Unexpected exception caught." ); \
-                    }; /* try */ \
-                    CHECK( ex_caught ); \
-                }
-                CHECK_EXCEPTION( eid_bad_alloc, std::bad_alloc );
-                CHECK_EXCEPTION( eid_bad_last_alloc, tbb::bad_last_alloc );
-                CHECK_EXCEPTION( eid_nonpositive_step, std::invalid_argument );
-                CHECK_EXCEPTION( eid_out_of_range, std::out_of_range );
-                CHECK_EXCEPTION( eid_segment_range_error, std::range_error );
-                CHECK_EXCEPTION( eid_missing_wait, tbb::missing_wait );
-                CHECK_EXCEPTION( eid_invalid_multiple_scheduling, tbb::invalid_multiple_scheduling );
-                CHECK_EXCEPTION( eid_improper_lock, tbb::improper_lock );
-                CHECK_EXCEPTION( eid_possible_deadlock, std::runtime_error );
-                CHECK_EXCEPTION( eid_reservation_length_error, std::length_error );
-                CHECK_EXCEPTION( eid_user_abort, tbb::user_abort );
-                #undef CHECK_EXCEPTION
-                {
-                    bool ex_caught = false;
-                    __TBB_TRY {
-                        tbb::internal::handle_perror( EAGAIN, "apple" );
-                    } __TBB_CATCH ( std::runtime_error const & ) {
-                        SAY( "Expected exception caught." );
-                        ex_caught = true;
-                    } __TBB_CATCH ( ... ) {
-                        SAY( "Unexpected exception caught." );
-                    }; // try
-                    CHECK( ex_caught );
-                }
-            }; // if
-        }
-
-        {
-            SAY( "Test multiple proxies." );
-            char const * path[] = { ".", NULL };
-            tbb::runtime_loader rtl0( tbb::runtime_loader::em_status );
-            tbb::runtime_loader rtl1( tbb::runtime_loader::em_status );
-            CHECK( rtl0.status() == tbb::runtime_loader::ec_ok );
-            CHECK( rtl1.status() == tbb::runtime_loader::ec_ok );
-            SAY( "Load current version with the first rtl." );
-            rtl0.load( path );
-            CHECK( rtl0.status() == tbb::runtime_loader::ec_ok );
-            CHECK_TBB_IS_LOADED();
-            SAY( "Load another version with the second proxy, it should return an error." );
-            rtl1.load( path, TBB_INTERFACE_VERSION + 1 );
-            CHECK( rtl1.status() == tbb::runtime_loader::ec_bad_ver );
-            SAY( "Load the same version with the second proxy, it should return ok." );
-            rtl1.load( path );
-            CHECK( rtl1.status() == tbb::runtime_loader::ec_ok );
-            CHECK_TBB_IS_LOADED();
-        }
-
-    } __TBB_CATCH( ... ) {
-
-        ASSERT( 0, "unexpected exception" );
-
-    }; // __TBB_TRY
-
-    if ( errors > 0 ) {
-        REPORT( "Some tests failed.\n" );
-        exit( 1 );
-    }; // if
-
-    return Harness::Done;
-
-} // main
-
-#endif // !(_WIN32||_WIN64)
-
-// end of file //
diff --git a/src/tbb/src/test/test_rwm_upgrade_downgrade.cpp b/src/tbb/src/test/test_rwm_upgrade_downgrade.cpp
deleted file mode 100644
index 3bc7f5db4..000000000
--- a/src/tbb/src/test/test_rwm_upgrade_downgrade.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-    Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-
-    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
-    you can redistribute it and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation. Threading Building Blocks is
-    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
-    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/queuing_rw_mutex.h" -#include "tbb/spin_rw_mutex.h" -#include "harness.h" - -using namespace tbb; - -volatile int Count; - -template<typename RWMutex> -struct Hammer: NoAssign { - RWMutex &MutexProtectingCount; - mutable volatile int dummy; - - Hammer(RWMutex &m): MutexProtectingCount(m) {} - void operator()( int /*thread_id*/ ) const { - for( int j=0; j<100000; ++j ) { - typename RWMutex::scoped_lock lock(MutexProtectingCount,false); - int c = Count; - for( int k=0; k<10; ++k ) { - ++dummy; - } - if( lock.upgrade_to_writer() ) { - // The upgrade succeeded without any intervening writers - ASSERT( c==Count, "another thread modified Count while I held a read lock" ); - } else { - c = Count; - } - for( int k=0; k<10; ++k ) { - ++Count; - } - lock.downgrade_to_reader(); - for( int k=0; k<10; ++k ) { - ++dummy; - } - } - } -}; - -queuing_rw_mutex QRW_mutex; -spin_rw_mutex SRW_mutex; - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK("Testing on %d threads", p); - Count = 0; - NativeParallelFor( p, Hammer<queuing_rw_mutex>(QRW_mutex) ); - Count = 0; - NativeParallelFor( p, Hammer<spin_rw_mutex>(SRW_mutex) ); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_semaphore.cpp b/src/tbb/src/test/test_semaphore.cpp deleted file mode 100644 index cf0b82f89..000000000 --- a/src/tbb/src/test/test_semaphore.cpp +++ /dev/null @@ -1,316 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// -// Test for counting semaphore. 
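// The invariant behind the test plan enumerated below -- a semaphore initialized
// to N admits at most N threads into the protected region -- in a standalone
// sketch. C++20 std::counting_semaphore stands in here for the internal
// tbb::internal::semaphore that the deleted test exercises; the names and the
// assumption N <= 256 are illustrative, not from the original file.
#include <atomic>
#include <cassert>
#include <semaphore>
#include <thread>
#include <vector>

void counting_semaphore_sketch(int N, int M) {
    std::counting_semaphore<256> sem(N);       // at most N concurrent holders
    std::atomic<int> active{0}, high_water{0};
    std::vector<std::thread> pool;
    for (int t = 0; t < N + M; ++t)
        pool.emplace_back([&] {
            sem.acquire();                     // A. P()
            int cur = ++active;                // B. increment the shared count
            int seen = high_water.load();      // C. record the highest count observed
            while (seen < cur && !high_water.compare_exchange_weak(seen, cur)) {}
            --active;                          // D. decrement the count
            sem.release();                     // E. V()
        });
    for (auto &th : pool) th.join();
    assert(high_water.load() <= N);            // the count never exceeded N
}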
-// -// set semaphore to N -// create N + M threads -// have each thread -// A. P() -// B. increment atomic count -// C. spin for awhile checking the value of the count; make sure it doesn't exceed N -// D. decrement atomic count -// E. V() -// - -#include "../tbb/semaphore.h" -#include "tbb/atomic.h" -#include "tbb/blocked_range.h" - -#include <vector> -using std::vector; - -#include "harness_assert.h" -#include "harness.h" - -using tbb::internal::semaphore; - -#include "harness_barrier.h" - -tbb::atomic<int> pCount; - -Harness::SpinBarrier sBarrier; - -#include "tbb/tick_count.h" -// semaphore basic function: -// set semaphore to initial value -// see that semaphore only allows that number of threads to be active -class Body: NoAssign { - const int nThreads; - const int nIters; - tbb::internal::semaphore &mySem; - vector<int> &ourCounts; - vector<double> &tottime; - static const int tickCounts = 1; // millisecond - static const int innerWait = 5; // millisecond -public: - Body(int nThread_, int nIter_, semaphore &mySem_, - vector<int>& ourCounts_, - vector<double>& tottime_ - ) : nThreads(nThread_), nIters(nIter_), mySem(mySem_), ourCounts(ourCounts_), tottime(tottime_) { sBarrier.initialize(nThread_); pCount = 0; } -void operator()(const int tid) const { - sBarrier.wait(); - for(int i=0; i < nIters; ++i) { - Harness::Sleep( tid * tickCounts ); - tbb::tick_count t0 = tbb::tick_count::now(); - mySem.P(); - tbb::tick_count t1 = tbb::tick_count::now(); - tottime[tid] += (t1-t0).seconds(); - int curval = ++pCount; - if(curval > ourCounts[tid]) ourCounts[tid] = curval; - Harness::Sleep( innerWait ); - --pCount; - ASSERT((int)pCount >= 0, NULL); - mySem.V(); - } -} -}; - - -void testSemaphore( int semInitCnt, int extraThreads ) { - semaphore my_sem(semInitCnt); - // tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); - int nThreads = semInitCnt + extraThreads; - vector<int> maxVals(nThreads); - vector<double> totTimes(nThreads); - int nIters = 10; - Body myBody(nThreads, nIters, my_sem, maxVals, totTimes); - - REMARK( " sem(%d) with %d extra threads\n", semInitCnt, extraThreads); - pCount = 0; - NativeParallelFor(nThreads, myBody); - if(extraThreads == 0) { - double allPWaits = 0; - for(vector<double>::const_iterator j = totTimes.begin(); j != totTimes.end(); ++j) { - allPWaits += *j; - } - allPWaits /= static_cast<double>(nThreads * nIters); - REMARK("Average wait for P() in uncontested case for nThreads = %d is %g\n", nThreads, allPWaits); - } - ASSERT(!pCount, "not all threads decremented pCount"); - int maxCount = -1; - for(vector<int>::const_iterator i=maxVals.begin(); i!= maxVals.end();++i) { - maxCount = max(maxCount,*i); - } - ASSERT(maxCount <= semInitCnt,"too many threads in semaphore-protected increment"); - if(maxCount < semInitCnt) { - REMARK("Not enough threads in semaphore-protected region (%d < %d)\n", static_cast<int>(maxCount), semInitCnt); - } -} - -#include "../tbb/semaphore.cpp" -#if _WIN32||_WIN64 -#include "../tbb/dynamic_link.cpp" - -void testOSVersion() { -#if __TBB_USE_SRWLOCK - BOOL bIsWindowsVistaOrLater; -#if __TBB_WIN8UI_SUPPORT - bIsWindowsVistaOrLater = true; -#else - OSVERSIONINFO osvi; - - memset( (void*)&osvi, 0, sizeof(OSVERSIONINFO) ); - osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); - GetVersionEx(&osvi); - bIsWindowsVistaOrLater = (osvi.dwMajorVersion >= 6 ); -#endif - - if( bIsWindowsVistaOrLater ) { - REMARK("Checking SRWLock is loaded\n"); - tbb::internal::binary_semaphore s; - ASSERT( 
(uintptr_t)tbb::internal::__TBB_init_binsem!=(uintptr_t)&tbb::internal::init_binsem_using_event, NULL ); - ASSERT( (uintptr_t)tbb::internal::__TBB_acquire_binsem!=(uintptr_t)&tbb::internal::acquire_binsem_using_event, NULL ); - ASSERT( (uintptr_t)tbb::internal::__TBB_release_binsem!=(uintptr_t)&tbb::internal::release_binsem_using_event, NULL ); - } -#endif /* __TBB_USE_SRWLOCK */ -} -#endif /* _WIN32||_WIN64 */ - -#define N_TIMES 1000 - -template<typename S> -struct Counter { - volatile long value; - S my_sem; - Counter() : value(0) {} -}; - -//! Function object for use with parallel_for.h. -template<typename C> -struct AddOne: NoAssign { - C& my_counter; - /** Increments counter once for each iteration in the iteration space. */ - void operator()( int /*tid*/ ) const { - for( size_t i=0; i<N_TIMES; ++i ) { - my_counter.my_sem.P(); - my_counter.value = my_counter.value + 1; - my_counter.my_sem.V(); - } - } - AddOne( C& c_ ) : my_counter(c_) { my_counter.my_sem.V(); } -}; - -void testBinarySemaphore( int nThreads ) { - REMARK("Testing binary semaphore\n"); - Counter<tbb::internal::binary_semaphore> counter; - AddOne<Counter<tbb::internal::binary_semaphore> > myAddOne(counter); - NativeParallelFor( nThreads, myAddOne ); - ASSERT( nThreads*N_TIMES==counter.value, "Binary semaphore operations P()/V() have a race"); -} - -// Power of 2, the most tokens that can be in flight. -#define MAX_TOKENS 32 -enum FilterType { imaProducer, imaConsumer }; -class FilterBase : NoAssign { -protected: - FilterType ima; - unsigned totTokens; // total number of tokens to be emitted, only used by producer - tbb::atomic<unsigned>& myTokens; - tbb::atomic<unsigned>& otherTokens; - unsigned myWait; - semaphore &mySem; - semaphore &nextSem; - unsigned* myBuffer; - unsigned* nextBuffer; - unsigned curToken; -public: - FilterBase( FilterType ima_ - ,unsigned totTokens_ - ,tbb::atomic<unsigned>& myTokens_ - ,tbb::atomic<unsigned>& otherTokens_ - ,unsigned myWait_ - ,semaphore &mySem_ - ,semaphore &nextSem_ - ,unsigned* myBuffer_ - ,unsigned* nextBuffer_ - ) - : ima(ima_),totTokens(totTokens_),myTokens(myTokens_),otherTokens(otherTokens_),myWait(myWait_),mySem(mySem_), - nextSem(nextSem_),myBuffer(myBuffer_),nextBuffer(nextBuffer_) - { - curToken = 0; - } - void Produce(const int tid); - void Consume(const int tid); - void operator()(const int tid) { if(ima == imaConsumer) Consume(tid); else Produce(tid); } -}; - -class ProduceConsumeBody { - FilterBase** myFilters; - public: - ProduceConsumeBody(FilterBase** myFilters_) : myFilters(myFilters_) {} - void operator()(const int tid) const { - myFilters[tid]->operator()(tid); - } -}; - -// send a bunch of non-Null "tokens" to consumer, then a NULL. -void FilterBase::Produce(const int /*tid*/) { - nextBuffer[0] = 0; // just in case we provide no tokens - sBarrier.wait(); - while(totTokens) { - while(!myTokens) - mySem.P(); - // we have a slot available. - --myTokens; // moving this down reduces spurious wakeups - --totTokens; - if(totTokens) - nextBuffer[curToken&(MAX_TOKENS-1)] = curToken*3+1; - else - nextBuffer[curToken&(MAX_TOKENS-1)] = (unsigned)NULL; - ++curToken; - Harness::Sleep(myWait); - unsigned temp = ++otherTokens; - if(temp == 1) - nextSem.V(); - } - nextSem.V(); // final wakeup -} - -void FilterBase::Consume(const int /*tid*/) { - unsigned myToken; - sBarrier.wait(); - do { - while(!myTokens) - mySem.P(); - // we have a slot available. 
- --myTokens; // moving this down reduces spurious wakeups - myToken = myBuffer[curToken&(MAX_TOKENS-1)]; - if(myToken) { - ASSERT(myToken == curToken*3+1, "Error in received token"); - ++curToken; - Harness::Sleep(myWait); - unsigned temp = ++otherTokens; - if(temp == 1) - nextSem.V(); - } - } while(myToken); - // end of processing - ASSERT(curToken + 1 == totTokens, "Didn't receive enough tokens"); -} - -// -- test of producer/consumer with atomic buffer cnt and semaphore -// nTokens are total number of tokens through the pipe -// pWait is the wait time for the producer -// cWait is the wait time for the consumer -void testProducerConsumer( unsigned totTokens, unsigned nTokens, unsigned pWait, unsigned cWait) { - semaphore pSem; - semaphore cSem; - tbb::atomic<unsigned> pTokens; - tbb::atomic<unsigned> cTokens; - cTokens = 0; - unsigned cBuffer[MAX_TOKENS]; - FilterBase* myFilters[2]; // one producer, one consumer - REMARK("Testing producer/consumer with %lu total tokens, %lu tokens at a time, producer wait(%lu), consumer wait (%lu)\n", totTokens, nTokens, pWait, cWait); - ASSERT(nTokens <= MAX_TOKENS, "Not enough slots for tokens"); - myFilters[0] = new FilterBase(imaProducer, totTokens, pTokens, cTokens, pWait, cSem, pSem, (unsigned *)NULL, &(cBuffer[0])); - myFilters[1] = new FilterBase(imaConsumer, totTokens, cTokens, pTokens, cWait, pSem, cSem, cBuffer, (unsigned *)NULL); - pTokens = nTokens; - ProduceConsumeBody myBody(myFilters); - sBarrier.initialize(2); - NativeParallelFor(2, myBody); - delete myFilters[0]; - delete myFilters[1]; -} - -int TestMain() { - REMARK("Started\n"); -#if _WIN32||_WIN64 - testOSVersion(); -#endif - if(MaxThread > 0) { - testBinarySemaphore( MaxThread ); - for(int semSize = 1; semSize <= MaxThread; ++semSize) { - for(int exThreads = 0; exThreads <= MaxThread - semSize; ++exThreads) { - testSemaphore( semSize, exThreads ); - } - } - } - // Test producer/consumer with varying execution times and buffer sizes - // ( total tokens, tokens in buffer, sleep for producer, sleep for consumer ) - testProducerConsumer( 10, 2, 5, 5 ); - testProducerConsumer( 10, 2, 20, 5 ); - testProducerConsumer( 10, 2, 5, 20 ); - testProducerConsumer( 10, 1, 5, 5 ); - testProducerConsumer( 20, 10, 5, 20 ); - testProducerConsumer( 64, 32, 1, 20 ); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_sequencer_node.cpp b/src/tbb/src/test/test_sequencer_node.cpp deleted file mode 100644 index c8db88f10..000000000 --- a/src/tbb/src/test/test_sequencer_node.cpp +++ /dev/null @@ -1,404 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline
-    functions from this file, or you compile this file and link it with other files to produce
-    an executable, this file does not by itself cause the resulting executable to be covered
-    by the GNU General Public License. This exception does not however invalidate any other
-    reasons why the executable file might be covered by the GNU General Public License.
-*/
-
-#include "harness.h"
-#include "tbb/flow_graph.h"
-#include "tbb/task_scheduler_init.h"
-#include "tbb/tick_count.h"
-#include "tbb/atomic.h"
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
-#include "harness_graph.h"
-#endif
-
-#include <cstdio>
-
-#define N 1000
-#define C 10
-
-template< typename T >
-struct seq_inspector {
-    size_t operator()(const T &v) const { return size_t(v); }
-};
-
-template< typename T >
-bool wait_try_get( tbb::flow::graph &g, tbb::flow::sequencer_node<T> &q, T &value ) {
-    g.wait_for_all();
-    return q.try_get(value);
-}
-
-template< typename T >
-void spin_try_get( tbb::flow::queue_node<T> &q, T &value ) {
-    while ( q.try_get(value) != true ) ;
-}
-
-template< typename T >
-struct parallel_puts : NoAssign {
-
-    tbb::flow::sequencer_node<T> &my_q;
-    int my_num_threads;
-
-    parallel_puts( tbb::flow::sequencer_node<T> &q, int num_threads ) : my_q(q), my_num_threads(num_threads) {}
-
-    void operator()(int tid) const {
-        for (int j = tid; j < N; j+=my_num_threads) {
-            bool msg = my_q.try_put( T(j) );
-            ASSERT( msg == true, NULL );
-        }
-    }
-
-};
-
-template< typename T >
-struct touches {
-
-    bool **my_touches;
-    T *my_last_touch;
-    int my_num_threads;
-
-    touches( int num_threads ) : my_num_threads(num_threads) {
-        my_last_touch = new T[my_num_threads];
-        my_touches = new bool* [my_num_threads];
-        for ( int p = 0; p < my_num_threads; ++p) {
-            my_last_touch[p] = T(-1);
-            my_touches[p] = new bool[N];
-            for ( int n = 0; n < N; ++n)
-                my_touches[p][n] = false;
-        }
-    }
-
-    ~touches() {
-        for ( int p = 0; p < my_num_threads; ++p) {
-            delete [] my_touches[p];
-        }
-        delete [] my_touches;
-        delete [] my_last_touch;
-    }
-
-    bool check( int tid, T v ) {
-        if ( my_touches[tid][v] != false ) {
-            printf("Error: value seen twice by local thread\n");
-            return false;
-        }
-        if ( v <= my_last_touch[tid] ) {
-            printf("Error: value seen in wrong order by local thread\n");
-            return false;
-        }
-        my_last_touch[tid] = v;
-        my_touches[tid][v] = true;
-        return true;
-    }
-
-    bool validate_touches() {
-        bool *all_touches = new bool[N];
-        for ( int n = 0; n < N; ++n)
-            all_touches[n] = false;
-
-        for ( int p = 0; p < my_num_threads; ++p) {
-            for ( int n = 0; n < N; ++n) {
-                if ( my_touches[p][n] == true ) {
-                    ASSERT( all_touches[n] == false, "value seen by more than one thread\n" );
-                    all_touches[n] = true;
-                }
-            }
-        }
-        for ( int n = 0; n < N; ++n) {
-            if ( !all_touches[n] )
-                printf("No touch at %d, my_num_threads = %d\n", n, my_num_threads);
-            //ASSERT( all_touches[n] == true, "value not seen by any thread\n" );
-        }
-        delete [] all_touches;
-        return true;
-    }
-
-};
-
-template< typename T >
-struct parallel_gets : NoAssign {
-
-    tbb::flow::sequencer_node<T> &my_q;
-    int my_num_threads;
-    touches<T> &my_touches;
-
-    parallel_gets( tbb::flow::sequencer_node<T> &q, int num_threads, touches<T> &t ) : my_q(q), my_num_threads(num_threads), my_touches(t) {}
-
-    void operator()(int tid) const {
-        for (int j = tid; j < N; j+=my_num_threads) {
-            T v;
-            spin_try_get( my_q, v );
-            my_touches.check( tid, v );
-        }
-    }
-
-};
-
-template< typename T >
-struct parallel_put_get : NoAssign {
- - tbb::flow::sequencer_node<T> &my_s1; - tbb::flow::sequencer_node<T> &my_s2; - int my_num_threads; - tbb::atomic< int > &my_counter; - touches<T> &my_touches; - - parallel_put_get( tbb::flow::sequencer_node<T> &s1, tbb::flow::sequencer_node<T> &s2, int num_threads, - tbb::atomic<int> &counter, touches<T> &t ) : my_s1(s1), my_s2(s2), my_num_threads(num_threads), my_counter(counter), my_touches(t) {} - - void operator()(int tid) const { - int i_start = 0; - - while ( (i_start = my_counter.fetch_and_add(C)) < N ) { - int i_end = ( N < i_start + C ) ? N : i_start + C; - for (int i = i_start; i < i_end; ++i) { - bool msg = my_s1.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = i_start; i < i_end; ++i) { - T v; - spin_try_get( my_s2, v ); - my_touches.check( tid, v ); - } - } - } - -}; - -// -// Tests -// -// multiple parallel senders, multiple receivers, properly sequenced (relative to receiver) at output -// chained sequencers, multiple parallel senders, multiple receivers, properly sequenced (relative to receiver) at output -// - -template< typename T > -int test_parallel(int num_threads) { - tbb::flow::graph g; - - tbb::flow::sequencer_node<T> s(g, seq_inspector<T>()); - NativeParallelFor( num_threads, parallel_puts<T>(s, num_threads) ); - { - touches<T> t( num_threads ); - NativeParallelFor( num_threads, parallel_gets<T>(s, num_threads, t) ); - g.wait_for_all(); - ASSERT( t.validate_touches(), NULL ); - } - T bogus_value(-1); - T j = bogus_value; - ASSERT( s.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - g.wait_for_all(); - - tbb::flow::sequencer_node<T> s1(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s2(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s3(g, seq_inspector<T>()); - tbb::flow::make_edge( s1, s2 ); - tbb::flow::make_edge( s2, s3 ); - - { - touches<T> t( num_threads ); - tbb::atomic<int> counter; - counter = 0; - NativeParallelFor( num_threads, parallel_put_get<T>(s1, s3, num_threads, counter, t) ); - g.wait_for_all(); - t.validate_touches(); - } - g.wait_for_all(); - ASSERT( s1.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( s2.try_get( j ) == false, NULL ); - g.wait_for_all(); - ASSERT( s3.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // test copy constructor - tbb::flow::sequencer_node<T> s_copy(s); - NativeParallelFor( num_threads, parallel_puts<T>(s_copy, num_threads) ); - for (int i = 0; i < N; ++i) { - j = bogus_value; - spin_try_get( s_copy, j ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( s_copy.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - return 0; -} - - -// -// Tests -// -// No predecessors can be registered -// Request from empty buffer fails -// In-order puts, single sender, single receiver, properly sequenced at output -// Reverse-order puts, single sender, single receiver, properly sequenced at output -// Chained sequencers (3), in-order and reverse-order tests, properly sequenced at output -// - -template< typename T > -int test_serial() { - tbb::flow::graph g; - T bogus_value(-1); - - tbb::flow::sequencer_node<T> s(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s2(g, seq_inspector<T>()); - T j = bogus_value; - - // - // Rejects attempts to add / remove predecessor - // Rejects request from empty Q - // - ASSERT( s.register_predecessor( s2 ) == false, NULL ); - ASSERT( s.remove_predecessor( s2 ) == false, NULL ); - ASSERT( s.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - 
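// The resequencing behaviour that test_serial() checks below, in isolation (a
// sketch, not part of the deleted file): a sequencer_node buffers out-of-order
// puts and releases each item only after its predecessors in the sequence, as
// numbered by the inspector functor, have been released. It reuses seq_inspector
// and wait_try_get() from the deleted file above.
void sequencer_node_order_sketch() {
    tbb::flow::graph g;
    tbb::flow::sequencer_node<int> s(g, seq_inspector<int>());
    s.try_put(2);   // arrives first, but carries sequence number 2
    s.try_put(0);
    s.try_put(1);
    int v = -1;
    for (int expected = 0; expected < 3; ++expected) {
        ASSERT(wait_try_get(g, s, v) == true, NULL);   // same helper as the tests above
        ASSERT(v == expected, NULL);                   // released in sequence order
    }
    ASSERT(wait_try_get(g, s, v) == false, NULL);      // drained
}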
// - // In-order simple puts and gets - // - - for (int i = 0; i < N; ++i) { - bool msg = s.try_put( T(i) ); - ASSERT( msg == true, NULL ); - ASSERT(!s.try_put( T(i) ), NULL); // second attempt to put should reject - } - - - for (int i = 0; i < N; ++i) { - j = bogus_value; - ASSERT(wait_try_get( g, s, j ) == true, NULL); - ASSERT( i == j, NULL ); - ASSERT(!s.try_put( T(i) ),NULL ); // after retrieving value, subsequent put should fail - } - j = bogus_value; - g.wait_for_all(); - ASSERT( s.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // - // Reverse-order simple puts and gets - // - - for (int i = N-1; i >= 0; --i) { - bool msg = s2.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = 0; i < N; ++i) { - j = bogus_value; - ASSERT(wait_try_get( g, s2, j ) == true, NULL); - ASSERT( i == j, NULL ); - } - j = bogus_value; - g.wait_for_all(); - ASSERT( s2.try_get( j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - // - // Chained in-order simple puts and gets - // - - tbb::flow::sequencer_node<T> s3(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s4(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s5(g, seq_inspector<T>()); - tbb::flow::make_edge( s3, s4 ); - tbb::flow::make_edge( s4, s5 ); - - for (int i = 0; i < N; ++i) { - bool msg = s3.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = 0; i < N; ++i) { - j = bogus_value; - ASSERT(wait_try_get( g, s5, j ) == true, NULL); - ASSERT( i == j, NULL ); - } - j = bogus_value; - ASSERT( wait_try_get( g, s3, j ) == false, NULL ); - ASSERT( wait_try_get( g, s4, j ) == false, NULL ); - ASSERT( wait_try_get( g, s5, j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - g.wait_for_all(); - tbb::flow::remove_edge( s3, s4 ); - ASSERT( s3.try_put( N ) == true, NULL ); - ASSERT( wait_try_get( g, s4, j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( wait_try_get( g, s5, j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( wait_try_get( g, s3, j ) == true, NULL ); - ASSERT( j == N, NULL ); - - // - // Chained reverse-order simple puts and gets - // - - tbb::flow::sequencer_node<T> s6(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s7(g, seq_inspector<T>()); - tbb::flow::sequencer_node<T> s8(g, seq_inspector<T>()); - tbb::flow::make_edge( s6, s7 ); - tbb::flow::make_edge( s7, s8 ); - - for (int i = N-1; i >= 0; --i) { - bool msg = s6.try_put( T(i) ); - ASSERT( msg == true, NULL ); - } - - for (int i = 0; i < N; ++i) { - j = bogus_value; - ASSERT( wait_try_get( g, s8, j ) == true, NULL ); - ASSERT( i == j, NULL ); - } - j = bogus_value; - ASSERT( wait_try_get( g, s6, j ) == false, NULL ); - ASSERT( wait_try_get( g, s7, j ) == false, NULL ); - ASSERT( wait_try_get( g, s8, j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - - g.wait_for_all(); - tbb::flow::remove_edge( s6, s7 ); - ASSERT( s6.try_put( N ) == true, NULL ); - ASSERT( wait_try_get( g, s7, j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( wait_try_get( g, s8, j ) == false, NULL ); - ASSERT( j == bogus_value, NULL ); - ASSERT( wait_try_get( g, s6, j ) == true, NULL ); - ASSERT( j == N, NULL ); - - return 0; -} - -int TestMain() { - tbb::tick_count start = tbb::tick_count::now(), stop; - for (int p = 2; p <= 4; ++p) { - tbb::task_scheduler_init init(p); - test_serial<int>(); - test_parallel<int>(p); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_buffer_extract<tbb::flow::sequencer_node<int> >().run_tests(); -#endif - stop = tbb::tick_count::now(); - 
REMARK("Sequencer_Node Time=%6.6f\n", (stop-start).seconds()); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_source_node.cpp b/src/tbb/src/test/test_source_node.cpp deleted file mode 100644 index e3c1809e1..000000000 --- a/src/tbb/src/test/test_source_node.cpp +++ /dev/null @@ -1,324 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" -#include "harness_graph.h" -#include "tbb/flow_graph.h" -#include "tbb/task.h" -#include "tbb/task_scheduler_init.h" - -const int N = 1000; - -template< typename T > -class test_push_receiver : public tbb::flow::receiver<T> { - - tbb::atomic<int> my_counters[N]; - -public: - - test_push_receiver() { - for (int i = 0; i < N; ++i ) - my_counters[i] = 0; - } - - int get_count( int i ) { - int v = my_counters[i]; - return v; - } - - typedef tbb::flow::sender<T> predecessor_type; - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - void internal_add_built_predecessor( predecessor_type & ) { } - void internal_delete_built_predecessor( predecessor_type & ) { } - void copy_predecessors( std::vector<predecessor_type *> & ) { } - size_t predecessor_count() { return 0; } -#endif - - tbb::task *try_put_task( const T &v ) { - int i = (int)v; - ++my_counters[i]; - return const_cast<tbb::task *>(tbb::flow::interface7::SUCCESSFULLY_ENQUEUED); - } - - - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - /*override*/void reset_receiver(tbb::flow::reset_flags /*f*/) {} -#else - /*override*/void reset_receiver() {} -#endif -}; - -template< typename T > -class source_body { - - tbb::atomic<int> my_count; - int *ninvocations; - -public: - - source_body() : ninvocations(NULL) { my_count = 0; } - source_body(int &_inv) : ninvocations(&_inv) { my_count = 0; } - - bool operator()( T &v ) { - v = (T)my_count.fetch_and_increment(); - if(ninvocations) ++(*ninvocations); - if ( (int)v < N ) - return true; - else - return false; - } - -}; - -template< typename T > -class function_body { - - tbb::atomic<int> *my_counters; - -public: - - function_body( tbb::atomic<int> *counters ) : my_counters(counters) { - for (int i = 0; i < N; ++i ) - my_counters[i] = 0; - } - - bool operator()( T v ) { - ++my_counters[(int)v]; - return true; - } - -}; - -template< typename T > -void test_single_dest() { - - // push only - 
tbb::flow::graph g; - tbb::flow::source_node<T> src(g, source_body<T>() ); - test_push_receiver<T> dest; - tbb::flow::make_edge( src, dest ); - g.wait_for_all(); - for (int i = 0; i < N; ++i ) { - ASSERT( dest.get_count(i) == 1, NULL ); - } - - // push only - tbb::atomic<int> counters3[N]; - tbb::flow::source_node<T> src3(g, source_body<T>() ); - function_body<T> b3( counters3 ); - tbb::flow::function_node<T,bool> dest3(g, tbb::flow::unlimited, b3 ); - tbb::flow::make_edge( src3, dest3 ); - g.wait_for_all(); - for (int i = 0; i < N; ++i ) { - int v = counters3[i]; - ASSERT( v == 1, NULL ); - } - - // push & pull - tbb::flow::source_node<T> src2(g, source_body<T>() ); - tbb::atomic<int> counters2[N]; - function_body<T> b2( counters2 ); - tbb::flow::function_node<T,bool> dest2(g, tbb::flow::serial, b2 ); - tbb::flow::make_edge( src2, dest2 ); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - ASSERT(src2.successor_count() == 1, NULL); - typename tbb::flow::source_node<T>::successor_vector_type my_succs; - src2.copy_successors(my_succs); - ASSERT(my_succs.size() == 1, NULL); -#endif - g.wait_for_all(); - for (int i = 0; i < N; ++i ) { - int v = counters2[i]; - ASSERT( v == 1, NULL ); - } - - // test copy constructor - tbb::flow::source_node<T> src_copy(src); - test_push_receiver<T> dest_c; - ASSERT( src_copy.register_successor(dest_c), NULL ); - g.wait_for_all(); - for (int i = 0; i < N; ++i ) { - ASSERT( dest_c.get_count(i) == 1, NULL ); - } -} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -void test_extract() { - int counts = 0; - tbb::flow::tuple<int,int> dont_care; - tbb::flow::graph g; - typedef tbb::flow::source_node<int> snode_type; - tbb::flow::source_node<int> s0(g, source_body<int>(counts), /*is_active*/false ); - tbb::flow::join_node< tbb::flow::tuple<int,int>, tbb::flow::reserving > j0(g); - tbb::flow::join_node< tbb::flow::tuple<int,int>, tbb::flow::reserving > j1(g); - tbb::flow::join_node< tbb::flow::tuple<int,int>, tbb::flow::reserving > j2(g); - tbb::flow::queue_node<int> q0(g); - tbb::flow::queue_node<tbb::flow::tuple<int,int> > q1(g); - tbb::flow::make_edge(s0, tbb::flow::get<0>(j0.input_ports())); - /* s0 ----+ */ - /* | j0 */ - /* + */ - ASSERT(!counts, "source_node activated too soon"); - s0.activate(); - g.wait_for_all(); // should produce one value, buffer it. - ASSERT(counts == 1, "source_node did not react to activation"); - - g.reset(tbb::flow::rf_reset_bodies); - counts = 0; - s0.extract(); - /* s0 + */ - /* | j0 */ - /* + */ - s0.activate(); - g.wait_for_all(); // no successors, so the body will not execute - ASSERT(counts == 0, "source_node shouldn't forward (no successors)"); - s0.extract(tbb::flow::rf_reset_bodies); - - tbb::flow::make_edge(s0, tbb::flow::get<0>(j0.input_ports())); - tbb::flow::make_edge(s0, tbb::flow::get<0>(j1.input_ports())); - tbb::flow::make_edge(s0, tbb::flow::get<0>(j2.input_ports())); - - /* /+ */ - /* / | j0 */ - /* / + */ - /* / */ - /* / /--+ */ - /* s0-/ | j1 */ - /* \ + */ - /* \ */ - /* \--+ */ - /* | j2 */ - /* + */ - - // do all joins appear in successor list? 
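// (i.e. after wiring s0 to one input port on each of j0, j1 and j2 above,
// copy_successors() must report exactly those three receivers, which the
// lists_match() check below verifies.)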
- std::vector<tbb::flow::receiver<int>*> jv1; - jv1.push_back(&(tbb::flow::get<0>(j0.input_ports()))); - jv1.push_back(&(tbb::flow::get<0>(j1.input_ports()))); - jv1.push_back(&(tbb::flow::get<0>(j2.input_ports()))); - tbb::flow::source_node<int>::successor_vector_type sv; - s0.copy_successors(sv); - ASSERT(lists_match(sv, jv1), "mismatch in successor list"); - - tbb::flow::make_edge(q0, tbb::flow::get<1>(j2.input_ports())); - tbb::flow::make_edge(j2, q1); - s0.activate(); - - /* /+ */ - /* / | j0 */ - /* / + */ - /* / */ - /* / /--+ */ - /* s0-/ | j1 */ - /* \ + */ - /* \ */ - /* \--+ */ - /* | j2----q1 */ - /* q0-----+ */ - - q0.try_put(1); - g.wait_for_all(); - ASSERT(q1.try_get(dont_care), "join did not emit result"); - j2.extract(); - tbb::flow::make_edge(q0, tbb::flow::get<1>(j2.input_ports())); - tbb::flow::make_edge(j2, q1); - - /* /+ */ - /* / | j0 */ - /* / + */ - /* / */ - /* / /--+ */ - /* s0-/ | j1 */ - /* + */ - /* */ - /* + */ - /* | j2----q1 */ - /* q0-----+ */ - - jv1.clear(); - jv1.push_back(&(tbb::flow::get<0>(j0.input_ports()))); - jv1.push_back(&(tbb::flow::get<0>(j1.input_ports()))); - s0.copy_successors(sv); - ASSERT(lists_match(sv, jv1), "mismatch in successor list"); - - q0.try_put(1); - g.wait_for_all(); - ASSERT(!q1.try_get(dont_care), "extract of successor did not remove pred link"); - - s0.extract(); - - /* + */ - /* | j0 */ - /* + */ - /* */ - /* + */ - /* s0 | j1 */ - /* + */ - /* */ - /* + */ - /* | j2----q1 */ - /* q0-----+ */ - - ASSERT(s0.successor_count() == 0, "successor list not cleared"); - s0.copy_successors(sv); - ASSERT(sv.size() == 0, "non-empty successor list"); - - tbb::flow::make_edge(s0, tbb::flow::get<0>(j2.input_ports())); - - /* + */ - /* | j0 */ - /* + */ - /* */ - /* + */ - /* s0 | j1 */ - /* \ + */ - /* \ */ - /* \--+ */ - /* | j2----q1 */ - /* q0-----+ */ - - jv1.clear(); - jv1.push_back(&(tbb::flow::get<0>(j2.input_ports()))); - s0.copy_successors(sv); - ASSERT(lists_match(sv, jv1), "mismatch in successor list"); - - q0.try_put(1); - g.wait_for_all(); - ASSERT(!q1.try_get(dont_care), "extract of successor did not remove pred link"); -} -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - -int TestMain() { - if( MinThread<1 ) { - REPORT("number of threads must be positive\n"); - exit(1); - } - for ( int p = MinThread; p < MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - test_single_dest<int>(); - test_single_dest<float>(); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_extract(); -#endif - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_split_node.cpp b/src/tbb/src/test/test_split_node.cpp deleted file mode 100644 index 31abe5ed8..000000000 --- a/src/tbb/src/test/test_split_node.cpp +++ /dev/null @@ -1,357 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness.h" -#include "tbb/flow_graph.h" -#include "tbb/task_scheduler_init.h" - -#if defined(_MSC_VER) && _MSC_VER < 1600 - #pragma warning (disable : 4503) //disabling the "decorated name length exceeded" warning for VS2008 and earlier -#endif - -// -// Tests -// - -const int Count = 300; -const int MaxPorts = 10; -const int MaxNSources = 5; // max # of source_nodes to register for each split_node input in parallel test - -std::vector<bool> flags; // for checking output - -template<typename T> -class name_of { -public: - static const char* name() { return "Unknown"; } -}; -template<> -class name_of<int> { -public: - static const char* name() { return "int"; } -}; -template<> -class name_of<float> { -public: - static const char* name() { return "float"; } -}; -template<> -class name_of<double> { -public: - static const char* name() { return "double"; } -}; -template<> -class name_of<long> { -public: - static const char* name() { return "long"; } -}; -template<> -class name_of<short> { -public: - static const char* name() { return "short"; } -}; - -// T must be arithmetic, and shouldn't wrap around for reasonable sizes of Count (which is now 150, and maxPorts is 10, -// so the max number generated right now is 1500 or so.) Source will generate a series of TT with value -// (init_val + (i-1)*addend) * my_mult, where i is the i-th invocation of the body. We are attaching addend -// source nodes to a join_port, and each will generate part of the numerical series the port is expecting -// to receive. If there is only one source node, the series order will be maintained; if more than one, -// this is not guaranteed. - -template<int N> -struct tuple_helper { - template<typename TupleType> - static void set_element( TupleType &t, int i) { - tbb::flow::get<N-1>(t) = (typename tbb::flow::tuple_element<N-1,TupleType>::type)(i * (N+1)); - tuple_helper<N-1>::set_element(t, i); - } -}; - -template<> -struct tuple_helper<1> { - template<typename TupleType> - static void set_element(TupleType &t, int i) { - tbb::flow::get<0>(t) = (typename tbb::flow::tuple_element<0,TupleType>::type)(i * 2); - } -}; - -// if we start N source_bodys they will all have the addend N, and my_count should be initialized to 0 .. N-1. -// the output tuples should have all the sequence, but the order will in general vary. 
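// For illustration, a sketch of that series arithmetic with hypothetical
// values N = 3 and Count = 9: the bodies source_body(0,3), source_body(1,3)
// and source_body(2,3) generate the index series {0,3,6}, {1,4,7} and
// {2,5,8} respectively - together exactly one value per index in [0, Count):
//
//     std::vector<int> hits(Count, 0);
//     for (int init = 0; init < N; ++init)       // one source_body(init, N)
//         for (int v = init; v < Count; v += N)  // the series that body yields
//             ++hits[v];
//     // every hits[i] is now 1, which is what check_sink_values() relies on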
-template<typename TupleType> -class source_body { - typedef TupleType TT; - static const int N = tbb::flow::tuple_size<TT>::value; - int my_count; - int addend; -public: - source_body(int init_val, int addto) : my_count(init_val), addend(addto) { } - void operator=( const source_body& other) { my_count = other.my_count; addend = other.addend; } - bool operator()( TT &v) { - if(my_count >= Count) return false; - tuple_helper<N>::set_element(v, my_count); - my_count += addend; - return true; - } -}; - -// allocator for split_node. - -template<int N, typename SType> -class makeSplit { -public: - static SType *create(tbb::flow::graph& g) { - SType *temp = new SType(g); - return temp; - } - static void destroy(SType *p) { delete p; } -}; - -// holder for sink_node pointers for eventual deletion - -static void* all_sink_nodes[MaxPorts]; - - -template<int ELEM, typename SType> -class sink_node_helper { -public: - typedef typename SType::input_type TT; - typedef typename tbb::flow::tuple_element<ELEM-1,TT>::type IT; - typedef typename tbb::flow::queue_node<IT> my_sink_node_type; - static void print_parallel_remark() { - sink_node_helper<ELEM-1,SType>::print_parallel_remark(); - REMARK(", %s", name_of<IT>::name()); - } - static void print_serial_remark() { - sink_node_helper<ELEM-1,SType>::print_serial_remark(); - REMARK(", %s", name_of<IT>::name()); - } - static void add_sink_nodes(SType &my_split, tbb::flow::graph &g) { - my_sink_node_type *new_node = new my_sink_node_type(g); - tbb::flow::make_edge( tbb::flow::output_port<ELEM-1>(my_split) , *new_node); - all_sink_nodes[ELEM-1] = (void *)new_node; - sink_node_helper<ELEM-1, SType>::add_sink_nodes(my_split, g); - } - - static void check_sink_values() { - my_sink_node_type *dp = reinterpret_cast<my_sink_node_type *>(all_sink_nodes[ELEM-1]); - for(int i = 0; i < Count; ++i) { - IT v; - ASSERT(dp->try_get(v), NULL); - flags[((int)v) / (ELEM+1)] = true; - } - for(int i = 0; i < Count; ++i) { - ASSERT(flags[i], NULL); - flags[i] = false; // reset for next test - } - sink_node_helper<ELEM-1,SType>::check_sink_values(); - } - static void remove_sink_nodes(SType& my_split) { - my_sink_node_type *dp = reinterpret_cast<my_sink_node_type *>(all_sink_nodes[ELEM-1]); - tbb::flow::remove_edge( tbb::flow::output_port<ELEM-1>(my_split) , *dp); - delete dp; - sink_node_helper<ELEM-1, SType>::remove_sink_nodes(my_split); - } -}; - -template<typename SType> -class sink_node_helper<1, SType> { - typedef typename SType::input_type TT; - typedef typename tbb::flow::tuple_element<0,TT>::type IT; - typedef typename tbb::flow::queue_node<IT> my_sink_node_type; -public: - static void print_parallel_remark() { - REMARK("Parallel test of split_node< %s", name_of<IT>::name()); - } - static void print_serial_remark() { - REMARK("Serial test of split_node< %s", name_of<IT>::name()); - } - static void add_sink_nodes(SType &my_split, tbb::flow::graph &g) { - my_sink_node_type *new_node = new my_sink_node_type(g); - tbb::flow::make_edge( tbb::flow::output_port<0>(my_split) , *new_node); - all_sink_nodes[0] = (void *)new_node; - } - static void check_sink_values() { - my_sink_node_type *dp = reinterpret_cast<my_sink_node_type *>(all_sink_nodes[0]); - for(int i = 0; i < Count; ++i) { - IT v; - ASSERT(dp->try_get(v), NULL); - flags[((int)v) / 2] = true; - } - for(int i = 0; i < Count; ++i) { - ASSERT(flags[i], NULL); - flags[i] = false; // reset for next test - } - } - static void remove_sink_nodes(SType& my_split) { - my_sink_node_type *dp = reinterpret_cast<my_sink_node_type 
*>(all_sink_nodes[0]); - tbb::flow::remove_edge( tbb::flow::output_port<0>(my_split) , *dp); - delete dp; - } -}; - -// parallel_test: create source_nodes that feed tuples into the split node -// and queue_nodes that receive the output. -template<typename SType> -class parallel_test { -public: - typedef typename SType::input_type TType; - typedef tbb::flow::source_node<TType> source_type; - static const int N = tbb::flow::tuple_size<TType>::value; - static void test() { - TType v; - source_type* all_source_nodes[MaxNSources]; - sink_node_helper<N,SType>::print_parallel_remark(); - REMARK(" >\n"); - for(int i=0; i < MaxPorts; ++i) { - all_sink_nodes[i] = NULL; - } - // try test for # sources 1 .. MaxNSources - for(int nInputs = 1; nInputs <= MaxNSources; ++nInputs) { - tbb::flow::graph g; - SType* my_split = makeSplit<N,SType>::create(g); - - // add sinks first so when sources start spitting out values they are there to catch them - sink_node_helper<N, SType>::add_sink_nodes((*my_split), g); - - // now create nInputs source_nodes, each spitting out i, i+nInputs, i+2*nInputs ... - // each element of the tuple is i*(n+1), where n is the tuple element index (1-N) - for(int i = 0; i < nInputs; ++i) { - // create source node - source_type *s = new source_type(g, source_body<TType>(i, nInputs) ); - tbb::flow::make_edge(*s, *my_split); - all_source_nodes[i] = s; - } - - g.wait_for_all(); - - // check that we got Count values in each output queue, and all the index values - // are there. - sink_node_helper<N, SType>::check_sink_values(); - - sink_node_helper<N, SType>::remove_sink_nodes(*my_split); - for(int i = 0; i < nInputs; ++i) { - delete all_source_nodes[i]; - } - makeSplit<N,SType>::destroy(my_split); - } - } -}; - -// -// Single predecessor, single accepting successor at each port - -template<typename SType> -void test_one_serial( SType &my_split, tbb::flow::graph &g) { - typedef typename SType::input_type TType; - static const int TUPLE_SIZE = tbb::flow::tuple_size<TType>::value; - sink_node_helper<TUPLE_SIZE, SType>::add_sink_nodes(my_split,g); - typedef TType q3_input_type; - tbb::flow::queue_node< q3_input_type > q3(g); - - tbb::flow::make_edge( q3, my_split ); - - // fill the queue with its value one-at-a-time - flags.clear(); - for (int i = 0; i < Count; ++i ) { - TType v; - tuple_helper<TUPLE_SIZE>::set_element(v, i); - ASSERT(my_split.try_put(v), NULL); - flags.push_back(false); - } - - g.wait_for_all(); - - sink_node_helper<TUPLE_SIZE,SType>::check_sink_values(); - - sink_node_helper<TUPLE_SIZE, SType>::remove_sink_nodes(my_split); - -} - -template<typename SType> -class serial_test { - typedef typename SType::input_type TType; - static const int TUPLE_SIZE = tbb::flow::tuple_size<TType>::value; - static const int ELEMS = 3; -public: -static void test() { - tbb::flow::graph g; - flags.reserve(Count); - SType* my_split = makeSplit<TUPLE_SIZE,SType>::create(g); - sink_node_helper<TUPLE_SIZE, SType>::print_serial_remark(); REMARK(" >\n"); - - test_one_serial<SType>( *my_split, g); - // build the vector with copy construction from the used split node. - std::vector<SType>split_vector(ELEMS, *my_split); - // destroy the tired old split_node in case we're accidentally reusing pieces of it. 
- makeSplit<TUPLE_SIZE,SType>::destroy(my_split); - - - for(int e = 0; e < ELEMS; ++e) { // exercise each of the vector elements - test_one_serial<SType>( split_vector[e], g); - } -} - -}; // serial_test - -template< - template<typename> class TestType, // serial_test or parallel_test - typename TupleType > // type of the input of the split -struct generate_test { - typedef tbb::flow::split_node<TupleType> split_node_type; - static void do_test() { - TestType<split_node_type>::test(); - } -}; // generate_test - -int TestMain() { -#if __TBB_USE_TBB_TUPLE - REMARK(" Using TBB tuple\n"); -#else - REMARK(" Using platform tuple\n"); -#endif - for (int p = 0; p < 2; ++p) { - generate_test<serial_test, tbb::flow::tuple<float, double> >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 4 - generate_test<serial_test, tbb::flow::tuple<float, double, int, long> >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 6 - generate_test<serial_test, tbb::flow::tuple<double, double, int, long, int, short> >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 8 - generate_test<serial_test, tbb::flow::tuple<float, double, double, double, float, int, float, long> >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 10 - generate_test<serial_test, tbb::flow::tuple<float, double, int, double, double, float, long, int, float, long> >::do_test(); -#endif - generate_test<parallel_test, tbb::flow::tuple<float, double> >::do_test(); -#if MAX_TUPLE_TEST_SIZE >= 3 - generate_test<parallel_test, tbb::flow::tuple<float, int, long> >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 5 - generate_test<parallel_test, tbb::flow::tuple<double, double, int, int, short> >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 7 - generate_test<parallel_test, tbb::flow::tuple<float, int, double, float, long, float, long> >::do_test(); -#endif -#if MAX_TUPLE_TEST_SIZE >= 9 - generate_test<parallel_test, tbb::flow::tuple<float, double, int, double, double, long, int, float, long> >::do_test(); -#endif - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_static_assert.cpp b/src/tbb/src/test/test_static_assert.cpp deleted file mode 100644 index 41c230644..000000000 --- a/src/tbb/src/test/test_static_assert.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_stddef.h" - -void TestInsideFunction(){ - __TBB_STATIC_ASSERT(sizeof(char)>=1,""); -} -void TestTwiceAtTheSameLine(){ -// for current implementation it is not possible to use -// two __TBB_STATIC_ASSERT on a same line -// __TBB_STATIC_ASSERT(true,""); __TBB_STATIC_ASSERT(true,""); -} - -void TestInsideStructure(){ - struct helper{ - __TBB_STATIC_ASSERT(true,""); - }; -} - -void TestTwiceInsideStructure(){ - struct helper{ - //for current implementation it is not possible to use - //two __TBB_STATIC_ASSERT on a same line inside a class definition - //__TBB_STATIC_ASSERT(true,"");__TBB_STATIC_ASSERT(true,""); - - __TBB_STATIC_ASSERT(true,""); - __TBB_STATIC_ASSERT(true,""); - }; -} - -namespace TestTwiceInsideNamespaceHelper{ - __TBB_STATIC_ASSERT(true,""); - __TBB_STATIC_ASSERT(true,""); -} - -namespace TestTwiceInsideClassTemplateHelper{ - template <typename T> - struct template_struct{ - __TBB_STATIC_ASSERT(true,""); - __TBB_STATIC_ASSERT(true,""); - }; -} - -void TestTwiceInsideTemplateClass(){ - using namespace TestTwiceInsideClassTemplateHelper; - typedef template_struct<int> template_struct_int_typedef; - typedef template_struct<char> template_struct_char_typedef; -} - -template<typename T> -void TestTwiceInsideTemplateFunction(){ - __TBB_STATIC_ASSERT(sizeof(T)>=1,""); - __TBB_STATIC_ASSERT(true,""); -} - -#include "harness.h" -int TestMain() { - #if __TBB_STATIC_ASSERT_PRESENT - REPORT("Known issue: %s\n", "no need to test ad-hoc implementation as native feature of C++11 is used"); - return Harness::Skipped; - #else - TestInsideFunction(); - TestInsideStructure(); - TestTwiceAtTheSameLine(); - TestTwiceInsideStructure(); - TestTwiceInsideTemplateClass(); - TestTwiceInsideTemplateFunction<char>(); - return Harness::Done; - #endif -} diff --git a/src/tbb/src/test/test_std_thread.cpp b/src/tbb/src/test/test_std_thread.cpp deleted file mode 100644 index 50befe0a3..000000000 --- a/src/tbb/src/test/test_std_thread.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#define TBB_IMPLEMENT_CPP0X 1 -#include "tbb/tbb_config.h" - -#if __TBB_WIN8UI_SUPPORT -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -int TestMain() { - return Harness::Skipped; -} -#else -#include "tbb/compat/thread" -#define THREAD std::thread -#define THIS_THREAD std::this_thread -#define THIS_THREAD_SLEEP THIS_THREAD::sleep_for -#include "test_thread.h" -#include "harness.h" - -int TestMain () { - CheckSignatures(); - RunTests(); - return Harness::Done; -} -#endif diff --git a/src/tbb/src/test/test_tagged_msg.cpp b/src/tbb/src/test/test_tagged_msg.cpp deleted file mode 100644 index c85a97230..000000000 --- a/src/tbb/src/test/test_tagged_msg.cpp +++ /dev/null @@ -1,262 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define _VARIADIC_MAX 10 // Visual Studio 2012 -#include "harness.h" -#include "tbb/atomic.h" -#include "harness_checktype.h" - -#include "tbb/flow_graph.h" -#include <cstdio> -#include <stdexcept> -#include <vector> - -#if __TBB_GCC_STRICT_ALIASING_BROKEN - #pragma GCC diagnostic ignored "-Wstrict-aliasing" -#endif - -// given a tuple, return the type of the element that has the maximum alignment requirement. -// Given a tuple and that type, return the number of elements of the object with the max -// alignment requirement that is at least as big as the largest object in the tuple. 
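// (In modern C++ terms - a sketch of that computation, not the machinery the
// implementation below actually uses, which predates C++11 alignof:)
//
//     template <typename A, typename B>          // max-aligned of two types;
//     struct max_aligned {                       // recursion generalizes this
//         typedef typename std::conditional<
//             (alignof(A) >= alignof(B)), A, B>::type type;
//     };
//
//     template <typename T, std::size_t LargestSize>
//     struct cover_count {                       // ceil(LargestSize/sizeof(T))
//         static const std::size_t value =
//             (LargestSize + sizeof(T) - 1) / sizeof(T);
//     };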
- -using tbb::flow::tuple_element; -using tbb::flow::tuple_size; -using tbb::flow::cast_to; -using tbb::flow::is_a; - -typedef int *int_ptr; -typedef char odd_array_type[15]; -typedef char odder_array[17]; -typedef check_type<int> counted_array_type[12]; -typedef std::vector<double> d_vector; -typedef std::vector<int> i_vector; -typedef i_vector i_vector_array[2]; -typedef tbb::flow::tagged_msg<size_t, int, char, double, odd_array_type, odder_array, d_vector, check_type<int>, counted_array_type, i_vector_array> tagged_msg_type; - -// test base of tagged_msg -void TestWrapper() { - tbb::flow::interface7::internal::Wrapper<int> wi(42); - const tbb::flow::interface7::internal::Wrapper<int> wic(23); - - REMARK("Value of wic is %d\n", wic.value()); - - // pointer-type creation - int point_to_me = 23; - tbb::flow::interface7::internal::Wrapper<int_ptr> wip(&point_to_me); - ASSERT(*(wip.value()) == 23, "Error in wip value"); - - odd_array_type ww; - for(int ii = 0; ii < 15; ++ii) { ww[ii] = char('0' + ii); } ww[14] = 0; - - tbb::flow::interface7::internal::Wrapper<odd_array_type> ci(ww); - ASSERT(!strncmp(ci.value(), ww, 14), "odd_array_type ci not properly-constructed" ); - - tbb::flow::interface7::internal::Wrapper<odd_array_type> ci2(ci); - - ASSERT(!strncmp(ci2.value(), ww, 14), "odd_array_type ci2 not properly-constructed" ); - - d_vector di; - di.clear(); - di.push_back(2.0); - tbb::flow::interface7::internal::Wrapper<d_vector> dvec(di); - ASSERT(dvec.value()[0] == 2.0, "incorrect value in vector"); - - // test array of non-PODs. - i_vector_array oia; - oia[0].clear(); - oia[1].clear(); - oia[0].push_back(3); - oia[1].push_back(2); - tbb::flow::interface7::internal::Wrapper<i_vector_array> ia(oia); - ASSERT((ia.value()[1])[0] == 2, "integer vector array element[1] misbehaved"); - ASSERT((ia.value()[0])[0] == 3, "integer vector array element[0] misbehaved"); - tbb::flow::interface7::internal::Wrapper<i_vector_array> iac(ia); - ASSERT((iac.value()[1])[0] == 2, "integer vector array element[1] misbehaved"); - ASSERT((iac.value()[0])[0] == 3, "integer vector array element[0] misbehaved"); - - // counted_array - counted_array_type cat_orig; - for(int i = 0; i < 12; ++i) cat_orig[i] = i + 1; - tbb::flow::interface7::internal::Wrapper<counted_array_type> cat(cat_orig); - for(int j = 0; j < 12; ++j) - ASSERT(1 + j == cat.value()[j], "Error in cat array"); - - int i = wi.value(); - ASSERT(i == 42, "Assignment to i failed"); - ASSERT(wi.value() == 42, "Assignment to wi failed"); - double d = wi.value(); - ASSERT(d == 42, "Implicit cast in assign to double failed"); - int_ptr ip = wip.value(); - ASSERT(ip == &(point_to_me), "Error in assignment of pointer"); -} - -void RunTests() { - tagged_msg_type def; - tagged_msg_type i(1,3); - check_type<int>::check_type_counter = 0; - int z; - #if TBB_USE_EXCEPTIONS - try { - z = cast_to<int>(def); // disallowed (non-array returning int) - ASSERT(false, "should not allow cast to int of non-array"); - } - catch(...) 
{ - REMARK("cast of non-array to int disallowed (okay)\n"); - } - #endif - z = cast_to<int>(i); - ASSERT(is_a<int>(i), "wrong type for i ( == int)"); - ASSERT(!(is_a<double>(i)), "Wrong type for i ( != double)"); - z = 5; - z = cast_to<int>(i); - - const int &ref_i(cast_to<int>(i)); - ASSERT(ref_i == 3, "ref_i got wrong value"); - tagged_msg_type j(2,4); - i = j; - ASSERT(ref_i == 4, "assign to i did not affect ref_i"); - - ASSERT( z == 3, "Error retrieving value from i"); - - //updating and retrieving tags - ASSERT(j.tag() == 2, "Error retrieving tag for j"); - j.set_tag(10); - ASSERT(j.tag() == 10, "Error updating tag for j"); - - tbb::flow::tagged_msg<char, int, char, double> k('a', 4); - k.set_tag('b'); - ASSERT(k.tag() == 'b', "Error updating char tag"); - - tagged_msg_type double_tagged_msg(3, 8.0); - ASSERT(is_a<double>(double_tagged_msg), "Wrong type for double_tagged_msg (== double)"); - ASSERT(!is_a<char>(double_tagged_msg), "Wrong type for double_tagged_msg (!= char)"); - ASSERT(!is_a<int>(double_tagged_msg), "Wrong type for double_tagged_msg (!= int)"); - tagged_msg_type copytype(double_tagged_msg); - ASSERT(is_a<double>(copytype), "Wrong type for double_tagged_msg (== double)"); - ASSERT(!is_a<char>(copytype), "Wrong type for double_tagged_msg (!= char)"); - ASSERT(!is_a<int>(copytype), "Wrong type for double_tagged_msg (!= int)"); - tagged_msg_type default_tagged_msg; - ASSERT(!(is_a<double>(default_tagged_msg)), "wrong type for default ( != double)"); - ASSERT(!(is_a<int>(default_tagged_msg)), "wrong type for default ( != int)"); - ASSERT(!(is_a<bool>(default_tagged_msg)), "wrong type for default ( != bool)"); - check_type<int> c; - ASSERT(check_type<int>::check_type_counter == 1, "Incorrect number of check_type<int>s created"); - tagged_msg_type cnt_type(4, c); - ASSERT(check_type<int>::check_type_counter == 2, "Incorrect number of check_type<int>s created"); - ASSERT(is_a<check_type<int> >(cnt_type), "Incorrect type for cnt_type"); - cnt_type = default_tagged_msg; - ASSERT(check_type<int>::check_type_counter == 1, "Incorrect number of check_type<int>s after reassignment"); - ASSERT(cnt_type.is_default_constructed(), "Assigned check_type<int>s is not default-constructed"); - // having problem with init on gcc 3.4.6 (fxeolin16) constructor for elements of array not called - // for this version. - // counted_array_type counted_array; - check_type<int> counted_array[12]; // this is okay - ASSERT(check_type<int>::check_type_counter == 13, "Incorrect number of check_type<int>s after counted_array construction"); - tagged_msg_type counted_array_tagged_msg(5, counted_array); - // the is_a<>() should return exact type matches. 
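// (so the stored counted_array_type must not match the pointer type
// check_type<int>* that an array would decay to; likewise the cast_to<int *>
// attempted below is required to throw.)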
- ASSERT(!is_a<check_type<int> *>(counted_array_tagged_msg), "Test of is_a for counted_array_tagged_msg fails"); - #if TBB_USE_EXCEPTIONS - try { - int *iip = cast_to<int *>(counted_array_tagged_msg); - ASSERT(false, "did not throw on invalid cast"); - *iip = 2; // avoids "ipp set but not used" warning - } - catch(std::runtime_error &re) { - REMARK("attempt to cast to invalid type caught %s\n", re.what()); - } - ASSERT(is_a<counted_array_type>(counted_array_tagged_msg), "testing"); - const check_type<int> *ctip = cast_to<counted_array_type>(counted_array_tagged_msg); - - ASSERT((int)(*ctip) == 0, "ctip incorrect"); - - ASSERT(check_type<int>::check_type_counter == 25, "Incorrect number of check_type<int>s after counted_array_tagged_msg construction"); - counted_array_tagged_msg = default_tagged_msg; - ASSERT(check_type<int>::check_type_counter == 13, "Incorrect number of check_type<int>s after counted_array_tagged_msg destruction"); - ASSERT(counted_array_tagged_msg.is_default_constructed(), "Assigned counted_array_type is not default-constructed"); - - default_tagged_msg = double_tagged_msg; - const double my_dval = cast_to<double>(default_tagged_msg); - ASSERT(my_dval == 8.0, "did not retrieve correct value from assigned default_tagged_msg"); - - { - odd_array_type my_b; - for(size_t ii=0; ii < 14;++ii) { - my_b[ii] = (char)('0' + ii); - } - my_b[14] = 0; - { - tagged_msg_type odd_array_tagged_msg(6, my_b); - const char *my_copy = cast_to<odd_array_type>(odd_array_tagged_msg); - ASSERT(!strncmp(my_b, my_copy, 14), "copied char array not correct value"); - default_tagged_msg = odd_array_tagged_msg; - try { - const char *my_copy2 = cast_to<odd_array_type>(default_tagged_msg); - ASSERT(!strncmp(my_b, my_copy2, 14), "char array from default tagged_msg assign not correct value"); - } - catch(...) { - ASSERT(false, "Bad cast"); - } - } - } - - ASSERT(!is_a<double>(i), "bad type for i"); - try { - double y = cast_to<double>(i); - // use '&' to force eval of RHS (fixes "initialized but not referenced" vs2012 warnings) - ASSERT(false & (0 != y), "Error: cast to type in tuple did not get exception"); - } - catch(std::runtime_error &bc) { - ASSERT(0 == strcmp(bc.what(), "Illegal tagged_msg cast"), "Incorrect std:runtime_error"); - } - catch(...) { - ASSERT(false & cast_to<int>(i), "Error: improper exception thrown"); - } - - try { - int *ip = cast_to<int *>(i); - ASSERT(false & (NULL!=ip), "Error: non-array cast to pointer type."); - } - catch(std::runtime_error &bc) { - ASSERT(0 == strcmp(bc.what(), "Illegal tagged_msg cast"), "Incorrect std:runtime_error"); - } - catch(...) { - ASSERT(false, "did not get runtime_error exception in casting non-array to pointer"); - } - - try { - bool b = cast_to<bool>(i); - ASSERT(false & b, "Error: cast against type did not get exception"); - } - catch(std::runtime_error &bc) { - ASSERT(0 == strcmp(bc.what(), "Illegal tagged_msg cast"), "Incorrect std:runtime_error"); - } - catch(...) 
{ - ASSERT(false, "did not get runtime_error exception casting to disparate types"); - } - #endif //TBB_USE_EXCEPTIONS -} - -int TestMain() { - TestWrapper(); - ASSERT(check_type<int>::check_type_counter == 0, "After TestWrapper return not all check_type<int>s were destroyed"); - RunTests(); - ASSERT(check_type<int>::check_type_counter == 0, "After RunTests return not all check_type<int>s were destroyed"); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task.cpp b/src/tbb/src/test/test_task.cpp deleted file mode 100644 index 254dd9742..000000000 --- a/src/tbb/src/test/test_task.cpp +++ /dev/null @@ -1,801 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "harness_task.h" -#include "tbb/atomic.h" -#include "tbb/tbb_thread.h" -#include "tbb/task_scheduler_init.h" -#include <cstdlib> - -//------------------------------------------------------------------------ -// Test for task::spawn_children and task_list -//------------------------------------------------------------------------ - -class UnboundedlyRecursiveOnUnboundedStealingTask : public tbb::task { - typedef UnboundedlyRecursiveOnUnboundedStealingTask this_type; - - this_type *m_Parent; - const int m_Depth; - volatile bool m_GoAhead; - - // Well, virtually unboundedly, for any practical purpose - static const int max_depth = 1000000; - -public: - UnboundedlyRecursiveOnUnboundedStealingTask( this_type *parent_ = NULL, int depth_ = max_depth ) - : m_Parent(parent_) - , m_Depth(depth_) - , m_GoAhead(true) - {} - - /*override*/ - tbb::task* execute() { - // Using large padding array speeds up reaching stealing limit - const int paddingSize = 16 * 1024; - volatile char padding[paddingSize]; - if( !m_Parent || (m_Depth > 0 && m_Parent->m_GoAhead) ) { - if ( m_Parent ) { - // We are stolen, let our parent start waiting for us - m_Parent->m_GoAhead = false; - } - tbb::task &t = *new( allocate_child() ) this_type(this, m_Depth - 1); - set_ref_count( 2 ); - spawn( t ); - // Give a willing thief a chance to steal - for( int i = 0; i < 1000000 && m_GoAhead; ++i ) { - ++padding[i % paddingSize]; - __TBB_Yield(); - } - // If our child has not been stolen yet, then prohibit it siring ones - // of its own (when this thread executes it inside the next wait_for_all) - m_GoAhead = false; - wait_for_all(); - } - return NULL; - } -}; // UnboundedlyRecursiveOnUnboundedStealingTask - -tbb::atomic<int> Count; - -class RecursiveTask: public tbb::task { - const int m_ChildCount; - const int m_Depth; - //! Spawn tasks in list. Exact method depends upon m_Depth&bit_mask. - void SpawnList( tbb::task_list& list, int bit_mask ) { - if( m_Depth&bit_mask ) { - // Take address to check that signature of spawn(task_list&) is static. - void (*s)(tbb::task_list&) = &tbb::task::spawn; - (*s)(list); - ASSERT( list.empty(), NULL ); - wait_for_all(); - } else { - spawn_and_wait_for_all(list); - ASSERT( list.empty(), NULL ); - } - } -public: - RecursiveTask( int child_count, int depth_ ) : m_ChildCount(child_count), m_Depth(depth_) {} - /*override*/ tbb::task* execute() { - ++Count; - if( m_Depth>0 ) { - tbb::task_list list; - ASSERT( list.empty(), NULL ); - for( int k=0; k<m_ChildCount; ++k ) { - list.push_back( *new( allocate_child() ) RecursiveTask(m_ChildCount/2,m_Depth-1 ) ); - ASSERT( !list.empty(), NULL ); - } - set_ref_count( m_ChildCount+1 ); - SpawnList( list, 1 ); - // Now try reusing this as the parent. - set_ref_count(2); - list.push_back( *new ( allocate_child() ) tbb::empty_task() ); - SpawnList( list, 2 ); - } - return NULL; - } -}; - -//! Compute what Count should be after RecursiveTask(child_count,depth) runs. -static int Expected( int child_count, int depth ) { - return depth<=0 ? 1 : 1+child_count*Expected(child_count/2,depth-1); -} - -void TestStealLimit( int nthread ) { -#if __TBB_DEFINE_MIC - REMARK( "skipping steal limiting heuristics for %d threads\n", nthread ); -#else// !_TBB_DEFINE_MIC - REMARK( "testing steal limiting heuristics for %d threads\n", nthread ); - tbb::task_scheduler_init init(nthread); - tbb::task &t = *new( tbb::task::allocate_root() ) UnboundedlyRecursiveOnUnboundedStealingTask(); - tbb::task::spawn_root_and_wait(t); -#endif// _TBB_DEFINE_MIC -} - -//! 
Test task::spawn( task_list& ) -void TestSpawnChildren( int nthread ) { - REMARK("testing task::spawn(task_list&) for %d threads\n",nthread); - tbb::task_scheduler_init init(nthread); - for( int j=0; j<50; ++j ) { - Count = 0; - RecursiveTask& p = *new( tbb::task::allocate_root() ) RecursiveTask(j,4); - tbb::task::spawn_root_and_wait(p); - int expected = Expected(j,4); - ASSERT( Count==expected, NULL ); - } -} - -//! Test task::spawn_root_and_wait( task_list& ) -void TestSpawnRootList( int nthread ) { - REMARK("testing task::spawn_root_and_wait(task_list&) for %d threads\n",nthread); - tbb::task_scheduler_init init(nthread); - for( int j=0; j<5; ++j ) - for( int k=0; k<10; ++k ) { - Count = 0; - tbb::task_list list; - for( int i=0; i<k; ++i ) - list.push_back( *new( tbb::task::allocate_root() ) RecursiveTask(j,4) ); - tbb::task::spawn_root_and_wait(list); - int expected = k*Expected(j,4); - ASSERT( Count==expected, NULL ); - } -} - -//------------------------------------------------------------------------ -// Test for task::recycle_as_safe_continuation -//------------------------------------------------------------------------ - -void TestSafeContinuation( int nthread ) { - REMARK("testing task::recycle_as_safe_continuation for %d threads\n",nthread); - tbb::task_scheduler_init init(nthread); - for( int j=8; j<33; ++j ) { - TaskGenerator& p = *new( tbb::task::allocate_root() ) TaskGenerator(j,5); - tbb::task::spawn_root_and_wait(p); - } -} - -//------------------------------------------------------------------------ -// Test affinity interface -//------------------------------------------------------------------------ -tbb::atomic<int> TotalCount; - -struct AffinityTask: public tbb::task { - const affinity_id expected_affinity_id; - bool noted; - /** Computing affinities is NOT supported by TBB, and may disappear in the future. - It is done here for sake of unit testing. */ - AffinityTask( int expected_affinity_id_ ) : - expected_affinity_id(affinity_id(expected_affinity_id_)), - noted(false) - { - set_affinity(expected_affinity_id); - ASSERT( 0u-expected_affinity_id>0u, "affinity_id not an unsigned integral type?" ); - ASSERT( affinity()==expected_affinity_id, NULL ); - } - /*override*/ tbb::task* execute() { - ++TotalCount; - return NULL; - } - /*override*/ void note_affinity( affinity_id id ) { - // There is no guarantee in TBB that a task runs on its affinity thread. - // However, the current implementation does accidentally guarantee it - // under certain conditions, such as the conditions here. - // We exploit those conditions for sake of unit testing. - ASSERT( id!=expected_affinity_id, NULL ); - ASSERT( !noted, "note_affinity_id called twice!" ); - ASSERT ( &self() == (tbb::task*)this, "Wrong innermost running task" ); - noted = true; - } -}; - -/** Note: This test assumes a lot about the internal implementation of affinity. - Do NOT use this as an example of good programming practice with TBB */ -void TestAffinity( int nthread ) { - TotalCount = 0; - int n = tbb::task_scheduler_init::default_num_threads(); - if( n>nthread ) - n = nthread; - tbb::task_scheduler_init init(n); - tbb::empty_task* t = new( tbb::task::allocate_root() ) tbb::empty_task; - tbb::task::affinity_id affinity_id = t->affinity(); - ASSERT( affinity_id==0, NULL ); - // Set ref_count for n-1 children, plus 1 for the wait. - t->set_ref_count(n); - // Spawn n-1 affinitized children. 
- for( int i=1; i<n; ++i ) - tbb::task::spawn( *new(t->allocate_child()) AffinityTask(i) ); - if( n>1 ) { - // Keep master from stealing - while( TotalCount!=n-1 ) - __TBB_Yield(); - } - // Wait for the children - t->wait_for_all(); - int k = 0; - GetTaskPtr(k)->destroy(*t); - ASSERT(k==1,NULL); -} - -struct NoteAffinityTask: public tbb::task { - bool noted; - NoteAffinityTask( int id ) : noted(false) - { - set_affinity(affinity_id(id)); - } - ~NoteAffinityTask () { - ASSERT (noted, "note_affinity has not been called"); - } - /*override*/ tbb::task* execute() { - return NULL; - } - /*override*/ void note_affinity( affinity_id /*id*/ ) { - noted = true; - ASSERT ( &self() == (tbb::task*)this, "Wrong innermost running task" ); - } -}; - -// This test checks one of the paths inside the scheduler by affinitizing the child task -// to non-existent thread so that it is proxied in the local task pool but not retrieved -// by another thread. -// If no workers requested, the extra slot #2 is allocated for a worker thread to serve -// "enqueued" tasks. In this test, it is used only for the affinity purpose. -void TestNoteAffinityContext() { - tbb::task_scheduler_init init(1); - tbb::empty_task* t = new( tbb::task::allocate_root() ) tbb::empty_task; - t->set_ref_count(2); - // This master in the absence of workers will have an affinity id of 1. - // So use another number to make the task get proxied. - tbb::task::spawn( *new(t->allocate_child()) NoteAffinityTask(2) ); - t->wait_for_all(); - tbb::task::destroy(*t); -} - -//------------------------------------------------------------------------ -// Test that recovery actions work correctly for task::allocate_* methods -// when a task's constructor throws an exception. -//------------------------------------------------------------------------ - -#if TBB_USE_EXCEPTIONS -static int TestUnconstructibleTaskCount; - -struct ConstructionFailure { -}; - -#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - // Suppress pointless "unreachable code" warning. - #pragma warning (push) - #pragma warning (disable: 4702) -#endif - -//! Task that cannot be constructed. 
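// (Each placement-new in TRY_BAD_CONSTRUCTION below routes the throwing
// constructor through one of the four task::allocate_* forms; the recovery
// contract being checked is that the allocating task's parent() and
// ref_count() are left exactly as they were before the failed allocation.)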
-template<size_t N> -struct UnconstructibleTask: public tbb::empty_task { - char space[N]; - UnconstructibleTask() { - throw ConstructionFailure(); - } -}; - -#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED - #pragma warning (pop) -#endif - -#define TRY_BAD_CONSTRUCTION(x) \ - { \ - try { \ - new(x) UnconstructibleTask<N>; \ - } catch( const ConstructionFailure& ) { \ - ASSERT( parent()==original_parent, NULL ); \ - ASSERT( ref_count()==original_ref_count, "incorrectly changed ref_count" );\ - ++TestUnconstructibleTaskCount; \ - } \ - } - -template<size_t N> -struct RootTaskForTestUnconstructibleTask: public tbb::task { - tbb::task* execute() { - tbb::task* original_parent = parent(); - ASSERT( original_parent!=NULL, NULL ); - int original_ref_count = ref_count(); - TRY_BAD_CONSTRUCTION( allocate_root() ); - TRY_BAD_CONSTRUCTION( allocate_child() ); - TRY_BAD_CONSTRUCTION( allocate_continuation() ); - TRY_BAD_CONSTRUCTION( allocate_additional_child_of(*this) ); - return NULL; - } -}; - -template<size_t N> -void TestUnconstructibleTask() { - TestUnconstructibleTaskCount = 0; - tbb::task_scheduler_init init; - tbb::task* t = new( tbb::task::allocate_root() ) RootTaskForTestUnconstructibleTask<N>; - tbb::task::spawn_root_and_wait(*t); - ASSERT( TestUnconstructibleTaskCount==4, NULL ); -} -#endif /* TBB_USE_EXCEPTIONS */ - -//------------------------------------------------------------------------ -// Test for alignment problems with task objects. -//------------------------------------------------------------------------ - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for pointless warning "structure was padded due to __declspec(align()) - #pragma warning (push) - #pragma warning (disable: 4324) -#endif - -//! Task with members of type T. -/** The task recursively creates tasks. */ -template<typename T> -class TaskWithMember: public tbb::task { - T x; - T y; - unsigned char count; - /*override*/ tbb::task* execute() { - x = y; - if( count>0 ) { - set_ref_count(2); - tbb::task* t = new( allocate_child() ) TaskWithMember<T>(count-1); - spawn_and_wait_for_all(*t); - } - return NULL; - } -public: - TaskWithMember( unsigned char n ) : count(n) {} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif - -template<typename T> -void TestAlignmentOfOneClass() { - typedef TaskWithMember<T> task_type; - tbb::task* t = new( tbb::task::allocate_root() ) task_type(10); - tbb::task::spawn_root_and_wait(*t); -} - -#include "harness_m128.h" - -void TestAlignment() { - REMARK("testing alignment\n"); - tbb::task_scheduler_init init; - // Try types that have variety of alignments - TestAlignmentOfOneClass<char>(); - TestAlignmentOfOneClass<short>(); - TestAlignmentOfOneClass<int>(); - TestAlignmentOfOneClass<long>(); - TestAlignmentOfOneClass<void*>(); - TestAlignmentOfOneClass<float>(); - TestAlignmentOfOneClass<double>(); -#if HAVE_m128 - TestAlignmentOfOneClass<__m128>(); -#endif -#if HAVE_m256 - if (have_AVX()) TestAlignmentOfOneClass<__m256>(); -#endif -} - -//------------------------------------------------------------------------ -// Test for recursing on left while spawning on right -//------------------------------------------------------------------------ - -int Fib( int n ); - -struct RightFibTask: public tbb::task { - int* y; - const int n; - RightFibTask( int* y_, int n_ ) : y(y_), n(n_) {} - task* execute() { - *y = Fib(n-1); - return 0; - } -}; - -int Fib( int n ) { - if( n<2 ) { - return n; - } else { - // y actually does not need to be initialized. 
It is initialized solely to suppress - // a gratuitous warning "potentially uninitialized local variable". - int y=-1; - tbb::task* root_task = new( tbb::task::allocate_root() ) tbb::empty_task; - root_task->set_ref_count(2); - tbb::task::spawn( *new( root_task->allocate_child() ) RightFibTask(&y,n) ); - int x = Fib(n-2); - root_task->wait_for_all(); - tbb::task::destroy(*root_task); - return y+x; - } -} - -void TestLeftRecursion( int p ) { - REMARK("testing non-spawned roots for %d threads\n",p); - tbb::task_scheduler_init init(p); - int sum = 0; - for( int i=0; i<100; ++i ) - sum +=Fib(10); - ASSERT( sum==5500, NULL ); -} - -//------------------------------------------------------------------------ -// Test for computing with DAG of tasks. -//------------------------------------------------------------------------ - -class DagTask: public tbb::task { - typedef unsigned long long number_t; - const int i, j; - number_t sum_from_left, sum_from_above; - void check_sum( number_t sum ) { - number_t expected_sum = 1; - for( int k=i+1; k<=i+j; ++k ) - expected_sum *= k; - for( int k=1; k<=j; ++k ) - expected_sum /= k; - ASSERT(sum==expected_sum, NULL); - } -public: - DagTask *successor_to_below, *successor_to_right; - DagTask( int i_, int j_ ) : i(i_), j(j_), sum_from_left(0), sum_from_above(0) {} - task* execute() { - ASSERT( ref_count()==0, NULL ); - number_t sum = i==0 && j==0 ? 1 : sum_from_left+sum_from_above; - check_sum(sum); - ++execution_count; - if( DagTask* t = successor_to_right ) { - t->sum_from_left = sum; - if( t->decrement_ref_count()==0 ) - // Test using spawn to evaluate DAG - spawn( *t ); - } - if( DagTask* t = successor_to_below ) { - t->sum_from_above = sum; - if( t->decrement_ref_count()==0 ) - // Test using bypass to evaluate DAG - return t; - } - return NULL; - } - ~DagTask() {++destruction_count;} - static tbb::atomic<int> execution_count; - static tbb::atomic<int> destruction_count; -}; - -tbb::atomic<int> DagTask::execution_count; -tbb::atomic<int> DagTask::destruction_count; - -void TestDag( int p ) { - REMARK("testing evaluation of DAG for %d threads\n",p); - tbb::task_scheduler_init init(p); - DagTask::execution_count=0; - DagTask::destruction_count=0; - const int n = 10; - DagTask* a[n][n]; - for( int i=0; i<n; ++i ) - for( int j=0; j<n; ++j ) - a[i][j] = new( tbb::task::allocate_root() ) DagTask(i,j); - for( int i=0; i<n; ++i ) - for( int j=0; j<n; ++j ) { - a[i][j]->successor_to_below = i+1<n ? a[i+1][j] : NULL; - a[i][j]->successor_to_right = j+1<n ? 
a[i][j+1] : NULL; - a[i][j]->set_ref_count((i>0)+(j>0)); - } - a[n-1][n-1]->increment_ref_count(); - a[n-1][n-1]->spawn_and_wait_for_all(*a[0][0]); - ASSERT( DagTask::execution_count == n*n - 1, NULL ); - tbb::task::destroy(*a[n-1][n-1]); - ASSERT( DagTask::destruction_count > n*n - p, NULL ); - while ( DagTask::destruction_count != n*n ) - __TBB_Yield(); -} - -#include "harness_barrier.h" - -class RelaxedOwnershipTask: public tbb::task { - tbb::task &m_taskToSpawn, - &m_taskToDestroy, - &m_taskToExecute; - static Harness::SpinBarrier m_barrier; - - tbb::task* execute () { - tbb::task &p = *parent(); - tbb::task &r = *new( allocate_root() ) tbb::empty_task; - r.set_ref_count( 1 ); - m_barrier.wait(); - p.spawn( *new(p.allocate_child()) tbb::empty_task ); - p.spawn( *new(task::allocate_additional_child_of(p)) tbb::empty_task ); - p.spawn( m_taskToSpawn ); - p.destroy( m_taskToDestroy ); - r.spawn_and_wait_for_all( m_taskToExecute ); - p.destroy( r ); - return NULL; - } -public: - RelaxedOwnershipTask ( tbb::task& toSpawn, tbb::task& toDestroy, tbb::task& toExecute ) - : m_taskToSpawn(toSpawn) - , m_taskToDestroy(toDestroy) - , m_taskToExecute(toExecute) - {} - static void SetBarrier ( int numThreads ) { m_barrier.initialize( numThreads ); } -}; - -Harness::SpinBarrier RelaxedOwnershipTask::m_barrier; - -void TestRelaxedOwnership( int p ) { - if ( p < 2 ) - return; - - if( unsigned(p)>tbb::tbb_thread::hardware_concurrency() ) - return; - - REMARK("testing tasks exercising relaxed ownership freedom for %d threads\n", p); - tbb::task_scheduler_init init(p); - RelaxedOwnershipTask::SetBarrier(p); - tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - tbb::task_list tl; - for ( int i = 0; i < p; ++i ) { - tbb::task &tS = *new( r.allocate_child() ) tbb::empty_task, - &tD = *new( r.allocate_child() ) tbb::empty_task, - &tE = *new( r.allocate_child() ) tbb::empty_task; - tl.push_back( *new( r.allocate_child() ) RelaxedOwnershipTask(tS, tD, tE) ); - } - r.set_ref_count( 5 * p + 1 ); - int k=0; - GetTaskPtr(k)->spawn( tl ); - ASSERT(k==1,NULL); - r.wait_for_all(); - r.destroy( r ); -} - -//------------------------------------------------------------------------ -// Test for running TBB scheduler on user-created thread. -//------------------------------------------------------------------------ - -void RunSchedulerInstanceOnUserThread( int n_child ) { - tbb::task* e = new( tbb::task::allocate_root() ) tbb::empty_task; - e->set_ref_count(1+n_child); - for( int i=0; i<n_child; ++i ) - tbb::task::spawn( *new(e->allocate_child()) tbb::empty_task ); - e->wait_for_all(); - e->destroy(*e); -} - -void TestUserThread( int p ) { - tbb::task_scheduler_init init(p); - // Try with both 0 and 1 children. Only the latter scenario permits stealing. 
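// (n_child == 0: the dispatch loop on the user thread never spawns anything,
// so there is nothing to steal; n_child == 1: the spawned empty_task may be
// stolen by a worker while the user thread waits in wait_for_all().)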
- for( int n_child=0; n_child<2; ++n_child ) { - tbb::tbb_thread t( RunSchedulerInstanceOnUserThread, n_child ); - t.join(); - } -} - -class TaskWithChildToSteal : public tbb::task { - const int m_Depth; - volatile bool m_GoAhead; - -public: - TaskWithChildToSteal( int depth_ ) - : m_Depth(depth_) - , m_GoAhead(false) - {} - - /*override*/ - tbb::task* execute() { - m_GoAhead = true; - if ( m_Depth > 0 ) { - TaskWithChildToSteal &t = *new( allocate_child() ) TaskWithChildToSteal(m_Depth - 1); - t.SpawnAndWaitOnParent(); - } - else - Harness::Sleep(50); // The last task in chain sleeps for 50 ms - return NULL; - } - - void SpawnAndWaitOnParent() { - parent()->set_ref_count( 2 ); - parent()->spawn( *this ); - while (!this->m_GoAhead ) - __TBB_Yield(); - parent()->wait_for_all(); - } -}; // TaskWithChildToSteal - -// Success criterion of this test is not hanging -void TestDispatchLoopResponsiveness() { - REMARK("testing that dispatch loops do not go into eternal sleep when all remaining children are stolen\n"); - // Recursion depth values test the following sorts of dispatch loops - // 0 - master's outermost - // 1 - worker's nested - // 2 - master's nested - tbb::task_scheduler_init init(2); - tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - for ( int depth = 0; depth < 3; ++depth ) { - TaskWithChildToSteal &t = *new( r.allocate_child() ) TaskWithChildToSteal(depth); - t.SpawnAndWaitOnParent(); - } - r.destroy(r); -} - -void TestWaitDiscriminativenessWithoutStealing() { - REMARK( "testing that task::wait_for_all is specific to the root it is called on (no workers)\n" ); - // The test relies on the strict LIFO scheduling order in the absence of workers - tbb::task_scheduler_init init(1); - tbb::task &r1 = *new( tbb::task::allocate_root() ) tbb::empty_task; - tbb::task &r2 = *new( tbb::task::allocate_root() ) tbb::empty_task; - const int NumChildren = 10; - r1.set_ref_count( NumChildren + 1 ); - r2.set_ref_count( NumChildren + 1 ); - for( int i=0; i < NumChildren; ++i ) { - tbb::empty_task &t1 = *new( r1.allocate_child() ) tbb::empty_task; - tbb::empty_task &t2 = *new( r2.allocate_child() ) tbb::empty_task; - tbb::task::spawn(t1); - tbb::task::spawn(t2); - } - r2.wait_for_all(); - ASSERT( r2.ref_count() <= 1, "Not all children of r2 executed" ); - ASSERT( r1.ref_count() > 1, "All children of r1 prematurely executed" ); - r1.wait_for_all(); - ASSERT( r1.ref_count() <= 1, "Not all children of r1 executed" ); - r1.destroy(r1); - r2.destroy(r2); -} - - -using tbb::internal::spin_wait_until_eq; - -//! 
Deterministic emulation of a long running task -class LongRunningTask : public tbb::task { - volatile bool& m_CanProceed; - - tbb::task* execute() { - spin_wait_until_eq( m_CanProceed, true ); - return NULL; - } -public: - LongRunningTask ( volatile bool& canProceed ) : m_CanProceed(canProceed) {} -}; - -void TestWaitDiscriminativenessWithStealing() { - if( tbb::tbb_thread::hardware_concurrency() < 2 ) - return; - REMARK( "testing that task::wait_for_all is specific to the root it is called on (one worker)\n" ); - volatile bool canProceed = false; - tbb::task_scheduler_init init(2); - tbb::task &r1 = *new( tbb::task::allocate_root() ) tbb::empty_task; - tbb::task &r2 = *new( tbb::task::allocate_root() ) tbb::empty_task; - r1.set_ref_count( 2 ); - r2.set_ref_count( 2 ); - tbb::task& t1 = *new( r1.allocate_child() ) tbb::empty_task; - tbb::task& t2 = *new( r2.allocate_child() ) LongRunningTask(canProceed); - tbb::task::spawn(t2); - tbb::task::spawn(t1); - r1.wait_for_all(); - ASSERT( r1.ref_count() <= 1, "Not all children of r1 executed" ); - ASSERT( r2.ref_count() == 2, "All children of r2 prematurely executed" ); - canProceed = true; - r2.wait_for_all(); - ASSERT( r2.ref_count() <= 1, "Not all children of r2 executed" ); - r1.destroy(r1); - r2.destroy(r2); -} - -struct MasterBody : NoAssign, Harness::NoAfterlife { - static Harness::SpinBarrier my_barrier; - - class BarrenButLongTask : public tbb::task { - volatile bool& m_Started; - volatile bool& m_CanProceed; - - tbb::task* execute() { - m_Started = true; - spin_wait_until_eq( m_CanProceed, true ); - volatile int k = 0; - for ( int i = 0; i < 1000000; ++i ) ++k; - return NULL; - } - public: - BarrenButLongTask ( volatile bool& started, volatile bool& can_proceed ) - : m_Started(started), m_CanProceed(can_proceed) - {} - }; - - class BinaryRecursiveTask : public tbb::task { - int m_Depth; - - tbb::task* execute() { - if( !m_Depth ) - return NULL; - set_ref_count(3); - spawn( *new( allocate_child() ) BinaryRecursiveTask(m_Depth - 1) ); - spawn( *new( allocate_child() ) BinaryRecursiveTask(m_Depth - 1) ); - wait_for_all(); - return NULL; - } - - void note_affinity( affinity_id ) { - ASSERT( false, "These tasks cannot be stolen" ); - } - public: - BinaryRecursiveTask ( int depth_ ) : m_Depth(depth_) {} - }; - - void operator() ( int id ) const { - if ( id ) { - tbb::task_scheduler_init init(2); - volatile bool child_started = false, - can_proceed = false; - tbb::task& r = *new( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count(2); - r.spawn( *new(r.allocate_child()) BarrenButLongTask(child_started, can_proceed) ); - spin_wait_until_eq( child_started, true ); - my_barrier.wait(); - can_proceed = true; - r.wait_for_all(); - r.destroy(r); - } - else { - my_barrier.wait(); - tbb::task_scheduler_init init(1); - Count = 0; - int depth = 16; - BinaryRecursiveTask& r = *new( tbb::task::allocate_root() ) BinaryRecursiveTask(depth); - tbb::task::spawn_root_and_wait(r); - } - } -public: - MasterBody ( int num_masters ) { my_barrier.initialize(num_masters); } -}; - -Harness::SpinBarrier MasterBody::my_barrier; - -/** Ensures that tasks spawned by a master thread or one of the workers servicing - it cannot be stolen by another master thread. 
**/ -void TestMastersIsolation ( int p ) { - // The test requires at least 3-way parallelism to work correctly - if ( p > 2 && tbb::task_scheduler_init::default_num_threads() >= p ) { - tbb::task_scheduler_init init(p); - NativeParallelFor( p, MasterBody(p) ); - } -} - -int TestMain () { -#if TBB_USE_EXCEPTIONS - TestUnconstructibleTask<1>(); - TestUnconstructibleTask<10000>(); -#endif - TestAlignment(); - TestNoteAffinityContext(); - TestDispatchLoopResponsiveness(); - TestWaitDiscriminativenessWithoutStealing(); - TestWaitDiscriminativenessWithStealing(); - for( int p=MinThread; p<=MaxThread; ++p ) { - TestSpawnChildren( p ); - TestSpawnRootList( p ); - TestSafeContinuation( p ); - TestLeftRecursion( p ); - TestDag( p ); - TestAffinity( p ); - TestUserThread( p ); - TestStealLimit( p ); - TestRelaxedOwnership( p ); - TestMastersIsolation( p ); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task_arena.cpp b/src/tbb/src/test/test_task_arena.cpp deleted file mode 100644 index a889c9c01..000000000 --- a/src/tbb/src/test/test_task_arena.cpp +++ /dev/null @@ -1,485 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// undefine __TBB_CPF_BUILD to simulate user's setup -#undef __TBB_CPF_BUILD - -#define TBB_PREVIEW_LOCAL_OBSERVER 1 -#define __TBB_EXTRA_DEBUG 1 - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <stdexcept> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include <cstdlib> -#include <cstdio> - -#include "harness_fp.h" - -#include "tbb/task_arena.h" -#include "tbb/task_scheduler_observer.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" -#include "tbb/enumerable_thread_specific.h" - -#include "harness_assert.h" -#include "harness.h" -#include "harness_barrier.h" - -#if _MSC_VER -// plays around __TBB_NO_IMPLICIT_LINKAGE. __TBB_LIB_NAME should be defined (in makefiles) - #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) -#endif - -//! Test that task_arena::initialize and task_arena::terminate work when doing nothing else. 
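// [Editor's note -- annotation, not part of the deleted file.] Unlike the
// low-level tbb::task API, tbb::task_arena survived the oneTBB transition
// with this lifecycle intact. A minimal modern sketch of the same checks,
// assuming oneTBB headers are reachable as <tbb/...>:

#include <cassert>
#include <tbb/task_arena.h>

int main() {
    tbb::task_arena arena(4);    // constructed inactive; 4 is an arbitrary cap
    assert(!arena.is_active());
    arena.initialize();          // explicit initialization
    assert(arena.is_active());
    arena.terminate();           // explicit termination
    assert(!arena.is_active());
    return 0;
}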
-/** maxthread is treated as the biggest possible concurrency level. */ -void InitializeAndTerminate( int maxthread ) { - __TBB_TRY { - for( int i=0; i<200; ++i ) { - switch( i&3 ) { - // Arena is created inactive, initialization is always explicit. Lazy initialization is covered by other test functions. - // Explicit initialization can either keep the original values or change those. - // Arena termination can be explicit or implicit (in the destructor). - // TODO: extend with concurrency level checks if such a method is added. - // TODO: test for different master slot reservation values (perhaps in another function) - default: { - tbb::task_arena arena( std::rand() % maxthread + 1 ); - ASSERT(!arena.is_active(), "arena should not be active until initialized"); - arena.initialize(); - ASSERT(arena.is_active(), NULL); - arena.terminate(); - ASSERT(!arena.is_active(), "arena should not be active; it was terminated"); - break; - } - case 0: { - tbb::task_arena arena( 1 ); - ASSERT(!arena.is_active(), "arena should not be active until initialized"); - arena.initialize( std::rand() % maxthread + 1 ); // change the parameters - ASSERT(arena.is_active(), NULL); - break; - } - case 1: { - tbb::task_arena arena( tbb::task_arena::automatic ); - ASSERT(!arena.is_active(), NULL); - arena.initialize(); - ASSERT(arena.is_active(), NULL); - break; - } - case 2: { - tbb::task_arena arena; - ASSERT(!arena.is_active(), "arena should not be active until initialized"); - arena.initialize( std::rand() % maxthread + 1 ); - ASSERT(arena.is_active(), NULL); - arena.terminate(); - ASSERT(!arena.is_active(), "arena should not be active; it was terminated"); - break; - } - } - } - } __TBB_CATCH( std::runtime_error& error ) { -#if TBB_USE_EXCEPTIONS - REPORT("ERROR: %s\n", error.what() ); -#endif /* TBB_USE_EXCEPTIONS */ - } -} - -typedef tbb::blocked_range<int> Range; - -Harness::SpinBarrier our_barrier; - -static tbb::enumerable_thread_specific<int> local_id, old_id, slot_id(-1); -void ResetTLS() { - local_id.clear(); - old_id.clear(); - slot_id.clear(); -} - -class ConcurrencyTrackingBody { -public: - void operator() ( const Range& ) const { - ASSERT(slot_id.local() == tbb::task_arena::current_thread_index(), NULL); - for ( volatile int i = 0; i < 50000; ++i ) - ; - } -}; - -class ArenaObserver : public tbb::task_scheduler_observer { - int myId; - /*override*/ - void on_scheduler_entry( bool is_worker ) { - REMARK("a %s #%p is entering arena %d from %d on slot %d\n", is_worker?"worker":"master", - &local_id.local(), myId, local_id.local(), - tbb::task_arena::current_thread_index()); - ASSERT(!old_id.local(), "double-call to on_scheduler_entry"); - old_id.local() = local_id.local(); - ASSERT(old_id.local() != myId, "double-entry to the same arena"); - local_id.local() = myId; - slot_id.local() = tbb::task_arena::current_thread_index(); - if(is_worker) ASSERT(tbb::task_arena::current_thread_index()>0, NULL); - else ASSERT(tbb::task_arena::current_thread_index()==0, NULL); - } - /*override*/ - void on_scheduler_exit( bool is_worker ) { - REMARK("a %s #%p is leaving arena %d to %d\n", is_worker?"worker":"master", - &local_id.local(), myId, old_id.local()); - ASSERT(local_id.local() == myId, "nesting of arenas is broken"); - ASSERT(slot_id.local() == tbb::task_arena::current_thread_index(), NULL); - slot_id.local() = -1; - local_id.local() = old_id.local(); - old_id.local() = 0; - } -public: - ArenaObserver(tbb::task_arena &a, int id) : tbb::task_scheduler_observer(a) { - ASSERT(id, NULL); - myId = id; - 
observe(true); - } - ~ArenaObserver () { - ASSERT(!old_id.local(), "inconsistent observer state"); - } -}; - -struct AsynchronousWork : NoAssign { - Harness::SpinBarrier &my_barrier; - bool my_is_blocking; - AsynchronousWork(Harness::SpinBarrier &a_barrier, bool blocking = true) - : my_barrier(a_barrier), my_is_blocking(blocking) {} - void operator()() const { - ASSERT(local_id.local() != 0, "not in explicit arena"); - tbb::parallel_for(Range(0,500), ConcurrencyTrackingBody(), tbb::simple_partitioner(), *tbb::task::self().group()); - if(my_is_blocking) my_barrier.timed_wait(10); // must be asynchronous to master thread - else my_barrier.signal_nowait(); - } -}; - -void TestConcurrentArenas(int p) { - //Harness::ConcurrencyTracker::Reset(); - tbb::task_arena a1; - a1.initialize(1,0); - ArenaObserver o1(a1, p*2+1); - tbb::task_arena a2(2,1); - ArenaObserver o2(a2, p*2+2); - Harness::SpinBarrier barrier(2); - AsynchronousWork work(barrier); - a1.enqueue(work); // put async work - barrier.timed_wait(10); - a2.enqueue(work); // another work - a2.execute(work); // my_barrier.timed_wait(10) inside - a1.debug_wait_until_empty(); - a2.debug_wait_until_empty(); -} - -class MultipleMastersBody : NoAssign { - tbb::task_arena &my_a; - Harness::SpinBarrier &my_b1, &my_b2; -public: - MultipleMastersBody( tbb::task_arena &a, Harness::SpinBarrier &b1, Harness::SpinBarrier &b2) - : my_a(a), my_b1(b1), my_b2(b2) {} - void operator()(int) const { - my_a.execute(AsynchronousWork(my_b2, /*blocking=*/false)); - my_b1.timed_wait(10); - // A regression test for bugs 1954 & 1971 - my_a.enqueue(AsynchronousWork(my_b2, /*blocking=*/false)); - } -}; - -class MultipleMastersPart2 : NoAssign { - tbb::task_arena &my_a; - Harness::SpinBarrier &my_b; -public: - MultipleMastersPart2( tbb::task_arena &a, Harness::SpinBarrier &b) : my_a(a), my_b(b) {} - void operator()(int) const { - my_a.execute(AsynchronousWork(my_b, /*blocking=*/false)); - } -}; - -class MultipleMastersPart3 : NoAssign { - tbb::task_arena &my_a; - Harness::SpinBarrier &my_b; - - struct Runner : NoAssign { - tbb::task* const a_task; - Runner(tbb::task* const t) : a_task(t) {} - void operator()() const { - for ( volatile int i = 0; i < 10000; ++i ) - ; - a_task->decrement_ref_count(); - } - }; - - struct Waiter : NoAssign { - tbb::task* const a_task; - Waiter(tbb::task* const t) : a_task(t) {} - void operator()() const { - a_task->wait_for_all(); - } - }; - -public: - MultipleMastersPart3(tbb::task_arena &a, Harness::SpinBarrier &b) - : my_a(a), my_b(b) {} - void operator()(int idx) const { - tbb::empty_task* root_task = new(tbb::task::allocate_root()) tbb::empty_task; - for( int i=0; i<100; ++i) { - root_task->set_ref_count(2); - my_a.enqueue(Runner(root_task)); - my_a.execute(Waiter(root_task)); - } - tbb::task::destroy(*root_task); - REMARK("Master #%d: job completed, wait for others\n", idx); - my_b.timed_wait(10); - } -}; - -class MultipleMastersPart4 : NoAssign { - tbb::task_arena &my_a; - Harness::SpinBarrier &my_b; - tbb::task_group_context *my_ag; - - struct Getter : NoAssign { - tbb::task_group_context *& my_g; - Getter(tbb::task_group_context *&a_g) : my_g(a_g) {} - void operator()() const { - my_g = tbb::task::self().group(); - } - }; - struct Checker : NoAssign { - tbb::task_group_context *my_g; - Checker(tbb::task_group_context *a_g) : my_g(a_g) {} - void operator()() const { - ASSERT(my_g == tbb::task::self().group(), NULL); - tbb::task *t = new( tbb::task::allocate_root() ) tbb::empty_task; - ASSERT(my_g == t->group(), NULL); - 
tbb::task::destroy(*t); - } - }; - struct NestedChecker : NoAssign { - const MultipleMastersPart4 &my_body; - NestedChecker(const MultipleMastersPart4 &b) : my_body(b) {} - void operator()() const { - tbb::task_group_context *nested_g = tbb::task::self().group(); - ASSERT(my_body.my_ag != nested_g, NULL); - tbb::task *t = new( tbb::task::allocate_root() ) tbb::empty_task; - ASSERT(nested_g == t->group(), NULL); - tbb::task::destroy(*t); - my_body.my_a.enqueue(Checker(my_body.my_ag)); - } - }; -public: - MultipleMastersPart4( tbb::task_arena &a, Harness::SpinBarrier &b) : my_a(a), my_b(b) { - my_a.execute(Getter(my_ag)); - } - // NativeParallelFor's functor - void operator()(int) const { - my_a.execute(*this); - } - // Arena's functor - void operator()() const { - Checker check(my_ag); - check(); - tbb::task_arena nested(1,1); - nested.execute(NestedChecker(*this)); // change arena - tbb::parallel_for(tbb::blocked_range<int>(0,1),*this); // change group context only - my_b.timed_wait(10); - my_a.execute(check); - check(); - } - // parallel_for's functor - void operator()(const tbb::blocked_range<int> &) const { - NestedChecker(*this)(); - my_a.execute(Checker(my_ag)); // restore arena context - } -}; - -void TestMultipleMasters(int p) { - { - REMARK("multiple masters, part 1\n"); - tbb::task_arena a(1,0); - a.initialize(); - ArenaObserver o(a, 1); - Harness::SpinBarrier barrier1(p), barrier2(2*p+1); // each of p threads will submit two tasks signaling the barrier - NativeParallelFor( p, MultipleMastersBody(a, barrier1, barrier2) ); - barrier2.timed_wait(10); - a.debug_wait_until_empty(); - } { - REMARK("multiple masters, part 2\n"); - tbb::task_arena a(2,1); - ArenaObserver o(a, 2); - Harness::SpinBarrier barrier(p+2); - a.enqueue(AsynchronousWork(barrier, /*blocking=*/true)); // occupy the worker, a regression test for bug 1981 - NativeParallelFor( p, MultipleMastersPart2(a, barrier) ); - barrier.timed_wait(10); - a.debug_wait_until_empty(); - } { - // Regression test for the bug 1981 part 2 (task_arena::execute() with wait_for_all for an enqueued task) - REMARK("multiple masters, part 3: wait_for_all() in execute()\n"); - tbb::task_arena a(p,1); - Harness::SpinBarrier barrier(p+1); // for masters to avoid endless waiting at least in some runs - // "Oversubscribe" the arena by 1 master thread - NativeParallelFor( p+1, MultipleMastersPart3(a, barrier) ); - a.debug_wait_until_empty(); - } { - int c = p%3? (p%2? 
p : 2) : 3; - REMARK("multiple masters, part 4: contexts, arena(%d)\n", c); - tbb::task_arena a(c, 1); - ArenaObserver o(a, c); - Harness::SpinBarrier barrier(c); - MultipleMastersPart4 test(a, barrier); - NativeParallelFor(p, test); - a.debug_wait_until_empty(); - } -} - -#include <sstream> -#if TBB_USE_EXCEPTIONS -#include <stdexcept> -#include "tbb/tbb_exception.h" -#endif - -struct TestArenaEntryBody : FPModeContext { - tbb::atomic<int> &my_stage; // each execute increases it - std::stringstream my_id; - bool is_caught, is_expected; - enum { arenaFPMode = 1 }; - - TestArenaEntryBody(tbb::atomic<int> &s, int idx, int i) // init thread-specific instance - : FPModeContext(idx+i) - , my_stage(s) - , is_caught(false) - , is_expected( (idx&(1<<i)) != 0 && (TBB_USE_EXCEPTIONS) != 0 ) - { - my_id << idx << ':' << i << '@'; - } - void operator()() { // inside task_arena::execute() - // synchronize with other stages - int stage = my_stage++; - int slot = tbb::task_arena::current_thread_index(); - ASSERT(slot >= 0 && slot <= 1, "master or the only worker"); - // wait until the third stage is delegated and then starts on slot 0 - while(my_stage < 2+slot) __TBB_Yield(); - // deduct its entry type and put it into id, it helps to find source of a problem - my_id << (stage < 3 ? (tbb::task_arena::current_thread_index()? - "delegated_to_worker" : stage < 2? "direct" : "delegated_to_master") - : stage == 3? "nested_same_ctx" : "nested_alien_ctx"); - REMARK("running %s\n", my_id.str().c_str()); - AssertFPMode(arenaFPMode); - if(is_expected) - __TBB_THROW(std::logic_error(my_id.str())); - // no code can be put here since exceptions can be thrown - } - void on_exception(const char *e) { // outside arena, in catch block - is_caught = true; - REMARK("caught %s\n", e); - ASSERT(my_id.str() == e, NULL); - assertFPMode(); - } - void after_execute() { // outside arena and catch block - REMARK("completing %s\n", my_id.str().c_str() ); - ASSERT(is_caught == is_expected, NULL); - assertFPMode(); - } -}; - -class ForEachArenaEntryBody : NoAssign { - tbb::task_arena &my_a; // expected task_arena(2,1) - tbb::atomic<int> &my_stage; // each execute increases it - int my_idx; - -public: - ForEachArenaEntryBody(tbb::task_arena &a, tbb::atomic<int> &c) - : my_a(a), my_stage(c), my_idx(0) {} - - void test(int idx) { - my_idx = idx; - my_stage = 0; - NativeParallelFor(3, *this); // test cross-arena calls - ASSERT(my_stage == 3, NULL); - my_a.execute(*this); // test nested calls - ASSERT(my_stage == 5, NULL); - } - - // task_arena functor for nested tests - void operator()() const { - test_arena_entry(3); // in current task group context - tbb::parallel_for(4, 5, *this); // in different context - } - - // NativeParallelFor & parallel_for functor - void operator()(int i) const { - test_arena_entry(i); - } - -private: - void test_arena_entry(int i) const { - TestArenaEntryBody scoped_functor(my_stage, my_idx, i); - __TBB_TRY { - my_a.execute(scoped_functor); - } -#if TBB_USE_EXCEPTIONS - catch(tbb::captured_exception &e) { - scoped_functor.on_exception(e.what()); - ASSERT_WARNING(TBB_USE_CAPTURED_EXCEPTION, "Caught captured_exception while expecting exact one"); - } catch(std::logic_error &e) { - scoped_functor.on_exception(e.what()); - ASSERT(!TBB_USE_CAPTURED_EXCEPTION, "Caught exception of wrong type"); - } catch(...) 
{ ASSERT(false, "Unexpected exception type"); } -#endif //TBB_USE_EXCEPTIONS - scoped_functor.after_execute(); - } -}; - -void TestArenaEntryConsistency() { - REMARK("test arena entry consistency\n" ); - - tbb::task_arena a(2,1); - tbb::atomic<int> c; - ForEachArenaEntryBody body(a, c); - - FPModeContext fp_scope(TestArenaEntryBody::arenaFPMode); - a.initialize(); // capture FP settings to arena - fp_scope.setNextFPMode(); - - for(int i = 0; i < 100; i++) // not less than 32 = 2^5 of entry types - body.test(i); -} - -int TestMain () { - // TODO: a workaround for temporary p-1 issue in market - tbb::task_scheduler_init init_market_p_plus_one(MaxThread+1); - InitializeAndTerminate(MaxThread); - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK("testing with %d threads\n", p ); - NativeParallelFor( p, &TestConcurrentArenas ); - ResetTLS(); - TestMultipleMasters( p ); - ResetTLS(); - } - TestArenaEntryConsistency(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task_assertions.cpp b/src/tbb/src/test/test_task_assertions.cpp deleted file mode 100644 index c2be058de..000000000 --- a/src/tbb/src/test/test_task_assertions.cpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test correctness of forceful TBB initialization before any dynamic initialization -// of static objects inside the library took place. -namespace tbb { -namespace internal { - // Forward declaration of the TBB general initialization routine from task.cpp - void DoOneTimeInitializations(); -}} - -struct StaticInitializationChecker { - StaticInitializationChecker () { tbb::internal::DoOneTimeInitializations(); } -} theChecker; - -//------------------------------------------------------------------------ -// Test that important assertions in class task fail as expected. -//------------------------------------------------------------------------ - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness_inject_scheduler.h" -#include "harness.h" -#include "harness_bad_expr.h" - -#if TRY_BAD_EXPR_ENABLED -//! Task that will be abused. -tbb::task* volatile AbusedTask; - -//! Number of times that AbuseOneTask -int AbuseOneTaskRan; - -//! Body used to create task in thread 0 and abuse it in thread 1. 
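// [Editor's note -- annotation, not part of the deleted file.] The harness's
// TRY_BAD_EXPR relies on tbb::set_assertion_handler(), which oneTBB dropped.
// Below is a minimal self-contained sketch of the underlying technique (all
// names hypothetical): route assertion failures through a hook that throws,
// then verify that a deliberate misuse really trips the expected assertion.

#include <functional>
#include <stdexcept>
#include <string>

static std::function<void(const char*)> g_on_assert = [](const char* msg) {
    throw std::logic_error(msg);       // throwing makes the failure observable
};

#define MY_ASSERT(cond, msg) do { if (!(cond)) g_on_assert(msg); } while (0)

// Returns true iff `bad` triggers an assertion mentioning `expected`.
static bool TryBadExpr(const std::function<void()>& bad, const std::string& expected) {
    try { bad(); }
    catch (const std::logic_error& e) {
        return std::string(e.what()).find(expected) != std::string::npos;
    }
    return false;                      // the guard did not fire
}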
-struct AbuseOneTask { - void operator()( int ) const { - tbb::task_scheduler_init init; - // Thread 1 attempts to incorrectly use the task created by thread 0. - tbb::task_list list; - // spawn_root_and_wait over empty list should vacuously succeed. - tbb::task::spawn_root_and_wait(list); - - // Check that spawn_root_and_wait fails on non-empty list. - list.push_back(*AbusedTask); - - // Try abusing recycle_as_continuation - TRY_BAD_EXPR(AbusedTask->recycle_as_continuation(), "execute" ); - TRY_BAD_EXPR(AbusedTask->recycle_as_safe_continuation(), "execute" ); - TRY_BAD_EXPR(AbusedTask->recycle_to_reexecute(), "execute" ); - ++AbuseOneTaskRan; - } -}; - -//! Test various __TBB_ASSERT assertions related to class tbb::task. -void TestTaskAssertions() { - // Catch assertion failures - tbb::set_assertion_handler( AssertionFailureHandler ); - tbb::task_scheduler_init init; - // Create task to be abused - AbusedTask = new( tbb::task::allocate_root() ) tbb::empty_task; - NativeParallelFor( 1, AbuseOneTask() ); - ASSERT( AbuseOneTaskRan==1, NULL ); - tbb::task::destroy(*AbusedTask); - // Restore normal assertion handling - tbb::set_assertion_handler( ReportError ); -} - -int TestMain () { - TestTaskAssertions(); - return Harness::Done; -} - -#else /* !TRY_BAD_EXPR_ENABLED */ - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !TRY_BAD_EXPR_ENABLED */ diff --git a/src/tbb/src/test/test_task_auto_init.cpp b/src/tbb/src/test/test_task_auto_init.cpp deleted file mode 100644 index af6c8d9b6..000000000 --- a/src/tbb/src/test/test_task_auto_init.cpp +++ /dev/null @@ -1,202 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Testing automatic initialization of TBB task scheduler, so do not use task_scheduler_init anywhere. 
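// [Editor's note -- annotation, not part of the deleted file.] In oneTBB,
// task_scheduler_init no longer exists, so auto-initialization is the only
// mode: the first parallel construct spins up the runtime, and
// tbb::global_control is the supported way to cap concurrency. A minimal
// sketch, assuming oneTBB headers are reachable as <tbb/...>:

#include <tbb/global_control.h>
#include <tbb/parallel_for.h>

int main() {
    // Optionally bound the implicitly created scheduler (4 is arbitrary).
    tbb::global_control gc(tbb::global_control::max_allowed_parallelism, 4);
    // This first call auto-initializes the task scheduler.
    tbb::parallel_for(0, 100, [](int) { /* work */ });
    return 0;
}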
- -#include "tbb/task.h" - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -#include "tbb/atomic.h" - -static tbb::atomic<int> g_NumTestsExecuted; - -#define TEST_PROLOGUE() ++g_NumTestsExecuted - -// Global data used in testing use cases with cross-thread usage of TBB objects -static tbb::task *g_Root1 = NULL, - *g_Root2 = NULL, - *g_Root3 = NULL, - *g_Task = NULL; - -#if __TBB_TASK_GROUP_CONTEXT -static tbb::task_group_context* g_Ctx = NULL; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - -void TestTaskSelf () { - TEST_PROLOGUE(); - tbb::task& t = tbb::task::self(); - ASSERT( !t.parent() && t.ref_count() == 1 && !t.affinity(), "Master's default task properties changed?" ); -} - -void TestRootAllocation () { - TEST_PROLOGUE(); - tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - tbb::task::spawn_root_and_wait(r); -} - -inline void ExecuteChildAndCleanup ( tbb::task &r, tbb::task &t ) { - r.set_ref_count(2); - r.spawn_and_wait_for_all(t); - r.destroy(r); -} - -void TestChildAllocation () { - TEST_PROLOGUE(); - tbb::task &t = *new( g_Root1->allocate_child() ) tbb::empty_task; - ExecuteChildAndCleanup( *g_Root1, t ); -} - -void TestAdditionalChildAllocation () { - TEST_PROLOGUE(); - tbb::task &t = *new( tbb::task::allocate_additional_child_of(*g_Root2) ) tbb::empty_task; - ExecuteChildAndCleanup( *g_Root2, t ); -} - -#if __TBB_TASK_GROUP_CONTEXT -void TestTaskGroupContextCreation () { - TEST_PROLOGUE(); - tbb::task_group_context ctx; - tbb::task &r = *new( tbb::task::allocate_root(ctx) ) tbb::empty_task; - tbb::task::spawn_root_and_wait(r); -} - -void TestRootAllocationWithContext () { - TEST_PROLOGUE(); - tbb::task* root = new( tbb::task::allocate_root(*g_Ctx) ) tbb::empty_task; - tbb::task::spawn_root_and_wait(*root); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -void TestSpawn () { - TEST_PROLOGUE(); - tbb::task::spawn(*g_Task); -} - -void TestWaitForAll () { - TEST_PROLOGUE(); - g_Root3->wait_for_all(); - tbb::task::destroy(*g_Root3); -} - -typedef void (*TestFnPtr)(); - -const TestFnPtr TestFuncsTable[] = { - TestTaskSelf, TestRootAllocation, TestChildAllocation, TestAdditionalChildAllocation, -#if __TBB_TASK_GROUP_CONTEXT - TestTaskGroupContextCreation, TestRootAllocationWithContext, -#endif /* __TBB_TASK_GROUP_CONTEXT */ - TestSpawn, TestWaitForAll }; - -const int NumTestFuncs = sizeof(TestFuncsTable) / sizeof(TestFnPtr); - -struct TestThreadBody : NoAssign, Harness::NoAfterlife { - // Each invocation of operator() happens in a fresh thread with zero-based ID - // id, and checks a specific auto-initialization scenario. - void operator() ( int id ) const { - ASSERT( id >= 0 && id < NumTestFuncs, "Test diver: NativeParallelFor is used incorrectly" ); - TestFuncsTable[id](); - } -}; - - -#include "../tbb/tls.h" - -void UseAFewNewTlsKeys () { - tbb::internal::tls<intptr_t> tls1, tls2, tls3, tls4; - tls1 = tls2 = tls3 = tls4 = -1; -} - -using tbb::internal::spin_wait_until_eq; - -volatile bool FafStarted = false, - FafCanFinish = false, - FafCompleted = false; - -//! 
This task is supposed to be executed during termination of an auto-initialized master thread -class FireAndForgetTask : public tbb::task { - tbb::task* execute () { - // Let another master thread proceed requesting new TLS keys - FafStarted = true; - UseAFewNewTlsKeys(); - // Wait while another master thread dirtied its new TLS slots - spin_wait_until_eq( FafCanFinish, true ); - FafCompleted = true; - return NULL; - } -public: // to make gcc 3.2.3 happy - ~FireAndForgetTask() { - ASSERT(FafCompleted, "FireAndForgetTask got erroneously cancelled?"); - } -}; - -#include "harness_barrier.h" -Harness::SpinBarrier driver_barrier(2); - -struct DriverThreadBody : NoAssign, Harness::NoAfterlife { - void operator() ( int id ) const { - ASSERT( id < 2, "Only two test driver threads are expected" ); - // a barrier is required to ensure both threads started; otherwise the test may deadlock: - // the first thread would execute FireAndForgetTask at shutdown and wait for FafCanFinish, - // while the second thread wouldn't even start waiting for the loader lock hold by the first one. - if ( id == 0 ) { - driver_barrier.wait(); - // Prepare global data - g_Root1 = new( tbb::task::allocate_root() ) tbb::empty_task; - g_Root2 = new( tbb::task::allocate_root() ) tbb::empty_task; - g_Root3 = new( tbb::task::allocate_root() ) tbb::empty_task; - g_Task = new( g_Root3->allocate_child() ) tbb::empty_task; - g_Root3->set_ref_count(2); - // Run tests - NativeParallelFor( NumTestFuncs, TestThreadBody() ); - ASSERT( g_NumTestsExecuted == NumTestFuncs, "Test driver: Wrong number of tests executed" ); - - // This test checks the validity of temporarily restoring the value of - // the last TLS slot for a given key during the termination of an - // auto-initialized master thread (in governor::auto_terminate). - // If anything goes wrong, generic_scheduler::cleanup_master() will assert. - // The context for this task must be valid till the task completion. -#if __TBB_TASK_GROUP_CONTEXT - tbb::task &r = *new( tbb::task::allocate_root(*g_Ctx) ) FireAndForgetTask; -#else - tbb::task &r = *new( tbb::task::allocate_root() ) FireAndForgetTask; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - tbb::task::spawn(r); - } - else { -#if __TBB_TASK_GROUP_CONTEXT - tbb::task_group_context ctx; - g_Ctx = &ctx; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - driver_barrier.wait(); - spin_wait_until_eq( FafStarted, true ); - UseAFewNewTlsKeys(); - FafCanFinish = true; - spin_wait_until_eq( FafCompleted, true ); - } - } -}; - -int TestMain () { - // Do not use any TBB functionality in the main thread! - NativeParallelFor( 2, DriverThreadBody() ); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task_enqueue.cpp b/src/tbb/src/test/test_task_enqueue.cpp deleted file mode 100644 index cf68906a2..000000000 --- a/src/tbb/src/test/test_task_enqueue.cpp +++ /dev/null @@ -1,366 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_task.h" -#include "harness_barrier.h" -#include "tbb/atomic.h" -#include "tbb/tbb_thread.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" - -//////////////////////////////////////////////////////////////////////////////// -// Test for basic FIFO scheduling functionality - -const int PairsPerTrack = 100; - -class EnqueuedTask : public tbb::task { - task* my_successor; - int my_enqueue_order; - int* my_track; - tbb::task* execute() { - // Capture execution order in the very beginning - int execution_order = 2 - my_successor->decrement_ref_count(); - // Create some local work. - TaskGenerator& p = *new( allocate_root() ) TaskGenerator(2,2); - spawn_root_and_wait(p); - if( execution_order==2 ) { // the "slower" of two peer tasks - ++nCompletedPairs; - // Of course execution order can differ from dequeue order. - // But there is no better approximation at hand; and a single worker - // will execute in dequeue order, which is enough for our check. - if (my_enqueue_order==execution_order) - ++nOrderedPairs; - FireTwoTasks(my_track); - destroy(*my_successor); - } - return NULL; - } -public: - EnqueuedTask( task* successor, int enq_order, int* track ) - : my_successor(successor), my_enqueue_order(enq_order), my_track(track) {} - - // Create and enqueue two tasks - static void FireTwoTasks( int* track ) { - int progress = ++*track; - if( progress < PairsPerTrack ) { - task* successor = new (allocate_root()) tbb::empty_task; - successor->set_ref_count(2); - enqueue( *new (allocate_root()) EnqueuedTask(successor, 1, track) ); - enqueue( *new (allocate_root()) EnqueuedTask(successor, 2, track) ); - } - } - - static tbb::atomic<int> nCompletedPairs; - static tbb::atomic<int> nOrderedPairs; -}; - -tbb::atomic<int> EnqueuedTask::nCompletedPairs; -tbb::atomic<int> EnqueuedTask::nOrderedPairs; - -const int nTracks = 10; -static int TaskTracks[nTracks]; -const int stall_threshold = 1000000; // 1 sec - -void TimedYield( double pause_time ) { - tbb::tick_count start = tbb::tick_count::now(); - while( (tbb::tick_count::now()-start).seconds() < pause_time ) - tbb::this_tbb_thread::sleep(tbb::tick_count::interval_t(pause_time)); -} - -class ProgressMonitor { -public: - void operator() ( ) { - int track_snapshot[nTracks]; - int stall_count = 0, uneven_progress_count = 0, last_progress_mask = 0; - for(int i=0; i<nTracks; ++i) - track_snapshot[i]=0; - bool completed; - do { - // Yield repeatedly for at least 1 usec - TimedYield( 1E-6 ); - int overall_progress = 0, progress_mask = 0; - const int all_progressed = (1<<nTracks) - 1; - completed = true; - for(int i=0; i<nTracks; ++i) { - int ti = TaskTracks[i]; - int pi = ti-track_snapshot[i]; - if( pi ) progress_mask |= 1<<i; - overall_progress += pi; - completed = completed && 
ti==PairsPerTrack; - track_snapshot[i]=ti; - } - // The constants in the next asserts are subjective and may need correction. - if( overall_progress ) - stall_count=0; - else { - ++stall_count; - // no progress; consider it dead. - ASSERT(stall_count < stall_threshold, "no progress on enqueued tasks; deadlock, or the machine is heavily oversubscribed?"); - } - if( progress_mask==all_progressed || progress_mask^last_progress_mask ) { - uneven_progress_count = 0; - last_progress_mask = progress_mask; - } - else if ( overall_progress > 2 ) { - ++uneven_progress_count; - // The threshold of 32 is 4x bigger than what was observed on a 8-core machine with oversubscription. - ASSERT_WARNING(uneven_progress_count < 32, - "some enqueued tasks seem stalling; no simultaneous progress, or the machine is oversubscribed? Investigate if repeated"); - } - } while( !completed ); - } -}; - -void TestEnqueue( int p ) { - REMARK("Testing task::enqueue for %d threads\n", p); - for(int mode=0;mode<3;++mode) { - tbb::task_scheduler_init init(p); - EnqueuedTask::nCompletedPairs = EnqueuedTask::nOrderedPairs = 0; - for(int i=0; i<nTracks; ++i) { - TaskTracks[i] = -1; // to accomodate for the starting call - EnqueuedTask::FireTwoTasks(TaskTracks+i); - } - ProgressMonitor pm; - tbb::tbb_thread thr( pm ); - if(mode==1) { - // do some parallel work in the meantime - for(int i=0; i<10; i++) { - TaskGenerator& g = *new( tbb::task::allocate_root() ) TaskGenerator(2,5); - tbb::task::spawn_root_and_wait(g); - TimedYield( 1E-6 ); - } - } - if( mode==2 ) { - // Additionally enqueue a bunch of empty tasks. The goal is to test that tasks - // allocated and enqueued by a thread are safe to use after the thread leaves TBB. - tbb::task* root = new (tbb::task::allocate_root()) tbb::empty_task; - root->set_ref_count(100); - for( int i=0; i<100; ++i ) - tbb::task::enqueue( *new (root->allocate_child()) tbb::empty_task ); - init.terminate(); // master thread deregistered - } - thr.join(); - ASSERT(EnqueuedTask::nCompletedPairs==nTracks*PairsPerTrack, NULL); - ASSERT(EnqueuedTask::nOrderedPairs<EnqueuedTask::nCompletedPairs, - "all task pairs executed in enqueue order; de facto guarantee is too strong?"); - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Tests for Fire-And-Forget scheduling functionality - -const int NumRepeats = 200; -const int MaxNumThreads = 16; -static volatile bool Finished[MaxNumThreads] = {}; - -static volatile bool CanStart; - -//! 
Custom user task interface -class ITask { -public: - virtual ~ITask() {} - virtual void Execute() = 0; - virtual void Release() { delete this; } -}; - -class TestTask : public ITask { - volatile bool *m_pDone; -public: - TestTask ( volatile bool *pDone ) : m_pDone(pDone) {} - - /* override */ void Execute() { - *m_pDone = true; - } -}; - -class CarrierTask : public tbb::task { - ITask* m_pTask; -public: - CarrierTask(ITask* pTask) : m_pTask(pTask) {} - - /*override*/ task* execute() { - m_pTask->Execute(); - m_pTask->Release(); - return NULL; - } -}; - -class SpawnerTask : public ITask { - ITask* m_taskToSpawn; -public: - SpawnerTask(ITask* job) : m_taskToSpawn(job) {} - - void Execute() { - while ( !CanStart ) - __TBB_Yield(); - Harness::Sleep(10); // increases probability of the bug - tbb::task::enqueue( *new( tbb::task::allocate_root() ) CarrierTask(m_taskToSpawn) ); - } -}; - -class EnqueuerBody { -public: - void operator() ( int id ) const { - tbb::task_scheduler_init init(tbb::task_scheduler_init::default_num_threads() + 1); - - SpawnerTask* pTask = new SpawnerTask( new TestTask(Finished + id) ); - tbb::task::enqueue( *new( tbb::task::allocate_root() ) CarrierTask(pTask) ); - } -}; - -//! Regression test for a bug that caused premature arena destruction -void TestCascadedEnqueue () { - REMARK("Testing cascaded enqueue\n"); - tbb::task_scheduler_init init(tbb::task_scheduler_init::default_num_threads() + 1); - - int minNumThreads = min(tbb::task_scheduler_init::default_num_threads(), MaxNumThreads) / 2; - int maxNumThreads = min(tbb::task_scheduler_init::default_num_threads() * 2, MaxNumThreads); - - for ( int numThreads = minNumThreads; numThreads <= maxNumThreads; ++numThreads ) { - for ( int i = 0; i < NumRepeats; ++i ) { - CanStart = false; - __TBB_Yield(); - NativeParallelFor( numThreads, EnqueuerBody() ); - CanStart = true; - int j = 0; - while ( j < numThreads ) { - if ( Finished[j] ) - ++j; - else - __TBB_Yield(); - } - for ( j = 0; j < numThreads; ++j ) - Finished[j] = false; - REMARK("\r%02d threads; Iteration %03d", numThreads, i); - } - } - REMARK( "\r \r" ); -} - -class DummyTask : public tbb::task { -public: - task *execute() { - Harness::Sleep(1); - return NULL; - } -}; - -class SharedRootBody { - tbb::task *my_root; -public: - SharedRootBody ( tbb::task *root ) : my_root(root) {} - - void operator() ( int ) const { - tbb::task::enqueue( *new( tbb::task::allocate_additional_child_of(*my_root) ) DummyTask ); - } -}; - -//! Test for enqueuing children of the same root from different master threads -void TestSharedRoot ( int p ) { - REMARK("Testing enqueuing siblings from different masters\n"); - tbb::task_scheduler_init init(p); - tbb::task *root = new ( tbb::task::allocate_root() ) tbb::empty_task; - root->set_ref_count(1); - for( int n = MinThread; n <= MaxThread; ++n ) { - REMARK("%d masters, %d requested workers\r", n, p-1); - NativeParallelFor( n, SharedRootBody(root) ); - } - REMARK( " \r" ); - root->wait_for_all(); - tbb::task::destroy(*root); -} - -class BlockingTask : public tbb::task { - Harness::SpinBarrier &m_Barrier; - - tbb::task* execute () { - m_Barrier.wait(); - return 0; - } - -public: - BlockingTask ( Harness::SpinBarrier& bar ) : m_Barrier(bar) {} -}; - -//! Test making sure that masters can dequeue tasks -/** Success criterion is not hanging. 
**/ -void TestDequeueByMaster () { - REMARK("Testing task dequeuing by master\n"); - tbb::task_scheduler_init init(1); - Harness::SpinBarrier bar(2); - tbb::task &r = *new ( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count(3); - tbb::task::enqueue( *new(r.allocate_child()) BlockingTask(bar) ); - tbb::task::enqueue( *new(r.allocate_child()) BlockingTask(bar) ); - r.wait_for_all(); - tbb::task::destroy(r); -} - -////////////////////// Missed wake-ups /////// -#include "tbb/blocked_range.h" -#include "tbb/parallel_for.h" - -static const int NUM_TASKS = 4; -static const size_t NUM_REPEATS = TBB_USE_DEBUG ? 50000 : 100000; -static tbb::task_group_context persistent_context(tbb::task_group_context::isolated); - -struct Functor : NoAssign -{ - Harness::SpinBarrier &my_barrier; - Functor(Harness::SpinBarrier &a_barrier) : my_barrier(a_barrier) { } - void operator()(const tbb::blocked_range<int>& r) const - { - ASSERT(r.size() == 1, NULL); - // allocate_root() uses current context of parallel_for which is destroyed when it finishes. - // But enqueued tasks can outlive parallel_for execution. Thus, use a persistent context. - tbb::task *t = new(tbb::task::allocate_root(persistent_context)) tbb::empty_task(); - tbb::task::enqueue(*t); // ensure no missing wake-ups - my_barrier.timed_wait(10, "Attention: poorly reproducible event, if seen stress testing required" ); - } -}; - -void TestWakeups() -{ - tbb::task_scheduler_init my(tbb::task_scheduler_init::deferred); - if( tbb::task_scheduler_init::default_num_threads() <= NUM_TASKS ) - my.initialize(NUM_TASKS*2); - Harness::SpinBarrier barrier(NUM_TASKS); - REMARK("Missing wake-up: affinity_partitioner\n"); - tbb::affinity_partitioner aff; - for (size_t i = 0; i < NUM_REPEATS; ++i) - tbb::parallel_for(tbb::blocked_range<int>(0, NUM_TASKS), Functor(barrier), aff); - REMARK("Missing wake-up: simple_partitioner\n"); - for (size_t i = 0; i < NUM_REPEATS; ++i) - tbb::parallel_for(tbb::blocked_range<int>(0, NUM_TASKS), Functor(barrier), tbb::simple_partitioner()); - REMARK("Missing wake-up: auto_partitioner\n"); - for (size_t i = 0; i < NUM_REPEATS; ++i) - tbb::parallel_for(tbb::blocked_range<int>(0, NUM_TASKS), Functor(barrier)); // auto -} - -int TestMain () { - TestWakeups(); - TestDequeueByMaster(); - TestCascadedEnqueue(); - for( int p=MinThread; p<=MaxThread; ++p ) { - TestEnqueue(p); - TestSharedRoot(p); - } - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task_group.cpp b/src/tbb/src/test/test_task_group.cpp deleted file mode 100644 index 0d183338f..000000000 --- a/src/tbb/src/test/test_task_group.cpp +++ /dev/null @@ -1,868 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_defs.h" - -//Concurrency scheduler is not supported by Windows* new UI apps -//TODO: check whether we can test anything here -#include "tbb/tbb_config.h" -#if !__TBB_WIN8UI_SUPPORT -#ifndef TBBTEST_USE_TBB - #define TBBTEST_USE_TBB 1 -#endif -#else - #define TBBTEST_USE_TBB 0 - #undef __TBB_TASK_GROUP_CONTEXT - #define __TBB_TASK_GROUP_CONTEXT 0 -#endif - -#if !TBBTEST_USE_TBB - #if defined(_MSC_VER) && _MSC_VER < 1600 - #ifdef TBBTEST_USE_TBB - #undef TBBTEST_USE_TBB - #endif - #define TBBTEST_USE_TBB 1 - #endif -#endif - -#if TBBTEST_USE_TBB - - #include "tbb/compat/ppl.h" - #include "tbb/task_scheduler_init.h" - - #if _MSC_VER - typedef tbb::internal::uint32_t uint_t; - #else - typedef uint32_t uint_t; - #endif - -#else /* !TBBTEST_USE_TBB */ - - #if defined(_MSC_VER) - #pragma warning(disable: 4100 4180) - #endif - - #include <ppl.h> - - typedef unsigned int uint_t; - - #define __TBB_SILENT_CANCELLATION_BROKEN (_MSC_VER == 1600) - -#endif /* !TBBTEST_USE_TBB */ - -#if __TBB_TASK_GROUP_CONTEXT - -#include "tbb/atomic.h" -#include "tbb/aligned_space.h" -#include "harness_concurrency_tracker.h" - -unsigned g_MaxConcurrency = 0; - -typedef tbb::atomic<uint_t> atomic_t; -typedef Concurrency::task_handle<void(*)()> handle_type; - -//------------------------------------------------------------------------ -// Tests for the thread safety of the task_group manipulations -//------------------------------------------------------------------------ - -#include "harness_barrier.h" - -enum SharingMode { - VagabondGroup = 1, - ParallelWait = 2 -}; - -class SharedGroupBodyImpl : NoCopy, Harness::NoAfterlife { - static const uint_t c_numTasks0 = 4096, - c_numTasks1 = 1024; - - const uint_t m_numThreads; - const uint_t m_sharingMode; - - Concurrency::task_group *m_taskGroup; - atomic_t m_tasksSpawned, - m_threadsReady; - Harness::SpinBarrier m_barrier; - - static atomic_t s_tasksExecuted; - - struct TaskFunctor { - SharedGroupBodyImpl *m_pOwner; - void operator () () const { - if ( m_pOwner->m_sharingMode & ParallelWait ) { - while ( Harness::ConcurrencyTracker::PeakParallelism() < m_pOwner->m_numThreads ) - __TBB_Yield(); - } - ++s_tasksExecuted; - } - }; - - TaskFunctor m_taskFunctor; - - void Spawn ( uint_t numTasks ) { - for ( uint_t i = 0; i < numTasks; ++i ) { - ++m_tasksSpawned; - Harness::ConcurrencyTracker ct; - m_taskGroup->run( m_taskFunctor ); - } - ++m_threadsReady; - } - - void DeleteTaskGroup () { - delete m_taskGroup; - m_taskGroup = NULL; - } - - void Wait () { - while ( m_threadsReady != m_numThreads ) - __TBB_Yield(); - const uint_t numSpawned = c_numTasks0 + c_numTasks1 * (m_numThreads - 1); - ASSERT ( m_tasksSpawned == numSpawned, "Wrong number of spawned tasks. 
The test is broken" ); - REMARK("Max spawning parallelism is %u out of %u\n", Harness::ConcurrencyTracker::PeakParallelism(), g_MaxConcurrency); - if ( m_sharingMode & ParallelWait ) { - m_barrier.wait( &Harness::ConcurrencyTracker::Reset ); - { - Harness::ConcurrencyTracker ct; - m_taskGroup->wait(); - } - if ( Harness::ConcurrencyTracker::PeakParallelism() == 1 ) - REPORT ( "Warning: No parallel waiting detected in TestParallelWait\n" ); - m_barrier.wait(); - } - else - m_taskGroup->wait(); - ASSERT ( m_tasksSpawned == numSpawned, "No tasks should be spawned after wait starts. The test is broken" ); - ASSERT ( s_tasksExecuted == numSpawned, "Not all spawned tasks were executed" ); - } - -public: - SharedGroupBodyImpl ( uint_t numThreads, uint_t sharingMode = 0 ) - : m_numThreads(numThreads) - , m_sharingMode(sharingMode) - , m_taskGroup(NULL) - , m_barrier(numThreads) - { - ASSERT ( m_numThreads > 1, "SharedGroupBody tests require concurrency" ); - ASSERT ( !(m_sharingMode & VagabondGroup) || m_numThreads == 2, "In vagabond mode SharedGroupBody must be used with 2 threads only" ); - Harness::ConcurrencyTracker::Reset(); - s_tasksExecuted = 0; - m_tasksSpawned = 0; - m_threadsReady = 0; - m_taskFunctor.m_pOwner = this; - } - - void Run ( uint_t idx ) { -#if TBBTEST_USE_TBB - tbb::task_scheduler_init init(g_MaxConcurrency); -#endif - AssertLive(); - if ( idx == 0 ) { - ASSERT ( !m_taskGroup && !m_tasksSpawned, "SharedGroupBody must be reset before reuse"); - m_taskGroup = new Concurrency::task_group; - Spawn( c_numTasks0 ); - Wait(); - if ( m_sharingMode & VagabondGroup ) - m_barrier.wait(); - else - DeleteTaskGroup(); - } - else { - while ( m_tasksSpawned == 0 ) - __TBB_Yield(); - ASSERT ( m_taskGroup, "Task group is not initialized"); - Spawn (c_numTasks1); - if ( m_sharingMode & ParallelWait ) - Wait(); - if ( m_sharingMode & VagabondGroup ) { - ASSERT ( idx == 1, "In vagabond mode SharedGroupBody must be used with 2 threads only" ); - m_barrier.wait(); - DeleteTaskGroup(); - } - } - AssertLive(); - } -}; - -atomic_t SharedGroupBodyImpl::s_tasksExecuted; - -class SharedGroupBody : NoAssign, Harness::NoAfterlife { - bool m_bOwner; - SharedGroupBodyImpl *m_pImpl; -public: - SharedGroupBody ( uint_t numThreads, uint_t sharingMode = 0 ) - : m_bOwner(true) - , m_pImpl( new SharedGroupBodyImpl(numThreads, sharingMode) ) - {} - SharedGroupBody ( const SharedGroupBody& src ) - : NoAssign() - , Harness::NoAfterlife() - , m_bOwner(false) - , m_pImpl(src.m_pImpl) - {} - ~SharedGroupBody () { - if ( m_bOwner ) - delete m_pImpl; - } - void operator() ( uint_t idx ) const { m_pImpl->Run(idx); } -}; - -void TestParallelSpawn () { - NativeParallelFor( g_MaxConcurrency, SharedGroupBody(g_MaxConcurrency) ); -} - -void TestParallelWait () { - NativeParallelFor( g_MaxConcurrency, SharedGroupBody(g_MaxConcurrency, ParallelWait) ); -} - -// Tests non-stack-bound task group (the group that is allocated by one thread and destroyed by the other) -void TestVagabondGroup () { - NativeParallelFor( 2, SharedGroupBody(2, VagabondGroup) ); -} - -//------------------------------------------------------------------------ -// Common requisites of the Fibonacci tests -//------------------------------------------------------------------------ - -const uint_t N = 20; -const uint_t F = 6765; - -atomic_t g_Sum; - -#define FIB_TEST_PROLOGUE() \ - const unsigned numRepeats = g_MaxConcurrency * (TBB_USE_DEBUG ? 
4 : 16); \ - Harness::ConcurrencyTracker::Reset() - -#define FIB_TEST_EPILOGUE(sum) \ - ASSERT( sum == numRepeats * F, NULL ); \ - REMARK("Realized parallelism in Fib test is %u out of %u\n", Harness::ConcurrencyTracker::PeakParallelism(), g_MaxConcurrency) - -//------------------------------------------------------------------------ -// Test for a complex tree of task groups -// -// The test executes a tree of task groups of the same sort with asymmetric -// descendant nodes distribution at each level at each level. -// -// The chores are specified as functor objects. Each task group contains only one chore. -//------------------------------------------------------------------------ - -template<uint_t Func(uint_t)> -struct FibTask : NoAssign, Harness::NoAfterlife { - uint_t* m_pRes; - const uint_t m_Num; - FibTask( uint_t* y, uint_t n ) : m_pRes(y), m_Num(n) {} - void operator() () const { - *m_pRes = Func(m_Num); - } -}; - -uint_t Fib_SpawnRightChildOnly ( uint_t n ) { - Harness::ConcurrencyTracker ct; - if( n<2 ) { - return n; - } else { - uint_t y = ~0u; - Concurrency::task_group tg; - tg.run( FibTask<Fib_SpawnRightChildOnly>(&y, n-1) ); - uint_t x = Fib_SpawnRightChildOnly(n-2); - tg.wait(); - return y+x; - } -} - -void TestFib1 () { - FIB_TEST_PROLOGUE(); - uint_t sum = 0; - for( unsigned i = 0; i < numRepeats; ++i ) - sum += Fib_SpawnRightChildOnly(N); - FIB_TEST_EPILOGUE(sum); -} - - -//------------------------------------------------------------------------ -// Test for a mixed tree of task groups. -// -// The test executes a tree with multiple task of one sort at the first level, -// each of which originates in its turn a binary tree of descendant task groups. -// -// The chores are specified both as functor objects and as function pointers -//------------------------------------------------------------------------ - -uint_t Fib_SpawnBothChildren( uint_t n ) { - Harness::ConcurrencyTracker ct; - if( n<2 ) { - return n; - } else { - uint_t y = ~0u, - x = ~0u; - Concurrency::task_group tg; - tg.run( FibTask<Fib_SpawnBothChildren>(&x, n-2) ); - tg.run( FibTask<Fib_SpawnBothChildren>(&y, n-1) ); - tg.wait(); - return y + x; - } -} - -void RunFib2 () { - g_Sum += Fib_SpawnBothChildren(N); -} - -void TestFib2 () { - FIB_TEST_PROLOGUE(); - g_Sum = 0; - Concurrency::task_group rg; - for( unsigned i = 0; i < numRepeats - 1; ++i ) - rg.run( &RunFib2 ); - rg.wait(); - rg.run( &RunFib2 ); - rg.wait(); - FIB_TEST_EPILOGUE(g_Sum); -} - - -//------------------------------------------------------------------------ -// Test for a complex tree of task groups -// The chores are specified as task handles for recursive functor objects. 
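// [Editor's note -- annotation, not part of the deleted file.] The
// Concurrency::task_handle compatibility layer (tbb/compat/ppl.h) is gone in
// oneTBB; the recursive right-child-spawning pattern tested below maps onto
// plain tbb::task_group with lambdas. A minimal sketch (the function name
// mirrors, but is not, the deleted Fib_SpawnRightChildOnly), assuming oneTBB:

#include <tbb/task_group.h>

unsigned FibSpawnRightChildOnly(unsigned n) {
    if (n < 2) return n;
    unsigned y = 0;
    tbb::task_group tg;
    tg.run([&y, n] { y = FibSpawnRightChildOnly(n - 1); }); // right child may be stolen
    unsigned x = FibSpawnRightChildOnly(n - 2);             // left child runs inline
    tg.wait();
    return x + y;
}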
-//------------------------------------------------------------------------ - -class FibTask_SpawnRightChildOnly : NoAssign, Harness::NoAfterlife { - uint_t* m_pRes; - mutable uint_t m_Num; - -public: - FibTask_SpawnRightChildOnly( uint_t* y, uint_t n ) : m_pRes(y), m_Num(n) {} - void operator() () const { - Harness::ConcurrencyTracker ct; - AssertLive(); - if( m_Num < 2 ) { - *m_pRes = m_Num; - } else { - uint_t y = ~0u; - Concurrency::task_group tg; - Concurrency::task_handle<FibTask_SpawnRightChildOnly> h = FibTask_SpawnRightChildOnly(&y, m_Num-1); - tg.run( h ); - m_Num -= 2; - tg.run_and_wait( *this ); - *m_pRes += y; - } - } -}; - -uint_t RunFib3 ( uint_t n ) { - uint_t res = ~0u; - FibTask_SpawnRightChildOnly func(&res, n); - func(); - return res; -} - -void TestTaskHandle () { - FIB_TEST_PROLOGUE(); - uint_t sum = 0; - for( unsigned i = 0; i < numRepeats; ++i ) - sum += RunFib3(N); - FIB_TEST_EPILOGUE(sum); -} - -//------------------------------------------------------------------------ -// Test for a mixed tree of task groups. -// The chores are specified as task handles for both functor objects and function pointers -//------------------------------------------------------------------------ - -template<class task_group_type> -class FibTask_SpawnBothChildren : NoAssign, Harness::NoAfterlife { - uint_t* m_pRes; - uint_t m_Num; -public: - FibTask_SpawnBothChildren( uint_t* y, uint_t n ) : m_pRes(y), m_Num(n) {} - void operator() () const { - Harness::ConcurrencyTracker ct; - AssertLive(); - if( m_Num < 2 ) { - *m_pRes = m_Num; - } else { - uint_t x = ~0u, // initialized only to suppress warning - y = ~0u; - task_group_type tg; - Concurrency::task_handle<FibTask_SpawnBothChildren> h1 = FibTask_SpawnBothChildren(&y, m_Num-1), - h2 = FibTask_SpawnBothChildren(&x, m_Num-2); - tg.run( h1 ); - tg.run( h2 ); - tg.wait(); - *m_pRes = x + y; - } - } -}; - -template<class task_group_type> -void RunFib4 () { - uint_t res = ~0u; - FibTask_SpawnBothChildren<task_group_type> func(&res, N); - func(); - g_Sum += res; -} - -template<class task_group_type> -void TestTaskHandle2 () { - FIB_TEST_PROLOGUE(); - g_Sum = 0; - task_group_type rg; - typedef tbb::aligned_space<handle_type> handle_space_t; - handle_space_t *handles = new handle_space_t[numRepeats]; - handle_type *h = NULL; -#if __TBB_ipf && __TBB_GCC_VERSION==40601 - volatile // Workaround for unexpected exit from the loop below after the exception was caught -#endif - unsigned i = 0; - for( ;; ++i ) { - h = handles[i].begin(); -#if __TBB_FUNC_PTR_AS_TEMPL_PARAM_BROKEN - new ( h ) handle_type((void(*)())RunFib4<task_group_type>); -#else - new ( h ) handle_type(RunFib4<task_group_type>); -#endif - if ( i == numRepeats - 1 ) - break; - rg.run( *h ); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - bool caught = false; - try { - if( i&1 ) rg.run( *h ); - else rg.run_and_wait( *h ); - } - catch ( Concurrency::invalid_multiple_scheduling& e ) { - ASSERT( e.what(), "Error message is absent" ); - caught = true; - } - catch ( ... 
) { - ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception" ); - } - ASSERT ( caught, "Expected invalid_multiple_scheduling exception is missing" ); -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ - } - ASSERT( i == numRepeats - 1, "unexpected exit from the loop" ); - rg.run_and_wait( *h ); - - for( i = 0; i < numRepeats; ++i ) -#if __TBB_UNQUALIFIED_CALL_OF_DTOR_BROKEN - handles[i].begin()->Concurrency::task_handle<void(*)()>::~task_handle(); -#else - handles[i].begin()->~handle_type(); -#endif - delete []handles; - FIB_TEST_EPILOGUE(g_Sum); -} - -#if __TBB_LAMBDAS_PRESENT -//------------------------------------------------------------------------ -// Test for a mixed tree of task groups. -// The chores are specified as lambdas -//------------------------------------------------------------------------ - -void TestFibWithLambdas () { - REMARK ("Lambdas test"); - FIB_TEST_PROLOGUE(); - atomic_t sum; - sum = 0; - Concurrency::task_group rg; - for( unsigned i = 0; i < numRepeats; ++i ) - rg.run( [&](){sum += Fib_SpawnBothChildren(N);} ); - rg.wait(); - FIB_TEST_EPILOGUE(sum); -} - -//------------------------------------------------------------------------ -// Test for make_task. -// The chores are specified as lambdas converted to task_handles. -//------------------------------------------------------------------------ - -void TestFibWithMakeTask () { - REMARK ("Make_task test\n"); - atomic_t sum; - sum = 0; - Concurrency::task_group rg; - const auto &h1 = Concurrency::make_task( [&](){sum += Fib_SpawnBothChildren(N);} ); - const auto &h2 = Concurrency::make_task( [&](){sum += Fib_SpawnBothChildren(N);} ); - rg.run( h1 ); - rg.run_and_wait( h2 ); - ASSERT( sum == 2 * F, NULL ); -} -#endif /* __TBB_LAMBDAS_PRESENT */ - - -//------------------------------------------------------------------------ -// Tests for exception handling and cancellation behavior. -//------------------------------------------------------------------------ - -class test_exception : public std::exception -{ - const char* m_strDescription; -public: - test_exception ( const char* descr ) : m_strDescription(descr) {} - - const char* what() const throw() { return m_strDescription; } -}; - -#if TBB_USE_CAPTURED_EXCEPTION - #include "tbb/tbb_exception.h" - typedef tbb::captured_exception TestException; -#else - typedef test_exception TestException; -#endif - -#include <string.h> - -#define NUM_CHORES 512 -#define NUM_GROUPS 64 -#define SKIP_CHORES (NUM_CHORES/4) -#define SKIP_GROUPS (NUM_GROUPS/4) -#define EXCEPTION_DESCR1 "Test exception 1" -#define EXCEPTION_DESCR2 "Test exception 2" - -atomic_t g_ExceptionCount; -atomic_t g_TaskCount; -unsigned g_ExecutedAtCancellation; -bool g_Rethrow; -bool g_Throw; -#if __TBB_SILENT_CANCELLATION_BROKEN - volatile bool g_CancellationPropagationInProgress; - #define CATCH_ANY() \ - __TBB_CATCH( ... ) { \ - if ( g_CancellationPropagationInProgress ) { \ - if ( g_Throw ) { \ - exceptionCaught = true; \ - ++g_ExceptionCount; \ - } \ - } else \ - ASSERT( false, "Unknown exception" ); \ - } -#else - #define CATCH_ANY() __TBB_CATCH( ... 
) { ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unknown exception" ); } -#endif - -inline -void ResetGlobals ( bool bThrow, bool bRethrow ) { - g_Throw = bThrow; - g_Rethrow = bRethrow; -#if __TBB_SILENT_CANCELLATION_BROKEN - g_CancellationPropagationInProgress = false; -#endif - g_ExceptionCount = 0; - g_TaskCount = 0; - Harness::ConcurrencyTracker::Reset(); -} - -class ThrowingTask : NoAssign, Harness::NoAfterlife { - atomic_t &m_TaskCount; -public: - ThrowingTask( atomic_t& counter ) : m_TaskCount(counter) {} - void operator() () const { - Harness::ConcurrencyTracker ct; - AssertLive(); - if ( g_Throw ) { - if ( ++m_TaskCount == SKIP_CHORES ) - __TBB_THROW( test_exception(EXCEPTION_DESCR1) ); - __TBB_Yield(); - } - else { - ++g_TaskCount; - while( !Concurrency::is_current_task_group_canceling() ) - __TBB_Yield(); - } - } -}; - -void LaunchChildren () { - atomic_t count; - count = 0; - Concurrency::task_group g; - bool exceptionCaught = false; - for( unsigned i = 0; i < NUM_CHORES; ++i ) - g.run( ThrowingTask(count) ); - Concurrency::task_group_status status = Concurrency::not_complete; - __TBB_TRY { - status = g.wait(); - } __TBB_CATCH ( TestException& e ) { -#if TBB_USE_EXCEPTIONS - ASSERT( e.what(), "Empty what() string" ); - ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR1) == 0, "Unknown exception" ); -#endif /* TBB_USE_EXCEPTIONS */ - exceptionCaught = true; - ++g_ExceptionCount; - } CATCH_ANY(); - ASSERT( !g_Throw || exceptionCaught || status == Concurrency::canceled, "No exception in the child task group" ); - if ( g_Rethrow && g_ExceptionCount > SKIP_GROUPS ) { -#if __TBB_SILENT_CANCELLATION_BROKEN - g_CancellationPropagationInProgress = true; -#endif - __TBB_THROW( test_exception(EXCEPTION_DESCR2) ); - } -} - -#if TBB_USE_EXCEPTIONS -void TestEh1 () { - ResetGlobals( true, false ); - Concurrency::task_group rg; - for( unsigned i = 0; i < NUM_GROUPS; ++i ) - // TBB version does not require taking function address - rg.run( &LaunchChildren ); - try { - rg.wait(); - } catch ( ... ) { - ASSERT( false, "Unexpected exception" ); - } - ASSERT( g_ExceptionCount <= NUM_GROUPS, "Too many exceptions from the child groups. The test is broken" ); - ASSERT( g_ExceptionCount == NUM_GROUPS, "Not all child groups threw the exception" ); -} - -void TestEh2 () { - ResetGlobals( true, true ); - Concurrency::task_group rg; - bool exceptionCaught = false; - for( unsigned i = 0; i < NUM_GROUPS; ++i ) - // TBB version does not require taking function address - rg.run( &LaunchChildren ); - try { - rg.wait(); - } catch ( TestException& e ) { - ASSERT( e.what(), "Empty what() string" ); - ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR2) == 0, "Unknown exception" ); - ASSERT ( !rg.is_canceling(), "wait() has not reset cancellation state" ); - exceptionCaught = true; - } CATCH_ANY(); - ASSERT( exceptionCaught, "No exception thrown from the root task group" ); - ASSERT( g_ExceptionCount >= SKIP_GROUPS, "Too few exceptions from the child groups. The test is broken" ); - ASSERT( g_ExceptionCount <= NUM_GROUPS - SKIP_GROUPS, "Too many exceptions from the child groups. 
The test is broken" ); - ASSERT( g_ExceptionCount < NUM_GROUPS - SKIP_GROUPS, "None of the child groups was cancelled" ); -} -#endif /* TBB_USE_EXCEPTIONS */ - -//------------------------------------------------------------------------ -// Tests for manual cancellation of the task_group hierarchy -//------------------------------------------------------------------------ - -void TestCancellation1 () { - ResetGlobals( false, false ); - Concurrency::task_group rg; - for( unsigned i = 0; i < NUM_GROUPS; ++i ) - // TBB version does not require taking function address - rg.run( &LaunchChildren ); - ASSERT ( !Concurrency::is_current_task_group_canceling(), "Unexpected cancellation" ); - ASSERT ( !rg.is_canceling(), "Unexpected cancellation" ); -#if __TBB_SILENT_CANCELLATION_BROKEN - g_CancellationPropagationInProgress = true; -#endif - while ( g_MaxConcurrency > 1 && g_TaskCount == 0 ) - __TBB_Yield(); - rg.cancel(); - g_ExecutedAtCancellation = g_TaskCount; - ASSERT ( rg.is_canceling(), "No cancellation reported" ); - rg.wait(); - ASSERT( g_TaskCount <= NUM_GROUPS * NUM_CHORES, "Too many tasks reported. The test is broken" ); - ASSERT( g_TaskCount < NUM_GROUPS * NUM_CHORES, "No tasks were cancelled. Cancellation model changed?" ); - ASSERT( g_TaskCount <= g_ExecutedAtCancellation + Harness::ConcurrencyTracker::PeakParallelism(), "Too many tasks survived cancellation" ); -} - -//------------------------------------------------------------------------ -// Tests for manual cancellation of the structured_task_group hierarchy -//------------------------------------------------------------------------ - -void StructuredLaunchChildren () { - atomic_t count; - count = 0; - Concurrency::structured_task_group g; - bool exceptionCaught = false; - typedef Concurrency::task_handle<ThrowingTask> throwing_handle_type; - tbb::aligned_space<throwing_handle_type,NUM_CHORES> handles; - for( unsigned i = 0; i < NUM_CHORES; ++i ) { - throwing_handle_type *h = handles.begin()+i; - new ( h ) throwing_handle_type( ThrowingTask(count) ); - g.run( *h ); - } - __TBB_TRY { - g.wait(); - } __TBB_CATCH( TestException& e ) { -#if TBB_USE_EXCEPTIONS - ASSERT( e.what(), "Empty what() string" ); - ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR1) == 0, "Unknown exception" ); -#endif /* TBB_USE_EXCEPTIONS */ -#if __TBB_SILENT_CANCELLATION_BROKEN - ASSERT ( !g.is_canceling() || g_CancellationPropagationInProgress, "wait() has not reset cancellation state" ); -#else - ASSERT ( !g.is_canceling(), "wait() has not reset cancellation state" ); -#endif - exceptionCaught = true; - ++g_ExceptionCount; - } CATCH_ANY(); - ASSERT( !g_Throw || exceptionCaught, "No exception in the child task group" ); - for( unsigned i = 0; i < NUM_CHORES; ++i ) - (handles.begin()+i)->~throwing_handle_type(); - if ( g_Rethrow && g_ExceptionCount > SKIP_GROUPS ) { -#if __TBB_SILENT_CANCELLATION_BROKEN - g_CancellationPropagationInProgress = true; -#endif - __TBB_THROW( test_exception(EXCEPTION_DESCR2) ); - } -} - -class StructuredCancellationTestDriver { - tbb::aligned_space<handle_type,NUM_CHORES> m_handles; - -public: - void Launch ( Concurrency::structured_task_group& rg ) { - ResetGlobals( false, false ); - for( unsigned i = 0; i < NUM_GROUPS; ++i ) { - handle_type *h = m_handles.begin()+i; - new ( h ) handle_type( StructuredLaunchChildren ); - rg.run( *h ); - } - ASSERT ( !Concurrency::is_current_task_group_canceling(), "Unexpected cancellation" ); - ASSERT ( !rg.is_canceling(), "Unexpected cancellation" ); -#if 
__TBB_SILENT_CANCELLATION_BROKEN - g_CancellationPropagationInProgress = true; -#endif - while ( g_MaxConcurrency > 1 && g_TaskCount == 0 ) - __TBB_Yield(); - } - - void Finish () { - for( unsigned i = 0; i < NUM_GROUPS; ++i ) - (m_handles.begin()+i)->~handle_type(); - ASSERT( g_TaskCount <= NUM_GROUPS * NUM_CHORES, "Too many tasks reported. The test is broken" ); - ASSERT( g_TaskCount < NUM_GROUPS * NUM_CHORES, "No tasks were cancelled. Cancellation model changed?" ); - ASSERT( g_TaskCount <= g_ExecutedAtCancellation + g_MaxConcurrency, "Too many tasks survived cancellation" ); - } -}; // StructuredCancellationTestDriver - -void TestStructuredCancellation1 () { - StructuredCancellationTestDriver driver; - Concurrency::structured_task_group sg; - driver.Launch( sg ); - sg.cancel(); - g_ExecutedAtCancellation = g_TaskCount; - ASSERT ( sg.is_canceling(), "No cancellation reported" ); - sg.wait(); - driver.Finish(); -} - -#if TBB_USE_EXCEPTIONS -#if defined(_MSC_VER) - #pragma warning (disable: 4127) -#endif - -template<bool Throw> -void TestStructuredCancellation2 () { - bool exception_occurred = false, - unexpected_exception = false; - StructuredCancellationTestDriver driver; - try { - Concurrency::structured_task_group tg; - driver.Launch( tg ); - if ( Throw ) - throw int(); // Initiate stack unwinding - } - catch ( const Concurrency::missing_wait& e ) { - ASSERT( e.what(), "Error message is absent" ); - exception_occurred = true; - unexpected_exception = Throw; - } - catch ( int ) { - exception_occurred = true; - unexpected_exception = !Throw; - } - catch ( ... ) { - exception_occurred = unexpected_exception = true; - } - ASSERT( exception_occurred, NULL ); - ASSERT( !unexpected_exception, NULL ); - driver.Finish(); -} -#endif /* TBB_USE_EXCEPTIONS */ - -void EmptyFunction () {} - -void TestStructuredWait () { - Concurrency::structured_task_group sg; - handle_type h(EmptyFunction); - sg.run(h); - sg.wait(); - handle_type h2(EmptyFunction); - sg.run(h2); - sg.wait(); -} - -int TestMain () { - REMARK ("Testing %s task_group functionality\n", TBBTEST_USE_TBB ? 
"TBB" : "PPL"); - for( int p=MinThread; p<=MaxThread; ++p ) { - g_MaxConcurrency = p; -#if TBBTEST_USE_TBB - tbb::task_scheduler_init init(p); -#else - Concurrency::SchedulerPolicy sp( 4, - Concurrency::SchedulerKind, Concurrency::ThreadScheduler, - Concurrency::MinConcurrency, 1, - Concurrency::MaxConcurrency, p, - Concurrency::TargetOversubscriptionFactor, 1); - Concurrency::Scheduler *s = Concurrency::Scheduler::Create( sp ); -#endif /* !TBBTEST_USE_TBB */ - if ( p > 1 ) { - TestParallelSpawn(); - TestParallelWait(); - TestVagabondGroup(); - } - TestFib1(); - TestFib2(); - TestTaskHandle(); - TestTaskHandle2<Concurrency::task_group>(); - TestTaskHandle2<Concurrency::structured_task_group>(); -#if __TBB_LAMBDAS_PRESENT - TestFibWithLambdas(); - TestFibWithMakeTask(); -#endif - TestCancellation1(); - TestStructuredCancellation1(); -#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - TestEh1(); - TestEh2(); - TestStructuredWait(); - TestStructuredCancellation2<true>(); -#if !__TBB_THROW_FROM_DTOR_BROKEN - TestStructuredCancellation2<false>(); -#else - REPORT("Known issue: TestStructuredCancellation2<false>() is skipped.\n"); -#endif -#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */ -#if !TBBTEST_USE_TBB - s->Release(); -#endif - } -#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - REPORT("Known issue: exception handling tests are skipped.\n"); -#endif - return Harness::Done; -} - -#else /* !__TBB_TASK_GROUP_CONTEXT */ - -#include "harness.h" - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__TBB_TASK_GROUP_CONTEXT */ diff --git a/src/tbb/src/test/test_task_leaks.cpp b/src/tbb/src/test/test_task_leaks.cpp deleted file mode 100644 index f3e603057..000000000 --- a/src/tbb/src/test/test_task_leaks.cpp +++ /dev/null @@ -1,272 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/* The test uses "single produces multiple consumers" (SPMC )pattern to check - if the memory of the tasks stolen by consumer threads is returned to the - producer thread and is reused. - - The test consists of a series of iterations, which execute a task tree. - the test fails is the memory consumption is not stabilized during some - number of iterations. 
- - After the memory consumption has stabilized, the memory state is perturbed by - switching the producer thread, and the check is repeated. -*/ - -#define HARNESS_DEFAULT_MIN_THREADS -1 - -#define __TBB_COUNT_TASK_NODES 1 -#include "harness_inject_scheduler.h" - -#include "tbb/atomic.h" -#include "harness_assert.h" -#include <cstdlib> - - -// Test configuration parameters - -//! Maximal number of test iterations -const int MaxIterations = 600; -//! Number of iterations during which the memory consumption must stabilize -const int AsymptoticRange = 100; -//! Number of times the memory state is perturbed to repeat the check -const int NumProducerSwitches = 2; -//! Number of iterations after which the success of producer switch is checked -const int ProducerCheckTimeout = 10; -//! Number of initial iterations used to collect statistics for later checks -const int InitialStatsIterations = 20; -//! Inner iterations of RunTaskGenerators() -const int TaskGeneratorsIterations = TBB_USE_DEBUG ? 30 : 100; - -tbb::atomic<int> Count; -tbb::atomic<tbb::task*> Exchanger; -tbb::internal::scheduler* Producer; - -#include "tbb/task_scheduler_init.h" - -#include "harness.h" - -using namespace tbb; -using namespace tbb::internal; - -class ChangeProducer: public tbb::task { -public: - /*override*/ tbb::task* execute() { - if( is_stolen_task() ) { - Producer = internal::governor::local_scheduler(); - } - return NULL; - } -}; - -class TaskGenerator: public tbb::task { - const int my_child_count; - int my_depth; -public: - TaskGenerator(int child_count, int d) : my_child_count(child_count), my_depth(d) { - ASSERT(my_child_count>1, "The TaskGenerator should produce at least two children"); - } - /*override*/ tbb::task* execute() { - if( my_depth>0 ) { - int child_count = my_child_count; - scheduler* my_sched = internal::governor::local_scheduler(); - tbb::task& c = *new( allocate_continuation() ) tbb::empty_task; - c.set_ref_count( child_count ); - recycle_as_child_of(c); - --child_count; - if( Producer==my_sched ) { - // produce a task and put it into Exchanger - tbb::task* t = new( c.allocate_child() ) tbb::empty_task; - --child_count; - t = Exchanger.fetch_and_store(t); - if( t ) spawn(*t); - } else { - tbb::task* t = Exchanger.fetch_and_store(NULL); - if( t ) spawn(*t); - } - while( child_count ) { - tbb::task* t = new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1); - if( my_depth >4 ) enqueue(*t); - else spawn(*t); - --child_count; - } - --my_depth; - return this; - } else { - tbb::task* t = Exchanger.fetch_and_store(NULL); - if( t ) spawn(*t); - return NULL; - } - } -}; - -#include "harness_memory.h" -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) - // VS2008/VC9 seems to have an issue - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include <math.h> -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif - -void RunTaskGenerators( bool switchProducer = false, bool checkProducer = false ) { - if( switchProducer ) - Producer = NULL; - tbb::task* dummy_root = new( tbb::task::allocate_root() ) tbb::empty_task; - dummy_root->set_ref_count( 2 ); - // If no producer, start elections; some worker will take the role - if( Producer ) - tbb::task::spawn( *new( dummy_root->allocate_child() ) tbb::empty_task ); - else - tbb::task::spawn( *new( dummy_root->allocate_child() ) ChangeProducer ); - if( checkProducer && !Producer ) - REPORT("Warning: producer has not changed after 10 attempts; running on a single core?\n"); - for( int j=0;
j<TaskGeneratorsIterations; ++j ) { - if( j&1 ) { - tbb::task& t = *new( tbb::task::allocate_root() ) TaskGenerator(/*child_count=*/4, /*depth=*/6); - tbb::task::spawn_root_and_wait(t); - } else { - tbb::task& t = *new (tbb::task::allocate_additional_child_of(*dummy_root)) - TaskGenerator(/*child_count=*/4, /*depth=*/6); - tbb::task::enqueue(t); - } - } - dummy_root->wait_for_all(); - tbb::task::destroy( *dummy_root ); -} - -class TaskList: public tbb::task { - const int my_num_childs; -public: - TaskList(const int num_childs) : my_num_childs(num_childs) {} - tbb::task* execute() { - tbb::task_list list; - for (int i=0; i<my_num_childs; ++i) - { - list.push_back( *new( allocate_child() ) tbb::empty_task ); - } - set_ref_count(my_num_childs+1); - spawn(list); - - wait_for_all(); - return 0; - } -}; - -void RunTaskListGenerator() -{ - const int max_num_childs = 10000; - int num_childs=3; - - while ( num_childs < max_num_childs ) - { - tbb::task& root = *new( tbb::task::allocate_root() ) TaskList(num_childs); - - tbb::task::spawn_root_and_wait(root); - - num_childs = 3 * num_childs; - } -} - -//! Tests whether task scheduler allows thieves to hoard task objects. -/** The test takes a while to run, so we run it only with the default - number of threads. */ -void TestTaskReclamation() { - REMARK("testing task reclamation\n"); - - size_t initial_amount_of_memory = 0; - double task_count_sum = 0; - double task_count_sum_square = 0; - double average, sigma; - - tbb::task_scheduler_init init (MinThread); - REMARK("Starting with %d threads\n", MinThread); - // For now, the master will produce "additional" tasks; later a worker will replace it; - Producer = internal::governor::local_scheduler(); - int N = InitialStatsIterations; - // First N iterations fill internal buffers and collect initial statistics - for( int i=0; i<N; ++i ) { - // First N iterations fill internal buffers and collect initial statistics - RunTaskGenerators(); - RunTaskListGenerator(); - - size_t m = GetMemoryUsage(); - if( m-initial_amount_of_memory > 0) - initial_amount_of_memory = m; - - intptr_t n = internal::governor::local_scheduler()->get_task_node_count( /*count_arena_workers=*/true ); - task_count_sum += n; - task_count_sum_square += n*n; - - REMARK( "Consumed %ld bytes and %ld objects (iteration=%d)\n", long(m), long(n), i ); - } - // Calculate statistical values - average = task_count_sum / N; - sigma = sqrt( (task_count_sum_square - task_count_sum*task_count_sum/N)/N ); - REMARK("Average task count: %g, sigma: %g, sum: %g, square sum:%g \n", average, sigma, task_count_sum, task_count_sum_square); - - int last_error_iteration = 0, - producer_switch_iteration = 0, - producer_switches = 0; - bool switchProducer = false, - checkProducer = false; - for( int i=0; i < MaxIterations; ++i ) { - // These iterations check for excessive memory use and unreasonable task count - RunTaskGenerators( switchProducer, checkProducer ); - RunTaskListGenerator(); - - intptr_t n = internal::governor::local_scheduler()->get_task_node_count( /*count_arena_workers=*/true ); - size_t m = GetMemoryUsage(); - - if( (m-initial_amount_of_memory > 0) && (n > average+4*sigma) ) { - // Use 4*sigma interval (for normal distribution, 3*sigma contains ~99% of values). 
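Aside: the statistics feeding that 4*sigma bound come from the one-pass sums accumulated earlier in the loop (sum of counts and sum of squared counts). Spelled out as a sketch (not part of the patch):

```cpp
#include <cmath>

// One-pass mean and standard deviation from a running sum and sum of squares.
double mean_of(double sum, int n) { return sum / n; }
double sigma_of(double sum, double sumsq, int n) {
    return std::sqrt((sumsq - sum * sum / n) / n);
}

// The leak heuristic: flag a task count that exceeds mean + 4 * sigma.
bool suspicious(double x, double mean, double sigma) {
    return x > mean + 4 * sigma;
}
```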
- REMARK( "Warning: possible leak of up to %ld bytes; currently %ld cached task objects (iteration=%d)\n", - static_cast<unsigned long>(m-initial_amount_of_memory), long(n), i ); - last_error_iteration = i; - initial_amount_of_memory = m; - } else { - REMARK( "Consumed %ld bytes and %ld objects (iteration=%d)\n", long(m), long(n), i ); - } - if ( i == last_error_iteration + AsymptoticRange ) { - if ( producer_switches++ == NumProducerSwitches ) - break; - else { - last_error_iteration = producer_switch_iteration = i; - switchProducer = true; - } - } - else { - switchProducer = false; - checkProducer = producer_switch_iteration && (i == producer_switch_iteration + ProducerCheckTimeout); - } - } - ASSERT( last_error_iteration < MaxIterations - AsymptoticRange, "The amount of allocated tasks keeps growing. Leak is possible." ); -} - -int TestMain () { - if( !GetMemoryUsage() ) { - REMARK("GetMemoryUsage is not implemented for this platform\n"); - return Harness::Skipped; - } - TestTaskReclamation(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task_priority.cpp b/src/tbb/src/test/test_task_priority.cpp deleted file mode 100644 index c83dc66c3..000000000 --- a/src/tbb/src/test/test_task_priority.cpp +++ /dev/null @@ -1,585 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" -#include "harness.h" - -#if __TBB_GCC_STRICT_ALIASING_BROKEN - #pragma GCC diagnostic ignored "-Wstrict-aliasing" -#endif - -#if __TBB_TASK_GROUP_CONTEXT - -#include "tbb/task.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/atomic.h" -#include <cstdlib> - -#if _MSC_VER && __TBB_NO_IMPLICIT_LINKAGE -// plays around __TBB_NO_IMPLICIT_LINKAGE. 
__TBB_LIB_NAME should be defined (in makefiles) - #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) -#endif - -const int NumIterations = 100; -const int NumLeafTasks = 2; -int MinBaseDepth = 8; -int MaxBaseDepth = 10; -int BaseDepth = 0; - -const int DesiredNumThreads = 12; - -const int NumTests = 8; -const int TestRepeats = 4; - -int g_NumMasters = 0; -volatile intptr_t *g_LeavesExecuted = NULL; - -int g_TestFailures[NumTests]; -int g_CurConfig = 0; - -int P = 0; - -#if !__TBB_TASK_PRIORITY -namespace tbb { - enum priority_t { - priority_low = 0, - priority_normal = 1, - priority_high = 2 - }; -} -#endif /* __TBB_TASK_PRIORITY */ - -tbb::priority_t Low = tbb::priority_normal, - High = tbb::priority_high; -int PreemptionActivatorId = 1; - -enum Options { - NoPriorities = 0, - TestPreemption = 1, - Flog = 2, - FlogEncloser = Flog | 4 -}; - -const char *PriorityName(tbb::priority_t p) { - if( p == tbb::priority_high ) return "high"; - if( p == tbb::priority_normal ) return "normal"; - if( p == tbb::priority_low ) return "low"; - return "bad"; -} - -void PrepareGlobals ( int numMasters ) { - ASSERT( !g_NumMasters && !g_LeavesExecuted, NULL ); - g_NumMasters = numMasters; - if ( !g_LeavesExecuted ) - g_LeavesExecuted = new intptr_t[numMasters]; - g_CurConfig = 0; - memset( const_cast<intptr_t*>(g_LeavesExecuted), 0, sizeof(intptr_t) * numMasters ); - memset( g_TestFailures, 0, sizeof(int) * NumTests ); -} - -void ClearGlobals () { - ASSERT( g_LeavesExecuted, NULL ); - delete [] g_LeavesExecuted; - g_LeavesExecuted = NULL; - g_NumMasters = 0; - REMARK("\r \r"); -} - -class LeafTask : public tbb::task { - int m_tid; - uintptr_t m_opts; - - tbb::task* execute () { - volatile int anchor = 0; - for ( int i = 0; i < NumIterations; ++i ) - anchor += i; - __TBB_FetchAndAddW(g_LeavesExecuted + m_tid, 1); -#if __TBB_TASK_PRIORITY - ASSERT( !m_opts || (m_opts & Flog) || (!(m_opts & TestPreemption) ^ (m_tid == PreemptionActivatorId)), NULL ); - if ( (m_opts & TestPreemption) && g_LeavesExecuted[0] > P && group_priority() == tbb::priority_normal ) { - ASSERT( m_tid == PreemptionActivatorId, NULL ); - ASSERT( (PreemptionActivatorId == 1 ? High > tbb::priority_normal : Low < tbb::priority_normal), NULL ); - set_group_priority( PreemptionActivatorId == 1 ? High : Low ); - } -#endif /* __TBB_TASK_PRIORITY */ - return NULL; - } -public: - LeafTask ( int tid, uintptr_t opts ) : m_tid(tid), m_opts(opts) { - ASSERT( tid < g_NumMasters, NULL ); - } -}; - -template<class NodeType> -class NodeTask : public tbb::task { -protected: - int m_tid; - int m_depth; - uintptr_t m_opts; - task *m_root; - - void SpawnChildren ( task* parent_node ) { - ASSERT( m_depth > 0, NULL ); - if ( g_LeavesExecuted[m_tid] % (100 / m_depth) == 0 ) { - if ( m_opts & Flog ) { -#if __TBB_TASK_PRIORITY - task *r = m_opts & FlogEncloser ? this : m_root; - tbb::priority_t p = r->group_priority(); - r->set_group_priority( p == Low ? High : Low ); -#endif /* __TBB_TASK_PRIORITY */ - } - else - __TBB_Yield(); - } - parent_node->set_ref_count(NumLeafTasks + 1); - --m_depth; - for ( int i = 0; i < NumLeafTasks; ++i ) { - task *t = m_depth ? 
(task*) new(parent_node->allocate_child()) NodeType(m_tid, m_depth, m_opts, m_root) - : (task*) new(parent_node->allocate_child()) LeafTask(m_tid, m_opts); - task::spawn(*t); - } - } - -public: - NodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL ) - : m_tid(tid), m_depth(_depth), m_opts(opts), m_root(r) - {} -}; - -class NestedGroupNodeTask : public NodeTask<NestedGroupNodeTask> { - task* execute () { - tbb::task_group_context ctx; // Use bound context - tbb::empty_task &r = *new( task::allocate_root(ctx) ) tbb::empty_task; - SpawnChildren(&r); - r.wait_for_all(); - task::destroy(r); - return NULL; - } -public: - NestedGroupNodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL ) - : NodeTask<NestedGroupNodeTask>(tid, _depth, opts, r) - {} -}; - -class BlockingNodeTask : public NodeTask<BlockingNodeTask> { - task* execute () { - SpawnChildren(this); - wait_for_all(); - return NULL; - } -public: - BlockingNodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL ) - : NodeTask<BlockingNodeTask>(tid, _depth, opts, r) {} -}; - -class NonblockingNodeTask : public NodeTask<NonblockingNodeTask> { - task* execute () { - if ( m_depth < 0 ) - return NULL; // I'm just a continuation now - recycle_as_safe_continuation(); - SpawnChildren(this); - m_depth = -1; - return NULL; - } -public: - NonblockingNodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL ) - : NodeTask<NonblockingNodeTask>(tid, _depth, opts, r) - {} -}; - -template<class NodeType> -class MasterBodyBase : NoAssign, Harness::NoAfterlife { -protected: - uintptr_t m_opts; - -public: - void RunTaskForest ( int id ) const { - ASSERT( id < g_NumMasters, NULL ); - g_LeavesExecuted[id] = 0; - int d = BaseDepth + id; - tbb::task_scheduler_init init(P-1); - tbb::task_group_context ctx (tbb::task_group_context::isolated); - tbb::empty_task &r = *new( tbb::task::allocate_root(ctx) ) tbb::empty_task; - const int R = 4; - r.set_ref_count( R * P + 1 ); - // Only PreemptionActivator thread changes its task tree priority in preemption test mode - const uintptr_t opts = (id == PreemptionActivatorId) ? m_opts : (m_opts & ~(uintptr_t)TestPreemption); - for ( int i = 0; i < R; ++i ) { - for ( int j = 1; j < P; ++j ) - r.spawn( *new(r.allocate_child()) NodeType(id, MinBaseDepth + id, opts, &r) ); - r.spawn( *new(r.allocate_child()) NodeType(id, d, opts, &r) ); - } - int count = 1; - intptr_t lastExecuted = 0; - while ( r.ref_count() > 1 ) { - // Give workers time to make some progress. - for ( int i = 0; i < 10 * count; ++i ) - __TBB_Yield(); -#if __TBB_TASK_PRIORITY - if ( lastExecuted == g_LeavesExecuted[id] ) { - // No progress. Likely all workers left to higher priority arena, - // and then returned to RML. Request workers back from RML. - tbb::task::enqueue( *new(tbb::task::allocate_root() ) tbb::empty_task, id == 0 ? 
Low : High ); - Harness::Sleep(count); -#if __TBB_ipf - // Increased sleep periods are required on systems with unfair hyperthreading (Itanium(R) 2 processor) - count += 10; -#endif - } - else { - count = 1; - lastExecuted = g_LeavesExecuted[id]; - } -#else /* !__TBB_TASK_PRIORITY */ - (void)lastExecuted; - tbb::task::enqueue( *new(tbb::task::allocate_root() ) tbb::empty_task ); -#endif /* !__TBB_TASK_PRIORITY */ - } - ASSERT( g_LeavesExecuted[id] == R * ((1 << d) + ((P - 1) * (1 << (MinBaseDepth + id)))), NULL ); - g_LeavesExecuted[id] = -1; - tbb::task::destroy(r); - } - - MasterBodyBase ( uintptr_t opts ) : m_opts(opts) {} -}; - -template<class NodeType> -class MasterBody : public MasterBodyBase<NodeType> { - int m_testIndex; -public: - void operator() ( int id ) const { - this->RunTaskForest(id); - if ( this->m_opts & Flog ) - return; - if ( this->m_opts & TestPreemption ) { - if ( id == 1 && g_LeavesExecuted[0] == -1 ) { - //REMARK( "Warning: Low priority master finished too early [depth %d]\n", Depth ); - ++g_TestFailures[m_testIndex]; - } - } - else { - if ( id == 0 && g_LeavesExecuted[1] == -1 ) { - //REMARK( "Warning: Faster master takes too long [depth %d]\n", Depth ); - ++g_TestFailures[m_testIndex]; - } - } - } - - MasterBody ( int idx, uintptr_t opts ) : MasterBodyBase<NodeType>(opts), m_testIndex(idx) {} -}; - -template<class NodeType> -void RunPrioritySwitchBetweenTwoMasters ( int idx, uintptr_t opts ) { - ASSERT( idx < NumTests, NULL ); - REMARK( "Config %d: idx=%i, opts=%u\r", ++g_CurConfig, idx, (unsigned)opts ); - NativeParallelFor ( 2, MasterBody<NodeType>(idx, opts) ); - Harness::Sleep(50); -} - -void TestPrioritySwitchBetweenTwoMasters () { - if ( P > DesiredNumThreads ) { - REPORT_ONCE( "Known issue: TestPrioritySwitchBetweenTwoMasters is skipped for big number of threads\n" ); - return; - } - REMARK( "Stress tests: %s / %s \n", Low == tbb::priority_low ? "Low" : "Normal", High == tbb::priority_normal ? 
"Normal" : "High" ); - PrepareGlobals( 2 ); - for ( int i = 0; i < TestRepeats; ++i ) { - for ( BaseDepth = MinBaseDepth; BaseDepth <= MaxBaseDepth; ++BaseDepth ) { - RunPrioritySwitchBetweenTwoMasters<BlockingNodeTask>( 0, NoPriorities ); - RunPrioritySwitchBetweenTwoMasters<BlockingNodeTask>( 1, TestPreemption ); - RunPrioritySwitchBetweenTwoMasters<NonblockingNodeTask>( 2, NoPriorities ); - RunPrioritySwitchBetweenTwoMasters<NonblockingNodeTask>( 3, TestPreemption ); - if ( i == 0 ) { - RunPrioritySwitchBetweenTwoMasters<BlockingNodeTask>( 4, Flog ); - RunPrioritySwitchBetweenTwoMasters<NonblockingNodeTask>( 5, Flog ); - RunPrioritySwitchBetweenTwoMasters<NestedGroupNodeTask>( 6, Flog ); - RunPrioritySwitchBetweenTwoMasters<NestedGroupNodeTask>( 7, FlogEncloser ); - } - } - } -#if __TBB_TASK_PRIORITY - const int NumRuns = TestRepeats * (MaxBaseDepth - MinBaseDepth + 1); - for ( int i = 0; i < NumTests; ++i ) { - if ( g_TestFailures[i] ) - REMARK( "Test %d: %d failures in %d runs\n", i, g_TestFailures[i], NumRuns ); - if ( g_TestFailures[i] * 100 / NumRuns > 50 ) { - if ( i == 1 ) - REPORT_ONCE( "Known issue: priority effect is limited in case of blocking-style nesting\n" ); - else - REPORT( "Warning: test %d misbehaved too often (%d out of %d)\n", i, g_TestFailures[i], NumRuns ); - } - } -#endif /* __TBB_TASK_PRIORITY */ - ClearGlobals(); -} - -class SingleChildRootTask : public tbb::task { - tbb::task* execute () { - set_ref_count(2); - spawn ( *new(allocate_child()) tbb::empty_task ); - wait_for_all(); - return NULL; - } -}; - -int TestSimplePriorityOps ( tbb::priority_t prio ) { - tbb::task_scheduler_init init; - tbb::task_group_context ctx; -#if __TBB_TASK_PRIORITY - ctx.set_priority( prio ); -#else /* !__TBB_TASK_PRIORITY */ - (void)prio; -#endif /* !__TBB_TASK_PRIORITY */ - tbb::task *r = new( tbb::task::allocate_root(ctx) ) tbb::empty_task; - r->set_ref_count(2); - r->spawn ( *new(r->allocate_child()) tbb::empty_task ); - REMARK( "TestSimplePriorityOps: waiting for a child\n" ); - r->wait_for_all(); - ASSERT( !r->ref_count(), NULL ); - REMARK( "TestLowPriority: executing an empty root\n" ); - tbb::task::spawn_root_and_wait(*r); - r = new( tbb::task::allocate_root(ctx) ) SingleChildRootTask; - REMARK( "TestLowPriority: executing a root with a single child\n" ); - tbb::task::spawn_root_and_wait(*r); - return 0; -} - -#include "tbb/parallel_for.h" - -void EmulateWork( int ) { - for ( int i = 0; i < 1000; ++i ) - __TBB_Yield(); -} - -class PeriodicActivitiesBody { -public: - void operator() ( int id ) const { - tbb::task_group_context ctx; -#if __TBB_TASK_PRIORITY - ctx.set_priority( id ? High : Low ); -#else /* !__TBB_TASK_PRIORITY */ - (void)id; -#endif /* !__TBB_TASK_PRIORITY */ - for ( int i = 0; i < 5; ++i ) { - tbb::task_scheduler_init init; - tbb::parallel_for( 1, 10000, &EmulateWork, ctx ); - } - } -}; - -void TestPeriodicConcurrentActivities () { - REMARK( "TestPeriodicConcurrentActivities: %s / %s \n", Low == tbb::priority_low ? "Low" : "Normal", High == tbb::priority_normal ? 
"Normal" : "High" ); - NativeParallelFor ( 2, PeriodicActivitiesBody() ); -} - -#include "harness_bad_expr.h" - -void TestPriorityAssertions () { -#if TRY_BAD_EXPR_ENABLED && __TBB_TASK_PRIORITY - REMARK( "TestPriorityAssertions\n" ); - tbb::priority_t bad_low_priority = tbb::priority_t( tbb::priority_low - 1 ), - bad_high_priority = tbb::priority_t( tbb::priority_high + 1 ); - tbb::task_group_context ctx; - // Catch assertion failures - tbb::set_assertion_handler( AssertionFailureHandler ); - TRY_BAD_EXPR( ctx.set_priority( bad_low_priority ), "Invalid priority level value" ); - tbb::task &t = *new( tbb::task::allocate_root() ) tbb::empty_task; - TRY_BAD_EXPR( tbb::task::enqueue( t, bad_high_priority ), "Invalid priority level value" ); - // Restore normal assertion handling - tbb::set_assertion_handler( ReportError ); -#endif /* TRY_BAD_EXPR_ENABLED && __TBB_TASK_PRIORITY */ -} - -#if __TBB_TASK_PRIORITY - -tbb::atomic<tbb::priority_t> g_order; -tbb::atomic<bool> g_order_established; -class OrderedTask : public tbb::task { - tbb::priority_t my_priority; -public: - OrderedTask(tbb::priority_t p) : my_priority(p) {} - tbb::task* execute() { - tbb::priority_t prev = g_order.fetch_and_store(my_priority); - if( my_priority != prev) { - REMARK("prev:%s --> new:%s\n", PriorityName(prev), PriorityName(my_priority)); - // TODO: improve the test for concurrent workers - if(!g_order_established) { - // initial transition path allowed low->[normal]->high - if(my_priority == tbb::priority_high) - g_order_established = true; - else ASSERT(my_priority == tbb::priority_normal && prev == tbb::priority_low, NULL); - } else { //transition path allowed high->normal->low - if(prev == tbb::priority_high) ASSERT( my_priority == tbb::priority_normal, "previous priority is high - bad order"); - else if(prev == tbb::priority_normal) ASSERT( my_priority == tbb::priority_low, "previous priority is normal - bad order"); - else ASSERT(!g_order_established, "transition from low priority but not during initialization"); - } - } - EmulateWork(0); - return NULL; - } - static void start(int i) { - tbb::priority_t p = i%3==0? tbb::priority_low : (i%3==1? tbb::priority_normal : tbb::priority_high ); - OrderedTask &t = *new(tbb::task::allocate_root()) OrderedTask(p); - tbb::task::enqueue(t, p); - } -}; - -//Look for discussion of the issue at http://software.intel.com/en-us/forums/showthread.php?t=102159 -void TestEnqueueOrder () { - REMARK("Testing order of enqueued tasks\n"); - tbb::task_scheduler_init init(1); // to simplify transition checks only one extra worker for enqueue - g_order = tbb::priority_low; - g_order_established = false; - for( int i = 0; i < 1000; i++) - OrderedTask::start(i); - while( g_order == tbb::priority_low ) __TBB_Yield(); - while( g_order != tbb::priority_low ) __TBB_Yield(); -} - -namespace test_propagation { - -// This test creates two binary trees of task_group_context objects. 
-// Indices in a binary tree have the following layout: -// [1]--> [2] -> [4],[5] -// \-> [3] -> [6],[7] -static const int first = 1, last = 7; -tbb::task_group_context* g_trees[2][/*last+1*/8]; -tbb::task_group_context* g_default_ctx; -tbb::atomic<int> g_barrier; -tbb::atomic<bool> is_finished; - -class TestSetPriorityTask : public tbb::task { - const int m_tree, m_i; -public: - TestSetPriorityTask(int t, int i) : m_tree(t), m_i(i) {} - tbb::task* execute() { - if( !m_i ) { // the first task creates two trees - g_default_ctx = group(); - for( int i = 0; i <= 1; ++i ) { - g_trees[i][1] = new tbb::task_group_context( tbb::task_group_context::isolated ); - tbb::task::spawn(*new(tbb::task::allocate_root(*g_trees[i][1])) TestSetPriorityTask(i, 1)); - } - } - else if( m_i <= last/2 ) { // is divisible - for( int i = 0; i <= 1; ++i ) { - const int index = 2*m_i + i; - g_trees[m_tree][index] = new tbb::task_group_context ( tbb::task_group_context::bound ); - tbb::task::spawn(*new(tbb::task::allocate_root(*g_trees[m_tree][index])) TestSetPriorityTask(m_tree, index)); - } - } - --g_barrier; - //REMARK("Task %i executing\n", m_i); - while (!is_finished) __TBB_Yield(); - change_group(*g_default_ctx); // avoid races with destruction of custom contexts - --g_barrier; - return NULL; - } -}; - -// Tests task_group_context state propagation, also for cancellation. -void TestSetPriority() { - REMARK("Testing set_priority() with existing forest\n"); - const int workers = last*2+1; // +1 is worker thread executing the first task - tbb::task_scheduler_init init(workers+1); // +1 is master thread - g_barrier = workers; - is_finished = false; - tbb::task::spawn(*new(tbb::task::allocate_root()) TestSetPriorityTask(0,0)); - while(g_barrier) __TBB_Yield(); - g_trees[0][2]->set_priority(tbb::priority_high); - g_trees[0][4]->set_priority(tbb::priority_normal); - g_trees[1][3]->set_priority(tbb::priority_high); // Regression test: it must not set priority_high to g_trees[0][4] - // - 1 2 3 4 5 6 7 - const int expected_priority[2][last+1] = {{0, 0, 1, 0, 0, 1, 0, 0}, - {0, 0, 0, 1, 0, 0, 1, 1}}; - for (int t = 0; t < 2; ++t) - for (int i = first; i <= last; ++i) { - REMARK("\r \rTask %i... ", i); - ASSERT(g_trees[t][i]->priority() == expected_priority[t][i]? tbb::priority_high : tbb::priority_normal, NULL); - REMARK("OK"); - } - REMARK("\r \r"); - REMARK("Also testing cancel_group_execution()\n"); // cancellation shares propagation logic with set_priority() but there are also differences - g_trees[0][4]->cancel_group_execution(); - g_trees[0][5]->cancel_group_execution(); - g_trees[1][3]->cancel_group_execution(); - // - 1 2 3 4 5 6 7 - const int expected_cancellation[2][last+1] = {{0, 0, 0, 0, 1, 1, 0, 0}, - {0, 0, 0, 1, 0, 0, 1, 1}}; - for (int t = 0; t < 2; ++t) - for (int i = first; i <= last; ++i) { - REMARK("\r \rTask %i... 
", i); - ASSERT( g_trees[t][i]->is_group_execution_cancelled() == (expected_cancellation[t][i]==1), NULL); - REMARK("OK"); - } - REMARK("\r \r"); - g_barrier = workers; - is_finished = true; - REMARK("waiting tasks to terminate\n"); - while(g_barrier) __TBB_Yield(); - for (int t = 0; t < 2; ++t) - for (int i = first; i <= last; ++i) - delete g_trees[t][i]; -} -}//namespace test_propagation -#endif /* __TBB_TASK_PRIORITY */ - -#if !__TBB_TEST_SKIP_AFFINITY -#include "harness_concurrency.h" -#endif - -int TestMain () { -#if !__TBB_TEST_SKIP_AFFINITY - Harness::LimitNumberOfThreads( DesiredNumThreads ); -#endif -#if !__TBB_TASK_PRIORITY - REMARK( "Priorities disabled: Running as just yet another task scheduler test\n" ); -#else - test_propagation::TestSetPriority(); // TODO: move down when bug 1996 is fixed - TestEnqueueOrder(); -#endif /* __TBB_TASK_PRIORITY */ - TestPriorityAssertions(); - TestSimplePriorityOps(tbb::priority_low); - TestSimplePriorityOps(tbb::priority_high); - P = tbb::task_scheduler_init::default_num_threads(); - REMARK( "The number of threads: %d\n", P ); - if ( P < 3 ) - return Harness::Skipped; - TestPeriodicConcurrentActivities(); - TestPrioritySwitchBetweenTwoMasters(); - Low = tbb::priority_low; - High = tbb::priority_normal; - PreemptionActivatorId = 0; - TestPeriodicConcurrentActivities(); - TestPrioritySwitchBetweenTwoMasters(); - High = tbb::priority_high; - TestPeriodicConcurrentActivities(); - TestPrioritySwitchBetweenTwoMasters(); - PreemptionActivatorId = 1; - TestPrioritySwitchBetweenTwoMasters(); - return Harness::Done; -} - -#else /* !__TBB_TASK_GROUP_CONTEXT */ - -int TestMain () { - return Harness::Skipped; -} - -#endif /* !__TBB_TASK_GROUP_CONTEXT */ diff --git a/src/tbb/src/test/test_task_scheduler_init.cpp b/src/tbb/src/test/test_task_scheduler_init.cpp deleted file mode 100644 index eceacbf45..000000000 --- a/src/tbb/src/test/test_task_scheduler_init.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/task_scheduler_init.h" -#include <cstdlib> -#include "harness_assert.h" - -#include <cstdio> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <stdexcept> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "harness.h" - -//! Test that task::initialize and task::terminate work when doing nothing else. -/** maxthread is treated as the "maximum" number of worker threads. */ -void InitializeAndTerminate( int maxthread ) { - __TBB_TRY { - for( int i=0; i<200; ++i ) { - switch( i&3 ) { - default: { - tbb::task_scheduler_init init( std::rand() % maxthread + 1 ); - ASSERT(init.is_active(), NULL); - break; - } - case 0: { - tbb::task_scheduler_init init; - ASSERT(init.is_active(), NULL); - break; - } - case 1: { - tbb::task_scheduler_init init( tbb::task_scheduler_init::automatic ); - ASSERT(init.is_active(), NULL); - break; - } - case 2: { - tbb::task_scheduler_init init( tbb::task_scheduler_init::deferred ); - ASSERT(!init.is_active(), "init should not be active; initialization was deferred"); - init.initialize( std::rand() % maxthread + 1 ); - ASSERT(init.is_active(), NULL); - init.terminate(); - ASSERT(!init.is_active(), "init should not be active; it was terminated"); - break; - } - } - } - } __TBB_CATCH( std::runtime_error& error ) { -#if TBB_USE_EXCEPTIONS - REPORT("ERROR: %s\n", error.what() ); -#endif /* TBB_USE_EXCEPTIONS */ - } -} - -#if _WIN64 -namespace std { // 64-bit Windows compilers have not caught up with 1998 ISO C++ standard - using ::srand; -} -#endif /* _WIN64 */ - -struct ThreadedInit { - void operator()( int ) const { - InitializeAndTerminate(MaxThread); - } -}; - -#if _MSC_VER -#include "tbb/machine/windows_api.h" -#include <tchar.h> -#endif /* _MSC_VER */ - -#include "harness_concurrency_tracker.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" - -typedef tbb::blocked_range<int> Range; - -class ConcurrencyTrackingBody { -public: - void operator() ( const Range& ) const { - Harness::ConcurrencyTracker ct; - for ( volatile int i = 0; i < 1000000; ++i ) - ; - } -}; - -/** The test will fail in particular if task_scheduler_init mistakenly hooks up - auto-initialization mechanism. **/ -void AssertExplicitInitIsNotSupplanted () { - int hardwareConcurrency = tbb::task_scheduler_init::default_num_threads(); - tbb::task_scheduler_init init(1); - Harness::ConcurrencyTracker::Reset(); - tbb::parallel_for( Range(0, hardwareConcurrency * 2, 1), ConcurrencyTrackingBody(), tbb::simple_partitioner() ); - ASSERT( Harness::ConcurrencyTracker::PeakParallelism() == 1, - "Manual init provided more threads than requested. See also the comment at the beginning of main()." ); -} - -int TestMain () { - // Do not use tbb::task_scheduler_init directly in the scope of main's body, - // as a static variable, or as a member of a static variable. 
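Aside: the warning above amounts to a lifetime rule: keep the scheduler handle inside a function scope so it is destroyed deterministically before main() returns. A sketch of the safe shape (not part of the patch; task_scheduler_init is the 2014-era API this file targets, later superseded by task_arena/global_control in oneTBB, and do_work() is a hypothetical workload):

```cpp
#include <tbb/task_scheduler_init.h>

void do_work();                         // hypothetical workload

void run_with_four_threads() {
    tbb::task_scheduler_init init(4);   // constructed on entry to the scope...
    do_work();
}                                       // ...destroyed on exit, never static
```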
-#if _MSC_VER && !__TBB_NO_IMPLICIT_LINKAGE && !defined(__TBB_LIB_NAME) - #ifdef _DEBUG - ASSERT(!GetModuleHandle(_T("tbb.dll")) && GetModuleHandle(_T("tbb_debug.dll")), - "test linked with wrong (non-debug) tbb library"); - #else - ASSERT(!GetModuleHandle(_T("tbb_debug.dll")) && GetModuleHandle(_T("tbb.dll")), - "test linked with wrong (debug) tbb library"); - #endif -#endif /* _MSC_VER && !__TBB_NO_IMPLICIT_LINKAGE && !__TBB_LIB_NAME */ - std::srand(2); - InitializeAndTerminate(MaxThread); - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK("testing with %d threads\n", p ); - NativeParallelFor( p, ThreadedInit() ); - } - AssertExplicitInitIsNotSupplanted(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_task_scheduler_observer.cpp b/src/tbb/src/test/test_task_scheduler_observer.cpp deleted file mode 100644 index e80cedf36..000000000 --- a/src/tbb/src/test/test_task_scheduler_observer.cpp +++ /dev/null @@ -1,427 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#if __TBB_CPF_BUILD -#define TEST_SLEEP_PERMISSION 1 -#define TBB_USE_PREVIEW_BINARY 1 -#endif -// undefine __TBB_CPF_BUILD to simulate user's setup -#undef __TBB_CPF_BUILD - -#define TBB_PREVIEW_LOCAL_OBSERVER 1 - -#include "tbb/tbb_config.h" -#include "harness.h" - -#if __TBB_SCHEDULER_OBSERVER -#include "tbb/task_scheduler_observer.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/atomic.h" -#include "tbb/task.h" -#include "tbb/enumerable_thread_specific.h" -#include "../tbb/tls.h" -#include "tbb/tick_count.h" -#include "harness_barrier.h" - -#if _MSC_VER && __TBB_NO_IMPLICIT_LINKAGE -// plays around __TBB_NO_IMPLICIT_LINKAGE. 
__TBB_LIB_NAME should be defined (in makefiles) - #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) -#endif - -const int MaxFlagIndex = sizeof(uintptr_t)*8-1; - -struct ObserverStats { - tbb::atomic<int> m_entries; - tbb::atomic<int> m_exits; - tbb::atomic<int> m_workerEntries; - tbb::atomic<int> m_workerSleeps; - tbb::atomic<int> m_workerExits; - - void Reset () { - m_entries = m_exits = m_workerEntries = m_workerSleeps = m_workerExits = 0; - } - - void operator += ( const ObserverStats& s ) { - m_entries += s.m_entries; - m_exits += s.m_exits; - m_workerEntries += s.m_workerEntries; - m_workerSleeps += s.m_workerSleeps; - m_workerExits += s.m_workerExits; - } -}; - -struct ThreadState { - uintptr_t m_flags; - tbb::task_scheduler_observer *m_dyingObserver; - uintptr_t m_maySleepCalls; - bool m_canSleep; - bool m_isMaster; - ThreadState() { reset(); } - void reset() { - m_maySleepCalls = m_flags = 0; - m_dyingObserver = NULL; - m_canSleep = m_isMaster = false; - } - static ThreadState &get(); -}; - -tbb::enumerable_thread_specific<ThreadState> theLocalState; - tbb::internal::tls<intptr_t> theThreadPrivate; - -ThreadState &ThreadState::get() { - bool exists; - ThreadState& state = theLocalState.local(exists); - // ETS will not detect that a thread was allocated with the same id as a destroyed thread - if( exists && theThreadPrivate.get() == 0 ) state.reset(); - theThreadPrivate = 1; // mark thread constructed - return state; -} - -static ObserverStats theStats; -static tbb::atomic<int> theNumObservers; - -const int P = min( tbb::task_scheduler_init::default_num_threads(), (int)sizeof(int) * CHAR_BIT ); - -enum TestMode { - //! Ensure timely worker destruction in order to guarantee that all exit notifications are fired. - tmSynchronized = 1, - //! Use local observer. - tmLocalObservation = 2, - //! Observer causes autoinitialization of the scheduler - tmAutoinitialization = 4, - //! test may_sleep - tmLeavingControl = 8 -}; - -uintptr_t theTestMode, - thePrevMode = 0; - -class MyObserver : public tbb::task_scheduler_observer, public ObserverStats { - uintptr_t m_flag; - tbb::atomic<int> m_leave_ticket; - tbb::atomic<bool> m_dying; - - /*override*/ - void on_scheduler_entry( bool is_worker ) { - ThreadState& state = ThreadState::get(); - ASSERT( is_worker==!state.m_isMaster, NULL ); - if ( theTestMode & tmLeavingControl ) - ASSERT( m_leave_ticket, NULL ); - if ( thePrevMode & tmSynchronized ) { - ASSERT( !(state.m_flags & m_flag), "Observer repeatedly invoked for the same thread" ); - if ( theTestMode & tmLocalObservation ) - ASSERT( !state.m_flags, "Observer locality breached" ); - } - if ( m_dying && theTestMode & tmLocalObservation ) { - // In case of local observation a worker may enter the arena after - // the wait for lagging on_entry calls in the MyObserver destructor - // succeeds but before its base class tbb::task_scheduler_observer - // destructor removes it from the internal list maintained by the - // task scheduler. This will result in an on_entry notification without a - // subsequent on_exit, as the observer is likely to be destroyed before - // the worker discovers that the arena is empty and leaves it. - // - // To prevent statistics distortion, ignore the notifications for - // observers about to be destroyed.
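Aside: the race described here is also why the destructor further below recommends disabling observation before the base-class destructor runs. Against the current oneTBB observer API, that teardown rule looks roughly like this (a sketch, not part of the patch):

```cpp
#include <tbb/task_scheduler_observer.h>
#include <atomic>

class CountingObserver : public tbb::task_scheduler_observer {
    std::atomic<int> entries{0}, exits{0};
public:
    CountingObserver() { observe(true); }
    // Disable in the most-derived destructor, before the base destructor
    // runs, so no callback can land on a partially destroyed object.
    ~CountingObserver() override { observe(false); }
    void on_scheduler_entry(bool) override { ++entries; }
    void on_scheduler_exit (bool) override { ++exits; }
};
```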
- ASSERT( !state.m_dyingObserver || state.m_dyingObserver != this || thePrevMode & tmSynchronized, NULL ); - state.m_dyingObserver = this; - return; - } - state.m_dyingObserver = NULL; - ++m_entries; - state.m_flags |= m_flag; - if ( is_worker ) - ++m_workerEntries; - } - /*override*/ - void on_scheduler_exit( bool is_worker ) { - ThreadState& state = ThreadState::get(); - ASSERT( is_worker==!state.m_isMaster, NULL ); - if ( m_dying && state.m_dyingObserver ) { - ASSERT( state.m_dyingObserver == this, "Exit without entry (for a dying observer)" ); - state.m_dyingObserver = NULL; - return; - } - ASSERT( state.m_flags & m_flag, "Exit without entry" ); - state.m_flags &= ~m_flag; - ++m_exits; - if ( is_worker ) - ++m_workerExits; - } - /*override*/ - bool may_sleep() { - ThreadState& state = ThreadState::get(); - ++state.m_maySleepCalls; - Harness::Sleep(10); // helps to reproduce the issues - ASSERT( !state.m_isMaster, NULL ); - if( m_dying ) { // check the anti-starvation logic - return keep_awake; // thread should exit despite the return value - } - if( state.m_canSleep ) {// the permission for sleep was previously received - // though, it is an important check for the test, we still do not guarantee this condition - ASSERT_WARNING( !(theTestMode & tmLeavingControl), "may_sleep() called again after leaving permission was granted once, check if repeated"); - return allow_sleep; - } - // note, may_sleep can be called before on_entry() - if( !(theTestMode & tmLeavingControl) || m_leave_ticket.fetch_and_store(-1) > 0 ) { - state.m_canSleep = true; - ++m_workerSleeps; - return allow_sleep; - } - return keep_awake; - } -public: - // the method is called before the work in new arena starts enabling the leaving test mode - // in this mode may_sleep() does not allow a thread to fall asleep unless permitted below - void enable_leaving_test() { - ASSERT(theTestMode & tmLeavingControl, NULL); - m_leave_ticket.store<tbb::relaxed>(-1); - ASSERT(!is_observing(), NULL); - observe(true); - } - - // the work is just done in the only arena, assume workers start entering may_sleep - void test_leaving() { -#if TEST_SLEEP_PERMISSION - if( !(theTestMode & tmLeavingControl) ) - return; // second call to the test TODO: extend the test for the second round as well - REMARK( "Testing may_sleep()\n"); - ASSERT( !m_workerSleeps, "permission for sleep was given before the test starts?"); - ASSERT( (theTestMode & tmSynchronized) && m_workerEntries >= P-1, "test_leaving assumes full subscription of the only arena"); - for ( int j = 0; j < m_workerEntries; j++ ) { - REMARK( "Round %d: entries %d, sleeps %d\n", j, (int)m_workerEntries, (int)m_workerSleeps ); - ASSERT( m_leave_ticket == -1, "unexpected mode, signal was not consumed by a worker?" ); - m_leave_ticket = 1; // dismiss one - double n_seconds = 10; - (Harness::TimedWaitWhileEq(n_seconds))(m_workerSleeps, j); - ASSERT( n_seconds >= 0, "Time out while waiting for a worker to call may_sleep for the first time"); - __TBB_Yield(); - } - // the first time this method is called the work will be executed again, - // the next time time, the scheduler will start shutting down - theTestMode &= ~tmLeavingControl; - m_leave_ticket = m_workerSleeps = 0; // reset for the next round -#endif - } - - MyObserver( uintptr_t flag ) - : tbb::task_scheduler_observer(theTestMode & tmLocalObservation ? 
true : false) - , m_flag(flag) - { - m_leave_ticket.store<tbb::relaxed>(0); - ++theNumObservers; - Reset(); - m_dying = false; - // Local observer causes automatic scheduler initialization - // in the current thread, so here, we must postpone the activation. - if ( !(theTestMode & tmLocalObservation) && !(theTestMode & tmLeavingControl) ) - observe(true); - } - - ~MyObserver () { - m_dying = true; - ASSERT( m_exits <= m_entries, NULL ); - if ( theTestMode & tmSynchronized ) { - tbb::tick_count t0 = tbb::tick_count::now(); - while ( m_exits < m_entries && (tbb::tick_count::now() - t0).seconds() < 5 ) - Harness::Sleep(10); - if ( m_exits < m_entries ) - REPORT( "Warning: Entry/exit count mismatch (%d, %d). Observer is broken or machine is overloaded.\n", (int)m_entries, (int)m_exits ); - } - theStats += *this; - --theNumObservers; - // it is recommended to disable observation before destructor of the base class starts, - // otherwise it can lead to concurrent notification callback on partly destroyed object, - // which in turn can harm (in addition) if derived class has new virtual methods. - // This class has no, and for test purposes we rely on implementation failsafe mechanism. - //observe(false); - } -}; // class MyObserver - -Harness::SpinBarrier theGlobalBarrier; -bool theGlobalBarrierActive = true; - -class FibTask : public tbb::task { - const int N; - uintptr_t m_flag; - MyObserver &m_observer; -public: - FibTask( int n, uintptr_t flags, MyObserver &obs ) : N(n), m_flag(flags), m_observer(obs) {} - - /*override*/ tbb::task* execute() { - ThreadState& s = ThreadState::get(); - ASSERT( !(~s.m_flags & m_flag), NULL ); - if( N < 2 ) - return NULL; - bool globalBarrierActive = false; - if ( s.m_isMaster ) { - if ( theGlobalBarrierActive ) { - // This is the root task. Its N is equal to the number of threads. - // Spawn a task for each worker. - set_ref_count(N); - for ( int i = 1; i < N; ++i ) - spawn( *new( allocate_child() ) FibTask(20, m_flag, m_observer) ); - if ( theTestMode & tmSynchronized ) { - theGlobalBarrier.wait(); - ASSERT( m_observer.m_entries >= N, "Wrong number of on_entry calls after the first barrier" ); - // All the spawned tasks have been stolen by workers. - // Now wait for workers to spawn some more tasks for this thread to steal back. - theGlobalBarrier.wait(); - ASSERT( !theGlobalBarrierActive, "Workers are expected to have reset this flag" ); - } - else - theGlobalBarrierActive = false; - wait_for_all(); - return NULL; - } - } - else { - if ( theGlobalBarrierActive ) { - if ( theTestMode & tmSynchronized ) { - theGlobalBarrier.wait(); - globalBarrierActive = true; - } - theGlobalBarrierActive = false; - } - } - set_ref_count(3); - spawn( *new( allocate_child() ) FibTask(N-1, m_flag, m_observer) ); - spawn( *new( allocate_child() ) FibTask(N-2, m_flag, m_observer) ); - if ( globalBarrierActive ) { - // It's the first task executed by a worker. Release the master thread. - theGlobalBarrier.wait(); - } - wait_for_all(); - return NULL; - } -}; // class FibTask - -Harness::SpinBarrier theMasterBarrier; - -class TestBody { - int m_numThreads; -public: - TestBody( int numThreads ) : m_numThreads(numThreads) {} - - void operator()( int i ) const { - ThreadState &state = ThreadState::get(); - ASSERT( !state.m_isMaster, "should be newly initialized thread"); - state.m_isMaster = true; - uintptr_t f = i <= MaxFlagIndex ? 
1<<i : 0; - MyObserver o(f); - if ( theTestMode & tmSynchronized ) - theMasterBarrier.wait(); - // when mode is local observation but not synchronized and when num threads == default - if ( theTestMode & tmAutoinitialization ) - o.observe(true); // test autoinitialization can be done by observer - // when mode is synchronized observation and when num threads == default - if ( theTestMode & tmLeavingControl ) - o.enable_leaving_test(); - // Observer in enabled state must outlive the scheduler to ensure that - // all exit notifications are called. - tbb::task_scheduler_init init(m_numThreads); - // when local & non-autoinitialized observation mode - if ( theTestMode & tmLocalObservation ) - o.observe(true); - for ( int j = 0; j < 2; ++j ) { - tbb::task &t = *new( tbb::task::allocate_root() ) FibTask(m_numThreads, f, o); - tbb::task::spawn_root_and_wait(t); - if ( theTestMode & tmLeavingControl ) - o.test_leaving(); - thePrevMode = theTestMode; - } - } -}; // class TestBody - -void TestObserver( int M, int T, uintptr_t testMode ) { - theLocalState.clear(); - theStats.Reset(); - theGlobalBarrierActive = true; - theTestMode = testMode; - NativeParallelFor( M, TestBody(T) ); - // When T (number of threads in arena, i.e. master + workers) is less than P - // (hardware concurrency), more than T-1 workers can visit the same arena. This - // is possible in case of imbalance or when other arenas are activated/deactivated - // concurrently). - ASSERT( !theNumObservers, "Unexpected alive observer(s)" ); - REMARK( "Entries %d / %d, exits %d\n", (int)theStats.m_entries, (int)theStats.m_workerEntries, (int)theStats.m_exits ); - if ( testMode & tmSynchronized ) { - if ( testMode & tmLocalObservation ) { - ASSERT( theStats.m_entries >= M * T, "Too few on_entry calls" ); - ASSERT( theStats.m_workerEntries >= M * (T - 1), "Too few worker entries" ); - } - else { - ASSERT( theStats.m_entries >= M * M * T, "Too few on_entry calls" ); - ASSERT( theStats.m_entries <= M * (P + 1), "Too many on_entry calls" ); - ASSERT( theStats.m_workerEntries >= M * M * (T - 1), "Too few worker entries" ); - ASSERT( theStats.m_workerEntries <= M * (P - 1), "Too many worker entries" ); - } - ASSERT( theStats.m_entries == theStats.m_exits, "Entries/exits mismatch" ); - } - else { - ASSERT( theStats.m_entries >= M, "Too few on_entry calls" ); - ASSERT( theStats.m_exits >= M || (testMode & tmAutoinitialization), "Too few on_exit calls" ); - if ( !(testMode & tmLocalObservation) ) { - ASSERT( theStats.m_entries <= M * M * P, "Too many on_entry calls" ); - ASSERT( theStats.m_exits <= M * M * T, "Too many on_exit calls" ); - } - ASSERT( theStats.m_entries >= theStats.m_exits, "More exits than entries" ); - } -} - -int TestMain () { - if ( P < 2 ) - return Harness::Skipped; - theNumObservers = 0; - // Fully- and under-utilized mode - for ( int M = 1; M < P; M <<= 1 ) { - if ( M > P/2 ) { - ASSERT( P & (P-1), "Can get here only in case of non power of two cores" ); - M = P/2; - if ( M==1 || (M & (M-1)) ) - break; // Already tested this configuration - } - int T = P / M; - ASSERT( T > 1, NULL ); - REMARK( "Masters: %d; Arena size: %d\n", M, T ); - theMasterBarrier.initialize(M); - theGlobalBarrier.initialize(M * T); - TestObserver(M, T, 0); - TestObserver(M, T, tmSynchronized | tmLocalObservation ); - TestObserver(M, T, tmSynchronized | ( T==P? tmLeavingControl : 0)); - // keep tmAutoInitialization the last, as it does not release worker threads - TestObserver(M, T, tmLocalObservation | ( T==P? 
tmAutoinitialization : 0) ); - } - // Oversubscribed mode - for ( int i = 0; i < 4; ++i ) { - REMARK( "Masters: %d; Arena size: %d\n", P-1, P ); - TestObserver(P-1, P, 0); - TestObserver(P-1, P, tmLocalObservation); - } - Harness::Sleep(20); - return Harness::Done; -} - -#else /* !__TBB_SCHEDULER_OBSERVER */ - -int TestMain () { - return Harness::Skipped; -} -#endif /* !__TBB_SCHEDULER_OBSERVER */ diff --git a/src/tbb/src/test/test_task_steal_limit.cpp b/src/tbb/src/test/test_task_steal_limit.cpp deleted file mode 100644 index 45d0e83bf..000000000 --- a/src/tbb/src/test/test_task_steal_limit.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/task.h" -#include "harness.h" -#include "tbb/task_scheduler_init.h" - -using tbb::task; - -#if __TBB_ipf - const unsigned StackSize = 1024*1024*6; -#else /* */ - const unsigned StackSize = 1024*1024*3; -#endif - -// GCC and ICC on Linux store TLS data in the stack space. This test makes sure -// that the stealing limiting heuristic used by the task scheduler does not -// switch off stealing when a large amount of TLS data is reserved. 
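// [Editorial sketch; not part of the deleted file.] Roughly speaking, the
// stealing heuristic reasons about how much of a thread's stack remains
// usable, so the test pins the worker stack size explicitly (the second
// task_scheduler_init argument) and then reserves ~2 MB of TLS on top of it:
//
//     tbb::task_scheduler_init init( /*threads=*/2, /*stack size=*/StackSize );
//     // ...then spawn a child task and require that the idle worker still
//     // steals it, despite the large __thread array declared below.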
-#if _MSC_VER -__declspec(thread) -#elif __linux__ || ((__MINGW32__ || __MINGW64__) && __TBB_GCC_VERSION >= 40500) -__thread -#endif - char map2[1024*1024*2]; - -class TestTask : public task { -public: - static volatile int completed; - task* execute() { - completed = 1; - return NULL; - }; -}; - -volatile int TestTask::completed = 0; - -void TestStealingIsEnabled () { - tbb::task_scheduler_init init(2, StackSize); - task &r = *new( task::allocate_root() ) tbb::empty_task; - task &t = *new( r.allocate_child() ) TestTask; - r.set_ref_count(2); - r.spawn(t); - int count = 0; - while ( !TestTask::completed && ++count < 6 ) - Harness::Sleep(1000); - ASSERT( TestTask::completed, "Stealing is disabled or the machine is heavily oversubscribed" ); - r.wait_for_all(); - task::destroy(r); -} - -int TestMain () { -#if !__TBB_THREAD_LOCAL_VARIABLES_PRESENT - REPORT( "Known issue: Test skipped because no compiler support for __thread keyword.\n" ); - return Harness::Skipped; -#endif - if ( tbb::task_scheduler_init::default_num_threads() == 1 ) { - REPORT( "Known issue: Test requires at least 2 hardware threads.\n" ); - return Harness::Skipped; - } - TestStealingIsEnabled(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_tbb_condition_variable.cpp b/src/tbb/src/test/test_tbb_condition_variable.cpp deleted file mode 100644 index 8ad2d1262..000000000 --- a/src/tbb/src/test/test_tbb_condition_variable.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -#if !TBB_IMPLEMENT_CPP0X -#include "harness.h" - -int TestMain() { - return Harness::Skipped; -} -#else -#include "test_condition_variable.h" - -int TestMain() { - REMARK( "testing with tbb condvar\n" ); - DoCondVarTest<tbb::mutex,tbb::recursive_mutex>(); - return Harness::Done; -} -#endif diff --git a/src/tbb/src/test/test_tbb_fork.cpp b/src/tbb/src/test/test_tbb_fork.cpp deleted file mode 100644 index a3838aad0..000000000 --- a/src/tbb/src/test/test_tbb_fork.cpp +++ /dev/null @@ -1,204 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#define TBB_PREVIEW_WAITING_FOR_WORKERS 1 -#include "tbb/task_scheduler_init.h" -#include "tbb/blocked_range.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/parallel_for.h" - -#define HARNESS_DEFAULT_MIN_THREADS (tbb::task_scheduler_init::default_num_threads()) -#define HARNESS_DEFAULT_MAX_THREADS (4*tbb::task_scheduler_init::default_num_threads()) -#if __bg__ -// CNK does not support fork() -#define HARNESS_SKIP_TEST 1 -#endif -#include "harness.h" - -#if _WIN32||_WIN64 -#include "tbb/concurrent_hash_map.h" - -HANDLE getCurrentThreadHandle() -{ - HANDLE hProc = GetCurrentProcess(), hThr = INVALID_HANDLE_VALUE; -#if TBB_USE_ASSERT - BOOL res = -#endif - DuplicateHandle( hProc, GetCurrentThread(), hProc, &hThr, 0, FALSE, DUPLICATE_SAME_ACCESS ); - __TBB_ASSERT( res, "Retrieving current thread handle failed" ); - return hThr; -} - -bool threadTerminated(HANDLE h) -{ - DWORD ret = WaitForSingleObjectEx(h, 0, FALSE); - return WAIT_OBJECT_0 == ret; -} - -struct Data { - HANDLE h; -}; - -typedef tbb::concurrent_hash_map<DWORD, Data> TidTableType; - -static TidTableType tidTable; - -#else - -#if __sun || __SUNPRO_CC -#define _POSIX_PTHREAD_SEMANTICS 1 // to get standard-conforming sigwait(2) -#endif -#include <signal.h> -#include <sys/types.h> -#include <unistd.h> -#include <sys/wait.h> -#include <sched.h> - -#include "tbb/tick_count.h" - -static void SigHandler(int) { } - -#endif // _WIN32||_WIN64 - -class AllocTask { -public: - void operator() (const tbb::blocked_range<int> &r) const { -#if _WIN32||_WIN64 - HANDLE h = getCurrentThreadHandle(); - DWORD tid = GetCurrentThreadId(); - { - TidTableType::accessor acc; - if (tidTable.insert(acc, tid)) { - acc->second.h = h; - } - } -#endif - for (int y = r.begin(); y != r.end(); ++y) { - void *p = tbb::internal::NFS_Allocate(1, 7000, NULL); - tbb::internal::NFS_Free(p); - } - } - AllocTask() {} -}; - -int TestMain() -{ - using namespace Harness; - - bool child = false; -#if _WIN32||_WIN64 - DWORD masterTid = GetCurrentThreadId(); -#else - struct sigaction sa; - sigset_t sig_set; - - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; - sa.sa_handler = SigHandler; - if (sigaction(SIGCHLD, &sa, NULL)) - ASSERT(0, "sigaction failed"); - if (sigaction(SIGALRM, &sa, NULL)) - ASSERT(0, "sigaction failed"); - // block SIGCHLD and SIGALRM, the mask is inherited by 
worker threads - sigemptyset(&sig_set); - sigaddset(&sig_set, SIGCHLD); - sigaddset(&sig_set, SIGALRM); - if (pthread_sigmask(SIG_BLOCK, &sig_set, NULL)) - ASSERT(0, "pthread_sigmask failed"); -#endif - for (int threads=MinThread; threads<=MaxThread; threads+=MinThread) { - for (int i=0; i<20; i++) { - if (!child) - REMARK("\rThreads %d %d ", threads, i); - { - tbb::task_scheduler_init sch(threads, 0, /*wait_workers=*/true); - } - tbb::task_scheduler_init sch(threads, 0, /*wait_workers=*/true); - - tbb::parallel_for(tbb::blocked_range<int>(0, 10000, 1), AllocTask(), - tbb::simple_partitioner()); - sch.terminate(); - -#if _WIN32||_WIN64 - // check that there is no alive threads after terminate() - for (TidTableType::const_iterator it = tidTable.begin(); - it != tidTable.end(); ++it) { - if (masterTid != it->first) { - ASSERT(threadTerminated(it->second.h), NULL); - } - } - tidTable.clear(); -#else // _WIN32||_WIN64 - if (child) - exit(0); - else { - pid_t pid = fork(); - if (!pid) { - i = -1; - child = true; - } else { - int sig; - pid_t w_ret = 0; - // wait for SIGCHLD up to timeout - alarm(30); - if (0 != sigwait(&sig_set, &sig)) - ASSERT(0, "sigwait failed"); - alarm(0); - w_ret = waitpid(pid, NULL, WNOHANG); - ASSERT(w_ret>=0, "waitpid failed"); - if (!w_ret) { - ASSERT(!kill(pid, SIGKILL), NULL); - w_ret = waitpid(pid, NULL, 0); - ASSERT(w_ret!=-1, "waitpid failed"); - - ASSERT(0, "Hang after fork"); - } - // clean pending signals (if any occurs since sigwait) - sigset_t p_mask; - for (;;) { - sigemptyset(&p_mask); - sigpending(&p_mask); - if (sigismember(&p_mask, SIGALRM) - || sigismember(&p_mask, SIGCHLD)) { - if (0 != sigwait(&p_mask, &sig)) - ASSERT(0, "sigwait failed"); - } else - break; - } - } - } -#endif // _WIN32||_WIN64 - } - } - REMARK("\n"); -#if TBB_USE_EXCEPTIONS - REMARK("Testing exceptions\n"); - try { - { - tbb::task_scheduler_init schBlock(2, 0, /*wait_workers=*/true); - tbb::task_scheduler_init schBlock1(2, 0, /*wait_workers=*/true); - } - ASSERT(0, "Nesting of blocking schedulers is impossible."); - } catch (...) {} -#endif - - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_tbb_header.cpp b/src/tbb/src/test/test_tbb_header.cpp deleted file mode 100644 index 41cdd73ba..000000000 --- a/src/tbb/src/test/test_tbb_header.cpp +++ /dev/null @@ -1,261 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. 
This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -/** - This test ensures that tbb.h brings in all the public TBB interface definitions, - and if all the necessary symbols are exported from the library. - - Most of the checks happen at the compilation or link phases. -**/ -#if __TBB_CPF_BUILD -// Add testing of preview features -#define TBB_PREVIEW_AGGREGATOR 1 -#define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1 -#endif - -#include "harness_defs.h" -#if !(__TBB_TEST_SECONDARY && __TBB_CPP11_STD_PLACEHOLDERS_LINKAGE_BROKEN) - -#if _MSC_VER -#pragma warning (disable : 4503) // decorated name length exceeded, name was truncated -#if !TBB_USE_EXCEPTIONS - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif -#endif - -#include "tbb/tbb.h" - -static volatile size_t g_sink; - -#define TestTypeDefinitionPresence( Type ) g_sink = sizeof(tbb::Type); -#define TestTypeDefinitionPresence2(TypeStart, TypeEnd) g_sink = sizeof(tbb::TypeStart,TypeEnd); -#define TestFuncDefinitionPresence(Fn, Args, ReturnType) { ReturnType (*pfn)Args = &tbb::Fn; (void)pfn; } - -struct Body { - void operator() () const {} -}; -struct Body1 { - void operator() ( int ) const {} -}; -struct Body1a { - int operator() ( const tbb::blocked_range<int>&, const int ) const { return 0; } -}; -struct Body1b { - int operator() ( const int, const int ) const { return 0; } -}; -struct Body2 { - Body2 () {} - Body2 ( const Body2&, tbb::split ) {} - void operator() ( const tbb::blocked_range<int>& ) const {} - void join( const Body2& ) {} -}; -struct Body3 { - Body3 () {} - Body3 ( const Body3&, tbb::split ) {} - void operator() ( const tbb::blocked_range2d<int>&, tbb::pre_scan_tag ) const {} - void operator() ( const tbb::blocked_range2d<int>&, tbb::final_scan_tag ) const {} - void reverse_join( Body3& ) {} - void assign( const Body3& ) {} -}; - -#if !__TBB_TEST_SECONDARY - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" - -// Test if all the necessary symbols are exported for the exceptions thrown by TBB. -// Missing exports result either in link error or in runtime assertion failure. -#include <stdexcept> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - - -template <typename E> -void TestExceptionClassExports ( const E& exc, tbb::internal::exception_id eid ) { - // The assertion here serves to shut up warnings about "eid not used". - ASSERT( eid<tbb::internal::eid_max, NULL ); -#if TBB_USE_EXCEPTIONS - for ( int i = 0; i < 2; ++i ) { - try { - if ( i == 0 ) - throw exc; -#if !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN - else - tbb::internal::throw_exception( eid ); -#endif - } - catch ( E& e ) { - ASSERT ( e.what(), "Missing what() string" ); - } - catch ( ... ) { - ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception. 
Likely RTTI related exports are missing" ); - } - } -#else /* TBB_USE_EXCEPTIONS */ - (void)exc; -#endif /* TBB_USE_EXCEPTIONS */ -} - -void TestExceptionClassesExports () { - TestExceptionClassExports( std::bad_alloc(), tbb::internal::eid_bad_alloc ); - TestExceptionClassExports( tbb::bad_last_alloc(), tbb::internal::eid_bad_last_alloc ); - TestExceptionClassExports( std::invalid_argument("test"), tbb::internal::eid_nonpositive_step ); - TestExceptionClassExports( std::out_of_range("test"), tbb::internal::eid_out_of_range ); - TestExceptionClassExports( std::range_error("test"), tbb::internal::eid_segment_range_error ); - TestExceptionClassExports( std::range_error("test"), tbb::internal::eid_index_range_error ); - TestExceptionClassExports( tbb::missing_wait(), tbb::internal::eid_missing_wait ); - TestExceptionClassExports( tbb::invalid_multiple_scheduling(), tbb::internal::eid_invalid_multiple_scheduling ); - TestExceptionClassExports( tbb::improper_lock(), tbb::internal::eid_improper_lock ); - TestExceptionClassExports( std::runtime_error("test"), tbb::internal::eid_possible_deadlock ); - TestExceptionClassExports( std::runtime_error("test"), tbb::internal::eid_operation_not_permitted ); - TestExceptionClassExports( std::runtime_error("test"), tbb::internal::eid_condvar_wait_failed ); - TestExceptionClassExports( std::out_of_range("test"), tbb::internal::eid_invalid_load_factor ); - TestExceptionClassExports( std::invalid_argument("test"), tbb::internal::eid_invalid_swap ); - TestExceptionClassExports( std::length_error("test"), tbb::internal::eid_reservation_length_error ); - TestExceptionClassExports( std::out_of_range("test"), tbb::internal::eid_invalid_key ); - TestExceptionClassExports( tbb::user_abort(), tbb::internal::eid_user_abort ); - TestExceptionClassExports( std::runtime_error("test"), tbb::internal::eid_bad_tagged_msg_cast ); -} -#endif /* !__TBB_TEST_SECONDARY */ - -#if __TBB_CPF_BUILD -// These names are only tested in "preview" configuration -// When a feature becomes fully supported, its names should be moved to the main test -struct Handler { - void operator()( tbb::aggregator_operation* ) {} -}; -static void TestPreviewNames() { - TestTypeDefinitionPresence( aggregator ); - TestTypeDefinitionPresence( aggregator_ext<Handler> ); - TestTypeDefinitionPresence2(concurrent_lru_cache<int, int> ); -} -#endif - -#if __TBB_TEST_SECONDARY -/* This mode is used to produce a secondary object file that is linked with - the main one in order to detect "multiple definition" linker error. 
-*/ -void secondary() -#else -int TestMain () -#endif -{ - #if __TBB_CPP11_STD_PLACEHOLDERS_LINKAGE_BROKEN - REPORT("Known issue: \"multiple definition\" linker error detection test skipped.\n"); - #endif - TestTypeDefinitionPresence( aligned_space<int> ); - TestTypeDefinitionPresence( atomic<int> ); - TestTypeDefinitionPresence( cache_aligned_allocator<int> ); - TestTypeDefinitionPresence( tbb_hash_compare<int> ); - TestTypeDefinitionPresence2(concurrent_hash_map<int, int> ); - TestTypeDefinitionPresence2(concurrent_unordered_map<int, int> ); - TestTypeDefinitionPresence2(concurrent_unordered_multimap<int, int> ); - TestTypeDefinitionPresence( concurrent_unordered_set<int> ); - TestTypeDefinitionPresence( concurrent_unordered_multiset<int> ); - TestTypeDefinitionPresence( concurrent_bounded_queue<int> ); - TestTypeDefinitionPresence( concurrent_queue<int> ); - TestTypeDefinitionPresence( strict_ppl::concurrent_queue<int> ); - TestTypeDefinitionPresence( concurrent_priority_queue<int> ); - TestTypeDefinitionPresence( combinable<int> ); - TestTypeDefinitionPresence( concurrent_vector<int> ); - TestTypeDefinitionPresence( enumerable_thread_specific<int> ); - /* Flow graph names */ - TestTypeDefinitionPresence( flow::graph ); - // TODO: add a check for make_edge and maybe other functions in tbb::flow - TestTypeDefinitionPresence( flow::source_node<int> ); - TestTypeDefinitionPresence2(flow::function_node<int, int> ); - typedef tbb::flow::tuple<int, int> intpair; - TestTypeDefinitionPresence2(flow::multifunction_node<int, intpair> ); - TestTypeDefinitionPresence( flow::split_node<intpair> ); - TestTypeDefinitionPresence( flow::continue_node<int> ); - TestTypeDefinitionPresence( flow::overwrite_node<int> ); - TestTypeDefinitionPresence( flow::write_once_node<int> ); - TestTypeDefinitionPresence( flow::broadcast_node<int> ); - TestTypeDefinitionPresence( flow::buffer_node<int> ); - TestTypeDefinitionPresence( flow::queue_node<int> ); - TestTypeDefinitionPresence( flow::sequencer_node<int> ); - TestTypeDefinitionPresence( flow::priority_queue_node<int> ); - TestTypeDefinitionPresence( flow::limiter_node<int> ); - TestTypeDefinitionPresence2(flow::indexer_node<int, int> ); - using tbb::flow::queueing; - TestTypeDefinitionPresence2( flow::join_node< intpair, queueing > ); - /* Mutex names */ - TestTypeDefinitionPresence( mutex ); - TestTypeDefinitionPresence( null_mutex ); - TestTypeDefinitionPresence( null_rw_mutex ); - TestTypeDefinitionPresence( queuing_mutex ); - TestTypeDefinitionPresence( queuing_rw_mutex ); - TestTypeDefinitionPresence( recursive_mutex ); - TestTypeDefinitionPresence( spin_mutex ); - TestTypeDefinitionPresence( spin_rw_mutex ); - TestTypeDefinitionPresence( speculative_spin_mutex ); - TestTypeDefinitionPresence( speculative_spin_rw_mutex ); - TestTypeDefinitionPresence( critical_section ); - TestTypeDefinitionPresence( reader_writer_lock ); -#if __TBB_TASK_GROUP_CONTEXT - TestTypeDefinitionPresence( tbb_exception ); - TestTypeDefinitionPresence( captured_exception ); - TestTypeDefinitionPresence( movable_exception<int> ); -#if !TBB_USE_CAPTURED_EXCEPTION - TestTypeDefinitionPresence( internal::tbb_exception_ptr ); -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - TestTypeDefinitionPresence( task_group_context ); - TestTypeDefinitionPresence( task_group ); - TestTypeDefinitionPresence( structured_task_group ); - TestTypeDefinitionPresence( task_handle<Body> ); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - TestTypeDefinitionPresence( blocked_range3d<int> ); - TestFuncDefinitionPresence( 
parallel_invoke, (const Body&, const Body&), void ); - TestFuncDefinitionPresence( parallel_do, (int*, int*, const Body1&), void ); - TestFuncDefinitionPresence( parallel_for_each, (int*, int*, const Body1&), void ); - TestFuncDefinitionPresence( parallel_for, (int, int, int, const Body1&), void ); - TestFuncDefinitionPresence( parallel_for, (const tbb::blocked_range<int>&, const Body2&, const tbb::simple_partitioner&), void ); - TestFuncDefinitionPresence( parallel_reduce, (const tbb::blocked_range<int>&, const int&, const Body1a&, const Body1b&, const tbb::auto_partitioner&), int ); - TestFuncDefinitionPresence( parallel_reduce, (const tbb::blocked_range<int>&, Body2&, tbb::affinity_partitioner&), void ); - TestFuncDefinitionPresence( parallel_deterministic_reduce, (const tbb::blocked_range<int>&, const int&, const Body1a&, const Body1b&), int ); - TestFuncDefinitionPresence( parallel_deterministic_reduce, (const tbb::blocked_range<int>&, Body2&), void ); - TestFuncDefinitionPresence( parallel_scan, (const tbb::blocked_range2d<int>&, Body3&, const tbb::auto_partitioner&), void ); - TestFuncDefinitionPresence( parallel_sort, (int*, int*), void ); - TestTypeDefinitionPresence( pipeline ); - TestFuncDefinitionPresence( parallel_pipeline, (size_t, const tbb::filter_t<void,void>&), void ); - TestTypeDefinitionPresence( task ); - TestTypeDefinitionPresence( empty_task ); - TestTypeDefinitionPresence( task_list ); - TestTypeDefinitionPresence( task_arena ); - TestTypeDefinitionPresence( task_scheduler_init ); - TestTypeDefinitionPresence( task_scheduler_observer ); - TestTypeDefinitionPresence( tbb_thread ); - TestTypeDefinitionPresence( tbb_allocator<int> ); - TestTypeDefinitionPresence( zero_allocator<int> ); - TestTypeDefinitionPresence( tick_count ); - -#if __TBB_CPF_BUILD - TestPreviewNames(); -#endif -#if !__TBB_TEST_SECONDARY - TestExceptionClassesExports(); - return Harness::Done; -#endif -} -#endif //!(__TBB_TEST_SECONDARY && __TBB_CPP11_STD_PLACEHOLDERS_LINKING_BROKEN) diff --git a/src/tbb/src/test/test_tbb_thread.cpp b/src/tbb/src/test/test_tbb_thread.cpp deleted file mode 100644 index 719728e9f..000000000 --- a/src/tbb/src/test/test_tbb_thread.cpp +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_thread.h" -#define THREAD tbb::tbb_thread -#define THIS_THREAD tbb::this_tbb_thread -#define THIS_THREAD_SLEEP THIS_THREAD::sleep -#include "test_thread.h" -#include "harness.h" - -/* we want to test tbb::tbb_thread */ -int TestMain () { - CheckSignatures(); - RunTests(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_tbb_version.cpp b/src/tbb/src/test/test_tbb_version.cpp deleted file mode 100644 index fdb685426..000000000 --- a/src/tbb/src/test/test_tbb_version.cpp +++ /dev/null @@ -1,294 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. 
-*/ - -#include "tbb/tbb_stddef.h" - -#if __TBB_WIN8UI_SUPPORT -// TODO: figure out how the test can be enabled for win8ui -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -int TestMain() { - return Harness::Skipped; -} -#else - -#include <stdio.h> -#include <stdlib.h> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <vector> -#include <string> -#include <utility> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "tbb/task_scheduler_init.h" - -#define HARNESS_CUSTOM_MAIN 1 -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#define HARNESS_NO_MAIN_ARGS 0 -#include "harness.h" - -#if defined (_WIN32) || defined (_WIN64) -#define TEST_SYSTEM_COMMAND "test_tbb_version.exe @" -#define putenv _putenv -#else -#define TEST_SYSTEM_COMMAND "./test_tbb_version.exe @" -#endif - -enum string_required { - required, - optional, - optional_multiple - }; - -typedef std::pair <std::string, string_required> string_pair; - -void initialize_strings_vector(std::vector <string_pair>* vector); - -const char stderr_stream[] = "version_test.err"; -const char stdout_stream[] = "version_test.out"; - -HARNESS_EXPORT -int main(int argc, char *argv[] ) { - const size_t psBuffer_len = 2048; - char psBuffer[psBuffer_len]; -/* We first introduced runtime version identification in 3014 */ -#if TBB_INTERFACE_VERSION>=3014 - // For now, just test that run-time TBB version matches the compile-time version, - // since otherwise the subsequent test of "TBB: INTERFACE VERSION" string will fail anyway. - // We need something more clever in future. - if ( tbb::TBB_runtime_interface_version()!=TBB_INTERFACE_VERSION ){ - snprintf( psBuffer, psBuffer_len, - "%s %s %d %s %d.", - "Running with the library of different version than the test was compiled against.", - "Expected", - TBB_INTERFACE_VERSION, - "- got", - tbb::TBB_runtime_interface_version() - ); - ASSERT( tbb::TBB_runtime_interface_version()==TBB_INTERFACE_VERSION, psBuffer ); - } -#endif -#if __TBB_MIC_OFFLOAD - // Skip the test in offload mode. - // Run the test in 'true' native mode (because 'system()' works in 'true' native mode). - (argc, argv); - REPORT("skip\n"); -#elif __TBB_MPI_INTEROP || __bg__ - (void) argc; // unused - (void) argv; // unused - REPORT("skip\n"); -#else - __TBB_TRY { - FILE *stream_out; - FILE *stream_err; - - if(argc>1 && argv[1][0] == '@' ) { - stream_err = freopen( stderr_stream, "w", stderr ); - if( stream_err == NULL ){ - REPORT( "Internal test error (freopen)\n" ); - exit( 1 ); - } - stream_out = freopen( stdout_stream, "w", stdout ); - if( stream_out == NULL ){ - REPORT( "Internal test error (freopen)\n" ); - exit( 1 ); - } - { - tbb::task_scheduler_init init(1); - } - fclose( stream_out ); - fclose( stream_err ); - exit(0); - } - //1st step check that output is empty if TBB_VERSION is not defined. 
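// [Editorial sketch; not part of the deleted file.] The banner this test
// inspects is printed to stderr by the TBB runtime itself when a scheduler
// is initialized while the TBB_VERSION environment variable is set; the
// minimal reproduction, using only calls that appear in this test, is:
//
//     putenv( const_cast<char*>("TBB_VERSION=1") );
//     tbb::task_scheduler_init init(1);   // emits the "TBB: ..." lines on stderr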
- if ( getenv("TBB_VERSION") ){ - REPORT( "TBB_VERSION defined, skipping step 1 (empty output check)\n" ); - }else{ - if( ( system(TEST_SYSTEM_COMMAND) ) != 0 ){ - REPORT( "Error (step 1): Internal test error\n" ); - exit( 1 ); - } - //Checking output streams - they should be empty - stream_err = fopen( stderr_stream, "r" ); - if( stream_err == NULL ){ - REPORT( "Error (step 1):Internal test error (stderr open)\n" ); - exit( 1 ); - } - while( !feof( stream_err ) ) { - if( fgets( psBuffer, psBuffer_len, stream_err ) != NULL ){ - REPORT( "Error (step 1): stderr should be empty\n" ); - exit( 1 ); - } - } - fclose( stream_err ); - stream_out = fopen( stdout_stream, "r" ); - if( stream_out == NULL ){ - REPORT( "Error (step 1):Internal test error (stdout open)\n" ); - exit( 1 ); - } - while( !feof( stream_out ) ) { - if( fgets( psBuffer, psBuffer_len, stream_out ) != NULL ){ - REPORT( "Error (step 1): stdout should be empty\n" ); - exit( 1 ); - } - } - fclose( stream_out ); - } - - //Setting TBB_VERSION in case it is not set - if ( !getenv("TBB_VERSION") ){ - putenv(const_cast<char*>("TBB_VERSION=1")); - } - - if( ( system(TEST_SYSTEM_COMMAND) ) != 0 ){ - REPORT( "Error (step 2):Internal test error\n" ); - exit( 1 ); - } - //Checking pipe - it should contain version data - std::vector <string_pair> strings_vector; - std::vector <string_pair>::iterator strings_iterator; - - initialize_strings_vector( &strings_vector ); - strings_iterator = strings_vector.begin(); - - stream_out = fopen( stdout_stream, "r" ); - if( stream_out == NULL ){ - REPORT( "Error (step 2):Internal test error (stdout open)\n" ); - exit( 1 ); - } - while( !feof( stream_out ) ) { - if( fgets( psBuffer, psBuffer_len, stream_out ) != NULL ){ - REPORT( "Error (step 2): stdout should be empty\n" ); - exit( 1 ); - } - } - fclose( stream_out ); - - stream_err = fopen( stderr_stream, "r" ); - if( stream_err == NULL ){ - REPORT( "Error (step 1):Internal test error (stderr open)\n" ); - exit( 1 ); - } - - while( !feof( stream_err ) ) { - if( fgets( psBuffer, psBuffer_len, stream_err ) != NULL ){ - if (strstr( psBuffer, "TBBmalloc: " )) { - // TBB allocator might or might not be here, ignore it - continue; - } - bool match_found = false; - do{ - if ( strings_iterator == strings_vector.end() ){ - REPORT( "Error: version string dictionary ended prematurely.\n" ); - REPORT( "No match for: \t%s", psBuffer ); - exit( 1 ); - } - if ( strstr( psBuffer, strings_iterator->first.c_str() ) == NULL ){ // mismatch - if( strings_iterator->second == required ){ - REPORT( "Error: version strings do not match.\n" ); - REPORT( "Expected \"%s\" not found in:\n\t%s", strings_iterator->first.c_str(), psBuffer ); - exit( 1 ); - } - ++strings_iterator; - }else{ - match_found = true; - if( strings_iterator->second != optional_multiple ) - ++strings_iterator; - } - }while( !match_found ); - } - } - fclose( stream_err ); - } __TBB_CATCH(...) 
{ - ASSERT( 0,"unexpected exception" ); - } - REPORT("done\n"); -#endif //__TBB_MIC_OFFLOAD, __TBB_MPI_INTEROP etc - return 0; -} - - -// Fill dictionary with version strings for platforms -void initialize_strings_vector(std::vector <string_pair>* vector) -{ - vector->push_back(string_pair("TBB: VERSION\t\t4.3", required)); // check TBB_VERSION - vector->push_back(string_pair("TBB: INTERFACE VERSION\t8000", required)); // check TBB_INTERFACE_VERSION - vector->push_back(string_pair("TBB: BUILD_DATE", required)); - vector->push_back(string_pair("TBB: BUILD_HOST", required)); - vector->push_back(string_pair("TBB: BUILD_OS", required)); -#if _WIN32||_WIN64 -#if !__MINGW32__ - vector->push_back(string_pair("TBB: BUILD_CL", required)); -#endif - vector->push_back(string_pair("TBB: BUILD_COMPILER", required)); -#elif __APPLE__ - vector->push_back(string_pair("TBB: BUILD_KERNEL", required)); - vector->push_back(string_pair("TBB: BUILD_GCC", required)); - vector->push_back(string_pair("TBB: BUILD_COMPILER", optional)); //if( getenv("COMPILER_VERSION") ) -#elif __sun - vector->push_back(string_pair("TBB: BUILD_KERNEL", required)); - vector->push_back(string_pair("TBB: BUILD_SUNCC", required)); - vector->push_back(string_pair("TBB: BUILD_COMPILER", optional)); //if( getenv("COMPILER_VERSION") ) -#else // We use version_info_linux.sh for unsupported OSes -#if __ANDROID__ - vector->push_back(string_pair("TBB: BUILD_TARGET_OS", required)); - vector->push_back(string_pair("TBB: BUILD_TARGET_KERNEL", required)); -#else - vector->push_back(string_pair("TBB: BUILD_KERNEL", required)); -#endif // !__ANDROID__ - vector->push_back(string_pair("TBB: BUILD_GCC", required)); - vector->push_back(string_pair("TBB: BUILD_COMPILER", optional)); //if( getenv("COMPILER_VERSION") ) -#if __ANDROID__ - vector->push_back(string_pair("TBB: BUILD_NDK", optional)); -#else - vector->push_back(string_pair("TBB: BUILD_LIBC", required)); -#endif // !__ANDROID__ - vector->push_back(string_pair("TBB: BUILD_LD", required)); -#endif // OS - vector->push_back(string_pair("TBB: BUILD_TARGET", required)); - vector->push_back(string_pair("TBB: BUILD_COMMAND", required)); - vector->push_back(string_pair("TBB: TBB_USE_DEBUG", required)); - vector->push_back(string_pair("TBB: TBB_USE_ASSERT", required)); -#if __TBB_CPF_BUILD - vector->push_back(string_pair("TBB: TBB_PREVIEW_BINARY", required)); -#endif - vector->push_back(string_pair("TBB: DO_ITT_NOTIFY", required)); - vector->push_back(string_pair("TBB: ITT", optional)); //#ifdef DO_ITT_NOTIFY - vector->push_back(string_pair("TBB: ALLOCATOR", required)); -#if _WIN32||_WIN64 - vector->push_back(string_pair("TBB: Processor groups", required)); - vector->push_back(string_pair("TBB: ----- Group", optional_multiple)); -#endif - vector->push_back(string_pair("TBB: RML", optional)); - vector->push_back(string_pair("TBB: Intel(R) RML library built:", optional)); - vector->push_back(string_pair("TBB: Intel(R) RML library version:", optional)); - vector->push_back(string_pair("TBB: Tools support", required)); - return; -} -#endif /* __TBB_WIN8UI_SUPPORT */ diff --git a/src/tbb/src/test/test_thread.h b/src/tbb/src/test/test_thread.h deleted file mode 100644 index 36cf8cd81..000000000 --- a/src/tbb/src/test/test_thread.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/atomic.h" -#if __TBB_CPP11_RVALUE_REF_PRESENT -#include <utility> // std::move -#endif - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness_report.h" -#include "harness_assert.h" - -static const int THRDS = 3; -static const int THRDS_DETACH = 2; -static tbb::atomic<int> sum; -static tbb::atomic<int> BaseCount; -static THREAD::id real_ids[THRDS+THRDS_DETACH]; - -class Base { - mutable int copy_throws; - friend void RunTests(); - friend void CheckExceptionSafety(); - void operator=( const Base& ); // Deny access -protected: - Base() : copy_throws(100) {++BaseCount;} - Base( const Base& c ) : copy_throws(c.copy_throws) { - if( --copy_throws<=0 ) - __TBB_THROW(0); - ++BaseCount; - } - ~Base() {--BaseCount;} -}; - -template<int N> -class Data: Base { - Data(); // Deny access - explicit Data(int v) : value(v) {} - - friend void RunTests(); - friend void CheckExceptionSafety(); -public: - int value; -}; - - -#include "harness_barrier.h" - -class ThreadFunc: Base { - ThreadFunc() {} - - static Harness::SpinBarrier init_barrier; - - friend void RunTests(); -public: - void operator()(){ - real_ids[0] = THIS_THREAD::get_id(); - init_barrier.wait(); - - sum.fetch_and_add(1); - } - void operator()(int num){ - real_ids[num] = THIS_THREAD::get_id(); - init_barrier.wait(); - - sum.fetch_and_add(num); - } - void operator()(int num, Data<0> dx) { - real_ids[num] = THIS_THREAD::get_id(); - - const double WAIT = .1; -#if _WIN32 || _WIN64 - const double LONG_TOLERANCE = 0.120; // maximal scheduling quantum for Windows Server -#else - const double LONG_TOLERANCE = 0.200; // reasonable upper bound -#endif - tbb::tick_count::interval_t test_interval(WAIT); - tbb::tick_count t0 = tbb::tick_count::now(); - THIS_THREAD_SLEEP ( test_interval ); - tbb::tick_count t1 = tbb::tick_count::now(); - double delta = ((t1-t0)-test_interval).seconds(); - if(delta < 0.0) - REPORT("ERROR: Sleep interval too short (%g < %g)\n", - (t1-t0).seconds(), test_interval.seconds() ); - if(delta > LONG_TOLERANCE) - REPORT("Warning: Sleep interval too long (%g outside long tolerance(%g))\n", - (t1-t0).seconds(), test_interval.seconds() + LONG_TOLERANCE); - init_barrier.wait(); - - sum.fetch_and_add(num); - sum.fetch_and_add(dx.value); - } - void operator()(Data<0> d) { - THIS_THREAD_SLEEP ( tbb::tick_count::interval_t(d.value*1.) 
); - } -}; - -Harness::SpinBarrier ThreadFunc::init_barrier(THRDS); - -void CheckRelations( const THREAD::id ids[], int n, bool duplicates_allowed ) { - for( int i=0; i<n; ++i ) { - const THREAD::id x = ids[i]; - for( int j=0; j<n; ++j ) { - const THREAD::id y = ids[j]; - ASSERT( (x==y)==!(x!=y), NULL ); - ASSERT( (x<y)==!(x>=y), NULL ); - ASSERT( (x>y)==!(x<=y), NULL ); - ASSERT( (x<y)+(x==y)+(x>y)==1, NULL ); - ASSERT( x!=y || i==j || duplicates_allowed, NULL ); - for( int k=0; k<n; ++k ) { - const THREAD::id z = ids[j]; - ASSERT( !(x<y && y<z) || x<z, "< is not transitive" ); - } - } - } -} - -class AnotherThreadFunc: Base { -public: - void operator()() {} - void operator()(const Data<1>&) {} - void operator()(const Data<1>&, const Data<2>&) {} - friend void CheckExceptionSafety(); -}; - -#if TBB_USE_EXCEPTIONS -void CheckExceptionSafety() { - int original_count = BaseCount; - // d loops over number of copies before throw occurs - for( int d=1; d<=3; ++d ) { - // Try all combinations of throw/nothrow for f, x, and y's copy constructor. - for( int i=0; i<8; ++i ) { - { - const AnotherThreadFunc f = AnotherThreadFunc(); - if( i&1 ) f.copy_throws = d; - const Data<1> x(0); - if( i&2 ) x.copy_throws = d; - const Data<2> y(0); - if( i&4 ) y.copy_throws = d; - bool exception_caught = false; - for( int j=0; j<3; ++j ) { - try { - switch(j) { - case 0: {THREAD t(f); t.join();} break; - case 1: {THREAD t(f,x); t.join();} break; - case 2: {THREAD t(f,x,y); t.join();} break; - } - } catch(...) { - exception_caught = true; - } - ASSERT( !exception_caught||(i&((1<<(j+1))-1))!=0, NULL ); - } - } - ASSERT( BaseCount==original_count, "object leak detected" ); - } - } -} -#endif /* TBB_USE_EXCEPTIONS */ - -#include <cstdio> - -#if __TBB_CPP11_RVALUE_REF_PRESENT - -tbb::tbb_thread returnThread() { - return tbb::tbb_thread(); -} -#endif - -void RunTests() { - - ThreadFunc t; - Data<0> d100(100), d1(1), d0(0); - const THREAD::id id_zero; - THREAD::id id0, uniq_ids[THRDS]; - - THREAD thrs[THRDS]; - THREAD thr; - THREAD thr0(t); - THREAD thr1(t, 2); - THREAD thr2(t, 1, d100); - - ASSERT( thr0.get_id() != id_zero, NULL ); - id0 = thr0.get_id(); - tbb::move(thrs[0], thr0); - ASSERT( thr0.get_id() == id_zero, NULL ); - ASSERT( thrs[0].get_id() == id0, NULL ); - - THREAD::native_handle_type h1 = thr1.native_handle(); - THREAD::native_handle_type h2 = thr2.native_handle(); - THREAD::id id1 = thr1.get_id(); - THREAD::id id2 = thr2.get_id(); - tbb::swap(thr1, thr2); - ASSERT( thr1.native_handle() == h2, NULL ); - ASSERT( thr2.native_handle() == h1, NULL ); - ASSERT( thr1.get_id() == id2, NULL ); - ASSERT( thr2.get_id() == id1, NULL ); -#if __TBB_CPP11_RVALUE_REF_PRESENT - { - THREAD tmp_thr(std::move(thr1)); - ASSERT( tmp_thr.native_handle() == h2 && tmp_thr.get_id() == id2, NULL ); - thr1 = std::move(tmp_thr); - ASSERT( thr1.native_handle() == h2 && thr1.get_id() == id2, NULL ); - } -#endif - - thr1.swap(thr2); - ASSERT( thr1.native_handle() == h1, NULL ); - ASSERT( thr2.native_handle() == h2, NULL ); - ASSERT( thr1.get_id() == id1, NULL ); - ASSERT( thr2.get_id() == id2, NULL ); - thr1.swap(thr2); - - tbb::move(thrs[1], thr1); - ASSERT( thr1.get_id() == id_zero, NULL ); - -#if __TBB_CPP11_RVALUE_REF_PRESENT - thrs[2] = returnThread(); - ASSERT( thrs[2].get_id() == id_zero, NULL ); -#endif - tbb::move(thrs[2], thr2); - ASSERT( thr2.get_id() == id_zero, NULL ); - - for (int i=0; i<THRDS; i++) - uniq_ids[i] = thrs[i].get_id(); - - ASSERT( thrs[2].joinable(), NULL ); - - for (int i=0; i<THRDS; i++) - thrs[i].join(); 
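// [Editorial sketch; not part of the deleted file.] The transfers exercised
// above reduce to the following ownership contract, with tbb::move being the
// pre-C++11 spelling of a move-assignment from source to destination:
//
//     tbb::tbb_thread a(t), b;                                // t: the ThreadFunc above
//     tbb::move( b, a );                                      // b now owns the OS thread
//     ASSERT( a.get_id() == tbb::tbb_thread::id(), NULL );    // a is left empty
//     b.join();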
- -#if !__TBB_WIN8UI_SUPPORT - // TODO: to find out the way to find thread_id without GetThreadId and other - // desktop functions. - // Now tbb_thread does have its own thread_id that stores std::thread object - // Test will fail in case it is run in desktop mode against New Windows*8 UI library - for (int i=0; i<THRDS; i++) - ASSERT( real_ids[i] == uniq_ids[i], NULL ); -#endif - - int current_sum = sum; - ASSERT( current_sum == 104, NULL ); - ASSERT( ! thrs[2].joinable(), NULL ); - ASSERT( BaseCount==4, "object leak detected" ); - -#if TBB_USE_EXCEPTIONS - CheckExceptionSafety(); -#endif - - // Note: all tests involving BaseCount should be put before the tests - // involing detached threads, because there is no way of knowing when - // a detached thread destroys its arguments. - - THREAD thr_detach_0(t, d0); - real_ids[THRDS] = thr_detach_0.get_id(); - thr_detach_0.detach(); - ASSERT( thr_detach_0.get_id() == id_zero, NULL ); - - THREAD thr_detach_1(t, d1); - real_ids[THRDS+1] = thr_detach_1.get_id(); - thr_detach_1.detach(); - ASSERT( thr_detach_1.get_id() == id_zero, NULL ); - - CheckRelations(real_ids, THRDS+THRDS_DETACH, true); - - CheckRelations(uniq_ids, THRDS, false); - - for (int i=0; i<2; i++) { - AnotherThreadFunc empty_func; - THREAD thr_to(empty_func), thr_from(empty_func); - THREAD::id from_id = thr_from.get_id(); - if (i) thr_to.join(); -#if __TBB_CPP11_RVALUE_REF_PRESENT - thr_to = std::move(thr_from); -#else - thr_to = thr_from; -#endif - ASSERT( thr_from.get_id() == THREAD::id(), NULL ); - ASSERT( thr_to.get_id() == from_id, NULL ); - } - - ASSERT( THREAD::hardware_concurrency() > 0, NULL); -} - -typedef bool (*id_relation)( THREAD::id, THREAD::id ); - -id_relation CheckSignatures() { - id_relation r[6] = {&tbb::operator==, - &tbb::operator!=, - &tbb::operator<, - &tbb::operator>, - &tbb::operator<=, - &tbb::operator>=}; - return r[1]; -} diff --git a/src/tbb/src/test/test_tick_count.cpp b/src/tbb/src/test/test_tick_count.cpp deleted file mode 100644 index 596bd08be..000000000 --- a/src/tbb/src/test/test_tick_count.cpp +++ /dev/null @@ -1,171 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "tbb/tick_count.h" -#include "harness.h" -#include <cstdio> - -//! 
Assert that two times in seconds are very close. -void AssertNear( double x, double y ) { - ASSERT( -1.0E-10 <= x-y && x-y <=1.0E-10, NULL ); -} - -//! Test arithmetic operators on tick_count::interval_t -void TestArithmetic( const tbb::tick_count& t0, const tbb::tick_count& t1, const tbb::tick_count& t2 ) { - tbb::tick_count::interval_t i= t1-t0; - tbb::tick_count::interval_t j = t2-t1; - tbb::tick_count::interval_t k = t2-t0; - AssertSameType( tbb::tick_count::interval_t(), i-j ); - AssertSameType( tbb::tick_count::interval_t(), i+j ); - ASSERT( i.seconds()>1E-9, NULL ); - ASSERT( j.seconds()>1E-9, NULL ); - ASSERT( k.seconds()>2E-9, NULL ); - AssertNear( (i+j).seconds(), k.seconds() ); - AssertNear( (k-j).seconds(), i.seconds() ); - AssertNear( ((k-j)+(j-i)).seconds(), k.seconds()-i.seconds() ); - tbb::tick_count::interval_t sum; - sum += i; - sum += j; - AssertNear( sum.seconds(), k.seconds() ); - sum -= i; - AssertNear( sum.seconds(), j.seconds() ); - sum -= j; - AssertNear( sum.seconds(), 0.0 ); -} - -//------------------------------------------------------------------------ -// Test for overhead in calls to tick_count -//------------------------------------------------------------------------ - -//! Wait for given duration. -/** The duration parameter is in units of seconds. */ -static void WaitForDuration( double duration ) { - tbb::tick_count start = tbb::tick_count::now(); - while( (tbb::tick_count::now()-start).seconds() < duration ) - continue; -} - -//! Test that average timer overhead is within acceptable limit. -/** The 'tolerance' value inside the test specifies the limit. */ -void TestSimpleDelay( int ntrial, double duration, double tolerance ) { - double total_worktime = 0; - // Iteration -1 warms up the code cache. - for( int trial=-1; trial<ntrial; ++trial ) { - tbb::tick_count t0 = tbb::tick_count::now(); - if( duration ) WaitForDuration(duration); - tbb::tick_count t1 = tbb::tick_count::now(); - if( trial>=0 ) { - total_worktime += (t1-t0).seconds(); - } - } - // Compute average worktime and average delta - double worktime = total_worktime/ntrial; - double delta = worktime-duration; - REMARK("worktime=%g delta=%g tolerance=%g\n", worktime, delta, tolerance); - - // Check that delta is acceptable - if( delta<0 ) - REPORT("ERROR: delta=%g < 0\n",delta); - if( delta>tolerance ) - REPORT("%s: delta=%g > %g=tolerance where duration=%g\n",delta>3*tolerance?"ERROR":"Warning",delta,tolerance,duration); -} - -//------------------------------------------------------------------------ -// Test for subtracting calls to tick_count from different threads. -//------------------------------------------------------------------------ - -#include "tbb/atomic.h" -static tbb::atomic<int> Counter; -static volatile bool Flag; -static tbb::tick_count *tick_count_array; - -struct TickCountDifferenceBody { - void operator()( int id ) const { - if( --Counter==0 ) Flag = true; - while( !Flag ) continue; - tick_count_array[id] = tbb::tick_count::now(); - } -}; - -//! Test that two tick_count values recorded on different threads can be meaningfully subtracted. 
-void TestTickCountDifference( int n ) { - double tolerance = 3E-4; - tick_count_array = new tbb::tick_count[n]; - for( int trial=0; trial<10; ++trial ) { - Counter = n; - Flag = false; - NativeParallelFor( n, TickCountDifferenceBody() ); - ASSERT( Counter==0, NULL ); - for( int i=0; i<n; ++i ) - for( int j=0; j<i; ++j ) { - double diff = (tick_count_array[i]-tick_count_array[j]).seconds(); - if( diff<0 ) diff = -diff; - if( diff>tolerance ) { - REPORT("%s: cross-thread tick_count difference = %g > %g = tolerance\n", - diff>3*tolerance?"ERROR":"Warning",diff,tolerance); - } - } - } - delete[] tick_count_array; -} - -void TestResolution() { - static double target_value = 0.314159265358979323846264338327950288419; - static double step_value = 0.00027182818284590452353602874713526624977572; - static int range_value = 100; - double avg_diff = 0.0; - double max_diff = 0.0; - for( int i = -range_value; i <= range_value; ++i ) { - double my_time = target_value + step_value * i; - tbb::tick_count::interval_t t0(my_time); - double interval_time = t0.seconds(); - avg_diff += (my_time - interval_time); - if ( max_diff < my_time-interval_time) max_diff = my_time-interval_time; - // time always truncates - ASSERT(interval_time >= 0 && my_time - interval_time < tbb::tick_count::resolution(), "tick_count resolution out of range"); - } - avg_diff = (avg_diff/(2*range_value+1))/tbb::tick_count::resolution(); - max_diff /= tbb::tick_count::resolution(); - REMARK("avg_diff = %g ticks, max_diff = %g ticks\n", avg_diff, max_diff); -} - -#include <tbb/compat/thread> - -int TestMain () { - tbb::tick_count t0 = tbb::tick_count::now(); - TestSimpleDelay(/*ntrial=*/1000000,/*duration=*/0, /*tolerance=*/2E-6); - tbb::tick_count t1 = tbb::tick_count::now(); - TestSimpleDelay(/*ntrial=*/10, /*duration=*/0.125,/*tolerance=*/5E-6); - tbb::tick_count t2 = tbb::tick_count::now(); - TestArithmetic(t0,t1,t2); - - TestResolution(); - - int num_threads = tbb::tbb_thread::hardware_concurrency(); - ASSERT( num_threads > 0, "tbb::thread::hardware_concurrency() has returned an incorrect value" ); - if ( num_threads > 1 ) { - REMARK( "num_threads = %d\n", num_threads ); - TestTickCountDifference( num_threads ); - } else { - REPORT( "Warning: concurrency is too low for TestTickCountDifference ( num_threads = %d )\n", num_threads ); - } - - return Harness::Done; -} diff --git a/src/tbb/src/test/test_tuple.cpp b/src/tbb/src/test/test_tuple.cpp deleted file mode 100644 index ac1c54166..000000000 --- a/src/tbb/src/test/test_tuple.cpp +++ /dev/null @@ -1,200 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. 
Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// tbb::flow::tuple (implementation used in tbb::flow) -// if <tuple> is available on the compiler/platform, that version should be the -// one tested. - -#include "harness.h" -// this test should match that in graph.h, so we test whatever tuple is -// being used by the join_node. -#if __TBB_CPP11_TUPLE_PRESENT -#define __TESTING_STD_TUPLE__ 1 -#include <tuple> -using namespace std; -#else -#define __TESTING_STD_TUPLE__ 0 -#include "tbb/compat/tuple" -using namespace tbb::flow; -#endif /*!__TBB_CPP11_TUPLE_PRESENT*/ -#include <string> -#include <iostream> - -class non_trivial { -public: - non_trivial() {} - ~non_trivial() {} - non_trivial(const non_trivial& other) : my_int(other.my_int), my_float(other.my_float) { } - int get_int() const { return my_int; } - float get_float() const { return my_float; } - void set_int(int newval) { my_int = newval; } - void set_float(float newval) { my_float = newval; } -private: - int my_int; - float my_float; -}; - -template<typename T1, typename T2, typename T3, typename U1, typename U2, typename U3> -void RunOneComparisonTest() { - typedef tuple<T1,T2,T3> t_tuple; - typedef tuple<U1,U2,U3> u_tuple; - - ASSERT(t_tuple((T1)1,(T2)1,(T3)1) == u_tuple((U1)1,(U2)1,(U3)1),NULL); - ASSERT(t_tuple((T1)1,(T2)0,(T3)1) < u_tuple((U1)1,(U2)1,(U3)1),NULL); - ASSERT(t_tuple((T1)1,(T2)1,(T3)1) > u_tuple((U1)1,(U2)1,(U3)0),NULL); - ASSERT(t_tuple((T1)1,(T2)0,(T3)1) != u_tuple((U1)1,(U2)1,(U3)1),NULL); - ASSERT(t_tuple((T1)1,(T2)0,(T3)1) <= u_tuple((U1)1,(U2)1,(U3)0),NULL); - ASSERT(t_tuple((T1)1,(T2)0,(T3)0) <= u_tuple((U1)1,(U2)0,(U3)0),NULL); - ASSERT(t_tuple((T1)1,(T2)1,(T3)1) >= u_tuple((U1)1,(U2)0,(U3)1),NULL); - ASSERT(t_tuple((T1)0,(T2)1,(T3)1) >= u_tuple((U1)0,(U2)1,(U3)1),NULL); - - ASSERT(!(t_tuple((T1)2,(T2)1,(T3)1) == u_tuple((U1)1,(U2)1,(U3)1)),NULL); - ASSERT(!(t_tuple((T1)1,(T2)2,(T3)1) == u_tuple((U1)1,(U2)1,(U3)1)),NULL); - ASSERT(!(t_tuple((T1)1,(T2)1,(T3)2) == u_tuple((U1)1,(U2)1,(U3)1)),NULL); - - ASSERT(!(t_tuple((T1)1,(T2)1,(T3)1) < u_tuple((U1)1,(U2)1,(U3)1)),NULL); - ASSERT(!(t_tuple((T1)1,(T2)1,(T3)1) > u_tuple((U1)1,(U2)1,(U3)1)),NULL); - ASSERT(!(t_tuple((T1)1,(T2)1,(T3)1) != u_tuple((U1)1,(U2)1,(U3)1)),NULL); - - ASSERT(t_tuple((T1)1,(T2)1,(T3)1) <= u_tuple((U1)1,(U2)1,(U3)1),NULL); - ASSERT(t_tuple((T1)1,(T2)1,(T3)1) >= u_tuple((U1)1,(U2)1,(U3)1),NULL); - -} - -void RunTests() { - -#if __TESTING_STD_TUPLE__ - REMARK("Testing platform tuple\n"); -#else - REMARK("Testing compat/tuple\n"); -#endif - tuple<int> ituple1(3); - tuple<int> ituple2(5); - tuple<double> ftuple2(4.1); - - ASSERT(!(ituple1 == ituple2), NULL); - ASSERT(ituple1 != ituple2, NULL); - ASSERT(!(ituple1 > ituple2), NULL); - ASSERT(ituple1 < ituple2, NULL); - ASSERT(ituple1 <= ituple2, NULL); - ASSERT(!(ituple1 >= ituple2), NULL); - ASSERT(ituple1 < ftuple2, NULL); - - typedef tuple<int,double,float> tuple_type1; - typedef tuple<int,int,int> int_tuple_type; - typedef tuple<int,non_trivial,int> non_trivial_tuple_type; - typedef tuple<double,std::string,char> stringy_tuple_type; - const tuple_type1 tup1(42,3.14159,2.0f); - int_tuple_type 
int_tup(4, 5, 6); - non_trivial_tuple_type nti; - stringy_tuple_type stv; - get<1>(stv) = "hello"; - get<2>(stv) = 'x'; - - ASSERT(get<0>(stv) == 0.0, NULL); - ASSERT(get<1>(stv) == "hello", NULL); - ASSERT(get<2>(stv) == 'x', NULL); - - ASSERT(tuple_size<tuple_type1>::value == 3, NULL); - ASSERT(get<0>(tup1) == 42, NULL); - ASSERT(get<1>(tup1) == 3.14159, NULL); - ASSERT(get<2>(tup1) == 2.0, NULL); - - get<1>(nti).set_float(1.0); - get<1>(nti).set_int(32); - ASSERT(get<1>(nti).get_int() == 32, NULL); - ASSERT(get<1>(nti).get_float() == 1.0, NULL); - - // converting constructor - tuple<double,double,double> tup2(1,2.0,3.0f); - tuple<double,double,double> tup3(9,4.0,7.0f); - ASSERT(tup2 != tup3, NULL); - - ASSERT(tup2 < tup3, NULL); - - // assignment - tup2 = tup3; - ASSERT(tup2 == tup3, NULL); - - tup2 = int_tup; - ASSERT(get<0>(tup2) == 4, NULL); - ASSERT(get<1>(tup2) == 5, NULL); - ASSERT(get<2>(tup2) == 6, NULL); - - // increment component of tuple - get<0>(tup2) += 1; - ASSERT(get<0>(tup2) == 5, NULL); - - std::pair<int,int> two_pair( 4, 8); - tuple<int,int> two_pair_tuple; - two_pair_tuple = two_pair; - ASSERT(get<0>(two_pair_tuple) == 4, NULL); - ASSERT(get<1>(two_pair_tuple) == 8, NULL); - - //relational ops - ASSERT(int_tuple_type(1,1,0) == int_tuple_type(1,1,0),NULL); - ASSERT(int_tuple_type(1,0,1) < int_tuple_type(1,1,1),NULL); - ASSERT(int_tuple_type(1,0,0) > int_tuple_type(0,1,0),NULL); - ASSERT(int_tuple_type(0,0,0) != int_tuple_type(1,0,1),NULL); - ASSERT(int_tuple_type(0,1,0) <= int_tuple_type(0,1,1),NULL); - ASSERT(int_tuple_type(0,0,1) <= int_tuple_type(0,0,1),NULL); - ASSERT(int_tuple_type(1,1,1) >= int_tuple_type(1,0,0),NULL); - ASSERT(int_tuple_type(0,1,1) >= int_tuple_type(0,1,1),NULL); - - typedef tuple<int,float,double,char> mixed_tuple_left; - typedef tuple<float,int,char,double> mixed_tuple_right; - - ASSERT(mixed_tuple_left(1,1.f,1,1) == mixed_tuple_right(1.f,1,1,1),NULL); - ASSERT(mixed_tuple_left(1,0.f,1,1) < mixed_tuple_right(1.f,1,1,1),NULL); - ASSERT(mixed_tuple_left(1,1.f,1,1) > mixed_tuple_right(1.f,1,0,1),NULL); - ASSERT(mixed_tuple_left(1,1.f,1,0) != mixed_tuple_right(1.f,1,1,1),NULL); - ASSERT(mixed_tuple_left(1,0.f,1,1) <= mixed_tuple_right(1.f,1,0,1),NULL); - ASSERT(mixed_tuple_left(1,0.f,0,1) <= mixed_tuple_right(1.f,0,0,1),NULL); - ASSERT(mixed_tuple_left(1,1.f,1,0) >= mixed_tuple_right(1.f,0,1,1),NULL); - ASSERT(mixed_tuple_left(0,1.f,1,0) >= mixed_tuple_right(0.f,1,1,0),NULL); - - ASSERT(!(mixed_tuple_left(2,1.f,1,1) == mixed_tuple_right(1.f,1,1,1)),NULL); - ASSERT(!(mixed_tuple_left(1,2.f,1,1) == mixed_tuple_right(1.f,1,1,1)),NULL); - ASSERT(!(mixed_tuple_left(1,1.f,2,1) == mixed_tuple_right(1.f,1,1,1)),NULL); - ASSERT(!(mixed_tuple_left(1,1.f,1,2) == mixed_tuple_right(1.f,1,1,1)),NULL); - - ASSERT(!(mixed_tuple_left(1,1.f,1,1) < mixed_tuple_right(1.f,1,1,1)),NULL); - ASSERT(!(mixed_tuple_left(1,1.f,1,1) > mixed_tuple_right(1.f,1,1,1)),NULL); - ASSERT(!(mixed_tuple_left(1,1.f,1,1) != mixed_tuple_right(1.f,1,1,1)),NULL); - - ASSERT(mixed_tuple_left(1,1.f,1,1) <= mixed_tuple_right(1.f,1,1,1),NULL); - ASSERT(mixed_tuple_left(1,1.f,1,1) >= mixed_tuple_right(1.f,1,1,1),NULL); - - RunOneComparisonTest<int,float,char,float,char,int>(); - RunOneComparisonTest<double,float,char,float,double,int>(); - RunOneComparisonTest<int,float,char,short,char,short>(); - RunOneComparisonTest<double,float,short,float,char,int>(); - - - // the following should result in a syntax error - // typedef tuple<float,float> mixed_short_tuple; - // 
ASSERT(mixed_tuple_left(1,1.f,1,1) != mixed_short_tuple(1.f,1.f),NULL); - -} - -int TestMain() { - RunTests(); - return Harness::Done; -} diff --git a/src/tbb/src/test/test_write_once_node.cpp b/src/tbb/src/test/test_write_once_node.cpp deleted file mode 100644 index 6f92c1141..000000000 --- a/src/tbb/src/test/test_write_once_node.cpp +++ /dev/null @@ -1,168 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -#include "harness_graph.h" - -#include "tbb/task_scheduler_init.h" - -#define N 300 -#define T 4 -#define M 4 - -template< typename R > -void simple_read_write_tests() { - tbb::flow::graph g; - tbb::flow::write_once_node<R> n(g); - - for ( int t = 0; t < T; ++t ) { - R v0(0); - harness_counting_receiver<R> r[M]; - - ASSERT( n.is_valid() == false, NULL ); - ASSERT( n.try_get( v0 ) == false, NULL ); - - if ( t % 2 ) { - ASSERT( n.try_put( static_cast<R>(N+1) ), NULL ); - ASSERT( n.is_valid() == true, NULL ); - ASSERT( n.try_get( v0 ) == true, NULL ); - ASSERT( v0 == R(N+1), NULL ); - } - - for (int i = 0; i < M; ++i) { - tbb::flow::make_edge( n, r[i] ); - } - - if ( t%2 ) { - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == 1, NULL ); - } - } - - for (int i = 1; i <= N; ++i ) { - R v1(static_cast<R>(i)); - - bool result = n.try_put( v1 ); - if ( !(t%2) && i == 1 ) - ASSERT( result == true, NULL ); - else - ASSERT( result == false, NULL ); - - ASSERT( n.is_valid() == true, NULL ); - - for (int j = 0; j < N; ++j ) { - R v2(0); - ASSERT( n.try_get( v2 ), NULL ); - if ( t%2 ) - ASSERT( R(N+1) == v2, NULL ); - else - ASSERT( R(1) == v2, NULL ); - } - } - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == 1, NULL ); - } - for (int i = 0; i < M; ++i) { - tbb::flow::remove_edge( n, r[i] ); - } - ASSERT( n.try_put( R(0) ) == false, NULL ); - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == 1, NULL ); - } - n.clear(); - ASSERT( n.is_valid() == false, NULL ); - ASSERT( n.try_get( v0 ) == false, NULL ); - } -} - -template< typename R > -class native_body : NoAssign { - tbb::flow::write_once_node<R> &my_node; - -public: - - native_body( tbb::flow::write_once_node<R> &n ) : my_node(n) {} - - void operator()( int i ) const { - R v1(static_cast<R>(i)); - 
ASSERT( my_node.try_put( v1 ) == false, NULL ); - ASSERT( my_node.is_valid() == true, NULL ); - ASSERT( my_node.try_get( v1 ) == true, NULL ); - ASSERT( v1 == R(-1), NULL ); - } -}; - -template< typename R > -void parallel_read_write_tests() { - tbb::flow::graph g; - tbb::flow::write_once_node<R> n(g); - //Create a vector of identical nodes - std::vector< tbb::flow::write_once_node<R> > wo_vec(2, n); - - for (size_t node_idx=0; node_idx<wo_vec.size(); ++node_idx) { - for ( int t = 0; t < T; ++t ) { - harness_counting_receiver<R> r[M]; - - for (int i = 0; i < M; ++i) { - tbb::flow::make_edge( wo_vec[node_idx], r[i] ); - } - R v0; - ASSERT( wo_vec[node_idx].is_valid() == false, NULL ); - ASSERT( wo_vec[node_idx].try_get( v0 ) == false, NULL ); - - ASSERT( wo_vec[node_idx].try_put( R(-1) ), NULL ); - - NativeParallelFor( N, native_body<R>( wo_vec[node_idx] ) ); - - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == 1, NULL ); - } - for (int i = 0; i < M; ++i) { - tbb::flow::remove_edge( wo_vec[node_idx], r[i] ); - } - ASSERT( wo_vec[node_idx].try_put( R(0) ) == false, NULL ); - for (int i = 0; i < M; ++i) { - size_t c = r[i].my_count; - ASSERT( int(c) == 1, NULL ); - } - wo_vec[node_idx].clear(); - ASSERT( wo_vec[node_idx].is_valid() == false, NULL ); - ASSERT( wo_vec[node_idx].try_get( v0 ) == false, NULL ); - } - } -} - -int TestMain() { - simple_read_write_tests<int>(); - simple_read_write_tests<float>(); - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init(p); - parallel_read_write_tests<int>(); - parallel_read_write_tests<float>(); - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - test_extract_on_node<tbb::flow::write_once_node, int>(); -#endif - return Harness::Done; -} - diff --git a/src/tbb/src/test/test_yield.cpp b/src/tbb/src/test/test_yield.cpp deleted file mode 100644 index ddfacbdf8..000000000 --- a/src/tbb/src/test/test_yield.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// Test that __TBB_Yield works. -// On Red Hat EL4 U1, it does not work, because sched_yield is broken. 
- -#include "tbb/tbb_machine.h" -#include "tbb/tick_count.h" -#include "harness.h" - -static volatile long CyclicCounter; -static volatile bool Quit; -double SingleThreadTime; - -struct RoundRobin: NoAssign { - const int number_of_threads; - RoundRobin( long p ) : number_of_threads(p) {} - void operator()( long k ) const { - tbb::tick_count t0 = tbb::tick_count::now(); - for( long i=0; i<10000; ++i ) { - // Wait for previous thread to notify us - for( int j=0; CyclicCounter!=k && !Quit; ++j ) { - __TBB_Yield(); - if( j%100==0 ) { - tbb::tick_count t1 = tbb::tick_count::now(); - if( (t1-t0).seconds()>=1.0*number_of_threads ) { - REPORT("Warning: __TBB_Yield failing to yield with %d threads (or system is heavily loaded)\n",number_of_threads); - Quit = true; - return; - } - } - } - // Notify next thread that it can run - CyclicCounter = (k+1)%number_of_threads; - } - } -}; - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - REMARK("testing with %d threads\n", p ); - CyclicCounter = 0; - Quit = false; - NativeParallelFor( long(p), RoundRobin(p) ); - } - return Harness::Done; -} - diff --git a/src/tbb/third-party-programs.txt b/src/tbb/third-party-programs.txt new file mode 100644 index 000000000..c088429c2 --- /dev/null +++ b/src/tbb/third-party-programs.txt @@ -0,0 +1,198 @@ +oneAPI Threading Building Blocks (oneTBB) Third Party Programs File + +This file is the "third-party-programs.txt" file specified in the associated Intel end user license +agreement for the Intel software you are licensing. + +The third party programs and their corresponding required notices and/or license +terms are listed below. +_______________________________________________________________________________________________________ + +1. Instrumentation and Tracing Technology (ITT) Notify User API: + Copyright (c) 2005-2023 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +_______________________________________________________________________________________________________ + +2. 
Portable Hardware Locality (hwloc): + + Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana University Research and + Technology Corporation. All rights reserved. + Copyright (c) 2004-2005 The University of Tennessee and The University of Tennessee Research + Foundation. All rights reserved. + Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, University of Stuttgart. + All rights reserved. + Copyright (c) 2004-2005 The Regents of the University of California. All rights reserved. + Copyright (c) 2009 CNRS + Copyright (c) 2009-2016 Inria. All rights reserved. + Copyright (c) 2009-2015 Université Bordeaux + Copyright (c) 2009-2015 Cisco Systems, Inc. All rights reserved. + Copyright (c) 2009-2012 Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2010 IBM + Copyright (c) 2010 Jirka Hladky + Copyright (c) 2012 Aleksej Saushev, The NetBSD Foundation + Copyright (c) 2012 Blue Brain Project, EPFL. All rights reserved. + Copyright (c) 2013-2014 University of Wisconsin-La Crosse. All rights reserved. + Copyright (c) 2015 Research Organization for Information Science and Technology (RIST). + All rights reserved. + Copyright (c) 2015-2016 Intel, Inc. All rights reserved. + See COPYING in top-level directory. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +_______________________________________________________________________________________________________ + +3. gperftools: Copyright (c) 2011, Google Inc. + + Tachyon: Copyright (c) 1994-2008 John E. Stone. All rights reserved. + + BSD 3-Clause "New" or "Revised" License + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +_______________________________________________________________________________________________________ + +4. Mateusz Kwiatkowski Workaround for bug 62258 in libstdc++ + + ******************************************************************************** + * Author: Mateusz Kwiatkowski <m.kwiatkowski@avsystem.com> * + * * + * I hereby renounce all copyright to this file and my rights resulting from * + * it, to the broadest extent permitted by law. It may be treated as public * + * domain. * + * * + * However, as this file interfaces with GCC internal ABI, it may be subject to * + * the terms and conditions of the GNU General Public License. Please consult * + * the GCC licensing terms and/or a lawyer for details. * + * * + * Note that libstdc++ licensing terms grant additional permissions described * + * in the GCC Runtime Library Exception, version 3.1, as published by the * + * Free Software Foundation. * + *******************************************************************************/ +_______________________________________________________________________________________________________ + +5. ActiveState Thread pool with same API as (multi) processing. Pool (Python recipe) + + # + # Copyright (c) 2008,2016 david decotigny (this file) + # Copyright (c) 2006-2008, R Oudkerk (multiprocessing.Pool) + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions + # are met: + # + # 1. Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # 2. Redistributions in binary form must reproduce the above copyright + # notice, this list of conditions and the following disclaimer in the + # documentation and/or other materials provided with the distribution. + # 3. Neither the name of author nor the names of any contributors may be + # used to endorse or promote products derived from this software + # without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + # SUCH DAMAGE. + +_______________________________________________________________________________________________________ + +6. doctest + + Copyright (c) 2016-2023 Viktor Kirilov + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +_______________________________________________________________________________________________________ + +*Other names and brands may be claimed as the property of others. 
\ No newline at end of file diff --git a/tests/doRUnit.R b/tests/doRUnit.R new file mode 100644 index 000000000..34d49eb7b --- /dev/null +++ b/tests/doRUnit.R @@ -0,0 +1,41 @@ +stopifnot(require(RUnit, quietly = TRUE)) +stopifnot(require(Rcpp, quietly = TRUE)) +stopifnot(require(RcppParallel, quietly = TRUE)) + +## Set a seed to make the test deterministic +set.seed(42) + +## Set a default backend +backend <- Sys.getenv("RCPP_PARALLEL_BACKEND", unset = NA) +if (is.na(backend)) + Sys.setenv(RCPP_PARALLEL_BACKEND = "tinythread") + +writeLines(paste("Using backend:", Sys.getenv("RCPP_PARALLEL_BACKEND"))) + +## Define tests +suite <- defineTestSuite( + name = "RcppParallel Unit Tests", + dirs = system.file("tests", package = "RcppParallel") +) + +## Based on practice in Rcpp to avoid some test failures +Sys.setenv("R_TESTS" = "") + +## Run tests +tests <- runTestSuite(suite) + +## Print results +printTextProtocol(tests) + +## Return success or failure to R CMD CHECK +if (getErrors(tests)$nFail > 0) { + stop("TEST FAILED!") +} + +if (getErrors(tests)$nErr > 0) { + stop("TEST HAD ERRORS!") +} + +if (getErrors(tests)$nTestFunc < 1) { + stop("NO TEST FUNCTIONS RUN!") +} diff --git a/tests/testthat.R b/tests/testthat.R deleted file mode 100644 index 11a33d13b..000000000 --- a/tests/testthat.R +++ /dev/null @@ -1,17 +0,0 @@ -require(methods) -require(RcppParallel) -require(testthat) - -RCPP <- Sys.getenv( "RCPP" ) -if( RCPP == "Rcpp" ){ - message( "testing against Rcpp" ) - require(Rcpp) -} else if( RCPP == "Rcpp11" ){ - message( "testing against Rcpp11" ) - require(attributes) -} else { - stop( "Rcpp implementation not setup, please set the $RCPP environment variable" ) -} - -test_dir("testthat") - diff --git a/tests/testthat/test-distance.R b/tests/testthat/test-distance.R deleted file mode 100644 index cba8e8a95..000000000 --- a/tests/testthat/test-distance.R +++ /dev/null @@ -1,15 +0,0 @@ -context( "distance" ) - -test_that( "distance works with Rcpp", { - sourceCpp( "cpp/distance.cpp" ) - - n = 1000 - m = matrix(runif(n*10), ncol = 10) - m = m/rowSums(m) - - expect_equal( - rcpp_js_distance(m), - rcpp_parallel_js_distance(m) - ) -}) - diff --git a/tests/testthat/test-inner-product.R b/tests/testthat/test-inner-product.R deleted file mode 100644 index bbdb59da0..000000000 --- a/tests/testthat/test-inner-product.R +++ /dev/null @@ -1,11 +0,0 @@ -context( "inner product" ) - -test_that( "parallelInnerProduct works with Rcpp", { - sourceCpp( "cpp/innerproduct.cpp" ) - - x <- runif(1000000) - y <- runif(1000000) - - expect_equal(innerProduct(x, y), parallelInnerProduct(x, y)) -}) - diff --git a/tests/testthat/test-sum.R b/tests/testthat/test-sum.R deleted file mode 100644 index cb0a8dabf..000000000 --- a/tests/testthat/test-sum.R +++ /dev/null @@ -1,13 +0,0 @@ -context( "sum" ) - -test_that( "sum works with Rcpp", { - sourceCpp( "cpp/sum.cpp" ) - - v <- as.numeric(c(1:10000000)) - - expect_equal( - vectorSum(v), - parallelVectorSum(v) - ) -}) - diff --git a/tests/testthat/test-transform.R b/tests/testthat/test-transform.R deleted file mode 100644 index 6678b0dde..000000000 --- a/tests/testthat/test-transform.R +++ /dev/null @@ -1,10 +0,0 @@ -context( "transform" ) - -test_that( "transform works with Rcpp", { - sourceCpp( "cpp/transform.cpp" ) - - m <- matrix(as.numeric(c(1:1000000)), nrow = 1000, ncol = 1000) - - expect_equal(matrixSqrt(m), parallelMatrixSqrt(m)) -}) - diff --git a/tools/config.R b/tools/config.R new file mode 100644 index 000000000..abf31e9d1 --- /dev/null +++ b/tools/config.R @@ 
-0,0 +1,615 @@ +# configure-database.R ------------------------------------------------------- + +#' Retrieve the Global Configuration Database +#' +#' Retrieve the global configuration database. +#' `db` is a helper alias for the database +#' returned by `configure_database()`. +#' +#' @export +configure_database <- local({ + database <- new.env(parent = emptyenv()) + class(database) <- "configure_database" + function() database +}) + +#' @export +print.configure_database <- function(x, ...) { + str.configure_database(x, ...) +} + +#' @export +str.configure_database <- function(object, ...) { + writeLines("<configure database>") + objects <- mget(ls(envir = object, all.names = TRUE), object) + output <- utils::capture.output(utils::str(objects, ...)) + writeLines(output[-1]) + invisible(output) +} + +#' Define Variables for the Configuration Database +#' +#' Define variables to be used as part of the default configuration database. +#' These will be used by [configure_file()] when no configuration database +#' is explicitly supplied. [define()] is provided as a shorter alias for the +#' same function. +#' +#' @param ... A set of named arguments, mapping configuration names to values. +#' +#' @export +configure_define <- function(...) { + envir <- configure_database() + list2env(list(...), envir = envir) +} + +#' @rdname configure_define +#' @export +define <- configure_define + +#' @rdname configure_database +#' @export +db <- configure_database() + + +# utils.R -------------------------------------------------------------------- + +#' Configure a File +#' +#' Configure a file, replacing (by default) any instances of `@`-delimited +#' variables, e.g. `@VAR@`, with the value of the variable called `VAR` in the +#' associated `config` environment. +#' +#' @param source The file to be configured. +#' @param target The file to be generated. +#' @param config The configuration database. +#' @param lhs The left-hand side marker; defaults to `@`. +#' @param rhs The right-hand side marker; defaults to `@`. +#' @param verbose Boolean; report files as they are configured? +#' +#' @family configure +#' +#' @export +configure_file <- function( + source, + target = sub("[.]in$", "", source), + config = configure_database(), + lhs = "@", + rhs = "@", + verbose = configure_verbose()) +{ + # read source file + contents <- readLines(source, warn = FALSE) + + # replace defined variables + enumerate(config, function(key, val) { + needle <- paste(lhs, key, rhs, sep = "") + replacement <- val + contents <<- gsub(needle, replacement, contents, fixed = TRUE) + }) + + ensure_directory(dirname(target)) + + # write configured file to target location + # prefer unix newlines for Makevars + mode <- if (basename(target) %in% "Makevars") "wb" else "w" + conn <- file(target, open = mode) + on.exit(close(conn), add = TRUE) + writeLines(contents, con = conn) + + # copy over source permissions + info <- file.info(source) + Sys.chmod(target, mode = info$mode) + + if (isTRUE(verbose)) { + fmt <- "*** configured file: '%s' => '%s'" + message(sprintf(fmt, source, target)) + } +} + +#' Configure Files in a Directory +#' +#' This companion function to [configure_file()] can be used to +#' configure all `.in` files within a directory. +#' +#' @param path The path to a directory in which files should be configured. +#' @param config The configuration database to be used. +#' @param verbose Boolean; report files as they are configured? 
+#' +#' @family configure +#' +#' @export +configure_directory <- function( + path = ".", + config = configure_database(), + verbose = configure_verbose()) +{ + files <- list.files( + path = path, + pattern = "[.]in$", + full.names = TRUE + ) + + lapply(files, configure_file, config = config, verbose = verbose) +} + +configure_auto <- function(type) { + + if (!isTRUE(getOption("configure.auto", default = TRUE))) + return(invisible(FALSE)) + + if (isTRUE(getOption("configure.common", default = TRUE))) + configure_common(type = type) + + if (isTRUE(getOption("configure.platform", default = TRUE))) + configure_platform(type = type) + +} + +configure_common <- function(type) { + + sources <- list.files( + path = c("R", "src"), + pattern = "[.]in$", + full.names = TRUE + ) + + sources <- sub("[.]/", "", sources) + + if (type == "configure") { + lapply(sources, configure_file) + } else if (type == "cleanup") { + targets <- sub("[.]in$", "", sources) + lapply(targets, remove_file) + } + + invisible(TRUE) +} + +configure_platform <- function(type) { + + sysname <- tolower(Sys.info()[["sysname"]]) + + subdirs <- sysname + if (sysname != "windows") + subdirs <- c("unix", subdirs) + + dirs <- c("R", "src") + for (dir in dirs) { + + # list files (take care to remove directories) + sources <- Filter( + function(file) identical(file.info(file)$isdir, FALSE), + list.files(file.path(dir, subdirs), full.names = TRUE) + ) + + # configure all discovered sources + for (source in sources) { + target <- file.path(dir, basename(source)) + switch(type, + configure = configure_file(source, target), + cleanup = remove_file(target)) + } + } +} + +#' Execute R CMD config +#' +#' Read information about how \R is configured as through `R CMD config`. +#' +#' @param ... The names of potential configuration values. +#' @param simplify Boolean; simplify in the case where a single value was +#' requested? +#' +#' @export +r_cmd_config <- function(..., simplify = TRUE) { + R <- file.path(R.home("bin"), "R") + + # suppress cygwin path warnings for windows + if (Sys.info()[["sysname"]] == "Windows") { + CYGWIN <- Sys.getenv("CYGWIN") + Sys.setenv(CYGWIN = "nodosfilewarning") + on.exit(Sys.setenv(CYGWIN = CYGWIN), add = TRUE) + } + + # loop through requested values and call R CMD config + values <- unlist(list(...), recursive = TRUE) + config <- lapply(values, function(value) { + + # execute it + stdout <- tempfile("r-cmd-config-", fileext = ".txt") + on.exit(unlink(stdout), add = TRUE) + status <- system2(R, c("CMD", "config", value), stdout = stdout) + + # report failures as NULL (distinct from empty string) + if (status) + return(NULL) + + readLines(stdout) + + }) + + names(config) <- values + + if (simplify && length(config) == 1) + return(config[[1]]) + + config +} + +#' Read R Configuration for a Package +#' +#' Read the \R configuration, as through `R CMD config`. +#' +#' @param ... The \R configuration values to read (as a character vector). +#' If empty, all values are read as through `R CMD config --all`). +#' @param package The path to the \R package's sources. +#' @param envir The environment in which the configuration information should +#' be assigned. By default, the [configure_database()] is populated with the +#' requested values. +#' @param verbose Boolean; notify the user as \R configuration is read? 
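+#'
+#' @examples
+#' \dontrun{
+#' ## illustrative sketch: populate the configuration database with the
+#' ## C++ compiler and flags as reported by 'R CMD config'
+#' read_r_config("CXX11", "CXX11FLAGS")
+#' db$CXX11FLAGS
+#' }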
+#' +#' @export +read_r_config <- function( + ..., + package = Sys.getenv("R_PACKAGE_DIR", unset = "."), + envir = configure_database(), + verbose = configure_verbose()) +{ + # move to requested directory + owd <- setwd(package) + on.exit(setwd(owd), add = TRUE) + R <- file.path(R.home("bin"), "R") + + # suppress cygwin path warnings for windows + if (Sys.info()[["sysname"]] == "Windows") { + CYGWIN <- Sys.getenv("CYGWIN") + Sys.setenv(CYGWIN = "nodosfilewarning") + on.exit(Sys.setenv(CYGWIN = CYGWIN), add = TRUE) + } + + values <- unlist(list(...), recursive = TRUE) + if (length(values) == 0) { + + # R CMD config --all only available since R 3.4.0 + if (getRversion() < "3.4.0") { + fmt <- "'R CMD config --all' not available in R version '%s'" + stop(sprintf(fmt, getRversion())) + } + + # execute action + stdout <- tempfile("r-cmd-config-", fileext = ".txt") + on.exit(unlink(stdout), add = TRUE) + status <- system2(R, c("CMD", "config", "--all"), stdout = stdout) + if (status) + stop("failed to execute 'R CMD config --all'") + + # read and parse output + output <- readLines(stdout, warn = FALSE) + config <- parse_key_value(output) + + } else { + + # loop through requested values and call R CMD config + config <- lapply(values, function(value) { + + # execute it + stdout <- tempfile("r-cmd-config-", fileext = ".txt") + on.exit(unlink(stdout), add = TRUE) + status <- system2(R, c("CMD", "config", value), stdout = stdout) + + # report failures as NULL (distinct from empty string) + if (status) + return(NULL) + + readLines(stdout) + + }) + names(config) <- values + } + + if (is.null(envir)) + return(config) + + list2env(config, envir = envir) +} + +#' Concatenate the Contents of a Set of Files +#' +#' Given a set of files, concatenate their contents into +#' a single file. +#' +#' @param sources An \R list of files +#' @param target The file to use for generation. +#' @param headers Headers to be used for each file copied. +#' @param preamble Text to be included at the beginning of the document. +#' @param postamble Text to be included at the end of the document. +#' @param verbose Boolean; inform the user when the requested file is created? +#' +#' @export +concatenate_files <- function( + sources, + target, + headers = section_header(basename(sources)), + preamble = NULL, + postamble = NULL, + verbose = configure_verbose()) +{ + pieces <- vapply(seq_along(sources), function(i) { + source <- sources[[i]] + header <- headers[[i]] + contents <- trim_whitespace(read_file(source)) + paste(header, contents, "", sep = "\n\n") + }, character(1)) + + all <- c(preamble, pieces, postamble) + + ensure_directory(dirname(target)) + writeLines(all, con = target) + + if (verbose) { + fmt <- "*** created file '%s'" + message(sprintf(fmt, target)) + } + + TRUE +} + +#' Add Configure Infrastructure to an R Package +#' +#' Add the infrastructure needed to configure an R package. +#' +#' @param package The path to the top-level directory of an \R package. 
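+#'
+#' @examples
+#' \dontrun{
+#' ## illustrative sketch: scaffold 'configure' / 'cleanup' scripts and
+#' ## 'tools/config/' placeholders for the package in the current directory
+#' use_configure(".")
+#' }
+#'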
+#' @export +use_configure <- function(package = ".") { + + # preserve working directory + owd <- getwd() + on.exit(setwd(owd), add = TRUE) + + # find resources + package <- normalizePath(package, winslash = "/") + resources <- system.file("resources", package = "configure") + + # copy into temporary directory + dir <- tempfile("configure-") + on.exit(unlink(dir, recursive = TRUE), add = TRUE) + + dir.create(dir) + file.copy(resources, dir, recursive = TRUE) + + # rename resources directory + setwd(dir) + file.rename(basename(resources), basename(package)) + + # now, copy these files back into the target directory + file.copy(basename(package), dirname(package), recursive = TRUE) + + # ensure DESCRIPTION contains 'Biarch: TRUE' for Windows + setwd(package) + DESCRIPTION <- read_file("DESCRIPTION") + if (!grepl("(?:^|\n)Biarch:", DESCRIPTION)) { + DESCRIPTION <- paste(DESCRIPTION, "Biarch: TRUE", sep = "\n") + DESCRIPTION <- gsub("\n{2,}", "\n", DESCRIPTION) + cat(DESCRIPTION, file = "DESCRIPTION", sep = "\n") + } + + # write placeholders for 'configure.R', 'cleanup.R' if none exist + ensure_directory("tools/config") + configure <- "tools/config/configure.R" + if (!file.exists("tools/config/configure.R")) { + text <- c( + "# Prepare your package for installation here.", + "# Use 'define()' to define configuration variables.", + "# Use 'configure_file()' to substitute configuration values.", + "", + "" + ) + writeLines(text, con = configure) + } + + cleanup <- "tools/config/cleanup.R" + if (!file.exists("tools/config/cleanup.R")) { + text <- c( + "# Clean up files generated during configuration here.", + "# Use 'remove_file()' to remove files generated during configuration.", + "", + "" + ) + writeLines(text, con = cleanup) + } + + # notify the user what we did + message("* Copied 'configure{.win}' and 'cleanup{.win}'.") + message("* Updated 'tools/config.R'.") + + # open 'configure.R', 'cleanup.R' for editing if in RStudio + rstudio <- + !is.na(Sys.getenv("RSTUDIO", unset = NA)) && + requireNamespace("rstudioapi", quietly = TRUE) + + if (rstudio) { + rstudioapi::navigateToFile("tools/config/configure.R", 5, 1) + rstudioapi::navigateToFile("tools/config/cleanup.R", 4, 1) + } else { + message("* Use 'tools/config/configure.R' for package configuration.") + message("* Use 'tools/config/cleanup.R' for package cleanup.") + } +} + +ensure_directory <- function(dir) { + info <- file.info(dir) + + # no file exists at this location; try to make it + if (is.na(info$isdir)) { + dir.create(dir, recursive = TRUE, showWarnings = FALSE) + if (!file.exists(dir)) + stop("failed to create directory '", dir, "'") + return(TRUE) + } + + # a directory already exists + if (isTRUE(info$isdir)) + return(TRUE) + + # a file exists, but it's not a directory + stop("file already exists at path '", dir, "'") +} + +enumerate <- function(x, f, ...) { + nms <- if (is.environment(x)) ls(envir = x) else names(x) + lapply(nms, function(nm) { + f(nm, x[[nm]], ...) 
+ }) +} + +read_file <- function(path) { + paste(readLines(path, warn = FALSE), collapse = "\n") +} + +remove_file <- function( + path, + verbose = configure_verbose()) +{ + info <- file.info(path) + if (is.na(info$isdir)) + return(TRUE) + + name <- if (info$isdir) "directory" else "file" + + unlink(path, recursive = isTRUE(info$isdir)) + if (file.exists(path)) { + fmt <- "failed to remove %s '%s' (insufficient permissions?)" + stop(sprintf(fmt, name, path)) + } + + if (verbose) { + fmt <- "*** removed %s '%s'" + message(sprintf(fmt, name, path)) + } + + TRUE +} + +source_file <- function( + path, + envir = parent.frame()) +{ + contents <- read_file(path) + invisible(eval(parse(text = contents), envir = envir)) +} + +trim_whitespace <- function(x) { + gsub("^[[:space:]]*|[[:space:]]*$", "", x) +} + +configure_verbose <- function() { + getOption("configure.verbose", !interactive()) +} + +named <- function(object, nm) { + names(object) <- nm + object +} + +parse_key_value <- function( + text, + separator = "=", + trim = TRUE) +{ + # find the separator + index <- regexpr(separator, text, fixed = TRUE) + + # split into parts + keys <- substring(text, 1, index - 1) + vals <- substring(text, index + 1) + + # trim if requested + if (trim) { + keys <- trim_whitespace(keys) + vals <- trim_whitespace(vals) + } + + # put together into R list + named(as.list(vals), keys) +} + +move_directory <- function(source, target) { + + # ensure we're trying to move a directory + info <- file.info(source) + if (is.na(info$isdir)) { + fmt <- "no directory exists at path '%s'" + stop(sprintf(fmt, source), call. = FALSE) + } + + if (!info$isdir) { + fmt <- "'%s' exists but is not a directory" + stop(sprintf(fmt, source), call. = FALSE) + } + + # good to go -- let's move it + unlink(target, recursive = TRUE) + file.rename(source, target) + unlink(source, recursive = TRUE) + +} + +section_header <- function( + label, + prefix = "#", + suffix = "-", + length = 78L) +{ + + # figure out length of full header + n <- length - nchar(label) - nchar(prefix) - 2L + n[n < 0] <- 0 + + # generate '-' suffixes + tail <- vapply(n, function(i) { + paste(rep(suffix, i), collapse = "") + }, character(1)) + + # join it all together + paste(prefix, label, tail) + +} + + +# run.R ---------------------------------------------------------------------- + +if (!interactive()) { + + # extract path to install script + args <- commandArgs(TRUE) + type <- args[[1]] + + # preserve working directory + owd <- getwd() + on.exit(setwd(owd), add = TRUE) + + # switch working directory to the calling scripts's directory as set + # by the shell, in case the R working directory was set to something else + basedir <- Sys.getenv("PWD", unset = NA) + if (!is.na(basedir)) + setwd(basedir) + + # report start of execution + package <- Sys.getenv("R_PACKAGE_NAME", unset = "<unknown>") + fmt <- "** preparing to %s package '%s' ..." + message(sprintf(fmt, type, package)) + + # execute the requested script + path <- sprintf("tools/config/%s.R", type) + if (file.exists(path)) source_file(path) + + # perform automatic configuration + configure_auto(type = type) + + # report end of execution + fmt <- "** finished %s for package '%s'" + message(sprintf(fmt, type, package)) + +} + + diff --git a/tools/config/cleanup.R b/tools/config/cleanup.R new file mode 100644 index 000000000..7746f1bc6 --- /dev/null +++ b/tools/config/cleanup.R @@ -0,0 +1,13 @@ + +# Clean up files generated during configuration here. 
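+# Files created by 'configure_file()' belong here (for example, a
+# 'src/Makevars' rendered from a 'src/Makevars.in' template; the path
+# is illustrative).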
+# Use 'remove_file()' to remove files generated during configuration. + +# unlink("src/tbb/build", recursive = TRUE) +# unlink("src/tbb/build-tbb", recursive = TRUE) +unlink("inst/lib", recursive = TRUE) +unlink("inst/libs", recursive = TRUE) +unlink("inst/include/index.html", recursive = TRUE) +unlink("inst/include/oneapi", recursive = TRUE) +unlink("inst/include/serial", recursive = TRUE) +unlink("inst/include/tbb", recursive = TRUE) + diff --git a/tools/config/configure.R b/tools/config/configure.R new file mode 100644 index 000000000..a001e63b8 --- /dev/null +++ b/tools/config/configure.R @@ -0,0 +1,330 @@ + +# make sure we call correct version of R +rExe <- if (.Platform$OS.type == "windows") "R.exe" else "R" +define(R = file.path(R.home("bin"), rExe)) + +# check whether user has Makevars file that might cause trouble +makevars <- Sys.getenv("R_MAKEVARS_USER", unset = "~/.R/Makevars") +if (file.exists(makevars)) { + contents <- readLines(makevars, warn = FALSE) + pattern <- "^(PKG_CPPFLAGS|PKG_CXXFLAGS)\\s*=" + bad <- grep(pattern, contents, perl = TRUE, value = TRUE) + if (length(bad)) { + + text <- c( + "", + sprintf("NOTE: '%s' contains variable declarations incompatible with RcppParallel:", makevars), + "", + paste0("\t", bad), + "", + "Makevars variables prefixed with 'PKG_' should be considered reserved for use by R packages.", + "" + ) + + writeLines(text, con = stdout()) + + } +} + +# Figure out the appropriate CXX prefix for the current +# version of R + configuration. +cxx <- "/usr/bin/c++" +candidates <- c("CXX11", "CXX1X", "CXX") +for (candidate in candidates) { + value <- r_cmd_config(candidate) + if (!is.null(value)) { + if (any(grepl("icpc", value))) { + define(COMPILER = "icc") + } + cxx <- candidate + break + } +} + +# work around issue with '-Werror=format-security' being specified without +# a prior '-Wformat', which makes gcc angry +cxxflags <- read_r_config(sprintf("%sFLAGS", cxx), envir = NULL)[[1]] +broken <- + grepl(" -Werror=format-security ", cxxflags) && + !grepl(" -Wformat ", cxxflags) + +if (broken) + cxxflags <- gsub("-Werror=format-security", "-Wformat -Werror=format-security", cxxflags) + +# add C++ standard if not set +if (!grepl("-std=", cxxflags, fixed = TRUE)) { + stdflag <- if (getRversion() < "4.0") { + "-std=c++0x" + } else { + "$(CXX11STD)" + } + cxxflags <- paste(stdflag, cxxflags) +} + +# avoid including /usr/local/include, as this can cause +# RcppParallel to find and use a version of libtbb installed +# there as opposed to the bundled version +cppflags <- read_r_config("CPPFLAGS", envir = NULL)[[1]] +cppflags <- sub("(?: )?-I/usr/local/include", "", cppflags) +cppflags <- sub("(?: )?-I/opt/homebrew/include", "", cppflags) +cppflags <- sub("(?: )?-I/opt/local/libexec/onetbb/include", "", cppflags) + +# define the set of flags appropriate to the current +# configuration of R +switch( + cxx, + + CXX11 = define( + CC = "$(CC)", + CPPFLAGS = cppflags, + CXX11 = "$(CXX11)", + CXX11FLAGS = cxxflags, + CXX11STD = "$(CXX11STD)", + CXX11PICFLAGS = "$(CXX11PICFLAGS)" + ), + + CXX1X = define( + CC = "$(CC)", + CPPFLAGS = cppflags, + CXX11 = "$(CXX1X)", + CXX11FLAGS = cxxflags, + CXX11STD = "$(CXX1XSTD)", + CXX11PICFLAGS = "$(CXX1XPICFLAGS)" + ), + + CXX = define( + CC = "$(CC)", + CPPFLAGS = cppflags, + CXX11 = "$(CXX)", + CXX11FLAGS = cxxflags, + CXX11STD = "-std=c++0x", + CXX11PICFLAGS = "-fPIC" + ), + + stop("Failed to infer C / C++ compilation flags") +) + +# on Windows, check for Rtools; if it exists, and we have tbb, use it +if (.Platform$OS.type 
== "windows") { + + gccPath <- normalizePath(Sys.which("gcc"), winslash = "/") + + tbbLib <- Sys.getenv("TBB_LIB", unset = NA) + if (is.na(tbbLib)) + tbbLib <- normalizePath(file.path(gccPath, "../../lib"), winslash = "/") + + tbbInc <- Sys.getenv("TBB_INC", unset = NA) + if (is.na(tbbInc)) + tbbInc <- normalizePath(file.path(gccPath, "../../include"), winslash = "/") + + tbbFiles <- list.files(tbbLib, pattern = "^libtbb") + if (length(tbbFiles)) { + + tbbPattern <- "^lib(tbb\\d*(?:_static)?)\\.a$" + tbbName <- grep(tbbPattern, tbbFiles, perl = TRUE, value = TRUE) + tbbName <- gsub(tbbPattern, "\\1", tbbName, perl = TRUE) + + tbbMallocPattern <- "^lib(tbbmalloc\\d*(?:_static)?)\\.a$" + tbbMallocName <- grep(tbbMallocPattern, tbbFiles, perl = TRUE, value = TRUE) + tbbMallocName <- gsub(tbbMallocPattern, "\\1", tbbMallocName, perl = TRUE) + + Sys.setenv( + TBB_LIB = tbbLib, + TBB_INC = tbbInc, + TBB_NAME = tbbName, + TBB_MALLOC_NAME = tbbMallocName + ) + + } + +} + +# try and figure out path to TBB +tbbRoot <- Sys.getenv("TBB_ROOT", unset = NA) +tbbLib <- Sys.getenv("TBB_LIB", unset = NA) +tbbInc <- Sys.getenv("TBB_INC", unset = NA) + +tbbName <- Sys.getenv("TBB_NAME", unset = "tbb") +tbbMallocName <- Sys.getenv("TBB_MALLOC_NAME", unset = "tbbmalloc") + +# check TBB_ROOT first if defined +if (!is.na(tbbRoot)) { + + if (is.na(tbbLib)) { + tbbLib <- file.path(tbbRoot, "lib") + } + + if (is.na(tbbInc)) { + tbbInc <- file.path(tbbRoot, "include") + } + +} + +# if TBB_LIB is defined, guess TBB_INC +if (!is.na(tbbLib) && is.na(tbbInc)) { + tbbIncCandidate <- file.path(tbbLib, "../include") + if (file.exists(tbbIncCandidate)) { + tbbInc <- normalizePath(tbbIncCandidate) + } +} + +# if TBB_LIB and TBB_INC are still not defined, try auto-detecting +tryAutoDetect <- + .Platform$OS.type == "unix" && + Sys.getenv("TBB_AUTODETECT", unset = "FALSE") == "TRUE" && + is.na(tbbLib) && + is.na(tbbInc) + +if (tryAutoDetect) { + + sysInfo <- as.list(Sys.info()) + + homebrewPrefix <- if (sysInfo$sysname == "Darwin") { + "/opt/homebrew" + } else { + "/usr/local" + } + + tbbLibSearch <- if (sysInfo$sysname == "Darwin") { + file.path(homebrewPrefix, "opt/tbb/lib/libtbb.dylib") + } else { + Sys.glob(c( + "/usr/*/libtbb.so", + "/usr/*/*/libtbb.so", + "/usr/*/*/*/libtbb.so" + )) + } + + tbbIncSearch <- if (sysInfo$sysname == "Darwin") { + file.path(homebrewPrefix, "opt/tbb/include/tbb") + } else { + Sys.glob(c( + "/usr/include/tbb.h", + "/usr/include/*/tbb.h" + )) + } + + if (length(tbbLibSearch) && + length(tbbIncSearch) && + file.exists(tbbLibSearch[[1L]]) && + file.exists(tbbIncSearch[[1L]])) + { + tbbLib <- dirname(tbbLibSearch[[1L]]) + tbbInc <- dirname(tbbIncSearch[[1L]]) + } + +} + +# now, define TBB_LIB and TBB_INC as appropriate +define( + TBB_LIB = if (!is.na(tbbLib)) tbbLib else "", + TBB_INC = if (!is.na(tbbInc)) tbbInc else "", + TBB_NAME = tbbName, + TBB_MALLOC_NAME = tbbMallocName +) + +# set PKG_LIBS +pkgLibs <- if (!is.na(tbbLib)) { + + c( + "-Wl,-L\"$(TBB_LIB)\"", + sprintf("-Wl,-rpath,%s", shQuote(tbbLib)), + "-l$(TBB_NAME)", + "-l$(TBB_MALLOC_NAME)" + ) + +} else if (.Platform$OS.type == "windows") { + + NULL + +} else if (R.version$os == "emscripten") { + + c( + "-Wl,-Ltbb/build/lib_release", + "-l$(TBB_NAME)" + ) + +} else { + + c( + "-Wl,-Ltbb/build/lib_release", + "-l$(TBB_NAME)", + "-l$(TBB_MALLOC_NAME)" + ) + +} + + +# on Windows, we may need to link to ssp; otherwise, +# we see errors like +# +# C:\rtools43\x86_64-w64-mingw32.static.posix\bin/ld.exe: 
+# on Windows, we may need to link to ssp; otherwise,
+# we see errors like
+#
+#   C:\rtools43\x86_64-w64-mingw32.static.posix\bin/ld.exe: C:/rtools43/x86_64-w64-mingw32.static.posix/lib/libtbb12.a(allocator.cpp.obj):allocator.cpp:(.text+0x18b): undefined reference to `__stack_chk_fail'
+#
+if (.Platform$OS.type == "windows") {
+  pkgLibs <- c(pkgLibs, "-lssp")
+}
+
+define(PKG_LIBS = paste(pkgLibs, collapse = " "))
+
+# if we're going to build TBB from sources, check for cmake
+define(CMAKE = "")
+if (is.na(tbbLib)) {
+
+  cmake <- local({
+
+    # check for the CMAKE environment variable
+    cmake <- Sys.getenv("CMAKE", unset = NA)
+    if (!is.na(cmake))
+      return(cmake)
+
+    # check the PATH
+    cmake <- Sys.which("cmake")
+    if (nzchar(cmake))
+      return(cmake)
+
+    # check for the macOS CMake application
+    cmake <- "/Applications/CMake.app/Contents/bin/cmake"
+    if (file.exists(cmake))
+      return(cmake)
+
+    stop("cmake was not found; please install CMake, or set the CMAKE environment variable")
+
+  })
+
+  # make sure we have an appropriate version of cmake installed;
+  # query the cmake we actually found, not whatever is on the PATH
+  output <- system(paste(shQuote(cmake), "--version"), intern = TRUE)[[1L]]
+  cmakeVersion <- numeric_version(sub("cmake version ", "", output))
+  if (cmakeVersion < "3.5") {
+    stop("error: RcppParallel requires cmake (>= 3.5); you have ", cmakeVersion)
+  }
+
+  define(CMAKE = cmake)
+
+}
+
+# now, set up PKG_CPPFLAGS
+if (!is.na(tbbLib)) {
+  define(PKG_CPPFLAGS = "-I../inst/include -I\"$(TBB_INC)\"")
+} else {
+  define(PKG_CPPFLAGS = "-I../inst/include")
+}
+
+# PKG_CXXFLAGS
+if (.Platform$OS.type == "windows" && is.na(tbbLib)) {
+  define(TBB_ENABLED = FALSE)
+  define(PKG_CXXFLAGS = "-DRCPP_PARALLEL_USE_TBB=0")
+} else {
+  define(TBB_ENABLED = TRUE)
+  define(PKG_CXXFLAGS = "-DRCPP_PARALLEL_USE_TBB=1")
+}
+
+# macOS and Linux need an rpath so the package library can find
+# the bundled TBB at load time
+if (Sys.info()[["sysname"]] == "Darwin") {
+  define(PKG_LIBS_EXTRA = "-Wl,-rpath,@loader_path/../lib")
+} else if (Sys.info()[["sysname"]] == "Linux") {
+  define(PKG_LIBS_EXTRA = "-Wl,-rpath,$(ORIGIN)/../lib")
+} else {
+  define(PKG_LIBS_EXTRA = "")
+}
diff --git a/tools/tbb/disable-pragmas.R b/tools/tbb/disable-pragmas.R
new file mode 100644
index 000000000..7e255f4ed
--- /dev/null
+++ b/tools/tbb/disable-pragmas.R
@@ -0,0 +1,23 @@
+# Disable TBB pragmas that silence diagnostic warnings.
+# This is necessary for CRAN submissions of RcppParallel.
+
+files <- list.files(
+  path = "src/tbb",
+  pattern = "[.](?:h|cpp)$",
+  all.files = TRUE,
+  full.names = TRUE,
+  recursive = TRUE
+)
+
+for (file in files) {
+
+  before <- readLines(file)
+
+  after <- before
+  after <- gsub("^(\\s*)#pragma warning", "\\1// #pragma warning", after, perl = TRUE)
+  after <- gsub("^(\\s*)#pragma GCC", "\\1// #pragma GCC", after, perl = TRUE)
+  after <- gsub("^(\\s*)#pragma clang", "\\1// #pragma clang", after, perl = TRUE)
+
+  if (!identical(before, after))
+    writeLines(after, con = file)
+}
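+
+# For example (illustrative): a line in the TBB sources such as
+#
+#   #pragma warning( disable: 4244 )
+#
+# is rewritten by the substitutions above as
+#
+#   // #pragma warning( disable: 4244 )
+#
+# so the bundled sources no longer suppress compiler diagnostics.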
diff --git a/tools/tbb/fix-memset.R b/tools/tbb/fix-memset.R
new file mode 100644
index 000000000..d9ba62b1a
--- /dev/null
+++ b/tools/tbb/fix-memset.R
@@ -0,0 +1,22 @@
+
+# Avoid usages of memset() that might cause compiler warnings.
+# This is necessary for CRAN submissions of RcppParallel.
+
+files <- list.files(
+  path = "src/tbb",
+  pattern = "[.](?:h|cpp)$",
+  all.files = TRUE,
+  full.names = TRUE,
+  recursive = TRUE
+)
+
+# rewrite 'memset(ptr, ...)' as 'memset(static_cast<void*>(ptr), ...)'
+pattern <- "(memset\\s*\\(\\s*)(\\w+)(\\s*[,)])"
+
+for (file in files) {
+
+  before <- readLines(file)
+  after <- gsub(pattern, "\\1static_cast<void*>(\\2)\\3", before, perl = TRUE)
+
+  if (!identical(before, after))
+    writeLines(after, con = file)
+}
diff --git a/tools/tbb/update-tbb.R b/tools/tbb/update-tbb.R
new file mode 100644
index 000000000..5fd48e5ed
--- /dev/null
+++ b/tools/tbb/update-tbb.R
@@ -0,0 +1,24 @@
+
+# Update the URL below for new TBB releases, then re-run this script.
+url <- "https://github.com/uxlfoundation/oneTBB/archive/refs/tags/v2022.0.0.tar.gz"
+
+owd <- setwd("src")
+unlink("tbb", recursive = TRUE)
+download.file(url, destfile = basename(url), mode = "wb")
+
+# untar the release, and infer the name of the extracted folder
+before <- list.files()
+untar(basename(url))
+after <- list.files()
+
+folder <- setdiff(after, before)
+print(folder)
+file.rename(folder, "tbb")
+
+# drop files and folders not needed for the R package build
+setwd("tbb")
+remove <- c(".gitattributes", ".github", "doc", "examples", "python", "test")
+unlink(remove, recursive = TRUE)
+bazel <- list.files(pattern = "[Bb]azel", all.files = TRUE)
+unlink(bazel)
+setwd("..")
+
+# remove the downloaded tarball, and restore the working directory
+unlink(basename(url))
+setwd(owd)
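+
+# Typical follow-up (illustrative): re-apply the CRAN-compatibility
+# patches above to the freshly vendored sources, from the package root:
+#
+#   source("tools/tbb/disable-pragmas.R")
+#   source("tools/tbb/fix-memset.R")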